summaryrefslogtreecommitdiff
path: root/deps/v8
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2022-04-12 11:10:15 +0200
committerMichaël Zasso <targos@protonmail.com>2022-04-12 22:08:39 +0200
commitfd4f80ce54d7f7b7503e0999f6a9d293d493846d (patch)
tree00fba34b8aabeb481c7128fccee635719ee44a3b /deps/v8
parent73d53fe9f56d7ce5de4b9c9ad5257dc601bbce14 (diff)
downloadnode-new-fd4f80ce54d7f7b7503e0999f6a9d293d493846d.tar.gz
deps: update V8 to 10.1.124.6
PR-URL: https://github.com/nodejs/node/pull/42657 Reviewed-By: Darshan Sen <raisinten@gmail.com> Reviewed-By: Richard Lau <rlau@redhat.com> Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Michael Dawson <midawson@redhat.com>
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/.bazelrc13
-rw-r--r--deps/v8/.gitignore2
-rw-r--r--deps/v8/.style.yapf2
-rw-r--r--deps/v8/.vpython325
-rw-r--r--deps/v8/AUTHORS8
-rw-r--r--deps/v8/BUILD.bazel401
-rw-r--r--deps/v8/BUILD.gn383
-rw-r--r--deps/v8/COMMON_OWNERS8
-rw-r--r--deps/v8/DEPS44
-rw-r--r--deps/v8/PRESUBMIT.py45
-rw-r--r--deps/v8/WATCHLISTS23
-rw-r--r--deps/v8/WORKSPACE56
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h4
-rw-r--r--deps/v8/bazel/BUILD.icu54
-rw-r--r--deps/v8/bazel/BUILD.trace_event_common10
-rw-r--r--deps/v8/bazel/BUILD.zlib23
-rw-r--r--deps/v8/bazel/OWNERS1
-rw-r--r--deps/v8/bazel/config/BUILD.bazel199
-rw-r--r--deps/v8/bazel/config/v8-target-cpu.bzl2
-rw-r--r--deps/v8/bazel/defs.bzl87
-rw-r--r--deps/v8/bazel/generate-inspector-files.cmd24
-rwxr-xr-xdeps/v8/bazel/generate-inspector-files.sh19
-rw-r--r--deps/v8/bazel/requirements.in1
-rw-r--r--deps/v8/bazel/requirements.txt81
-rw-r--r--deps/v8/bazel/v8-non-pointer-compression.bzl6
-rw-r--r--deps/v8/gni/OWNERS4
-rw-r--r--deps/v8/gni/release_branch_toggle.gni7
-rw-r--r--deps/v8/gni/snapshot_toolchain.gni4
-rw-r--r--deps/v8/gni/v8.cmx8
-rw-r--r--deps/v8/gni/v8.gni13
-rw-r--r--deps/v8/include/OWNERS7
-rw-r--r--deps/v8/include/cppgc/README.md121
-rw-r--r--deps/v8/include/cppgc/default-platform.h3
-rw-r--r--deps/v8/include/cppgc/explicit-management.h26
-rw-r--r--deps/v8/include/cppgc/garbage-collected.h3
-rw-r--r--deps/v8/include/cppgc/heap-consistency.h13
-rw-r--r--deps/v8/include/cppgc/heap.h9
-rw-r--r--deps/v8/include/cppgc/internal/prefinalizer-handler.h30
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h54
-rw-r--r--deps/v8/include/cppgc/member.h7
-rw-r--r--deps/v8/include/cppgc/persistent.h61
-rw-r--r--deps/v8/include/cppgc/platform.h4
-rw-r--r--deps/v8/include/cppgc/prefinalizer.h45
-rw-r--r--deps/v8/include/cppgc/testing.h11
-rw-r--r--deps/v8/include/js_protocol.pdl16
-rw-r--r--deps/v8/include/libplatform/libplatform.h11
-rw-r--r--deps/v8/include/v8-array-buffer.h4
-rw-r--r--deps/v8/include/v8-callbacks.h14
-rw-r--r--deps/v8/include/v8-context.h17
-rw-r--r--deps/v8/include/v8-cppgc.h146
-rw-r--r--deps/v8/include/v8-debug.h25
-rw-r--r--deps/v8/include/v8-embedder-heap.h24
-rw-r--r--deps/v8/include/v8-embedder-state-scope.h5
-rw-r--r--deps/v8/include/v8-exception.h7
-rw-r--r--deps/v8/include/v8-fast-api-calls.h5
-rw-r--r--deps/v8/include/v8-initialization.h78
-rw-r--r--deps/v8/include/v8-inspector.h29
-rw-r--r--deps/v8/include/v8-internal.h318
-rw-r--r--deps/v8/include/v8-isolate.h19
-rw-r--r--deps/v8/include/v8-locker.h1
-rw-r--r--deps/v8/include/v8-message.h45
-rw-r--r--deps/v8/include/v8-metrics.h5
-rw-r--r--deps/v8/include/v8-object.h8
-rw-r--r--deps/v8/include/v8-platform.h160
-rw-r--r--deps/v8/include/v8-primitive.h6
-rw-r--r--deps/v8/include/v8-script.h47
-rw-r--r--deps/v8/include/v8-snapshot.h2
-rw-r--r--deps/v8/include/v8-traced-handle.h95
-rw-r--r--deps/v8/include/v8-value-serializer-version.h2
-rw-r--r--deps/v8/include/v8-value-serializer.h34
-rw-r--r--deps/v8/include/v8-version.h8
-rw-r--r--deps/v8/include/v8-weak-callback-info.h26
-rw-r--r--deps/v8/include/v8.h1
-rw-r--r--deps/v8/include/v8config.h38
-rw-r--r--deps/v8/infra/mb/PRESUBMIT.py4
-rw-r--r--deps/v8/infra/mb/gn_isolate_map.pyl2
-rw-r--r--deps/v8/infra/mb/mb_config.pyl33
-rw-r--r--deps/v8/infra/testing/PRESUBMIT.py31
-rw-r--r--deps/v8/infra/testing/builders.pyl45
-rw-r--r--deps/v8/samples/cppgc/hello-world.cc24
-rw-r--r--deps/v8/samples/hello-world.cc6
-rw-r--r--deps/v8/samples/process.cc6
-rw-r--r--deps/v8/samples/shell.cc6
-rw-r--r--deps/v8/src/DEPS4
-rw-r--r--deps/v8/src/api/OWNERS3
-rw-r--r--deps/v8/src/api/api-inl.h17
-rw-r--r--deps/v8/src/api/api-macros.h13
-rw-r--r--deps/v8/src/api/api.cc527
-rw-r--r--deps/v8/src/api/api.h16
-rw-r--r--deps/v8/src/asmjs/asm-js.cc31
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc5
-rw-r--r--deps/v8/src/asmjs/asm-parser.h1
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc16
-rw-r--r--deps/v8/src/ast/ast-value-factory.h33
-rw-r--r--deps/v8/src/ast/ast.cc244
-rw-r--r--deps/v8/src/ast/ast.h294
-rw-r--r--deps/v8/src/ast/scopes.cc215
-rw-r--r--deps/v8/src/ast/scopes.h65
-rw-r--r--deps/v8/src/base/atomic-utils.h15
-rw-r--r--deps/v8/src/base/atomicops.h17
-rw-r--r--deps/v8/src/base/bit-field.h2
-rw-r--r--deps/v8/src/base/bounded-page-allocator.cc13
-rw-r--r--deps/v8/src/base/bounded-page-allocator.h1
-rw-r--r--deps/v8/src/base/build_config.h11
-rw-r--r--deps/v8/src/base/cpu.cc35
-rw-r--r--deps/v8/src/base/cpu.h6
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc2
-rw-r--r--deps/v8/src/base/emulated-virtual-address-subspace.cc98
-rw-r--r--deps/v8/src/base/emulated-virtual-address-subspace.h22
-rw-r--r--deps/v8/src/base/immediate-crash.h6
-rw-r--r--deps/v8/src/base/macros.h2
-rw-r--r--deps/v8/src/base/page-allocator.cc10
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc6
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h2
-rw-r--r--deps/v8/src/base/platform/mutex.cc6
-rw-r--r--deps/v8/src/base/platform/mutex.h2
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc38
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc12
-rw-r--r--deps/v8/src/base/platform/platform-darwin.cc107
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc262
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc2
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc162
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc177
-rw-r--r--deps/v8/src/base/platform/platform-posix.h2
-rw-r--r--deps/v8/src/base/platform/platform-starboard.cc12
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc152
-rw-r--r--deps/v8/src/base/platform/platform.h43
-rw-r--r--deps/v8/src/base/platform/semaphore.cc6
-rw-r--r--deps/v8/src/base/platform/semaphore.h4
-rw-r--r--deps/v8/src/base/platform/time.cc85
-rw-r--r--deps/v8/src/base/platform/time.h5
-rw-r--r--deps/v8/src/base/pointer-with-payload.h (renamed from deps/v8/src/utils/pointer-with-payload.h)64
-rw-r--r--deps/v8/src/base/safe_conversions_impl.h3
-rw-r--r--deps/v8/src/base/sanitizer/lsan-page-allocator.cc18
-rw-r--r--deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc32
-rw-r--r--deps/v8/src/base/sanitizer/lsan-virtual-address-space.h19
-rw-r--r--deps/v8/src/base/sys-info.cc2
-rw-r--r--deps/v8/src/base/threaded-list.h13
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc5
-rw-r--r--deps/v8/src/base/virtual-address-space-page-allocator.cc6
-rw-r--r--deps/v8/src/base/virtual-address-space.cc170
-rw-r--r--deps/v8/src/base/virtual-address-space.h45
-rw-r--r--deps/v8/src/base/vlq-base64.h3
-rw-r--r--deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h11
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h12
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h2
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc56
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc68
-rw-r--r--deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h14
-rw-r--r--deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h14
-rw-r--r--deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h13
-rw-r--r--deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h13
-rw-r--r--deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h99
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h11
-rw-r--r--deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h194
-rw-r--r--deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h77
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h14
-rw-r--r--deps/v8/src/bigint/bigint.h39
-rw-r--r--deps/v8/src/bigint/tostring.cc1
-rw-r--r--deps/v8/src/builtins/accessors.cc73
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc162
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc281
-rw-r--r--deps/v8/src/builtins/array-join.tq54
-rw-r--r--deps/v8/src/builtins/base.tq93
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc38
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-array.cc314
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc71
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc247
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h20
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc33
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc2
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc28
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc45
-rw-r--r--deps/v8/src/builtins/builtins-console.cc161
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc3
-rw-r--r--deps/v8/src/builtins/builtins-date.cc4
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h105
-rw-r--r--deps/v8/src/builtins/builtins-error.cc4
-rw-r--r--deps/v8/src/builtins/builtins-function.cc19
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc24
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc114
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc163
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc30
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-object.cc5
-rw-r--r--deps/v8/src/builtins/builtins-shadow-realms.cc248
-rw-r--r--deps/v8/src/builtins/builtins-shadowrealm-gen.cc186
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc49
-rw-r--r--deps/v8/src/builtins/builtins-struct.cc123
-rw-r--r--deps/v8/src/builtins/builtins-temporal-gen.cc98
-rw-r--r--deps/v8/src/builtins/builtins-temporal.cc735
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h6
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc16
-rw-r--r--deps/v8/src/builtins/builtins.cc83
-rw-r--r--deps/v8/src/builtins/builtins.h58
-rw-r--r--deps/v8/src/builtins/convert.tq57
-rw-r--r--deps/v8/src/builtins/data-view.tq11
-rw-r--r--deps/v8/src/builtins/frame-arguments.tq4
-rw-r--r--deps/v8/src/builtins/function.tq6
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc185
-rw-r--r--deps/v8/src/builtins/internal.tq9
-rw-r--r--deps/v8/src/builtins/loong64/builtins-loong64.cc177
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc163
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc175
-rw-r--r--deps/v8/src/builtins/number.tq10
-rw-r--r--deps/v8/src/builtins/object.tq5
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc166
-rw-r--r--deps/v8/src/builtins/promise-all.tq8
-rw-r--r--deps/v8/src/builtins/promise-any.tq4
-rw-r--r--deps/v8/src/builtins/promise-finally.tq1
-rw-r--r--deps/v8/src/builtins/promise-misc.tq85
-rw-r--r--deps/v8/src/builtins/promise-then.tq16
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc183
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc146
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc31
-rw-r--r--deps/v8/src/builtins/string-repeat.tq2
-rw-r--r--deps/v8/src/builtins/torque-internal.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq34
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq149
-rw-r--r--deps/v8/src/builtins/typed-array-sort.tq39
-rw-r--r--deps/v8/src/builtins/typed-array-subarray.tq31
-rw-r--r--deps/v8/src/builtins/typed-array.tq2
-rw-r--r--deps/v8/src/builtins/wasm.tq55
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc1066
-rw-r--r--deps/v8/src/codegen/OWNERS5
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc18
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h17
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h74
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc98
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h37
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h46
-rw-r--r--deps/v8/src/codegen/arm/reglist-arm.h56
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h15
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc20
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h8
-rw-r--r--deps/v8/src/codegen/arm64/cpu-arm64.cc4
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h73
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc124
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h85
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h165
-rw-r--r--deps/v8/src/codegen/arm64/reglist-arm64.h176
-rw-r--r--deps/v8/src/codegen/assembler.cc11
-rw-r--r--deps/v8/src/codegen/assembler.h12
-rw-r--r--deps/v8/src/codegen/callable.h6
-rw-r--r--deps/v8/src/codegen/code-factory.cc18
-rw-r--r--deps/v8/src/codegen/code-factory.h8
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc732
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h213
-rw-r--r--deps/v8/src/codegen/compiler.cc408
-rw-r--r--deps/v8/src/codegen/compiler.h37
-rw-r--r--deps/v8/src/codegen/cpu-features.h2
-rw-r--r--deps/v8/src/codegen/external-reference-table.h2
-rw-r--r--deps/v8/src/codegen/external-reference.cc42
-rw-r--r--deps/v8/src/codegen/external-reference.h52
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc8
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h57
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc25
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h11
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h18
-rw-r--r--deps/v8/src/codegen/ia32/reglist-ia32.h30
-rw-r--r--deps/v8/src/codegen/interface-descriptors-inl.h46
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc10
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h352
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.cc14
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.h6
-rw-r--r--deps/v8/src/codegen/loong64/constants-loong64.h4
-rw-r--r--deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h74
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc208
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h6
-rw-r--r--deps/v8/src/codegen/loong64/register-loong64.h73
-rw-r--r--deps/v8/src/codegen/loong64/reglist-loong64.h50
-rw-r--r--deps/v8/src/codegen/machine-type.cc4
-rw-r--r--deps/v8/src/codegen/machine-type.h48
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc15
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h6
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h64
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc165
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h11
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h100
-rw-r--r--deps/v8/src/codegen/mips/reglist-mips.h48
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc14
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h6
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h74
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc178
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h15
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h100
-rw-r--r--deps/v8/src/codegen/mips64/reglist-mips64.h48
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc16
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h6
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc10
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h5
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h12
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h72
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc141
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h14
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h91
-rw-r--r--deps/v8/src/codegen/ppc/reglist-ppc.h63
-rw-r--r--deps/v8/src/codegen/register-arch.h40
-rw-r--r--deps/v8/src/codegen/register-base.h85
-rw-r--r--deps/v8/src/codegen/register-configuration.cc100
-rw-r--r--deps/v8/src/codegen/register-configuration.h23
-rw-r--r--deps/v8/src/codegen/register.h95
-rw-r--r--deps/v8/src/codegen/reglist-base.h232
-rw-r--r--deps/v8/src/codegen/reglist.h56
-rw-r--r--deps/v8/src/codegen/reloc-info.cc2
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc82
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h95
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h48
-rw-r--r--deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h75
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc216
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h6
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h102
-rw-r--r--deps/v8/src/codegen/riscv64/reglist-riscv64.h64
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc11
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h5
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h71
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc998
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h488
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h59
-rw-r--r--deps/v8/src/codegen/s390/reglist-s390.h58
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc75
-rw-r--r--deps/v8/src/codegen/safepoint-table.h127
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc2
-rw-r--r--deps/v8/src/codegen/signature.h9
-rw-r--r--deps/v8/src/codegen/source-position.cc28
-rw-r--r--deps/v8/src/codegen/source-position.h1
-rw-r--r--deps/v8/src/codegen/tnode.h15
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc13
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h3
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h11
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc50
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h15
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h60
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc115
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h34
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h28
-rw-r--r--deps/v8/src/codegen/x64/reglist-x64.h37
-rw-r--r--deps/v8/src/common/allow-deprecated.h37
-rw-r--r--deps/v8/src/common/globals.h183
-rw-r--r--deps/v8/src/common/message-template.h15
-rw-r--r--deps/v8/src/common/operation.h59
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h6
-rw-r--r--deps/v8/src/common/ptr-compr.h47
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc23
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h10
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc9
-rw-r--r--deps/v8/src/compiler/OWNERS10
-rw-r--r--deps/v8/src/compiler/access-builder.cc26
-rw-r--r--deps/v8/src/compiler/access-info.cc37
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc75
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc40
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc60
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h4
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc4
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc8
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h4
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc11
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h4
-rw-r--r--deps/v8/src/compiler/backend/gap-resolver.cc11
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc59
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc4
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc32
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc43
-rw-r--r--deps/v8/src/compiler/backend/instruction.h28
-rw-r--r--deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc54
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc4
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.cc625
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc42
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc8
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc46
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc8
-rw-r--r--deps/v8/src/compiler/backend/move-optimizer.cc4
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc312
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc4
-rw-r--r--deps/v8/src/compiler/backend/register-allocation.h10
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc243
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h5
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc306
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h10
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc4
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc187
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc1210
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc16
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc157
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h4
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc4
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc106
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc6
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc476
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h10
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc155
-rw-r--r--deps/v8/src/compiler/bytecode-liveness-map.cc42
-rw-r--r--deps/v8/src/compiler/bytecode-liveness-map.h109
-rw-r--r--deps/v8/src/compiler/c-linkage.cc104
-rw-r--r--deps/v8/src/compiler/code-assembler.cc8
-rw-r--r--deps/v8/src/compiler/code-assembler.h16
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc6
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc142
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h60
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc202
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h13
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc19
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h3
-rw-r--r--deps/v8/src/compiler/fast-api-calls.cc40
-rw-r--r--deps/v8/src/compiler/fast-api-calls.h2
-rw-r--r--deps/v8/src/compiler/frame-states.cc7
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc433
-rw-r--r--deps/v8/src/compiler/graph-assembler.h52
-rw-r--r--deps/v8/src/compiler/heap-refs.cc868
-rw-r--r--deps/v8/src/compiler/heap-refs.h30
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc14
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc422
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc8
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc43
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc119
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h18
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc226
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.h38
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc20
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h13
-rw-r--r--deps/v8/src/compiler/js-inlining.cc30
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc31
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc125
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h14
-rw-r--r--deps/v8/src/compiler/js-operator.cc98
-rw-r--r--deps/v8/src/compiler/js-operator.h87
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc10
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc20
-rw-r--r--deps/v8/src/compiler/linkage.cc10
-rw-r--r--deps/v8/src/compiler/linkage.h18
-rw-r--r--deps/v8/src/compiler/load-elimination.cc6
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc77
-rw-r--r--deps/v8/src/compiler/loop-analysis.h24
-rw-r--r--deps/v8/src/compiler/loop-unrolling.cc34
-rw-r--r--deps/v8/src/compiler/loop-unrolling.h2
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc6
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc68
-rw-r--r--deps/v8/src/compiler/machine-operator.cc4
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc74
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc17
-rw-r--r--deps/v8/src/compiler/node-properties.cc2
-rw-r--r--deps/v8/src/compiler/opcodes.h44
-rw-r--r--deps/v8/src/compiler/operation-typer.cc8
-rw-r--r--deps/v8/src/compiler/operator-properties.cc20
-rw-r--r--deps/v8/src/compiler/persistent-map.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc367
-rw-r--r--deps/v8/src/compiler/property-access-builder.h2
-rw-r--r--deps/v8/src/compiler/representation-change.cc36
-rw-r--r--deps/v8/src/compiler/representation-change.h18
-rw-r--r--deps/v8/src/compiler/select-lowering.cc2
-rw-r--r--deps/v8/src/compiler/simplified-lowering-verifier.cc251
-rw-r--r--deps/v8/src/compiler/simplified-lowering-verifier.h93
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc119
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc56
-rw-r--r--deps/v8/src/compiler/simplified-operator.h88
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc40
-rw-r--r--deps/v8/src/compiler/state-values-utils.h15
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc140
-rw-r--r--deps/v8/src/compiler/typer.cc37
-rw-r--r--deps/v8/src/compiler/types.cc14
-rw-r--r--deps/v8/src/compiler/types.h34
-rw-r--r--deps/v8/src/compiler/verifier.cc30
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1270
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h79
-rw-r--r--deps/v8/src/compiler/wasm-escape-analysis.cc7
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc114
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h10
-rw-r--r--deps/v8/src/compiler/wasm-loop-peeling.cc133
-rw-r--r--deps/v8/src/compiler/wasm-loop-peeling.h33
-rw-r--r--deps/v8/src/compiler/zone-stats.h1
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.cc166
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.h29
-rw-r--r--deps/v8/src/d8/cov.h1
-rw-r--r--deps/v8/src/d8/d8-console.cc14
-rw-r--r--deps/v8/src/d8/d8-test.cc78
-rw-r--r--deps/v8/src/d8/d8.cc351
-rw-r--r--deps/v8/src/d8/d8.h13
-rw-r--r--deps/v8/src/debug/debug-coverage.cc12
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc28
-rw-r--r--deps/v8/src/debug/debug-frames.cc4
-rw-r--r--deps/v8/src/debug/debug-interface.cc176
-rw-r--r--deps/v8/src/debug/debug-interface.h41
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc10
-rw-r--r--deps/v8/src/debug/debug-scopes.cc19
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc6
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc89
-rw-r--r--deps/v8/src/debug/debug.cc235
-rw-r--r--deps/v8/src/debug/debug.h27
-rw-r--r--deps/v8/src/debug/interface-types.h7
-rw-r--r--deps/v8/src/debug/liveedit.cc6
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/packet.h2
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/transport.h3
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc20
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc53
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h4
-rw-r--r--deps/v8/src/deoptimizer/frame-description.h2
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc3
-rw-r--r--deps/v8/src/deoptimizer/translation-array.h2
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc2
-rw-r--r--deps/v8/src/diagnostics/eh-frame.h2
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc136
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc125
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc110
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h1
-rw-r--r--deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc73
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc30
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc4
-rw-r--r--deps/v8/src/execution/OWNERS2
-rw-r--r--deps/v8/src/execution/arguments-inl.h21
-rw-r--r--deps/v8/src/execution/arguments.cc13
-rw-r--r--deps/v8/src/execution/arguments.h20
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h26
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h29
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc2
-rw-r--r--deps/v8/src/execution/clobber-registers.cc63
-rw-r--r--deps/v8/src/execution/clobber-registers.h18
-rw-r--r--deps/v8/src/execution/execution.cc24
-rw-r--r--deps/v8/src/execution/frame-constants.h14
-rw-r--r--deps/v8/src/execution/frames-inl.h3
-rw-r--r--deps/v8/src/execution/frames.cc147
-rw-r--r--deps/v8/src/execution/frames.h31
-rw-r--r--deps/v8/src/execution/futex-emulation.cc35
-rw-r--r--deps/v8/src/execution/futex-emulation.h10
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.h25
-rw-r--r--deps/v8/src/execution/isolate-data.h51
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h6
-rw-r--r--deps/v8/src/execution/isolate.cc1041
-rw-r--r--deps/v8/src/execution/isolate.h117
-rw-r--r--deps/v8/src/execution/local-isolate.cc17
-rw-r--r--deps/v8/src/execution/local-isolate.h14
-rw-r--r--deps/v8/src/execution/loong64/frame-constants-loong64.h25
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.cc32
-rw-r--r--deps/v8/src/execution/messages.cc104
-rw-r--r--deps/v8/src/execution/messages.h9
-rw-r--r--deps/v8/src/execution/microtask-queue.cc4
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.h23
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.h23
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h31
-rw-r--r--deps/v8/src/execution/riscv64/frame-constants-riscv64.h28
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc344
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc261
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h29
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc13
-rw-r--r--deps/v8/src/execution/simulator-base.cc5
-rw-r--r--deps/v8/src/execution/simulator-base.h4
-rw-r--r--deps/v8/src/execution/stack-guard.cc1
-rw-r--r--deps/v8/src/execution/thread-local-top.cc1
-rw-r--r--deps/v8/src/execution/thread-local-top.h5
-rw-r--r--deps/v8/src/execution/tiering-manager.cc412
-rw-r--r--deps/v8/src/execution/tiering-manager.h (renamed from deps/v8/src/execution/runtime-profiler.h)50
-rw-r--r--deps/v8/src/execution/v8threads.cc3
-rw-r--r--deps/v8/src/execution/v8threads.h2
-rw-r--r--deps/v8/src/execution/vm-state-inl.h2
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.h25
-rw-r--r--deps/v8/src/extensions/gc-extension.cc27
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc45
-rw-r--r--deps/v8/src/flags/flag-definitions.h195
-rw-r--r--deps/v8/src/flags/flags.h2
-rw-r--r--deps/v8/src/handles/global-handles.cc99
-rw-r--r--deps/v8/src/handles/global-handles.h4
-rw-r--r--deps/v8/src/handles/handles-inl.h1
-rw-r--r--deps/v8/src/handles/handles.cc48
-rw-r--r--deps/v8/src/handles/handles.h51
-rw-r--r--deps/v8/src/handles/local-handles-inl.h11
-rw-r--r--deps/v8/src/heap/OWNERS3
-rw-r--r--deps/v8/src/heap/allocation-observer.h131
-rw-r--r--deps/v8/src/heap/allocation-result.h74
-rw-r--r--deps/v8/src/heap/base/active-system-pages.cc71
-rw-r--r--deps/v8/src/heap/base/active-system-pages.h51
-rw-r--r--deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc3
-rw-r--r--deps/v8/src/heap/base/stack.cc25
-rw-r--r--deps/v8/src/heap/base/stack.h11
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h16
-rw-r--r--deps/v8/src/heap/code-object-registry.h1
-rw-r--r--deps/v8/src/heap/code-range.cc20
-rw-r--r--deps/v8/src/heap/concurrent-allocator-inl.h10
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc16
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc26
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc300
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h25
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h47
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-marking-state.h67
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc3
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc2
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h26
-rw-r--r--deps/v8/src/heap/cppgc/default-platform.cc2
-rw-r--r--deps/v8/src/heap/cppgc/explicit-management.cc30
-rw-r--r--deps/v8/src/heap/cppgc/garbage-collector.h9
-rw-r--r--deps/v8/src/heap/cppgc/gc-invoker.cc12
-rw-r--r--deps/v8/src/heap/cppgc/gc-invoker.h2
-rw-r--r--deps/v8/src/heap/cppgc/globals.h21
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc5
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h12
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h19
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.cc2
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.h1
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc7
-rw-r--r--deps/v8/src/heap/cppgc/heap.h3
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc77
-rw-r--r--deps/v8/src/heap/cppgc/marker.h40
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h196
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc2
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.h6
-rw-r--r--deps/v8/src/heap/cppgc/page-memory.h3
-rw-r--r--deps/v8/src/heap/cppgc/platform.cc8
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.cc17
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.h13
-rw-r--r--deps/v8/src/heap/cppgc/remembered-set.cc135
-rw-r--r--deps/v8/src/heap/cppgc/remembered-set.h68
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc24
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.h1
-rw-r--r--deps/v8/src/heap/cppgc/testing.cc8
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc18
-rw-r--r--deps/v8/src/heap/embedder-tracing-inl.h46
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc87
-rw-r--r--deps/v8/src/heap/embedder-tracing.h52
-rw-r--r--deps/v8/src/heap/evacuation-allocator-inl.h (renamed from deps/v8/src/heap/local-allocator-inl.h)36
-rw-r--r--deps/v8/src/heap/evacuation-allocator.h (renamed from deps/v8/src/heap/local-allocator.h)12
-rw-r--r--deps/v8/src/heap/factory-base.cc34
-rw-r--r--deps/v8/src/heap/factory-base.h3
-rw-r--r--deps/v8/src/heap/factory-inl.h4
-rw-r--r--deps/v8/src/heap/factory.cc294
-rw-r--r--deps/v8/src/heap/factory.h72
-rw-r--r--deps/v8/src/heap/gc-tracer.cc542
-rw-r--r--deps/v8/src/heap/gc-tracer.h136
-rw-r--r--deps/v8/src/heap/heap-allocator-inl.h250
-rw-r--r--deps/v8/src/heap/heap-allocator.cc163
-rw-r--r--deps/v8/src/heap/heap-allocator.h119
-rw-r--r--deps/v8/src/heap/heap-inl.h234
-rw-r--r--deps/v8/src/heap/heap-layout-tracer.cc3
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h17
-rw-r--r--deps/v8/src/heap/heap-write-barrier.cc28
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h6
-rw-r--r--deps/v8/src/heap/heap.cc1069
-rw-r--r--deps/v8/src/heap/heap.h245
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc5
-rw-r--r--deps/v8/src/heap/incremental-marking.cc171
-rw-r--r--deps/v8/src/heap/incremental-marking.h4
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h13
-rw-r--r--deps/v8/src/heap/invalidated-slots.cc11
-rw-r--r--deps/v8/src/heap/invalidated-slots.h8
-rw-r--r--deps/v8/src/heap/large-spaces.cc73
-rw-r--r--deps/v8/src/heap/large-spaces.h4
-rw-r--r--deps/v8/src/heap/local-factory.cc5
-rw-r--r--deps/v8/src/heap/local-factory.h17
-rw-r--r--deps/v8/src/heap/local-heap-inl.h3
-rw-r--r--deps/v8/src/heap/local-heap.cc2
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h11
-rw-r--r--deps/v8/src/heap/mark-compact.cc722
-rw-r--r--deps/v8/src/heap/mark-compact.h37
-rw-r--r--deps/v8/src/heap/marking-barrier.cc19
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h72
-rw-r--r--deps/v8/src/heap/marking-visitor.h67
-rw-r--r--deps/v8/src/heap/marking-worklist-inl.h38
-rw-r--r--deps/v8/src/heap/marking-worklist.cc30
-rw-r--r--deps/v8/src/heap/marking-worklist.h44
-rw-r--r--deps/v8/src/heap/memory-allocator.cc140
-rw-r--r--deps/v8/src/heap/memory-allocator.h176
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h7
-rw-r--r--deps/v8/src/heap/memory-chunk.cc28
-rw-r--r--deps/v8/src/heap/memory-chunk.h5
-rw-r--r--deps/v8/src/heap/new-spaces-inl.h15
-rw-r--r--deps/v8/src/heap/new-spaces.cc119
-rw-r--r--deps/v8/src/heap/new-spaces.h34
-rw-r--r--deps/v8/src/heap/object-stats.cc86
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h13
-rw-r--r--deps/v8/src/heap/objects-visiting.h2
-rw-r--r--deps/v8/src/heap/paged-spaces-inl.h23
-rw-r--r--deps/v8/src/heap/paged-spaces.cc100
-rw-r--r--deps/v8/src/heap/paged-spaces.h41
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc11
-rw-r--r--deps/v8/src/heap/reference-summarizer.cc116
-rw-r--r--deps/v8/src/heap/reference-summarizer.h55
-rw-r--r--deps/v8/src/heap/remembered-set-inl.h16
-rw-r--r--deps/v8/src/heap/remembered-set.h13
-rw-r--r--deps/v8/src/heap/safepoint.cc34
-rw-r--r--deps/v8/src/heap/safepoint.h12
-rw-r--r--deps/v8/src/heap/scavenger-inl.h24
-rw-r--r--deps/v8/src/heap/scavenger.cc70
-rw-r--r--deps/v8/src/heap/scavenger.h12
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc40
-rw-r--r--deps/v8/src/heap/slot-set.cc2
-rw-r--r--deps/v8/src/heap/slot-set.h53
-rw-r--r--deps/v8/src/heap/spaces-inl.h33
-rw-r--r--deps/v8/src/heap/spaces.cc43
-rw-r--r--deps/v8/src/heap/spaces.h46
-rw-r--r--deps/v8/src/heap/sweeper.cc70
-rw-r--r--deps/v8/src/heap/sweeper.h13
-rw-r--r--deps/v8/src/heap/third-party/heap-api-stub.cc12
-rw-r--r--deps/v8/src/ic/OWNERS1
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc205
-rw-r--r--deps/v8/src/ic/accessor-assembler.h43
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc98
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h79
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h18
-rw-r--r--deps/v8/src/ic/handler-configuration.h6
-rw-r--r--deps/v8/src/ic/ic.cc160
-rw-r--r--deps/v8/src/ic/ic.h16
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc95
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h4
-rw-r--r--deps/v8/src/ic/stub-cache.h6
-rw-r--r--deps/v8/src/init/OWNERS3
-rw-r--r--deps/v8/src/init/bootstrapper.cc384
-rw-r--r--deps/v8/src/init/heap-symbols.h37
-rw-r--r--deps/v8/src/init/isolate-allocator.cc46
-rw-r--r--deps/v8/src/init/v8.cc142
-rw-r--r--deps/v8/src/init/v8.h9
-rw-r--r--deps/v8/src/inspector/BUILD.gn2
-rw-r--r--deps/v8/src/inspector/injected-script.cc43
-rw-r--r--deps/v8/src/inspector/injected-script.h1
-rw-r--r--deps/v8/src/inspector/inspected-context.cc2
-rw-r--r--deps/v8/src/inspector/inspected-context.h4
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json1
-rw-r--r--deps/v8/src/inspector/string-16.cc10
-rw-r--r--deps/v8/src/inspector/string-16.h11
-rw-r--r--deps/v8/src/inspector/string-util.cc4
-rw-r--r--deps/v8/src/inspector/string-util.h6
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.cc2
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc24
-rw-r--r--deps/v8/src/inspector/v8-console.cc90
-rw-r--r--deps/v8/src/inspector/v8-console.h13
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc171
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h19
-rw-r--r--deps/v8/src/inspector/v8-debugger-id.cc28
-rw-r--r--deps/v8/src/inspector/v8-debugger-id.h17
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc64
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc173
-rw-r--r--deps/v8/src/inspector/v8-debugger.h13
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc46
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h8
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc4
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc33
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc69
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h5
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc51
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h17
-rw-r--r--deps/v8/src/inspector/value-mirror.cc10
-rw-r--r--deps/v8/src/inspector/value-mirror.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc50
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h64
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc16
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc1
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc17
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc274
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h12
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc9
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h8
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc4
-rw-r--r--deps/v8/src/interpreter/bytecodes.h28
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc33
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc93
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.h4
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc15
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h6
-rw-r--r--deps/v8/src/interpreter/interpreter.cc17
-rw-r--r--deps/v8/src/interpreter/interpreter.h6
-rw-r--r--deps/v8/src/json/json-parser.cc28
-rw-r--r--deps/v8/src/json/json-parser.h3
-rw-r--r--deps/v8/src/json/json-stringifier.cc8
-rw-r--r--deps/v8/src/libplatform/default-platform.cc9
-rw-r--r--deps/v8/src/libplatform/tracing/recorder.h6
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc2
-rw-r--r--deps/v8/src/libsampler/sampler.cc4
-rw-r--r--deps/v8/src/logging/code-events.h7
-rw-r--r--deps/v8/src/logging/counters-definitions.h35
-rw-r--r--deps/v8/src/logging/counters-scopes.h73
-rw-r--r--deps/v8/src/logging/counters.cc22
-rw-r--r--deps/v8/src/logging/counters.h45
-rw-r--r--deps/v8/src/logging/log.cc36
-rw-r--r--deps/v8/src/logging/log.h26
-rw-r--r--deps/v8/src/logging/runtime-call-stats-scope.h10
-rw-r--r--deps/v8/src/logging/runtime-call-stats.cc6
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h19
-rw-r--r--deps/v8/src/maglev/DEPS6
-rw-r--r--deps/v8/src/maglev/OWNERS3
-rw-r--r--deps/v8/src/maglev/maglev-basic-block.h107
-rw-r--r--deps/v8/src/maglev/maglev-code-gen-state.h135
-rw-r--r--deps/v8/src/maglev/maglev-code-generator.cc378
-rw-r--r--deps/v8/src/maglev/maglev-code-generator.h27
-rw-r--r--deps/v8/src/maglev/maglev-compilation-info.cc123
-rw-r--r--deps/v8/src/maglev/maglev-compilation-info.h137
-rw-r--r--deps/v8/src/maglev/maglev-compilation-unit.cc45
-rw-r--r--deps/v8/src/maglev/maglev-compilation-unit.h57
-rw-r--r--deps/v8/src/maglev/maglev-compiler.cc209
-rw-r--r--deps/v8/src/maglev/maglev-compiler.h53
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.cc194
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.h92
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.cc616
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.h383
-rw-r--r--deps/v8/src/maglev/maglev-graph-labeller.h65
-rw-r--r--deps/v8/src/maglev/maglev-graph-printer.cc446
-rw-r--r--deps/v8/src/maglev/maglev-graph-printer.h85
-rw-r--r--deps/v8/src/maglev/maglev-graph-processor.h423
-rw-r--r--deps/v8/src/maglev/maglev-graph.h60
-rw-r--r--deps/v8/src/maglev/maglev-interpreter-frame-state.h400
-rw-r--r--deps/v8/src/maglev/maglev-ir.cc922
-rw-r--r--deps/v8/src/maglev/maglev-ir.h1461
-rw-r--r--deps/v8/src/maglev/maglev-regalloc-data.h83
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.cc875
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.h112
-rw-r--r--deps/v8/src/maglev/maglev-register-frame-array.h113
-rw-r--r--deps/v8/src/maglev/maglev-vreg-allocator.h57
-rw-r--r--deps/v8/src/maglev/maglev.cc24
-rw-r--r--deps/v8/src/maglev/maglev.h28
-rw-r--r--deps/v8/src/numbers/conversions-inl.h13
-rw-r--r--deps/v8/src/numbers/integer-literal-inl.h43
-rw-r--r--deps/v8/src/numbers/integer-literal.h106
-rw-r--r--deps/v8/src/objects/all-objects-inl.h4
-rw-r--r--deps/v8/src/objects/backing-store.cc90
-rw-r--r--deps/v8/src/objects/backing-store.h3
-rw-r--r--deps/v8/src/objects/bigint.cc2
-rw-r--r--deps/v8/src/objects/call-site-info-inl.h (renamed from deps/v8/src/objects/stack-frame-info-inl.h)31
-rw-r--r--deps/v8/src/objects/call-site-info.cc (renamed from deps/v8/src/objects/stack-frame-info.cc)134
-rw-r--r--deps/v8/src/objects/call-site-info.h (renamed from deps/v8/src/objects/stack-frame-info.h)61
-rw-r--r--deps/v8/src/objects/call-site-info.tq (renamed from deps/v8/src/objects/stack-frame-info.tq)6
-rw-r--r--deps/v8/src/objects/code-inl.h261
-rw-r--r--deps/v8/src/objects/code-kind.cc2
-rw-r--r--deps/v8/src/objects/code-kind.h94
-rw-r--r--deps/v8/src/objects/code.cc12
-rw-r--r--deps/v8/src/objects/code.h138
-rw-r--r--deps/v8/src/objects/compilation-cache-table.cc2
-rw-r--r--deps/v8/src/objects/contexts-inl.h16
-rw-r--r--deps/v8/src/objects/contexts.cc68
-rw-r--r--deps/v8/src/objects/contexts.h38
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h33
-rw-r--r--deps/v8/src/objects/debug-objects.cc62
-rw-r--r--deps/v8/src/objects/debug-objects.h48
-rw-r--r--deps/v8/src/objects/debug-objects.tq60
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h3
-rw-r--r--deps/v8/src/objects/elements-kind.h5
-rw-r--r--deps/v8/src/objects/elements.cc94
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h112
-rw-r--r--deps/v8/src/objects/embedder-data-slot.h96
-rw-r--r--deps/v8/src/objects/feedback-cell-inl.h7
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h50
-rw-r--r--deps/v8/src/objects/feedback-vector.cc201
-rw-r--r--deps/v8/src/objects/feedback-vector.h109
-rw-r--r--deps/v8/src/objects/feedback-vector.tq8
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h5
-rw-r--r--deps/v8/src/objects/fixed-array.h32
-rw-r--r--deps/v8/src/objects/foreign-inl.h7
-rw-r--r--deps/v8/src/objects/hash-table-inl.h74
-rw-r--r--deps/v8/src/objects/hash-table.h81
-rw-r--r--deps/v8/src/objects/heap-object.h26
-rw-r--r--deps/v8/src/objects/internal-index.h4
-rw-r--r--deps/v8/src/objects/intl-objects.cc462
-rw-r--r--deps/v8/src/objects/intl-objects.h80
-rw-r--r--deps/v8/src/objects/intl-objects.tq2
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h39
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc2
-rw-r--r--deps/v8/src/objects/js-array-buffer.h8
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq27
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc274
-rw-r--r--deps/v8/src/objects/js-date-time-format.h19
-rw-r--r--deps/v8/src/objects/js-function-inl.h95
-rw-r--r--deps/v8/src/objects/js-function.cc180
-rw-r--r--deps/v8/src/objects/js-function.h61
-rw-r--r--deps/v8/src/objects/js-function.tq15
-rw-r--r--deps/v8/src/objects/js-list-format.cc22
-rw-r--r--deps/v8/src/objects/js-list-format.h4
-rw-r--r--deps/v8/src/objects/js-locale.cc2
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h3
-rw-r--r--deps/v8/src/objects/js-number-format.cc939
-rw-r--r--deps/v8/src/objects/js-number-format.h41
-rw-r--r--deps/v8/src/objects/js-number-format.tq2
-rw-r--r--deps/v8/src/objects/js-objects-inl.h132
-rw-r--r--deps/v8/src/objects/js-objects.cc146
-rw-r--r--deps/v8/src/objects/js-objects.h100
-rw-r--r--deps/v8/src/objects/js-objects.tq14
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h3
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc51
-rw-r--r--deps/v8/src/objects/js-plural-rules.h6
-rw-r--r--deps/v8/src/objects/js-plural-rules.tq2
-rw-r--r--deps/v8/src/objects/js-promise.h3
-rw-r--r--deps/v8/src/objects/js-promise.tq2
-rw-r--r--deps/v8/src/objects/js-proxy.h4
-rw-r--r--deps/v8/src/objects/js-regexp.cc7
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc43
-rw-r--r--deps/v8/src/objects/js-shadow-realms-inl.h28
-rw-r--r--deps/v8/src/objects/js-shadow-realms.h39
-rw-r--r--deps/v8/src/objects/js-shadow-realms.tq5
-rw-r--r--deps/v8/src/objects/js-struct-inl.h30
-rw-r--r--deps/v8/src/objects/js-struct.h35
-rw-r--r--deps/v8/src/objects/js-struct.tq7
-rw-r--r--deps/v8/src/objects/js-temporal-objects-inl.h26
-rw-r--r--deps/v8/src/objects/js-temporal-objects.cc5196
-rw-r--r--deps/v8/src/objects/js-temporal-objects.h240
-rw-r--r--deps/v8/src/objects/js-temporal-objects.tq6
-rw-r--r--deps/v8/src/objects/lookup.cc34
-rw-r--r--deps/v8/src/objects/lookup.h2
-rw-r--r--deps/v8/src/objects/map-inl.h7
-rw-r--r--deps/v8/src/objects/map-updater.cc68
-rw-r--r--deps/v8/src/objects/map.cc173
-rw-r--r--deps/v8/src/objects/map.h2
-rw-r--r--deps/v8/src/objects/name-inl.h25
-rw-r--r--deps/v8/src/objects/name.h59
-rw-r--r--deps/v8/src/objects/name.tq35
-rw-r--r--deps/v8/src/objects/object-list-macros.h372
-rw-r--r--deps/v8/src/objects/object-macros-undef.h13
-rw-r--r--deps/v8/src/objects/object-macros.h91
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h92
-rw-r--r--deps/v8/src/objects/objects-definitions.h2
-rw-r--r--deps/v8/src/objects/objects-inl.h219
-rw-r--r--deps/v8/src/objects/objects.cc349
-rw-r--r--deps/v8/src/objects/objects.h97
-rw-r--r--deps/v8/src/objects/oddball-inl.h5
-rw-r--r--deps/v8/src/objects/oddball.h2
-rw-r--r--deps/v8/src/objects/option-utils.h71
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc7
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h3
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.cc77
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.h25
-rw-r--r--deps/v8/src/objects/property-array-inl.h26
-rw-r--r--deps/v8/src/objects/property-array.h4
-rw-r--r--deps/v8/src/objects/property-cell.h8
-rw-r--r--deps/v8/src/objects/property-descriptor.cc2
-rw-r--r--deps/v8/src/objects/property-details.h5
-rw-r--r--deps/v8/src/objects/scope-info-inl.h110
-rw-r--r--deps/v8/src/objects/scope-info.cc168
-rw-r--r--deps/v8/src/objects/scope-info.h48
-rw-r--r--deps/v8/src/objects/scope-info.tq64
-rw-r--r--deps/v8/src/objects/script.h7
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h32
-rw-r--r--deps/v8/src/objects/shared-function-info.cc21
-rw-r--r--deps/v8/src/objects/shared-function-info.h12
-rw-r--r--deps/v8/src/objects/shared-function-info.tq23
-rw-r--r--deps/v8/src/objects/source-text-module.cc3
-rw-r--r--deps/v8/src/objects/string-inl.h113
-rw-r--r--deps/v8/src/objects/string-table-inl.h2
-rw-r--r--deps/v8/src/objects/string-table.cc92
-rw-r--r--deps/v8/src/objects/string.cc27
-rw-r--r--deps/v8/src/objects/string.h40
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.h1
-rw-r--r--deps/v8/src/objects/symbol-table.cc22
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h34
-rw-r--r--deps/v8/src/objects/tagged-field.h9
-rw-r--r--deps/v8/src/objects/template-objects.cc51
-rw-r--r--deps/v8/src/objects/transitions-inl.h76
-rw-r--r--deps/v8/src/objects/transitions.cc223
-rw-r--r--deps/v8/src/objects/transitions.h81
-rw-r--r--deps/v8/src/objects/turbofan-types.tq65
-rw-r--r--deps/v8/src/objects/value-serializer.cc369
-rw-r--r--deps/v8/src/objects/value-serializer.h32
-rw-r--r--deps/v8/src/objects/visitors.h5
-rw-r--r--deps/v8/src/parsing/OWNERS1
-rw-r--r--deps/v8/src/parsing/expression-scope.h2
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc2
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h22
-rw-r--r--deps/v8/src/parsing/parse-info.cc16
-rw-r--r--deps/v8/src/parsing/parse-info.h23
-rw-r--r--deps/v8/src/parsing/parser-base.h80
-rw-r--r--deps/v8/src/parsing/parser.cc152
-rw-r--r--deps/v8/src/parsing/parser.h16
-rw-r--r--deps/v8/src/parsing/preparse-data.cc4
-rw-r--r--deps/v8/src/parsing/preparser.h3
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc13
-rw-r--r--deps/v8/src/parsing/scanner.cc16
-rw-r--r--deps/v8/src/parsing/scanner.h1
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc13
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc5
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc356
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h91
-rw-r--r--deps/v8/src/profiler/profile-generator.cc11
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc5
-rw-r--r--deps/v8/src/profiler/profiler-listener.h3
-rw-r--r--deps/v8/src/profiler/strings-storage.cc2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc16
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc9
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc11
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc9
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc11
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc17
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h3
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc101
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc6
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h13
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc4
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc5
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc3
-rw-r--r--deps/v8/src/regexp/regexp.cc5
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc74
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h3
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc14
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h3
-rw-r--r--deps/v8/src/roots/OWNERS2
-rw-r--r--deps/v8/src/roots/roots.h61
-rw-r--r--deps/v8/src/runtime/runtime-array.cc35
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc63
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc52
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc42
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc22
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc106
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc128
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc6
-rw-r--r--deps/v8/src/runtime/runtime-function.cc16
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc10
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc8
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc192
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc12
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc36
-rw-r--r--deps/v8/src/runtime/runtime-module.cc7
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc15
-rw-r--r--deps/v8/src/runtime/runtime-object.cc440
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc40
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc153
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc40
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc82
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc68
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc64
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc10
-rw-r--r--deps/v8/src/runtime/runtime-test-wasm.cc60
-rw-r--r--deps/v8/src/runtime/runtime-test.cc442
-rw-r--r--deps/v8/src/runtime/runtime-trace.cc25
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc35
-rw-r--r--deps/v8/src/runtime/runtime-utils.h97
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc259
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc10
-rw-r--r--deps/v8/src/runtime/runtime.h304
-rw-r--r--deps/v8/src/sandbox/OWNERS (renamed from deps/v8/src/security/OWNERS)0
-rw-r--r--deps/v8/src/sandbox/external-pointer-inl.h (renamed from deps/v8/src/security/external-pointer-inl.h)79
-rw-r--r--deps/v8/src/sandbox/external-pointer-table-inl.h149
-rw-r--r--deps/v8/src/sandbox/external-pointer-table.cc97
-rw-r--r--deps/v8/src/sandbox/external-pointer-table.h205
-rw-r--r--deps/v8/src/sandbox/external-pointer.h (renamed from deps/v8/src/security/external-pointer.h)18
-rw-r--r--deps/v8/src/sandbox/sandbox.cc332
-rw-r--r--deps/v8/src/sandbox/sandbox.h195
-rw-r--r--deps/v8/src/sandbox/sandboxed-pointer-inl.h49
-rw-r--r--deps/v8/src/sandbox/sandboxed-pointer.h23
-rw-r--r--deps/v8/src/security/caged-pointer-inl.h53
-rw-r--r--deps/v8/src/security/caged-pointer.h23
-rw-r--r--deps/v8/src/security/external-pointer-table.cc24
-rw-r--r--deps/v8/src/security/external-pointer-table.h74
-rw-r--r--deps/v8/src/security/vm-cage.cc322
-rw-r--r--deps/v8/src/security/vm-cage.h205
-rw-r--r--deps/v8/src/snapshot/OWNERS1
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc300
-rw-r--r--deps/v8/src/snapshot/deserializer.h16
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc12
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc2
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc2
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc6
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.cc14
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h6
-rw-r--r--deps/v8/src/snapshot/serializer.cc82
-rw-r--r--deps/v8/src/snapshot/serializer.h19
-rw-r--r--deps/v8/src/snapshot/shared-heap-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/shared-heap-serializer.cc52
-rw-r--r--deps/v8/src/snapshot/shared-heap-serializer.h4
-rw-r--r--deps/v8/src/snapshot/snapshot.cc37
-rw-r--r--deps/v8/src/snapshot/snapshot.h20
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc6
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc5
-rw-r--r--deps/v8/src/strings/string-builder-inl.h2
-rw-r--r--deps/v8/src/strings/string-hasher-inl.h27
-rw-r--r--deps/v8/src/strings/string-stream.cc2
-rw-r--r--deps/v8/src/strings/uri.cc9
-rw-r--r--deps/v8/src/temporal/temporal-parser.cc12
-rw-r--r--deps/v8/src/temporal/temporal-parser.h6
-rw-r--r--deps/v8/src/torque/OWNERS2
-rw-r--r--deps/v8/src/torque/ast.h26
-rw-r--r--deps/v8/src/torque/constants.h2
-rw-r--r--deps/v8/src/torque/declaration-visitor.h18
-rw-r--r--deps/v8/src/torque/declarations.cc6
-rw-r--r--deps/v8/src/torque/declarations.h6
-rw-r--r--deps/v8/src/torque/earley-parser.cc1
-rw-r--r--deps/v8/src/torque/earley-parser.h2
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc86
-rw-r--r--deps/v8/src/torque/implementation-visitor.h3
-rw-r--r--deps/v8/src/torque/kythe-data.h2
-rw-r--r--deps/v8/src/torque/runtime-macro-shims.h13
-rw-r--r--deps/v8/src/torque/torque-parser.cc175
-rw-r--r--deps/v8/src/torque/type-oracle.h4
-rw-r--r--deps/v8/src/torque/type-visitor.cc10
-rw-r--r--deps/v8/src/torque/type-visitor.h2
-rw-r--r--deps/v8/src/torque/types.cc6
-rw-r--r--deps/v8/src/torque/types.h1
-rw-r--r--deps/v8/src/tracing/trace-categories.h5
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc4
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.h2
-rw-r--r--deps/v8/src/trap-handler/handler-outside-simulator.cc6
-rw-r--r--deps/v8/src/trap-handler/handler-shared.cc4
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h6
-rw-r--r--deps/v8/src/utils/allocation.cc45
-rw-r--r--deps/v8/src/utils/allocation.h56
-rw-r--r--deps/v8/src/utils/bit-vector.h160
-rw-r--r--deps/v8/src/wasm/OWNERS2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h50
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h63
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h52
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h94
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc25
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h19
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc432
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h32
-rw-r--r--deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h412
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h24
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h512
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h27
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h87
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h973
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h59
-rw-r--r--deps/v8/src/wasm/c-api.cc78
-rw-r--r--deps/v8/src/wasm/code-space-access.cc56
-rw-r--r--deps/v8/src/wasm/code-space-access.h31
-rw-r--r--deps/v8/src/wasm/compilation-environment.h6
-rw-r--r--deps/v8/src/wasm/decoder.h9
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h906
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc15
-rw-r--r--deps/v8/src/wasm/function-compiler.cc23
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc201
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h10
-rw-r--r--deps/v8/src/wasm/init-expr-interface.cc144
-rw-r--r--deps/v8/src/wasm/init-expr-interface.h18
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.cc5
-rw-r--r--deps/v8/src/wasm/memory-protection-key.cc199
-rw-r--r--deps/v8/src/wasm/memory-protection-key.h4
-rw-r--r--deps/v8/src/wasm/module-compiler.cc128
-rw-r--r--deps/v8/src/wasm/module-compiler.h4
-rw-r--r--deps/v8/src/wasm/module-decoder.cc598
-rw-r--r--deps/v8/src/wasm/module-decoder.h2
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc480
-rw-r--r--deps/v8/src/wasm/module-instantiate.h6
-rw-r--r--deps/v8/src/wasm/stacks.h21
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc2
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h1
-rw-r--r--deps/v8/src/wasm/value-type.h132
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc252
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h25
-rw-r--r--deps/v8/src/wasm/wasm-constants.h27
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc47
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc42
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc31
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h14
-rw-r--r--deps/v8/src/wasm/wasm-features.cc5
-rw-r--r--deps/v8/src/wasm/wasm-features.h10
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.cc10
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h14
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.cc12
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.h29
-rw-r--r--deps/v8/src/wasm/wasm-js.cc196
-rw-r--r--deps/v8/src/wasm/wasm-limits.h4
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h2
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc179
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h64
-rw-r--r--deps/v8/src/wasm/wasm-module.cc23
-rw-r--r--deps/v8/src/wasm/wasm-module.h249
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h74
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc294
-rw-r--r--deps/v8/src/wasm/wasm-objects.h63
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq19
-rw-r--r--deps/v8/src/wasm/wasm-opcodes-inl.h16
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h409
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc207
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc380
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h74
-rw-r--r--deps/v8/src/wasm/wasm-value.h1
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc1633
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h168
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc2
-rw-r--r--deps/v8/src/zone/zone.cc26
-rw-r--r--deps/v8/src/zone/zone.h24
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status2
-rw-r--r--deps/v8/test/cctest/BUILD.gn7
-rw-r--r--deps/v8/test/cctest/cctest.cc49
-rw-r--r--deps/v8/test/cctest/cctest.h108
-rw-r--r--deps/v8/test/cctest/cctest.status50
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h2
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc39
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc13
-rw-r--r--deps/v8/test/cctest/disasm-regex-helper.cc8
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc14
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc15
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-marking.cc9
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc28
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc202
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc7
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc30
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc25
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc52
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc13
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden56
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden61
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden108
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden92
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFE.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden256
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden32
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden165
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden272
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStore.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden528
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden1052
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden32
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden92
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden512
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h2
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc33
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc34
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc39
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc86
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc55
-rw-r--r--deps/v8/test/cctest/test-api.cc280
-rw-r--r--deps/v8/test/cctest/test-array-list.cc3
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc59
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc126
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc113
-rw-r--r--deps/v8/test/cctest/test-assembler-loong64.cc8
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc16
-rw-r--r--deps/v8/test/cctest/test-assembler-riscv64.cc484
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc5
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc25
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc28
-rw-r--r--deps/v8/test/cctest/test-compiler.cc5
-rw-r--r--deps/v8/test/cctest/test-concurrent-prototype.cc5
-rw-r--r--deps/v8/test/cctest/test-concurrent-script-context-table.cc16
-rw-r--r--deps/v8/test/cctest/test-concurrent-transition-array.cc87
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc18
-rw-r--r--deps/v8/test/cctest/test-debug.cc70
-rw-r--r--deps/v8/test/cctest/test-descriptor-array.cc43
-rw-r--r--deps/v8/test/cctest/test-disasm-loong64.cc8
-rw-r--r--deps/v8/test/cctest/test-disasm-riscv64.cc50
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc5
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc19
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc28
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc6
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc57
-rw-r--r--deps/v8/test/cctest/test-helper-riscv64.h1
-rw-r--r--deps/v8/test/cctest/test-icache.cc28
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc3
-rw-r--r--deps/v8/test/cctest/test-log.cc30
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc8
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-loong64.cc4
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc5
-rw-r--r--deps/v8/test/cctest/test-parsing.cc19
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc19
-rw-r--r--deps/v8/test/cctest/test-serialize.cc62
-rw-r--r--deps/v8/test/cctest/test-shared-strings.cc215
-rw-r--r--deps/v8/test/cctest/test-strings.cc43
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc9
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc13
-rw-r--r--deps/v8/test/cctest/test-temporal-parser.cc112
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc10
-rw-r--r--deps/v8/test/cctest/test-transitions.cc68
-rw-r--r--deps/v8/test/cctest/test-transitions.h7
-rw-r--r--deps/v8/test/cctest/test-unwinder-code-pages.cc8
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc140
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h7
-rw-r--r--deps/v8/test/cctest/test-web-snapshots.cc214
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc389
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc23
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc16
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc31
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc1
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc18
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc11
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc5
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc3
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc34
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc1
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc5
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc14
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc15
-rw-r--r--deps/v8/test/cctest/wasm/wasm-atomics-utils.h12
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc25
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h42
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc79
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h18
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc2
-rw-r--r--deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js19
-rw-r--r--deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js8
-rw-r--r--deps/v8/test/debugger/debugger.status7
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc10
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status1
-rw-r--r--deps/v8/test/fuzzer/inspector/regress-1297964411
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc292
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc274
-rw-r--r--deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha12
-rw-r--r--deps/v8/test/inspector/debugger/async-instrumentation-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/async-instrumentation.js8
-rw-r--r--deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-promise-late-then.js7
-rw-r--r--deps/v8/test/inspector/debugger/async-set-timeout-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/async-set-timeout.js7
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt30
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-created-frame.js8
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt42
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise.js7
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-load-more.js7
-rw-r--r--deps/v8/test/inspector/debugger/call-frame-url-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-expected.txt38
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-nested-super.js79
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods.js10
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js16
-rw-r--r--deps/v8/test/inspector/debugger/destroy-in-break-program2.js4
-rw-r--r--deps/v8/test/inspector/debugger/external-stack-trace.js10
-rw-r--r--deps/v8/test/inspector/debugger/framework-break-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/framework-break.js16
-rw-r--r--deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js16
-rw-r--r--deps/v8/test/inspector/debugger/framework-precise-ranges.js7
-rw-r--r--deps/v8/test/inspector/debugger/framework-stepping-expected.txt62
-rw-r--r--deps/v8/test/inspector/debugger/framework-stepping.js14
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js3
-rw-r--r--deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt34
-rw-r--r--deps/v8/test/inspector/debugger/other-pause-reasons.js79
-rw-r--r--deps/v8/test/inspector/debugger/pause-at-negative-offset.js2
-rw-r--r--deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js7
-rw-r--r--deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation-expected.txt26
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation.js87
-rw-r--r--deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt37
-rw-r--r--deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js95
-rw-r--r--deps/v8/test/inspector/debugger/step-into-break-on-async-call.js2
-rw-r--r--deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/step-into-external-async-task.js15
-rw-r--r--deps/v8/test/inspector/debugger/step-into-next-script-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/step-into-next-script.js14
-rw-r--r--deps/v8/test/inspector/debugger/step-out-async-await-expected.txt32
-rw-r--r--deps/v8/test/inspector/debugger/step-out-async-await.js94
-rw-r--r--deps/v8/test/inspector/debugger/wasm-externref-global.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js115
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation-expected.txt49
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation.js106
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint.js7
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap.js3
-rw-r--r--deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs-expected.txt7
-rw-r--r--deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs.js129
-rw-r--r--deps/v8/test/inspector/inspector-test.cc35
-rw-r--r--deps/v8/test/inspector/inspector.status9
-rw-r--r--deps/v8/test/inspector/isolate-data.cc19
-rw-r--r--deps/v8/test/inspector/protocol-test.js8
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1220203-expected.txt8
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1220203.js42
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1281031-expected.txt2
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1281031.js9
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1283049-expected.txt5
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1283049.js29
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1290861-expected.txt4
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1290861.js39
-rw-r--r--deps/v8/test/inspector/runtime/client-console-api-message-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/console-context-expected.txt38
-rw-r--r--deps/v8/test/inspector/runtime/console-context.js9
-rw-r--r--deps/v8/test/inspector/runtime/console-formatter-expected.txt700
-rw-r--r--deps/v8/test/inspector/runtime/console-formatter.js144
-rw-r--r--deps/v8/test/inspector/runtime/console-message-before-enable-expected.txt93
-rw-r--r--deps/v8/test/inspector/runtime/console-message-before-enable.js37
-rw-r--r--deps/v8/test/inspector/runtime/error-stack-expected.txt420
-rw-r--r--deps/v8/test/inspector/runtime/error-stack-trace-limit-expected.txt820
-rw-r--r--deps/v8/test/inspector/runtime/error-stack-trace-limit.js61
-rw-r--r--deps/v8/test/inspector/runtime/error-stack.js39
-rw-r--r--deps/v8/test/inspector/runtime/es6-module-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/get-exception-details-expected.txt112
-rw-r--r--deps/v8/test/inspector/runtime/get-exception-details.js49
-rw-r--r--deps/v8/test/inspector/runtime/set-max-call-stack-size-expected.txt146
-rw-r--r--deps/v8/test/inspector/runtime/set-max-call-stack-size.js106
-rw-r--r--deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt10
-rw-r--r--deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints.js14
-rw-r--r--deps/v8/test/intl/enumeration/calendar-sorted.js2
-rw-r--r--deps/v8/test/intl/enumeration/callendar-syntax-valid.js2
-rw-r--r--deps/v8/test/intl/enumeration/collation-sorted.js2
-rw-r--r--deps/v8/test/intl/enumeration/collation-syntax-valid.js2
-rw-r--r--deps/v8/test/intl/enumeration/currency-sorted.js2
-rw-r--r--deps/v8/test/intl/enumeration/currency-syntax-valid.js2
-rw-r--r--deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js2
-rw-r--r--deps/v8/test/intl/enumeration/numberingSystem-sorted.js2
-rw-r--r--deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js2
-rw-r--r--deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js2
-rw-r--r--deps/v8/test/intl/enumeration/supported-values-of-name.js2
-rw-r--r--deps/v8/test/intl/enumeration/supported-values-of-property.js2
-rw-r--r--deps/v8/test/intl/enumeration/supported-values-of-valid-key.js2
-rw-r--r--deps/v8/test/intl/enumeration/timeZone-sorted.js2
-rw-r--r--deps/v8/test/intl/enumeration/unit-sorted.js2
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/intl/locale/locale-calendars.js2
-rw-r--r--deps/v8/test/intl/locale/locale-collations.js2
-rw-r--r--deps/v8/test/intl/locale/locale-info-check-property.js2
-rw-r--r--deps/v8/test/intl/locale/locale-info-check-return-types.js2
-rw-r--r--deps/v8/test/intl/locale/locale-info-ext.js2
-rw-r--r--deps/v8/test/intl/locale/locale-info-no-undefined.js2
-rw-r--r--deps/v8/test/intl/locale/locale-info-timezones-sorted.js2
-rw-r--r--deps/v8/test/intl/locale/locale-numberingSystems.js2
-rw-r--r--deps/v8/test/intl/number-format/format-range-v3.js158
-rw-r--r--deps/v8/test/intl/number-format/rounding-increment-resolved-match-v3.js13
-rw-r--r--deps/v8/test/intl/number-format/rounding-increment-v3.js23
-rw-r--r--deps/v8/test/intl/number-format/rounding-increment-value-v3.js23
-rw-r--r--deps/v8/test/intl/number-format/rounding-mode-table-v3.js30
-rw-r--r--deps/v8/test/intl/number-format/rounding-mode-v3.js60
-rw-r--r--deps/v8/test/intl/number-format/sign-display-v3.js29
-rw-r--r--deps/v8/test/intl/number-format/trailing-zero-display-resolved-options-v3.js19
-rw-r--r--deps/v8/test/intl/number-format/trailing-zero-display-v3.js24
-rw-r--r--deps/v8/test/intl/number-format/use-grouping-v3.js114
-rw-r--r--deps/v8/test/intl/plural-rules/select-range.js7
-rw-r--r--deps/v8/test/intl/testcfg.py6
-rw-r--r--deps/v8/test/js-perf-test/Array/includes.js67
-rw-r--r--deps/v8/test/js-perf-test/Array/index-of.js67
-rw-r--r--deps/v8/test/js-perf-test/Array/run.js27
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/GetKeyedProperty.js (renamed from deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js)0
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/GetNamedProperty.js (renamed from deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js)0
-rw-r--r--deps/v8/test/js-perf-test/JSTests2.json21
-rw-r--r--deps/v8/test/js-perf-test/JSTests3.json12
-rw-r--r--deps/v8/test/message/README.md4
-rw-r--r--deps/v8/test/message/asm-assignment-undeclared.js2
-rw-r--r--deps/v8/test/message/asm-function-mismatch-def.js2
-rw-r--r--deps/v8/test/message/asm-function-mismatch-use.js2
-rw-r--r--deps/v8/test/message/asm-function-redefined.js2
-rw-r--r--deps/v8/test/message/asm-function-undefined.js2
-rw-r--r--deps/v8/test/message/asm-function-variable-collision.js2
-rw-r--r--deps/v8/test/message/asm-import-wrong-annotation.js2
-rw-r--r--deps/v8/test/message/asm-import-wrong-object.js2
-rw-r--r--deps/v8/test/message/asm-linking-bogus-heap.js2
-rw-r--r--deps/v8/test/message/asm-linking-bogus-stdlib.js2
-rw-r--r--deps/v8/test/message/asm-linking-missing-heap.js2
-rw-r--r--deps/v8/test/message/asm-missing-parameter-annotation.js2
-rw-r--r--deps/v8/test/message/asm-missing-return-annotation.js2
-rw-r--r--deps/v8/test/message/asm-table-mismatch-def.js2
-rw-r--r--deps/v8/test/message/asm-table-mismatch-use.js2
-rw-r--r--deps/v8/test/message/asm-table-redefined.js2
-rw-r--r--deps/v8/test/message/asm-table-undefined.js2
-rw-r--r--deps/v8/test/message/asm-table-variable-collision.js2
-rw-r--r--deps/v8/test/message/fail/console.js2
-rw-r--r--deps/v8/test/message/fail/console.out6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-1.js6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-1.out6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-2.js6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-2.out6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-3.js6
-rw-r--r--deps/v8/test/message/fail/data-view-invalid-length-3.out6
-rw-r--r--deps/v8/test/message/fail/settimeout.js2
-rw-r--r--deps/v8/test/message/fail/settimeout.out4
-rw-r--r--deps/v8/test/message/message.status7
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert_not_same.out2
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert_true.out2
-rw-r--r--deps/v8/test/message/mjsunit/success/assert-promise-result-rejects.js2
-rw-r--r--deps/v8/test/message/mjsunit/success/assert-promise-result-resolves-empty.js2
-rw-r--r--deps/v8/test/message/mjsunit/success/assert-promise-result-resolves.js2
-rw-r--r--deps/v8/test/message/testcfg.py3
-rw-r--r--deps/v8/test/message/wasm-finish-compilation.js2
-rw-r--r--deps/v8/test/message/wasm-function-name-async.js2
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.js2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.js2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.js2
-rw-r--r--deps/v8/test/message/wasm-module-name-async.js2
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.js2
-rw-r--r--deps/v8/test/message/wasm-no-name-async.js2
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.js2
-rw-r--r--deps/v8/test/message/wasm-trace-liftoff.js2
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.js2
-rw-r--r--deps/v8/test/message/wasm-trace-memory.js2
-rw-r--r--deps/v8/test/message/wasm-trace-turbofan.js2
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.js1
-rw-r--r--deps/v8/test/mjsunit/BUILD.gn7
-rw-r--r--deps/v8/test/mjsunit/asm/asm-validation.js28
-rw-r--r--deps/v8/test/mjsunit/baseline/batch-compilation.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/bound-functions-serialize.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js66
-rw-r--r--deps/v8/test/mjsunit/compiler/catch-block-load.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-add-static.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-bound-function.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-object.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-receiver.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-pretenure.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-calls-wasm.js141
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/js-create-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/js-create.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1225607.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1226988.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1227324.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9945-1.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9945-2.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-store-store-elim.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-accessors.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-apply.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/store-data-property-in-literal-private.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js46
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps3.js41
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js33
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js45
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js2
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking-2.js227
-rw-r--r--deps/v8/test/mjsunit/d8/d8-multiple-module-exec.js8
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker.js17
-rw-r--r--deps/v8/test/mjsunit/debugPrint.js27
-rw-r--r--deps/v8/test/mjsunit/es6/classes-super-in-heritage.js49
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-groupby.js183
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js172
-rw-r--r--deps/v8/test/mjsunit/harmony/optional-chaining-this-private.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/private-brand-nested-super.js131
-rw-r--r--deps/v8/test/mjsunit/harmony/private-name-surrogate-pair.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js64
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js2
-rw-r--r--deps/v8/test/mjsunit/interrupt-budget-override.js2
-rw-r--r--deps/v8/test/mjsunit/maglev/00.js18
-rw-r--r--deps/v8/test/mjsunit/maglev/01.js20
-rw-r--r--deps/v8/test/mjsunit/maglev/02.js20
-rw-r--r--deps/v8/test/mjsunit/maglev/03.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/04.js16
-rw-r--r--deps/v8/test/mjsunit/maglev/05.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/06.js25
-rw-r--r--deps/v8/test/mjsunit/maglev/07.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/08.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/09.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/10.js26
-rw-r--r--deps/v8/test/mjsunit/maglev/11.js39
-rw-r--r--deps/v8/test/mjsunit/maglev/12.js27
-rw-r--r--deps/v8/test/mjsunit/maglev/13.js17
-rw-r--r--deps/v8/test/mjsunit/maglev/14.js31
-rw-r--r--deps/v8/test/mjsunit/maglev/15.js17
-rw-r--r--deps/v8/test/mjsunit/maglev/16.js30
-rw-r--r--deps/v8/test/mjsunit/maglev/17.js27
-rw-r--r--deps/v8/test/mjsunit/maglev/18.js26
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js173
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status152
-rw-r--r--deps/v8/test/mjsunit/optimized-array-includes.js358
-rw-r--r--deps/v8/test/mjsunit/optimized-array-indexof.js360
-rw-r--r--deps/v8/test/mjsunit/promise-hooks.js137
-rw-r--r--deps/v8/test/mjsunit/regress-1146106.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1000635.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1003730.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1076569.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1079446.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1083272.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1083763.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1084953.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1137979.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1138075.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1138611.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1154961.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1163715.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1168435.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1172797.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1201114.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1208805.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1223733.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1225561.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-12495.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-12580.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-12657.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-385565.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3969.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4578.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-752764.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-794822.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-936077.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-binary-bitwise-bigint-smi-mix-opt-depot.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1017159.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1031479.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1206289.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1236962.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1262750.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1276923.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1277863.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1278086.js79
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1290587.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-977089.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-990582.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12122.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12472.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12595.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12671.js74
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12688.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1185464.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1200231.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-12624.js51
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1271244.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1271538.js44
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1272204.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1279151.js15
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1282224.js31
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1283042.js29
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1283395.js51
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1284980.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1286253.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1289678.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1290079.js47
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1294384.js91
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1296876.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7785.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808848.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808980.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-964607.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-inlining-throw.js71
-rw-r--r--deps/v8/test/mjsunit/shared-memory/client-gc.js7
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string-in-code-object.js18
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string-in-weak-map.js23
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string.js37
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-atomics-workers.js41
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js35
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js54
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js39
-rw-r--r--deps/v8/test/mjsunit/statistics-extension.js12
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-constructor.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js127
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-date-until.js4
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-fields.js53
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-from.js2
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-merge-fields.js15
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js153
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-month.js3
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js64
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-abs.js6
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-add.js54
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-constructor.js84
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-from.js66
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-negated.js12
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-valueOf.js3
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-with.js90
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-add.js2
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-time-from.js2
-rw-r--r--deps/v8/test/mjsunit/temporal/temporal-helpers.js24
-rw-r--r--deps/v8/test/mjsunit/temporal/time-zone-constructor.js12
-rw-r--r--deps/v8/test/mjsunit/testcfg.py12
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie.js2
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie_archs.js4
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie_webassembly.js4
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test-large.log54
-rw-r--r--deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js1345
-rw-r--r--deps/v8/test/mjsunit/typedarray-helpers.js72
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js683
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js2650
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/call-ref.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/element-segments-with-reftypes.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-api.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-externref.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/extended-constants.js74
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-liftoff.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/externref.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-nominal.js157
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-optimizations.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-shared-memory.js90
-rw-r--r--deps/v8/test/mjsunit/wasm/imported-function-types.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/inlining.js84
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/load-immutable.js137
-rw-r--r--deps/v8/test/mjsunit/wasm/memory64.js109
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-table-element-section.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/print-code.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-tables.js73
-rw-r--r--deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js61
-rw-r--r--deps/v8/test/mjsunit/wasm/speculative-inlining.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/stack-switching.js198
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access-liftoff.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy-externref.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/test-partial-serialization.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js63
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js205
-rw-r--r--deps/v8/test/mjsunit/web-snapshot-helpers.js0
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-1.js246
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-2.js (renamed from deps/v8/test/mjsunit/web-snapshot/web-snapshot.js)306
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-3.js104
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js83
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js24
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc20
-rw-r--r--deps/v8/test/mozilla/mozilla.status10
-rw-r--r--deps/v8/test/test262/BUILD.gn2
-rw-r--r--deps/v8/test/test262/test262.status2690
-rw-r--r--deps/v8/test/test262/testcfg.py18
-rw-r--r--deps/v8/test/torque/test-torque.tq4
-rw-r--r--deps/v8/test/unittests/BUILD.gn29
-rw-r--r--deps/v8/test/unittests/api/interceptor-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc30
-rw-r--r--deps/v8/test/unittests/base/virtual-address-space-unittest.cc266
-rw-r--r--deps/v8/test/unittests/codegen/register-configuration-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc212
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc71
-rw-r--r--deps/v8/test/unittests/compiler/state-values-utils-unittest.cc12
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc24
-rw-r--r--deps/v8/test/unittests/heap/base/active-system-pages-unittest.cc81
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc89
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc6
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc348
-rw-r--r--deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc17
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/testing-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc31
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc183
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.cc3
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/marking-worklist-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc13
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc31
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc29
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc32
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc31
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc6
-rw-r--r--deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc2
-rw-r--r--deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc6
-rw-r--r--deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc40
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc1444
-rw-r--r--deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc2
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc4
-rw-r--r--deps/v8/test/unittests/sandbox/sandbox-unittest.cc155
-rw-r--r--deps/v8/test/unittests/security/virtual-memory-cage-unittest.cc152
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc10
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc13
-rw-r--r--deps/v8/test/unittests/unittests.status4
-rw-r--r--deps/v8/test/unittests/utils/allocation-unittest.cc15
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc423
-rw-r--r--deps/v8/test/unittests/wasm/memory-protection-unittest.cc9
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc552
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc160
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc12
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc6
-rw-r--r--deps/v8/test/wasm-api-tests/hostref.cc10
-rw-r--r--deps/v8/test/wasm-api-tests/reflect.cc4
-rw-r--r--deps/v8/test/wasm-api-tests/serialize.cc18
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-test.h2
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status12
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py2
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/third_party/google_benchmark/BUILD.gn6
-rw-r--r--deps/v8/third_party/google_benchmark/precompiled_headers/benchmark/export.h31
-rw-r--r--deps/v8/third_party/googletest/BUILD.gn2
-rw-r--r--deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h4
-rw-r--r--deps/v8/third_party/inspector_protocol/BUILD.gn2
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py7
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/convert_protocol_to_json.py10
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/cbor.cc10
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/protocol_core_test.cc23
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/serializer_traits.h158
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/serializer_traits_test.cc226
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template3
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template13
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template202
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template77
-rw-r--r--deps/v8/third_party/test262-harness/LICENSE28
-rw-r--r--deps/v8/third_party/test262-harness/OWNERS2
-rw-r--r--deps/v8/third_party/test262-harness/README.md19
-rw-r--r--deps/v8/third_party/test262-harness/README.v815
-rw-r--r--deps/v8/third_party/test262-harness/__init__.py1
-rw-r--r--deps/v8/third_party/test262-harness/excludelist.xml3
-rw-r--r--deps/v8/third_party/test262-harness/src/__init__.py0
-rw-r--r--deps/v8/third_party/test262-harness/src/_common.py18
-rw-r--r--deps/v8/third_party/test262-harness/src/_monkeyYaml.py147
-rw-r--r--deps/v8/third_party/test262-harness/src/_packager.py335
-rw-r--r--deps/v8/third_party/test262-harness/src/_packagerConfig.py117
-rw-r--r--deps/v8/third_party/test262-harness/src/parseTestRecord.py113
-rw-r--r--deps/v8/third_party/test262-harness/src/templates/runner.bestPractice.html170
-rw-r--r--deps/v8/third_party/test262-harness/src/templates/runner.intl402.html192
-rw-r--r--deps/v8/third_party/test262-harness/src/templates/runner.test262.html203
-rwxr-xr-xdeps/v8/third_party/test262-harness/src/test262.py664
-rw-r--r--deps/v8/third_party/test262-harness/test/README.md11
-rw-r--r--deps/v8/third_party/test262-harness/test/fixtures/negative.js11
-rw-r--r--deps/v8/third_party/test262-harness/test/fixtures/test262-old-headers.js19
-rw-r--r--deps/v8/third_party/test262-harness/test/fixtures/test262-yaml-headers.js18
-rw-r--r--deps/v8/third_party/test262-harness/test/test_common.py64
-rw-r--r--deps/v8/third_party/test262-harness/test/test_monkeyYaml.py210
-rw-r--r--deps/v8/third_party/test262-harness/test/test_parseTestRecord.py183
-rw-r--r--deps/v8/third_party/test262-harness/test/test_test262.py274
-rw-r--r--deps/v8/third_party/zlib/google/BUILD.gn2
-rw-r--r--deps/v8/third_party/zlib/google/compression_utils_unittest.cc16
-rw-r--r--deps/v8/third_party/zlib/google/redact.h31
-rw-r--r--deps/v8/third_party/zlib/google/zip.cc112
-rw-r--r--deps/v8/third_party/zlib/google/zip.h58
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.cc2
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.h2
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc560
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.h310
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader_unittest.cc699
-rw-r--r--deps/v8/third_party/zlib/google/zip_unittest.cc337
-rw-r--r--deps/v8/third_party/zlib/google/zip_writer.cc15
-rw-r--r--deps/v8/third_party/zlib/google/zip_writer.h2
-rw-r--r--deps/v8/tools/BUILD.gn2
-rw-r--r--deps/v8/tools/PRESUBMIT.py5
-rwxr-xr-xdeps/v8/tools/callstats-from-telemetry.sh15
-rw-r--r--deps/v8/tools/callstats.html24
-rwxr-xr-xdeps/v8/tools/callstats.py17
-rw-r--r--deps/v8/tools/clusterfuzz/PRESUBMIT.py8
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/BUILD.gn (renamed from deps/v8/tools/clusterfuzz/BUILD.gn)2
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py28
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/d8.py (renamed from deps/v8/tools/clusterfuzz/testdata/baseline/d8.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/v8_build_config.json (renamed from deps/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build1/d8.py (renamed from deps/v8/tools/clusterfuzz/testdata/build1/d8.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build1/v8_build_config.json (renamed from deps/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build2/d8.py (renamed from deps/v8/tools/clusterfuzz/testdata/build2/d8.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build2/v8_build_config.json (renamed from deps/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build3/d8.py (renamed from deps/v8/tools/clusterfuzz/testdata/build3/d8.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/build3/v8_build_config.json (renamed from deps/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt (renamed from deps/v8/tools/clusterfuzz/testdata/failure_output.txt)4
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt (renamed from deps/v8/tools/clusterfuzz/testdata/failure_output_arch.txt)4
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt (renamed from deps/v8/tools/clusterfuzz/testdata/failure_output_second.txt)4
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/fuzz-123.js (renamed from deps/v8/tools/clusterfuzz/testdata/fuzz-123.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt (renamed from deps/v8/tools/clusterfuzz/testdata/smoke_test_output.txt)4
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/toolchain/BUILD.gn (renamed from deps/v8/tools/clusterfuzz/toolchain/BUILD.gn)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_commands.py (renamed from deps/v8/tools/clusterfuzz/v8_commands.py)20
-rwxr-xr-xdeps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py (renamed from deps/v8/tools/clusterfuzz/v8_foozzie.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_harness_adjust.js (renamed from deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js)0
-rwxr-xr-xdeps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py (renamed from deps/v8/tools/clusterfuzz/v8_foozzie_test.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_config.py (renamed from deps/v8/tools/clusterfuzz/v8_fuzz_config.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json (renamed from deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json (renamed from deps/v8/tools/clusterfuzz/v8_fuzz_flags.json)5
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_mock.js (renamed from deps/v8/tools/clusterfuzz/v8_mock.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_mock_archs.js (renamed from deps/v8/tools/clusterfuzz/v8_mock_archs.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_mock_webassembly.js (renamed from deps/v8/tools/clusterfuzz/v8_mock_webassembly.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_smoke_tests.js (renamed from deps/v8/tools/clusterfuzz/v8_smoke_tests.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.js (renamed from deps/v8/tools/clusterfuzz/v8_suppressions.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.py (renamed from deps/v8/tools/clusterfuzz/v8_suppressions.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js4
-rw-r--r--deps/v8/tools/clusterfuzz/trials/BUILD.gn8
-rw-r--r--deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py57
-rw-r--r--deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json38
-rw-r--r--deps/v8/tools/codemap.mjs51
-rw-r--r--deps/v8/tools/compare_torque_output.py2
-rw-r--r--deps/v8/tools/csvparser.mjs7
-rw-r--r--deps/v8/tools/debug_helper/debug-macro-shims.h25
-rw-r--r--deps/v8/tools/debug_helper/gen-heap-constants.py6
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc4
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.cc10
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.h1
-rwxr-xr-xdeps/v8/tools/dev/gm.py14
-rw-r--r--deps/v8/tools/dumpcpp.mjs1
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn10
-rw-r--r--deps/v8/tools/gcmole/GCMOLE.gn6
-rw-r--r--deps/v8/tools/gcmole/Makefile11
-rw-r--r--deps/v8/tools/gcmole/README2
-rwxr-xr-xdeps/v8/tools/gcmole/bootstrap.sh36
-rw-r--r--deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha12
-rw-r--r--deps/v8/tools/gcmole/gcmole.cc303
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/gcmole/gcmole.py703
-rwxr-xr-xdeps/v8/tools/gcmole/package.sh6
-rw-r--r--deps/v8/tools/gdbinit15
-rwxr-xr-xdeps/v8/tools/gen-keywords-gen-h.py3
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py14
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py17
-rwxr-xr-xdeps/v8/tools/grokdump.py297
-rw-r--r--deps/v8/tools/heap-layout/heap-layout-viewer-template.html14
-rw-r--r--deps/v8/tools/heap-layout/heap-layout-viewer.mjs225
-rw-r--r--deps/v8/tools/heap-layout/heap-size-trend-viewer-template.html14
-rw-r--r--deps/v8/tools/heap-layout/heap-size-trend-viewer.mjs266
-rw-r--r--deps/v8/tools/heap-layout/index.css24
-rw-r--r--deps/v8/tools/heap-layout/index.html72
-rw-r--r--deps/v8/tools/heap-layout/space-categories.mjs32
-rw-r--r--deps/v8/tools/heap-layout/trace-file-reader.mjs110
-rw-r--r--deps/v8/tools/heap-stats/categories.js2
-rw-r--r--deps/v8/tools/heap-stats/index.html31
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js21
-rw-r--r--deps/v8/tools/index.html8
-rw-r--r--deps/v8/tools/js/helper.mjs53
-rw-r--r--deps/v8/tools/js/log-file-reader-template.html (renamed from deps/v8/tools/system-analyzer/view/log-file-reader-template.html)14
-rw-r--r--deps/v8/tools/js/web-api-helper.mjs261
-rw-r--r--deps/v8/tools/logreader.mjs20
-rw-r--r--deps/v8/tools/mb/PRESUBMIT.py16
-rwxr-xr-xdeps/v8/tools/mb/mb.py2
-rw-r--r--deps/v8/tools/predictable_wrapper.py10
-rwxr-xr-xdeps/v8/tools/process-wasm-compilation-times.py11
-rw-r--r--deps/v8/tools/profile.mjs81
-rw-r--r--deps/v8/tools/release/PRESUBMIT.py8
-rwxr-xr-xdeps/v8/tools/release/auto_push.py5
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py5
-rwxr-xr-xdeps/v8/tools/release/auto_tag.py204
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py231
-rw-r--r--deps/v8/tools/release/common_includes.py30
-rwxr-xr-xdeps/v8/tools/release/create_release.py11
-rw-r--r--deps/v8/tools/release/git_recipes.py2
-rwxr-xr-xdeps/v8/tools/release/list_deprecated.py35
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py5
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py5
-rwxr-xr-xdeps/v8/tools/release/script_test.py5
-rwxr-xr-xdeps/v8/tools/release/search_related_commits.py221
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py11
-rwxr-xr-xdeps/v8/tools/release/test_search_related_commits.py274
-rw-r--r--deps/v8/tools/run_perf.py25
-rw-r--r--deps/v8/tools/sanitizers/tsan_suppressions.txt4
-rw-r--r--deps/v8/tools/splaytree.mjs34
-rw-r--r--deps/v8/tools/system-analyzer/helper.mjs53
-rw-r--r--deps/v8/tools/system-analyzer/index.css1
-rw-r--r--deps/v8/tools/system-analyzer/index.html2
-rw-r--r--deps/v8/tools/system-analyzer/index.mjs29
-rw-r--r--deps/v8/tools/system-analyzer/log/code.mjs4
-rw-r--r--deps/v8/tools/system-analyzer/log/tick.mjs24
-rw-r--r--deps/v8/tools/system-analyzer/processor.mjs11
-rw-r--r--deps/v8/tools/system-analyzer/view/code-panel-template.html11
-rw-r--r--deps/v8/tools/system-analyzer/view/code-panel.mjs176
-rw-r--r--deps/v8/tools/system-analyzer/view/helper.mjs156
-rw-r--r--deps/v8/tools/system-analyzer/view/log-file-reader.mjs116
-rw-r--r--deps/v8/tools/system-analyzer/view/property-link-table.mjs249
-rw-r--r--deps/v8/tools/system-analyzer/view/script-panel.mjs95
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs37
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs15
-rw-r--r--deps/v8/tools/testrunner/PRESUBMIT.py5
-rw-r--r--deps/v8/tools/testrunner/base_runner.py24
-rw-r--r--deps/v8/tools/testrunner/local/command.py11
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py76
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py10
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py15
-rw-r--r--deps/v8/tools/testrunner/outproc/message.py8
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py80
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/tick-processor.html157
-rw-r--r--deps/v8/tools/tickprocessor.mjs19
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py14
-rw-r--r--deps/v8/tools/turbolizer/OWNERS1
-rw-r--r--deps/v8/tools/unittests/compare_torque_output_test.py15
-rwxr-xr-xdeps/v8/tools/unittests/predictable_wrapper_test.py2
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py10
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py13
-rw-r--r--deps/v8/tools/unittests/testdata/predictable_mocked.py2
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json3
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json3
-rw-r--r--deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json3
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py43
-rw-r--r--deps/v8/tools/v8heapconst.py959
-rw-r--r--deps/v8/tools/v8windbg/BUILD.gn4
-rwxr-xr-xdeps/v8/tools/wasm/code-size-factors.py79
2040 files changed, 94694 insertions, 43284 deletions
diff --git a/deps/v8/.bazelrc b/deps/v8/.bazelrc
index e0127628ca..95bfad4a35 100644
--- a/deps/v8/.bazelrc
+++ b/deps/v8/.bazelrc
@@ -2,17 +2,22 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# V8 bazel port only supports clang
-build --action_env=BAZEL_COMPILER=clang
-build --action_env=CC=clang
-build --action_env=CXX=clang++
+# Pass CC, CXX and PATH from the environment
+build --action_env=CC
+build --action_env=CXX
build --action_env=PATH
+# Use Clang compiler
+build:clang --action_env=BAZEL_COMPILER=clang
+build:clang --action_env=CC=clang
+build:clang --action_env=CXX=clang++
+
# V8 debug config
build:debug --compilation_mode=dbg
build:debug --config=v8_enable_debugging_features
build:debug --//:v8_enable_fast_mksnapshot
build:debug --//:v8_enable_backtrace
+build:debug --//:v8_enable_handle_zapping
# v8_enable_debugging_features flags
build:v8_enable_debugging_features --//:v8_enable_verify_heap
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 66116d82ca..b8d1d934bc 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -54,7 +54,6 @@
!/test/mjsunit/tools/*.log
/test/mozilla/data
/test/test262/data
-/test/test262/harness
/test/wasm-js/data
/test/wasm-js/tests
/test/wasm-js/tests.tar.gz
@@ -76,6 +75,7 @@
!/third_party/googletest/src/googletest/include/gtest
/third_party/googletest/src/googletest/include/gtest/*
!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+!/third_party/test262-harness
!/third_party/v8
!/third_party/wasm-api
/tools/clang
diff --git a/deps/v8/.style.yapf b/deps/v8/.style.yapf
new file mode 100644
index 0000000000..de0c6a70f3
--- /dev/null
+++ b/deps/v8/.style.yapf
@@ -0,0 +1,2 @@
+[style]
+based_on_style = chromium
diff --git a/deps/v8/.vpython3 b/deps/v8/.vpython3
index 95e52ee59e..d1842bb8dd 100644
--- a/deps/v8/.vpython3
+++ b/deps/v8/.vpython3
@@ -44,3 +44,28 @@ wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.15.0"
>
+
+wheel: <
+ name: "infra/python/wheels/coverage/${vpython_platform}"
+ version: "version:5.5.chromium.2"
+>
+
+wheel: <
+ name: "infra/python/wheels/pbr-py2_py3"
+ version: "version:3.0.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/funcsigs-py2_py3"
+ version: "version:1.0.2"
+>
+
+wheel: <
+ name: "infra/python/wheels/mock-py2_py3"
+ version: "version:2.0.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/numpy/${vpython_platform}"
+ version: "version:1.2x.supported.1"
+>
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index b89eacba9f..f05ba729c5 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -61,8 +61,11 @@ Andrei Kashcha <anvaka@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Anna Henningsen <anna@addaleax.net>
Antoine du Hamel <duhamelantoine1995@gmail.com>
+Anton Bershanskiy <8knots@protonmail.com>
Anton Bikineev <ant.bikineev@gmail.com>
+Ao Wang <wangao.james@bytedance.com>
Archil Sharashenidze <achosharashenidze@gmail.com>
+Bala Avulapati <bavulapati@gmail.com>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Coe <bencoe@gmail.com>
Ben Newman <ben@meteor.com>
@@ -104,6 +107,7 @@ Fedor Indutny <fedor@indutny.com>
Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
+Gao Sheng <gaosheng08@meituan.com>
Geoffrey Garside <ggarside@gmail.com>
Gergely Nagy <ngg@ngg.hu>
Gilang Mentari Hamidy <gilang@hamidy.net>
@@ -111,6 +115,7 @@ Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
Haichuan Wang <hc.opensource@gmail.com>
Hannu Trey <hannu.trey@gmail.com>
+Harshil Jain <twitharshil@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
@@ -142,6 +147,7 @@ Junha Park <jpark3@scu.edu>
Junming Huang <kiminghjm@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
+Keith Smiley <keithbsmiley@gmail.com>
Kevin Gibbons <bakkot@gmail.com>
Kris Selden <kris.selden@gmail.com>
Kyounga Ra <kyounga@alticast.com>
@@ -256,9 +262,11 @@ Yu Yin <xwafish@gmail.com>
Yujie Wang <hex6770@gmail.com>
Yuri Iozzelli <yuri@leaningtech.com>
Yusif Khudhur <yusif.khudhur@gmail.com>
+Yuxiang Cao <caoyxsh@outlook.com>
Zac Hansen <xaxxon@gmail.com>
Zeynep Cankara <zeynepcankara402@gmail.com>
Zhao Jiazhong <kyslie3100@gmail.com>
Zheng Liu <i6122f@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
+Yang Xiang <xiangyangemail@gmail.com>
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index 1bad423e03..a632cc4fe8 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -3,6 +3,8 @@
# found in the LICENSE file.
load("@bazel_skylib//lib:selects.bzl", "selects")
+load("@rules_python//python:defs.bzl", "py_binary")
+load("@v8_python_deps//:requirements.bzl", "requirement")
load(
"@v8//:bazel/defs.bzl",
"v8_binary",
@@ -18,13 +20,6 @@ load(
)
load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
-config_setting(
- name = "is_debug",
- values = {
- "compilation_mode": "dbg",
- },
-)
-
# =================================================
# Flags
# =================================================
@@ -58,7 +53,6 @@ config_setting(
# v8_can_use_fpu_instructions
# v8_use_mips_abi_hardfloat
# v8_enable_gdbjit
-# v8_enable_minor_mc
# v8_check_header_includes
# v8_enable_shared_ro_heap
# v8_enable_lazy_source_positions
@@ -71,28 +65,22 @@ config_setting(
# v8_verify_torque_generation_invariance
# v8_enable_snapshot_compression
# v8_control_flow_integrity
-# v8_enable_virtual_memory_cage
+# v8_enable_sandbox
# cppgc_enable_caged_heap
# cppgc_enable_check_assignments_in_prefinalizers
# cppgc_enable_object_names
# cppgc_enable_verify_heap
# cppgc_enable_young_generation
# v8_enable_zone_compression
-# v8_enable_heap_sandbox
# v8_enable_precise_zone_stats
# v8_enable_swiss_name_dictionary
# v8_generate_external_defines_header
# v8_dict_property_const_tracking
# v8_enable_map_packing
-# v8_allow_javascript_in_promise_hooks
+# v8_enable_javascript_promise_hooks
# v8_enable_allocation_folding
# v8_allocation_site_tracking
-v8_string(
- name = "v8_root",
- default = "third_party/v8/HEAD",
-)
-
v8_flag(name = "v8_android_log_stdout")
v8_flag(name = "v8_annotate_torque_ir")
@@ -115,10 +103,9 @@ v8_flag(name = "v8_enable_debug_code")
v8_flag(name = "v8_enable_disassembler")
-v8_flag(
- name = "v8_enable_handle_zapping",
- default = True,
-)
+v8_flag(name = "v8_enable_handle_zapping")
+
+v8_flag(name = "v8_enable_runtime_call_stats")
v8_flag(name = "v8_enable_hugepage")
@@ -136,11 +123,6 @@ v8_flag(
default = True,
)
-v8_flag(
- name = "v8_enable_minor_mc",
- default = True,
-)
-
v8_flag(name = "v8_enable_object_print")
v8_flag(name = "v8_enable_slow_dchecks")
@@ -199,7 +181,7 @@ selects.config_setting_group(
name = "v8_target_x64_default_pointer_compression",
match_all = [
":v8_enable_pointer_compression_is_none",
- "@config//:v8_target_x64",
+ "@v8//bazel/config:v8_target_x64",
],
)
@@ -208,7 +190,7 @@ selects.config_setting_group(
name = "v8_target_arm64_default_pointer_compression",
match_all = [
":v8_enable_pointer_compression_is_none",
- "@config//:v8_target_arm64",
+ "@v8//bazel/config:v8_target_arm64",
],
)
@@ -253,7 +235,7 @@ selects.config_setting_group(
selects.config_setting_group(
name = "should_add_rdynamic",
match_all = [
- "@config//:is_linux",
+ "@v8//bazel/config:is_linux",
":is_v8_enable_backtrace",
],
)
@@ -273,9 +255,9 @@ v8_config(
"v8_enable_hugepage": "ENABLE_HUGEPAGE",
"v8_enable_future": "V8_ENABLE_FUTURE",
"v8_enable_lazy_source_positions": "V8_ENABLE_LAZY_SOURCE_POSITIONS",
- "v8_enable_minor_mc": "ENABLE_MINOR_MC",
"v8_enable_object_print": "OBJECT_PRINT",
"v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS",
+ "v8_enable_runtime_call_stats": "V8_RUNTIME_CALL_STATS",
"v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
"v8_enable_trace_maps": "V8_TRACE_MAPS",
"v8_enable_v8_checks": "V8_ENABLE_CHECKS",
@@ -291,37 +273,51 @@ v8_config(
"V8_ADVANCED_BIGINT_ALGORITHMS",
"V8_CONCURRENT_MARKING",
] + select({
- ":is_debug": [
+ "@v8//bazel/config:is_debug": [
"DEBUG",
"V8_ENABLE_CHECKS",
],
"//conditions:default": [],
}) + select(
{
- "@config//:v8_target_ia32": ["V8_TARGET_ARCH_IA32"],
- "@config//:v8_target_x64": ["V8_TARGET_ARCH_X64"],
- "@config//:v8_target_arm": [
+ "@v8//bazel/config:v8_target_ia32": ["V8_TARGET_ARCH_IA32"],
+ "@v8//bazel/config:v8_target_x64": ["V8_TARGET_ARCH_X64"],
+ "@v8//bazel/config:v8_target_arm": [
"V8_TARGET_ARCH_ARM",
"CAN_USE_ARMV7_INSTRUCTIONS",
"CAN_USE_VFP3_INSTRUCTIONS",
],
- "@config//:v8_target_arm64": ["V8_TARGET_ARCH_ARM64"],
+ "@v8//bazel/config:v8_target_arm64": ["V8_TARGET_ARCH_ARM64"],
+ "@v8//bazel/config:v8_target_s390x": [
+ "V8_TARGET_ARCH_S390",
+ "V8_TARGET_ARCH_S390X",
+ ],
+ "@v8//bazel/config:v8_target_riscv64": [
+ # NOTE: Bazel rules for riscv64 weren't tested on a real system.
+ "V8_TARGET_ARCH_RISCV64",
+ "CAN_USE_FPU_INSTRUCTIONS",
+ ],
+ "@v8//bazel/config:v8_target_ppc64le": [
+ # NOTE: Bazel rules for ppc64le weren't tested on a real system.
+ "V8_TARGET_ARCH_PPC64",
+ "V8_TARGET_ARCH_PPC_LE",
+ ],
},
no_match_error = "Please specify a target cpu supported by v8",
) + select({
- "@config//:is_android": [
+ "@v8//bazel/config:is_android": [
"V8_HAVE_TARGET_OS",
"V8_TARGET_OS_ANDROID",
],
- "@config//:is_linux": [
+ "@v8//bazel/config:is_linux": [
"V8_HAVE_TARGET_OS",
"V8_TARGET_OS_LINUX",
],
- "@config//:is_macos": [
+ "@v8//bazel/config:is_macos": [
"V8_HAVE_TARGET_OS",
- "V8_TARGET_OS_MACOSX",
+ "V8_TARGET_OS_MACOS",
],
- "@config//:is_windows": [
+ "@v8//bazel/config:is_windows": [
"V8_HAVE_TARGET_OS",
"V8_TARGET_OS_WIN",
"UNICODE",
@@ -413,7 +409,6 @@ filegroup(
"include/cppgc/internal/name-trait.h",
"include/cppgc/internal/persistent-node.h",
"include/cppgc/internal/pointer-policies.h",
- "include/cppgc/internal/prefinalizer-handler.h",
"include/cppgc/internal/write-barrier.h",
"include/cppgc/liveness-broker.h",
"include/cppgc/macros.h",
@@ -595,6 +590,7 @@ filegroup(
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
+ "src/base/pointer-with-payload.h",
"src/base/platform/wrappers.h",
"src/base/region-allocator.cc",
"src/base/region-allocator.h",
@@ -623,7 +619,7 @@ filegroup(
"src/base/vlq-base64.h",
"src/base/platform/yield-processor.h",
] + select({
- "@config//:is_posix": [
+ "@v8//bazel/config:is_posix": [
"src/base/platform/platform-posix.cc",
"src/base/platform/platform-posix.h",
"src/base/platform/platform-posix-time.cc",
@@ -631,19 +627,20 @@ filegroup(
],
"//conditions:default": [],
}) + select({
- "@config//:is_linux": [
+ "@v8//bazel/config:is_linux": [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-linux.cc",
],
- "@config//:is_android": [
+ "@v8//bazel/config:is_android": [
"src/base/debug/stack_trace_android.cc",
"src/base/platform/platform-linux.cc",
],
- "@config//:is_macos": [
+ "@v8//bazel/config:is_macos": [
"src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-darwin.cc",
"src/base/platform/platform-macos.cc",
],
- "@config//:is_windows": [
+ "@v8//bazel/config:is_windows": [
"src/base/win32-headers.h",
"src/base/debug/stack_trace_win.cc",
"src/base/platform/platform-win32.cc",
@@ -655,7 +652,6 @@ filegroup(
filegroup(
name = "v8_libplatform_files",
srcs = [
- "base/trace_event/common/trace_event_common.h",
"include/libplatform/libplatform.h",
"include/libplatform/libplatform-export.h",
"include/libplatform/v8-tracing.h",
@@ -832,6 +828,7 @@ filegroup(
"src/objects/api-callbacks.tq",
"src/objects/arguments.tq",
"src/objects/bigint.tq",
+ "src/objects/call-site-info.tq",
"src/objects/cell.tq",
"src/objects/code.tq",
"src/objects/contexts.tq",
@@ -857,6 +854,8 @@ filegroup(
"src/objects/js-proxy.tq",
"src/objects/js-regexp-string-iterator.tq",
"src/objects/js-regexp.tq",
+ "src/objects/js-shadow-realms.tq",
+ "src/objects/js-struct.tq",
"src/objects/js-temporal-objects.tq",
"src/objects/js-weak-refs.tq",
"src/objects/literal-objects.tq",
@@ -878,7 +877,6 @@ filegroup(
"src/objects/script.tq",
"src/objects/shared-function-info.tq",
"src/objects/source-text-module.tq",
- "src/objects/stack-frame-info.tq",
"src/objects/string.tq",
"src/objects/struct.tq",
"src/objects/swiss-hash-table-helpers.tq",
@@ -923,6 +921,8 @@ filegroup(
filegroup(
name = "torque_base_files",
srcs = [
+ "src/numbers/integer-literal-inl.h",
+ "src/numbers/integer-literal.h",
"src/torque/ast.h",
"src/torque/cc-generator.cc",
"src/torque/cc-generator.h",
@@ -980,10 +980,9 @@ filegroup(
name = "v8_base_without_compiler_files",
srcs = [
":cppgc_base_files",
- ":v8_cppgc_shared_files",
+ ":v8_heap_base_files",
":v8_bigint",
":generated_bytecode_builtins_list",
- "base/trace_event/common/trace_event_common.h",
"include/cppgc/common.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
@@ -1053,8 +1052,10 @@ filegroup(
"src/builtins/builtins-promise.h",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
+ "src/builtins/builtins-shadow-realms.cc",
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
+ "src/builtins/builtins-struct.cc",
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-temporal.cc",
"src/builtins/builtins-trace.cc",
@@ -1116,9 +1117,11 @@ filegroup(
"src/codegen/pending-optimization-table.cc",
"src/codegen/pending-optimization-table.h",
"src/codegen/register-arch.h",
+ "src/codegen/register-base.h",
"src/codegen/register-configuration.cc",
"src/codegen/register-configuration.h",
"src/codegen/register.h",
+ "src/codegen/reglist-base.h",
"src/codegen/reglist.h",
"src/codegen/reloc-info.cc",
"src/codegen/reloc-info.h",
@@ -1142,9 +1145,11 @@ filegroup(
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.cc",
"src/common/assert-scope.h",
+ "src/common/allow-deprecated.h",
"src/common/checks.h",
"src/common/high-allocation-throughput-scope.h",
"src/common/message-template.h",
+ "src/common/operation.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
"src/compiler-dispatcher/lazy-compile-dispatcher.cc",
@@ -1214,6 +1219,8 @@ filegroup(
"src/execution/arguments-inl.h",
"src/execution/arguments.cc",
"src/execution/arguments.h",
+ "src/execution/clobber-registers.cc",
+ "src/execution/clobber-registers.h",
"src/execution/encoded-c-signature.cc",
"src/execution/encoded-c-signature.h",
"src/execution/embedder-state.h",
@@ -1247,8 +1254,6 @@ filegroup(
"src/execution/protectors-inl.h",
"src/execution/protectors.cc",
"src/execution/protectors.h",
- "src/execution/runtime-profiler.cc",
- "src/execution/runtime-profiler.h",
"src/execution/shared-mutex-guard-if-off-thread.h",
"src/execution/simulator-base.cc",
"src/execution/simulator-base.h",
@@ -1259,6 +1264,8 @@ filegroup(
"src/execution/thread-id.h",
"src/execution/thread-local-top.cc",
"src/execution/thread-local-top.h",
+ "src/execution/tiering-manager.cc",
+ "src/execution/tiering-manager.h",
"src/execution/v8threads.cc",
"src/execution/v8threads.h",
"src/execution/vm-state-inl.h",
@@ -1291,8 +1298,11 @@ filegroup(
"src/handles/maybe-handles.h",
"src/handles/persistent-handles.cc",
"src/handles/persistent-handles.h",
+ "src/heap/base/active-system-pages.cc",
+ "src/heap/base/active-system-pages.h",
"src/heap/allocation-observer.cc",
"src/heap/allocation-observer.h",
+ "src/heap/allocation-result.h",
"src/heap/allocation-stats.h",
"src/heap/array-buffer-sweeper.cc",
"src/heap/array-buffer-sweeper.h",
@@ -1318,6 +1328,8 @@ filegroup(
"src/heap/concurrent-marking.h",
"src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-heap.h",
+ "src/heap/cppgc-js/cpp-marking-state.h",
+ "src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
@@ -1327,6 +1339,7 @@ filegroup(
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
+ "src/heap/embedder-tracing-inl.h",
"src/heap/factory-base.cc",
"src/heap/factory-base.h",
"src/heap/factory-base-inl.h",
@@ -1342,6 +1355,9 @@ filegroup(
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h",
+ "src/heap/heap-allocator-inl.h",
+ "src/heap/heap-allocator.cc",
+ "src/heap/heap-allocator.h",
"src/heap/heap-controller.cc",
"src/heap/heap-controller.h",
"src/heap/heap-inl.h",
@@ -1366,8 +1382,8 @@ filegroup(
"src/heap/large-spaces.h",
"src/heap/linear-allocation-area.h",
"src/heap/list.h",
- "src/heap/local-allocator-inl.h",
- "src/heap/local-allocator.h",
+ "src/heap/evacuation-allocator-inl.h",
+ "src/heap/evacuation-allocator.h",
"src/heap/local-factory.cc",
"src/heap/local-factory.h",
"src/heap/local-factory-inl.h",
@@ -1549,6 +1565,9 @@ filegroup(
"src/objects/bigint-inl.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
+ "src/objects/call-site-info-inl.h",
+ "src/objects/call-site-info.cc",
+ "src/objects/call-site-info.h",
"src/objects/cell-inl.h",
"src/objects/cell.h",
"src/objects/code-inl.h",
@@ -1635,8 +1654,13 @@ filegroup(
"src/objects/js-regexp-string-iterator.h",
"src/objects/js-regexp.cc",
"src/objects/js-regexp.h",
+ "src/objects/js-shadow-realms.h",
+ "src/objects/js-shadow-realms-inl.h",
+ "src/objects/js-struct.h",
+ "src/objects/js-struct-inl.h",
"src/objects/js-temporal-objects.h",
"src/objects/js-temporal-objects-inl.h",
+ "src/objects/js-temporal-objects.cc",
"src/objects/js-weak-refs.h",
"src/objects/js-weak-refs-inl.h",
"src/objects/keys.cc",
@@ -1727,9 +1751,6 @@ filegroup(
"src/objects/source-text-module.cc",
"src/objects/source-text-module.h",
"src/objects/source-text-module-inl.h",
- "src/objects/stack-frame-info-inl.h",
- "src/objects/stack-frame-info.cc",
- "src/objects/stack-frame-info.h",
"src/objects/string-comparator.cc",
"src/objects/string-comparator.h",
"src/objects/string-inl.h",
@@ -1737,6 +1758,7 @@ filegroup(
"src/objects/string-set.h",
"src/objects/string-table-inl.h",
"src/objects/string-table.cc",
+ "src/objects/symbol-table.cc",
"src/objects/string-table.h",
"src/objects/string.cc",
"src/objects/string.h",
@@ -1918,14 +1940,15 @@ filegroup(
"src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
- "src/security/external-pointer-table.cc",
- "src/security/vm-cage.cc",
- "src/security/caged-pointer-inl.h",
- "src/security/caged-pointer.h",
- "src/security/external-pointer-inl.h",
- "src/security/external-pointer-table.h",
- "src/security/external-pointer.h",
- "src/security/vm-cage.h",
+ "src/sandbox/external-pointer-inl.h",
+ "src/sandbox/external-pointer.h",
+ "src/sandbox/external-pointer-table.cc",
+ "src/sandbox/external-pointer-table-inl.h",
+ "src/sandbox/external-pointer-table.h",
+ "src/sandbox/sandbox.cc",
+ "src/sandbox/sandbox.h",
+ "src/sandbox/sandboxed-pointer-inl.h",
+ "src/sandbox/sandboxed-pointer.h",
"src/base/sanitizer/asan.h",
"src/base/sanitizer/lsan-page-allocator.cc",
"src/base/sanitizer/lsan-page-allocator.h",
@@ -1964,8 +1987,6 @@ filegroup(
"src/snapshot/shared-heap-serializer.cc",
"src/snapshot/snapshot-compression.cc",
"src/snapshot/snapshot-compression.h",
- "third_party/zlib/google/compression_utils_portable.h",
- "third_party/zlib/google/compression_utils_portable.cc",
"src/snapshot/snapshot-data.cc",
"src/snapshot/snapshot-data.h",
"src/snapshot/snapshot-source-sink.cc",
@@ -2014,8 +2035,8 @@ filegroup(
"src/tracing/traced-value.h",
"src/tracing/tracing-category-observer.cc",
"src/tracing/tracing-category-observer.h",
- "src/trap-handler/handler-inside.cc",
"src/trap-handler/handler-inside-posix.h",
+ "src/trap-handler/handler-inside.cc",
"src/trap-handler/handler-outside.cc",
"src/trap-handler/handler-shared.cc",
"src/trap-handler/trap-handler-internal.h",
@@ -2037,7 +2058,6 @@ filegroup(
"src/utils/memcopy.h",
"src/utils/ostreams.cc",
"src/utils/ostreams.h",
- "src/utils/pointer-with-payload.h",
"src/utils/scoped-list.h",
"src/utils/utils-inl.h",
"src/utils/utils.cc",
@@ -2070,7 +2090,7 @@ filegroup(
"src/heap/third-party/heap-api.h",
"src/heap/third-party/heap-api-stub.cc",
] + select({
- "@config//:v8_target_ia32": [
+ "@v8//bazel/config:v8_target_ia32": [
"src/baseline/ia32/baseline-assembler-ia32-inl.h",
"src/baseline/ia32/baseline-compiler-ia32-inl.h",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
@@ -2080,11 +2100,13 @@ filegroup(
"src/codegen/ia32/assembler-ia32.cc",
"src/codegen/ia32/assembler-ia32.h",
"src/codegen/ia32/constants-ia32.h",
+ "src/codegen/ia32/fma-instr.h",
"src/codegen/ia32/interface-descriptors-ia32-inl.h",
"src/codegen/ia32/sse-instr.h",
"src/codegen/ia32/macro-assembler-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
+ "src/codegen/ia32/reglist-ia32.h",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
@@ -2098,7 +2120,7 @@ filegroup(
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
],
- "@config//:v8_target_x64": [
+ "@v8//bazel/config:v8_target_x64": [
"src/baseline/x64/baseline-assembler-x64-inl.h",
"src/baseline/x64/baseline-compiler-x64-inl.h",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
@@ -2114,6 +2136,7 @@ filegroup(
"src/codegen/x64/macro-assembler-x64.cc",
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
+ "src/codegen/x64/reglist-x64.h",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
@@ -2130,7 +2153,7 @@ filegroup(
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
],
- "@config//:v8_target_arm": [
+ "@v8//bazel/config:v8_target_arm": [
"src/baseline/arm/baseline-assembler-arm-inl.h",
"src/baseline/arm/baseline-compiler-arm-inl.h",
"src/codegen/arm/assembler-arm-inl.h",
@@ -2143,6 +2166,7 @@ filegroup(
"src/codegen/arm/macro-assembler-arm.cc",
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
+ "src/codegen/arm/reglist-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
@@ -2161,7 +2185,7 @@ filegroup(
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
],
- "@config//:v8_target_arm64": [
+ "@v8//bazel/config:v8_target_arm64": [
"src/baseline/arm64/baseline-assembler-arm64-inl.h",
"src/baseline/arm64/baseline-compiler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64-inl.h",
@@ -2181,6 +2205,7 @@ filegroup(
"src/codegen/arm64/macro-assembler-arm64.h",
"src/codegen/arm64/register-arm64.cc",
"src/codegen/arm64/register-arm64.h",
+ "src/codegen/arm64/reglist-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
"src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
@@ -2204,31 +2229,121 @@ filegroup(
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
],
+ "@v8//bazel/config:v8_target_s390x": [
+ "src/baseline/s390/baseline-assembler-s390-inl.h",
+ "src/baseline/s390/baseline-compiler-s390-inl.h",
+ "src/codegen/s390/assembler-s390.cc",
+ "src/codegen/s390/assembler-s390.h",
+ "src/codegen/s390/assembler-s390-inl.h",
+ "src/codegen/s390/constants-s390.cc",
+ "src/codegen/s390/constants-s390.h",
+ "src/codegen/s390/cpu-s390.cc",
+ "src/codegen/s390/interface-descriptors-s390-inl.h",
+ "src/codegen/s390/macro-assembler-s390.cc",
+ "src/codegen/s390/macro-assembler-s390.h",
+ "src/codegen/s390/register-s390.h",
+ "src/codegen/s390/reglist-s390.h",
+ "src/compiler/backend/s390/code-generator-s390.cc",
+ "src/compiler/backend/s390/instruction-codes-s390.h",
+ "src/compiler/backend/s390/instruction-scheduler-s390.cc",
+ "src/compiler/backend/s390/instruction-selector-s390.cc",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.cc",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.h",
+ "src/deoptimizer/s390/deoptimizer-s390.cc",
+ "src/diagnostics/s390/disasm-s390.cc",
+ "src/diagnostics/s390/eh-frame-s390.cc",
+ "src/diagnostics/s390/unwinder-s390.cc",
+ "src/execution/s390/frame-constants-s390.cc",
+ "src/execution/s390/frame-constants-s390.h",
+ "src/execution/s390/simulator-s390.cc",
+ "src/execution/s390/simulator-s390.h",
+ "src/regexp/s390/regexp-macro-assembler-s390.cc",
+ "src/regexp/s390/regexp-macro-assembler-s390.h",
+ "src/wasm/baseline/s390/liftoff-assembler-s390.h",
+ ],
+ "@v8//bazel/config:v8_target_riscv64": [
+ "src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
+ "src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
+ "src/codegen/riscv64/assembler-riscv64.cc",
+ "src/codegen/riscv64/assembler-riscv64.h",
+ "src/codegen/riscv64/assembler-riscv64-inl.h",
+ "src/codegen/riscv64/constants-riscv64.cc",
+ "src/codegen/riscv64/constants-riscv64.h",
+ "src/codegen/riscv64/cpu-riscv64.cc",
+ "src/codegen/riscv64/interface-descriptors-riscv64-inl.h",
+ "src/codegen/riscv64/macro-assembler-riscv64.cc",
+ "src/codegen/riscv64/macro-assembler-riscv64.h",
+ "src/codegen/riscv64/register-riscv64.h",
+ "src/codegen/riscv64/reglist-riscv64.h",
+ "src/compiler/backend/riscv64/code-generator-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
+ "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
+ "src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
+ "src/diagnostics/riscv64/disasm-riscv64.cc",
+ "src/diagnostics/riscv64/unwinder-riscv64.cc",
+ "src/execution/riscv64/frame-constants-riscv64.cc",
+ "src/execution/riscv64/frame-constants-riscv64.h",
+ "src/execution/riscv64/simulator-riscv64.cc",
+ "src/execution/riscv64/simulator-riscv64.h",
+ "src/regexp/riscv64/regexp-macro-assembler-riscv64.cc",
+ "src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
+ "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
+ ],
+ "@v8//bazel/config:v8_target_ppc64le": [
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc.cc",
+ "src/codegen/ppc/assembler-ppc.h",
+ "src/codegen/ppc/assembler-ppc-inl.h",
+ "src/codegen/ppc/constants-ppc.cc",
+ "src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/cpu-ppc.cc",
+ "src/codegen/ppc/interface-descriptors-ppc-inl.h",
+ "src/codegen/ppc/macro-assembler-ppc.cc",
+ "src/codegen/ppc/macro-assembler-ppc.h",
+ "src/codegen/ppc/register-ppc.h",
+ "src/codegen/ppc/reglist-ppc.h",
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
+ "src/deoptimizer/ppc/deoptimizer-ppc.cc",
+ "src/diagnostics/ppc/disasm-ppc.cc",
+ "src/diagnostics/ppc/eh-frame-ppc.cc",
+ "src/diagnostics/ppc/unwinder-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.h",
+ "src/execution/ppc/simulator-ppc.cc",
+ "src/execution/ppc/simulator-ppc.h",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
+ ],
}) + select({
# Only for x64 builds and for arm64 with x64 host simulator.
- "@config//:is_posix_x64": [
+ "@v8//bazel/config:is_non_android_posix_x64": [
"src/trap-handler/handler-inside-posix.cc",
"src/trap-handler/handler-outside-posix.cc",
],
"//conditions:default": [],
}) + select({
- "@config//:v8_arm64_simulator": [
+ "@v8//bazel/config:v8_arm64_simulator": [
"src/trap-handler/trap-handler-simulator.h",
"src/trap-handler/handler-outside-simulator.cc",
],
"//conditions:default": [],
}) + select({
- "@config//:is_windows": [
+ "@v8//bazel/config:is_windows": [
"src/trap-handler/handler-inside-win.cc",
- "src/trap-handler/handler-outside-win.cc",
"src/trap-handler/handler-inside-win.h",
- # Needed on windows to work around https://github.com/bazelbuild/bazel/issues/6337
- "third_party/zlib/zlib.h",
- "third_party/zlib/zconf.h",
+ "src/trap-handler/handler-outside-win.cc",
],
"//conditions:default": [],
}) + select({
- "@config//:is_windows_64bit": [
+ "@v8//bazel/config:is_windows_64bit": [
"src/diagnostics/unwinding-info-win64.cc",
"src/diagnostics/unwinding-info-win64.h",
],
@@ -2247,6 +2362,7 @@ filegroup(
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.h",
"src/compiler/wasm-inlining.h",
+ "src/compiler/wasm-loop-peeling.h",
"src/debug/debug-wasm-objects.cc",
"src/debug/debug-wasm-objects.h",
"src/debug/debug-wasm-objects-inl.h",
@@ -2512,8 +2628,6 @@ filegroup(
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.cc",
"src/compiler/js-heap-broker.h",
- "src/compiler/js-heap-copy-reducer.cc",
- "src/compiler/js-heap-copy-reducer.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-inlining-heuristic.cc",
@@ -2603,6 +2717,8 @@ filegroup(
"src/compiler/select-lowering.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
+ "src/compiler/simplified-lowering-verifier.cc",
+ "src/compiler/simplified-lowering-verifier.h",
"src/compiler/simplified-operator.cc",
"src/compiler/simplified-operator.h",
"src/compiler/simplified-operator-reducer.cc",
@@ -2632,6 +2748,7 @@ filegroup(
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-inlining.cc",
],
@@ -2680,9 +2797,11 @@ filegroup(
"src/builtins/builtins-proxy-gen.h",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
+ "src/builtins/builtins-shadowrealm-gen.cc",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
"src/builtins/builtins-string-gen.cc",
"src/builtins/builtins-string-gen.h",
+ "src/builtins/builtins-temporal-gen.cc",
"src/builtins/builtins-typed-array-gen.cc",
"src/builtins/builtins-typed-array-gen.h",
"src/builtins/builtins-utils-gen.h",
@@ -2709,11 +2828,16 @@ filegroup(
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
+ "src/numbers/integer-literal-inl.h",
+ "src/numbers/integer-literal.h",
] + select({
- "@config//:v8_target_ia32": ["src/builtins/ia32/builtins-ia32.cc"],
- "@config//:v8_target_x64": ["src/builtins/x64/builtins-x64.cc"],
- "@config//:v8_target_arm": ["src/builtins/arm/builtins-arm.cc"],
- "@config//:v8_target_arm64": ["src/builtins/arm64/builtins-arm64.cc"],
+ "@v8//bazel/config:v8_target_ia32": ["src/builtins/ia32/builtins-ia32.cc"],
+ "@v8//bazel/config:v8_target_x64": ["src/builtins/x64/builtins-x64.cc"],
+ "@v8//bazel/config:v8_target_arm": ["src/builtins/arm/builtins-arm.cc"],
+ "@v8//bazel/config:v8_target_arm64": ["src/builtins/arm64/builtins-arm64.cc"],
+ "@v8//bazel/config:v8_target_s390x": ["src/builtins/s390/builtins-s390.cc"],
+ "@v8//bazel/config:v8_target_riscv64": ["src/builtins/riscv64/builtins-riscv64.cc"],
+ "@v8//bazel/config:v8_target_ppc64le": ["src/builtins/ppc/builtins-ppc.cc"],
}) + select({
":is_v8_enable_webassembly": [
"src/builtins/builtins-wasm-gen.cc",
@@ -2744,6 +2868,7 @@ filegroup(
"src/heap/cppgc/gc-info-table.h",
"src/heap/cppgc/gc-invoker.cc",
"src/heap/cppgc/gc-invoker.h",
+ "src/heap/cppgc/globals.h",
"src/heap/cppgc/heap.cc",
"src/heap/cppgc/heap.h",
"src/heap/cppgc/heap-base.cc",
@@ -2800,6 +2925,8 @@ filegroup(
"src/heap/cppgc/process-heap-statistics.h",
"src/heap/cppgc/raw-heap.cc",
"src/heap/cppgc/raw-heap.h",
+ "src/heap/cppgc/remembered-set.cc",
+ "src/heap/cppgc/remembered-set.h",
"src/heap/cppgc/source-location.cc",
"src/heap/cppgc/stats-collector.cc",
"src/heap/cppgc/stats-collector.h",
@@ -2819,24 +2946,28 @@ filegroup(
)
filegroup(
- name = "v8_cppgc_shared_files",
+ name = "v8_heap_base_files",
srcs = [
+ "src/heap/base/active-system-pages.cc",
+ "src/heap/base/active-system-pages.h",
"src/heap/base/stack.cc",
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
- "src/heap/cppgc/globals.h",
] + select({
# Note these cannot be v8_target_is_* selects because these contain
# inline assembly that runs inside the executable. Since these are
# linked directly into mksnapshot, they must use the actual target cpu.
- "@config//:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
- "@config//:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
- "@config//:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
- "@config//:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
- "@config//:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.S"],
- "@config//:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.S"],
- "@config//:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
+ "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"],
+ "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.S"],
+ "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.S"],
+ "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
}),
)
@@ -2977,7 +3108,6 @@ filegroup(
"third_party/inspector_protocol/crdtp/protocol_core.h",
"third_party/inspector_protocol/crdtp/serializable.cc",
"third_party/inspector_protocol/crdtp/serializable.h",
- "third_party/inspector_protocol/crdtp/serializer_traits.h",
"third_party/inspector_protocol/crdtp/span.cc",
"third_party/inspector_protocol/crdtp/span.h",
"third_party/inspector_protocol/crdtp/status.cc",
@@ -2990,16 +3120,15 @@ filegroup(
srcs = [
"src/init/setup-isolate-deserialize.cc",
] + select({
- "@config//:v8_target_arm": [
+ "@v8//bazel/config:v8_target_arm": [
"google3/snapshots/arm/noicu/embedded.S",
"google3/snapshots/arm/noicu/snapshot.cc",
],
- "@config//:v8_target_ia32": [
+ "@v8//bazel/config:v8_target_ia32": [
"google3/snapshots/ia32/noicu/embedded.S",
"google3/snapshots/ia32/noicu/snapshot.cc",
],
- "@config//:v8_target_arm64": [":noicu/generated_snapshot_files"],
- "@config//:v8_target_x64": [":noicu/generated_snapshot_files"],
+ "//conditions:default": [":noicu/generated_snapshot_files"],
}),
)
@@ -3008,16 +3137,15 @@ filegroup(
srcs = [
"src/init/setup-isolate-deserialize.cc",
] + select({
- "@config//:v8_target_arm": [
+ "@v8//bazel/config:v8_target_arm": [
"google3/snapshots/arm/icu/embedded.S",
"google3/snapshots/arm/icu/snapshot.cc",
],
- "@config//:v8_target_ia32": [
+ "@v8//bazel/config:v8_target_ia32": [
"google3/snapshots/ia32/icu/embedded.S",
"google3/snapshots/ia32/icu/snapshot.cc",
],
- "@config//:v8_target_arm64": [":icu/generated_snapshot_files"],
- "@config//:v8_target_x64": [":icu/generated_snapshot_files"],
+ "//conditions:default": [":icu/generated_snapshot_files"],
}),
)
@@ -3048,7 +3176,7 @@ v8_torque(
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
}) + select({
- "@config//:v8_target_is_32_bits": ["-m32"],
+ "@v8//bazel/config:v8_target_is_32_bits": ["-m32"],
"//conditions:default": [],
}),
extras = [
@@ -3077,9 +3205,39 @@ v8_torque(
noicu_srcs = [":noicu/torque_files"],
)
+py_binary(
+ name = "code_generator",
+ srcs = [
+ "third_party/inspector_protocol/code_generator.py",
+ "third_party/inspector_protocol/pdl.py",
+ ],
+ data = [
+ "third_party/inspector_protocol/lib/Forward_h.template",
+ "third_party/inspector_protocol/lib/Object_cpp.template",
+ "third_party/inspector_protocol/lib/Object_h.template",
+ "third_party/inspector_protocol/lib/Protocol_cpp.template",
+ "third_party/inspector_protocol/lib/ValueConversions_cpp.template",
+ "third_party/inspector_protocol/lib/ValueConversions_h.template",
+ "third_party/inspector_protocol/lib/Values_cpp.template",
+ "third_party/inspector_protocol/lib/Values_h.template",
+ "third_party/inspector_protocol/lib/base_string_adapter_cc.template",
+ "third_party/inspector_protocol/lib/base_string_adapter_h.template",
+ "third_party/inspector_protocol/templates/Exported_h.template",
+ "third_party/inspector_protocol/templates/Imported_h.template",
+ "third_party/inspector_protocol/templates/TypeBuilder_cpp.template",
+ "third_party/inspector_protocol/templates/TypeBuilder_h.template",
+ ],
+ deps = [
+ requirement("jinja2"),
+ ],
+)
+
genrule(
name = "generated_inspector_files",
- srcs = ["include/js_protocol.pdl"],
+ srcs = [
+ "include/js_protocol.pdl",
+ "src/inspector/inspector_protocol_config.json",
+ ],
outs = [
"include/inspector/Debugger.h",
"include/inspector/Runtime.h",
@@ -3100,10 +3258,16 @@ genrule(
"src/inspector/protocol/Schema.cpp",
"src/inspector/protocol/Schema.h",
],
- cmd = "bazel/generate-inspector-files.sh $(@D)",
- cmd_bat = "bazel\\generate-inspector-files.cmd $(@D)",
local = 1,
+ cmd = "$(location :code_generator) --jinja_dir . \
+ --inspector_protocol_dir third_party/inspector_protocol \
+ --config $(location :src/inspector/inspector_protocol_config.json) \
+ --config_value protocol.path=$(location :include/js_protocol.pdl) \
+ --output_base $(@D)/src/inspector",
message = "Generating inspector files",
+ tools = [
+ ":code_generator",
+ ],
)
filegroup(
@@ -3216,7 +3380,7 @@ cc_library(
":torque_base_files",
],
copts = select({
- "@config//:is_posix": [ "-fexceptions" ],
+ "@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
features = ["-use_header_modules"],
@@ -3234,7 +3398,7 @@ v8_library(
],
icu_deps = [
":icu/generated_torque_headers",
- "@icu",
+ "//external:icu",
],
icu_srcs = [
":generated_regexp_special_case",
@@ -3249,23 +3413,30 @@ v8_library(
],
deps = [
":v8_libbase",
- "@zlib",
+ "//external:base_trace_event_common",
+ "//external:zlib",
+ "//external:zlib_compression_utils",
],
)
v8_library(
name = "v8",
srcs = [":v8_inspector_files"],
+ hdrs = [":public_header_files"],
icu_deps = [":icu/v8_libshared"],
icu_srcs = [":icu/snapshot_files"],
noicu_deps = [":noicu/v8_libshared"],
noicu_srcs = [":noicu/snapshot_files"],
+ visibility = ["//visibility:public"],
)
# TODO(victorgomes): Check if v8_enable_webassembly is true.
v8_library(
name = "wee8",
srcs = [":wee8_files"],
+ hdrs = [":public_wasm_c_api_header_files"],
+ strip_include_prefix = "third_party",
+ visibility = ["//visibility:public"],
deps = [":noicu/v8"],
)
@@ -3312,7 +3483,7 @@ v8_binary(
"UNISTR_FROM_CHAR_EXPLICIT=",
],
deps = [
- "@icu",
+ "//external:icu",
],
)
@@ -3323,12 +3494,12 @@ v8_binary(
":torque_base_files",
],
copts = select({
- "@config//:is_posix": [ "-fexceptions" ],
+ "@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
features = ["-use_header_modules"],
linkopts = select({
- "@config//:is_android": ["-llog"],
+ "@v8//bazel/config:is_android": ["-llog"],
"//conditions:default": [],
}),
deps = ["v8_libbase"],
@@ -3339,7 +3510,7 @@ v8_binary(
srcs = [":mksnapshot_files"],
icu_deps = [":icu/v8_libshared"],
linkopts = select({
- "@config//:is_android": ["-llog"],
+ "@v8//bazel/config:is_android": ["-llog"],
"//conditions:default": [],
}),
noicu_deps = [":v8_libshared_noicu"],
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 19731feebe..7b19ee86d8 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -83,7 +83,7 @@ declare_args() {
v8_enable_hugepage = false
# Sets -dENABLE_HANDLE_ZAPPING.
- v8_enable_handle_zapping = is_debug
+ v8_enable_handle_zapping = !is_on_release_branch || is_debug
# Enable slow dchecks.
v8_enable_slow_dchecks = false
@@ -198,6 +198,10 @@ declare_args() {
# Sets -dV8_EXTERNAL_CODE_SPACE
v8_enable_external_code_space = ""
+ # Enable the Maglev compiler.
+ # Sets -dV8_ENABLE_MAGLEV
+ v8_enable_maglev = ""
+
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -225,9 +229,6 @@ declare_args() {
(is_linux || is_chromeos || is_mac)) ||
(v8_current_cpu == "ppc64" && (is_linux || is_chromeos))
- # Enable minor mark compact.
- v8_enable_minor_mc = true
-
# Check that each header can be included in isolation (requires also
# setting the "check_v8_header_includes" gclient variable to run a
# specific hook).
@@ -281,9 +282,6 @@ declare_args() {
# ARM64.
v8_control_flow_integrity = false
- # Enable object names in cppgc for debug purposes.
- cppgc_enable_object_names = false
-
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
cppgc_enable_caged_heap =
v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
@@ -295,21 +293,24 @@ declare_args() {
# Enable allocations during prefinalizer invocations.
cppgc_allow_allocations_in_prefinalizers = false
- # Enable young generation in cppgc.
- cppgc_enable_young_generation = false
-
# Enable V8 zone compression experimental feature.
# Sets -DV8_COMPRESS_ZONES.
v8_enable_zone_compression = ""
- # Enable V8 heap sandbox experimental feature.
- # Sets -DV8_HEAP_SANDBOX.
- v8_enable_heap_sandbox = ""
+ # Enable the experimental V8 sandbox.
+ # Sets -DV8_SANDBOX.
+ v8_enable_sandbox = false
+
+ # Enable external pointer sandboxing. Requires v8_enable_sandbox.
+ # Sets -DV8_SANDBOXED_EXTERNAL_POINRTERS.
+ v8_enable_sandboxed_external_pointers = false
- # Enable the Virtual Memory Cage, which contains the pointer compression cage
- # as well as ArrayBuffer BackingStores and WASM memory cages.
- # Sets -DV8_VIRTUAL_MEMORY_CAGE.
- v8_enable_virtual_memory_cage = ""
+ # Enable sandboxed pointers. Requires v8_enable_sandbox.
+ # Sets -DV8_SANDBOXED_POINTERS.
+ v8_enable_sandboxed_pointers = false
+
+ # Enable all available sandbox features. Implies v8_enable_sandbox.
+ v8_enable_sandbox_future = false
# Experimental feature for collecting per-class zone memory stats.
# Requires use_rtti = true
@@ -333,12 +334,15 @@ declare_args() {
v8_enable_map_packing = false
# Allow for JS promise hooks (instead of just C++).
- v8_allow_javascript_in_promise_hooks = false
+ v8_enable_javascript_promise_hooks = false
# Enable allocation folding globally (sets -dV8_ALLOCATION_FOLDING).
# When it's disabled, the --turbo-allocation-folding runtime flag will be ignored.
v8_enable_allocation_folding = true
+ # Enable runtime verification of heap snapshots produced for devtools.
+ v8_enable_heap_snapshot_verify = ""
+
# Enable global allocation site tracking.
v8_allocation_site_tracking = true
@@ -348,11 +352,16 @@ declare_args() {
# This is only used by nodejs.
v8_scriptormodule_legacy_lifetime = false
- # If enabled, the receiver is always included in the actual and formal
- # parameter count of function with JS linkage.
- # TODO(v8:11112): Remove once all architectures support the flag and it is
- # enabled unconditionally.
- v8_include_receiver_in_argc = true
+ # Change code emission and runtime features to be CET shadow-stack compliant
+ # (incomplete and experimental).
+ v8_enable_cet_shadow_stack = false
+
+ # Get VMEX priviledge at startup.
+ # It allows to run V8 without "deprecated-ambient-replace-as-executable".
+ # Sets -DV8_USE_VMEX_RESOURCE.
+ # TODO(victorgomes): Remove this flag once Chormium no longer needs
+ # the deprecated feature.
+ v8_fuchsia_use_vmex_resource = is_fuchsia && !build_with_chromium
}
# Derived defaults.
@@ -377,6 +386,10 @@ if (v8_enable_test_features == "") {
if (v8_enable_v8_checks == "") {
v8_enable_v8_checks = v8_enable_debugging_features
}
+if (v8_enable_heap_snapshot_verify == "") {
+ v8_enable_heap_snapshot_verify =
+ v8_enable_debugging_features || dcheck_always_on
+}
if (v8_enable_snapshot_code_comments) {
assert(v8_code_comments == true || v8_code_comments == "",
"v8_enable_snapshot_code_comments conflicts with v8_code_comments.")
@@ -403,18 +416,21 @@ if (v8_enable_fast_torque == "") {
if (v8_enable_zone_compression == "") {
v8_enable_zone_compression = false
}
-if (v8_enable_heap_sandbox == "") {
- v8_enable_heap_sandbox = false
-}
-if (v8_enable_virtual_memory_cage == "") {
- v8_enable_virtual_memory_cage = v8_enable_heap_sandbox
-}
if (v8_enable_short_builtin_calls == "") {
v8_enable_short_builtin_calls =
v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
}
if (v8_enable_external_code_space == "") {
- v8_enable_external_code_space = false
+ # Can't use !is_android here, because Torque toolchain is affected by
+ # the value of this flag but actually runs on the host side.
+ v8_enable_external_code_space =
+ v8_enable_pointer_compression &&
+ (v8_current_cpu == "x64" ||
+ (target_os != "android" && target_os != "fuchsia" &&
+ v8_current_cpu == "arm64"))
+}
+if (v8_enable_maglev == "") {
+ v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
}
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
@@ -452,7 +468,16 @@ if (v8_multi_arch_build &&
"clang_x64_pointer_compression") {
v8_enable_pointer_compression = !v8_enable_pointer_compression
v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
+ v8_enable_external_code_space = v8_enable_pointer_compression
+}
+
+# Check if it is a Chromium build and activate PAC/BTI if needed.
+# TODO(cavalcantii): have a single point of integration with PAC/BTI flags.
+if (build_with_chromium && v8_current_cpu == "arm64" &&
+ arm_control_flow_integrity == "standard") {
+ v8_control_flow_integrity = true
}
+
if (v8_enable_short_builtin_calls &&
((!v8_enable_pointer_compression && v8_current_cpu != "x64") ||
v8_control_flow_integrity)) {
@@ -467,15 +492,17 @@ if (v8_enable_shared_ro_heap == "") {
v8_enable_pointer_compression_shared_cage
}
-# Check if it is a Chromium build and activate PAC/BTI if needed.
-if (build_with_chromium && v8_current_cpu == "arm64" &&
- arm_control_flow_integrity == "standard") {
- v8_control_flow_integrity = true
+# Enable the v8 sandbox on 64-bit Chromium builds.
+if (build_with_chromium && v8_enable_pointer_compression_shared_cage &&
+ v8_enable_external_code_space) {
+ v8_enable_sandbox = true
}
-# Enable the virtual memory cage on 64-bit Chromium builds.
-if (build_with_chromium && v8_enable_pointer_compression_shared_cage) {
- v8_enable_virtual_memory_cage = true
+# Enable all available sandbox features if sandbox future is enabled.
+if (v8_enable_sandbox_future) {
+ v8_enable_sandboxed_pointers = true
+ v8_enable_sandboxed_external_pointers = true
+ v8_enable_sandbox = true
}
assert(!v8_disable_write_barriers || v8_enable_single_generation,
@@ -500,18 +527,17 @@ assert(!v8_enable_map_packing || v8_current_cpu == "x64",
assert(!v8_enable_external_code_space || v8_enable_pointer_compression,
"External code space feature requires pointer compression")
-assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
- "V8 Heap Sandbox requires pointer compression")
+assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage,
+ "The sandbox requires the shared pointer compression cage")
-assert(!v8_enable_heap_sandbox || !v8_enable_external_code_space,
- "V8 Heap Sandbox is not compatible with external code space YET")
+assert(!v8_enable_sandbox || v8_enable_external_code_space,
+ "The sandbox requires the external code space")
-assert(!v8_enable_heap_sandbox || v8_enable_virtual_memory_cage,
- "The Heap Sandbox requires the virtual memory cage")
+assert(!v8_enable_sandboxed_pointers || v8_enable_sandbox,
+ "Sandboxed pointers require the sandbox")
-assert(
- !v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage,
- "V8 VirtualMemoryCage requires the shared pointer compression cage")
+assert(!v8_enable_sandboxed_external_pointers || v8_enable_sandbox,
+ "Sandboxed external pointers require the sandbox")
assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
@@ -541,6 +567,10 @@ if (v8_enable_single_generation == true) {
assert(!v8_enable_conservative_stack_scanning || v8_enable_single_generation,
"Conservative stack scanning requires single generation")
+if (v8_fuchsia_use_vmex_resource) {
+ assert(target_os == "fuchsia", "VMEX resource only available on Fuchsia")
+}
+
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -673,8 +703,9 @@ external_v8_defines = [
"V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE",
"V8_31BIT_SMIS_ON_64BIT_ARCH",
"V8_COMPRESS_ZONES",
- "V8_HEAP_SANDBOX",
- "V8_VIRTUAL_MEMORY_CAGE",
+ "V8_SANDBOX",
+ "V8_SANDBOXED_POINTERS",
+ "V8_SANDBOXED_EXTERNAL_POINTERS",
"V8_DEPRECATION_WARNINGS",
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
@@ -702,11 +733,14 @@ if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
if (v8_enable_zone_compression) {
enabled_external_v8_defines += [ "V8_COMPRESS_ZONES" ]
}
-if (v8_enable_heap_sandbox) {
- enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ]
+if (v8_enable_sandbox) {
+ enabled_external_v8_defines += [ "V8_SANDBOX" ]
+}
+if (v8_enable_sandboxed_pointers) {
+ enabled_external_v8_defines += [ "V8_SANDBOXED_POINTERS" ]
}
-if (v8_enable_virtual_memory_cage) {
- enabled_external_v8_defines += [ "V8_VIRTUAL_MEMORY_CAGE" ]
+if (v8_enable_sandboxed_external_pointers) {
+ enabled_external_v8_defines += [ "V8_SANDBOXED_EXTERNAL_POINTERS" ]
}
if (v8_deprecation_warnings) {
enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ]
@@ -810,10 +844,6 @@ config("features") {
defines +=
[ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ]
- assert(
- !v8_enable_raw_heap_snapshots,
- "This flag is deprecated and is now available through the inspector interface as an argument to profiler's method `takeHeapSnapshot`. Consider using blink's flag `enable_additional_blink_object_names` to get better naming of internal objects.")
-
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
@@ -832,9 +862,6 @@ config("features") {
if (v8_enable_hugepage) {
defines += [ "ENABLE_HUGEPAGE" ]
}
- if (v8_enable_minor_mc) {
- defines += [ "ENABLE_MINOR_MC" ]
- }
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
@@ -870,6 +897,9 @@ config("features") {
if (v8_enable_debug_code) {
defines += [ "V8_ENABLE_DEBUG_CODE" ]
}
+ if (v8_enable_heap_snapshot_verify) {
+ defines += [ "V8_ENABLE_HEAP_SNAPSHOT_VERIFY" ]
+ }
if (v8_enable_snapshot_native_code_counters) {
defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ]
}
@@ -915,6 +945,9 @@ config("features") {
if (v8_control_flow_integrity) {
defines += [ "V8_ENABLE_CONTROL_FLOW_INTEGRITY" ]
}
+ if (v8_enable_cet_shadow_stack) {
+ defines += [ "V8_ENABLE_CET_SHADOW_STACK" ]
+ }
if (v8_enable_wasm_gdb_remote_debugging) {
defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ]
}
@@ -930,6 +963,9 @@ config("features") {
if (v8_enable_external_code_space) {
defines += [ "V8_EXTERNAL_CODE_SPACE" ]
}
+ if (v8_enable_maglev) {
+ defines += [ "V8_ENABLE_MAGLEV" ]
+ }
if (v8_enable_swiss_name_dictionary) {
defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
@@ -945,8 +981,8 @@ config("features") {
if (v8_dict_property_const_tracking) {
defines += [ "V8_DICT_PROPERTY_CONST_TRACKING" ]
}
- if (v8_allow_javascript_in_promise_hooks) {
- defines += [ "V8_ALLOW_JAVASCRIPT_IN_PROMISE_HOOKS" ]
+ if (v8_enable_javascript_promise_hooks) {
+ defines += [ "V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS" ]
}
if (v8_enable_allocation_folding) {
defines += [ "V8_ALLOCATION_FOLDING" ]
@@ -960,8 +996,8 @@ config("features") {
if (v8_advanced_bigint_algorithms) {
defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
}
- if (v8_include_receiver_in_argc) {
- defines += [ "V8_INCLUDE_RECEIVER_IN_ARGC" ]
+ if (v8_fuchsia_use_vmex_resource) {
+ defines += [ "V8_USE_VMEX_RESOURCE" ]
}
}
@@ -993,7 +1029,7 @@ config("toolchain") {
]
}
- # TODO(jochen): Add support for arm_test_noprobe.
+ # TODO(infra): Add support for arm_test_noprobe.
if (current_cpu != "arm") {
# These defines ares used for the ARM simulator.
@@ -1006,11 +1042,17 @@ config("toolchain") {
}
if (v8_current_cpu == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
- if (v8_control_flow_integrity) {
- # TODO(v8:10026): Enable this in src/build.
- if (current_cpu == "arm64") {
+ if (current_cpu == "arm64") {
+ # This will enable PAC+BTI in code generation and static code.
+ if (v8_control_flow_integrity) {
+ # TODO(v8:10026): Enable this in src/build.
cflags += [ "-mbranch-protection=standard" ]
asmflags = [ "-mmark-bti-property" ]
+ } else if (build_with_chromium && arm_control_flow_integrity == "pac") {
+ # This should enable PAC only in C++ code (and no CFI in runtime
+ # generated code). For details, see crbug.com/919548.
+ cflags += [ "-mbranch-protection=pac-ret" ]
+ asmflags = [ "-mbranch-protection=pac-ret" ]
}
}
}
@@ -1055,7 +1097,7 @@ config("toolchain") {
defines += [ "FPU_MODE_FP32" ]
}
- # TODO(jochen): Add support for mips_arch_variant rx and loongson.
+ # TODO(infra): Add support for mips_arch_variant rx and loongson.
}
if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") {
@@ -1177,14 +1219,14 @@ config("toolchain") {
defines += [ "V8_TARGET_OS_LINUX" ]
} else if (target_os == "mac") {
defines += [ "V8_HAVE_TARGET_OS" ]
- defines += [ "V8_TARGET_OS_MACOSX" ]
+ defines += [ "V8_TARGET_OS_MACOS" ]
} else if (target_os == "win") {
defines += [ "V8_HAVE_TARGET_OS" ]
defines += [ "V8_TARGET_OS_WIN" ]
}
- # TODO(jochen): Support v8_enable_prof on Windows.
- # TODO(jochen): Add support for compiling with simulators.
+ # TODO(infra): Support v8_enable_prof on Windows.
+ # TODO(infra): Add support for compiling with simulators.
if (v8_enable_debugging_features) {
if ((is_linux || is_chromeos) && v8_enable_backtrace) {
@@ -1716,6 +1758,7 @@ torque_files = [
"src/objects/api-callbacks.tq",
"src/objects/arguments.tq",
"src/objects/bigint.tq",
+ "src/objects/call-site-info.tq",
"src/objects/cell.tq",
"src/objects/code.tq",
"src/objects/contexts.tq",
@@ -1741,6 +1784,8 @@ torque_files = [
"src/objects/js-proxy.tq",
"src/objects/js-regexp-string-iterator.tq",
"src/objects/js-regexp.tq",
+ "src/objects/js-shadow-realms.tq",
+ "src/objects/js-struct.tq",
"src/objects/js-temporal-objects.tq",
"src/objects/js-weak-refs.tq",
"src/objects/literal-objects.tq",
@@ -1762,7 +1807,6 @@ torque_files = [
"src/objects/script.tq",
"src/objects/shared-function-info.tq",
"src/objects/source-text-module.tq",
- "src/objects/stack-frame-info.tq",
"src/objects/string.tq",
"src/objects/struct.tq",
"src/objects/swiss-hash-table-helpers.tq",
@@ -2197,12 +2241,14 @@ action("v8_dump_build_config") {
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
"v8_enable_pointer_compression_shared_cage=" +
"$v8_enable_pointer_compression_shared_cage",
- "v8_enable_virtual_memory_cage=$v8_enable_virtual_memory_cage",
+ "v8_enable_sandbox=$v8_enable_sandbox",
+ "v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap",
"v8_enable_third_party_heap=$v8_enable_third_party_heap",
"v8_enable_webassembly=$v8_enable_webassembly",
"v8_dict_property_const_tracking=$v8_dict_property_const_tracking",
"v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_target_cpu=\"$v8_target_cpu\"",
+ "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
]
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
@@ -2317,9 +2363,11 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-proxy-gen.h",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
+ "src/builtins/builtins-shadowrealm-gen.cc",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
"src/builtins/builtins-string-gen.cc",
"src/builtins/builtins-string-gen.h",
+ "src/builtins/builtins-temporal-gen.cc",
"src/builtins/builtins-typed-array-gen.cc",
"src/builtins/builtins-typed-array-gen.h",
"src/builtins/builtins-utils-gen.h",
@@ -2346,6 +2394,8 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
+ "src/numbers/integer-literal-inl.h",
+ "src/numbers/integer-literal.h",
]
if (v8_enable_webassembly) {
@@ -2679,8 +2729,10 @@ v8_header_set("v8_internal_headers") {
"src/codegen/optimized-compilation-info.h",
"src/codegen/pending-optimization-table.h",
"src/codegen/register-arch.h",
+ "src/codegen/register-base.h",
"src/codegen/register-configuration.h",
"src/codegen/register.h",
+ "src/codegen/reglist-base.h",
"src/codegen/reglist.h",
"src/codegen/reloc-info.h",
"src/codegen/safepoint-table.h",
@@ -2693,10 +2745,12 @@ v8_header_set("v8_internal_headers") {
"src/codegen/tnode.h",
"src/codegen/turbo-assembler.h",
"src/codegen/unoptimized-compilation-info.h",
+ "src/common/allow-deprecated.h",
"src/common/assert-scope.h",
"src/common/checks.h",
"src/common/high-allocation-throughput-scope.h",
"src/common/message-template.h",
+ "src/common/operation.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
"src/compiler-dispatcher/lazy-compile-dispatcher.h",
@@ -2765,7 +2819,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.h",
- "src/compiler/js-heap-copy-reducer.h",
"src/compiler/js-inlining-heuristic.h",
"src/compiler/js-inlining.h",
"src/compiler/js-intrinsic-lowering.h",
@@ -2812,6 +2865,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/schedule.h",
"src/compiler/scheduler.h",
"src/compiler/select-lowering.h",
+ "src/compiler/simplified-lowering-verifier.h",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.h",
@@ -2860,6 +2914,7 @@ v8_header_set("v8_internal_headers") {
"src/diagnostics/unwinder.h",
"src/execution/arguments-inl.h",
"src/execution/arguments.h",
+ "src/execution/clobber-registers.h",
"src/execution/embedder-state.h",
"src/execution/encoded-c-signature.h",
"src/execution/execution.h",
@@ -2880,13 +2935,13 @@ v8_header_set("v8_internal_headers") {
"src/execution/pointer-authentication.h",
"src/execution/protectors-inl.h",
"src/execution/protectors.h",
- "src/execution/runtime-profiler.h",
"src/execution/shared-mutex-guard-if-off-thread.h",
"src/execution/simulator-base.h",
"src/execution/simulator.h",
"src/execution/stack-guard.h",
"src/execution/thread-id.h",
"src/execution/thread-local-top.h",
+ "src/execution/tiering-manager.h",
"src/execution/v8threads.h",
"src/execution/vm-state-inl.h",
"src/execution/vm-state.h",
@@ -2906,6 +2961,7 @@ v8_header_set("v8_internal_headers") {
"src/handles/maybe-handles.h",
"src/handles/persistent-handles.h",
"src/heap/allocation-observer.h",
+ "src/heap/allocation-result.h",
"src/heap/allocation-stats.h",
"src/heap/array-buffer-sweeper.h",
"src/heap/barrier.h",
@@ -2920,11 +2976,16 @@ v8_header_set("v8_internal_headers") {
"src/heap/concurrent-allocator.h",
"src/heap/concurrent-marking.h",
"src/heap/cppgc-js/cpp-heap.h",
+ "src/heap/cppgc-js/cpp-marking-state-inl.h",
+ "src/heap/cppgc-js/cpp-marking-state.h",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
+ "src/heap/embedder-tracing-inl.h",
"src/heap/embedder-tracing.h",
+ "src/heap/evacuation-allocator-inl.h",
+ "src/heap/evacuation-allocator.h",
"src/heap/factory-base-inl.h",
"src/heap/factory-base.h",
"src/heap/factory-inl.h",
@@ -2934,6 +2995,8 @@ v8_header_set("v8_internal_headers") {
"src/heap/free-list.h",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.h",
+ "src/heap/heap-allocator-inl.h",
+ "src/heap/heap-allocator.h",
"src/heap/heap-controller.h",
"src/heap/heap-inl.h",
"src/heap/heap-layout-tracer.h",
@@ -2949,8 +3012,6 @@ v8_header_set("v8_internal_headers") {
"src/heap/large-spaces.h",
"src/heap/linear-allocation-area.h",
"src/heap/list.h",
- "src/heap/local-allocator-inl.h",
- "src/heap/local-allocator.h",
"src/heap/local-factory-inl.h",
"src/heap/local-factory.h",
"src/heap/local-heap-inl.h",
@@ -3066,6 +3127,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/backing-store.h",
"src/objects/bigint-inl.h",
"src/objects/bigint.h",
+ "src/objects/call-site-info-inl.h",
+ "src/objects/call-site-info.h",
"src/objects/cell-inl.h",
"src/objects/cell.h",
"src/objects/code-inl.h",
@@ -3140,6 +3203,10 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-regexp.h",
"src/objects/js-segments-inl.h",
"src/objects/js-segments.h",
+ "src/objects/js-shadow-realms-inl.h",
+ "src/objects/js-shadow-realms.h",
+ "src/objects/js-struct-inl.h",
+ "src/objects/js-struct.h",
"src/objects/js-temporal-objects-inl.h",
"src/objects/js-temporal-objects.h",
"src/objects/js-weak-refs-inl.h",
@@ -3213,8 +3280,6 @@ v8_header_set("v8_internal_headers") {
"src/objects/smi.h",
"src/objects/source-text-module-inl.h",
"src/objects/source-text-module.h",
- "src/objects/stack-frame-info-inl.h",
- "src/objects/stack-frame-info.h",
"src/objects/string-comparator.h",
"src/objects/string-inl.h",
"src/objects/string-set-inl.h",
@@ -3315,12 +3380,13 @@ v8_header_set("v8_internal_headers") {
"src/roots/roots.h",
"src/runtime/runtime-utils.h",
"src/runtime/runtime.h",
- "src/security/caged-pointer-inl.h",
- "src/security/caged-pointer.h",
- "src/security/external-pointer-inl.h",
- "src/security/external-pointer-table.h",
- "src/security/external-pointer.h",
- "src/security/vm-cage.h",
+ "src/sandbox/external-pointer-inl.h",
+ "src/sandbox/external-pointer-table-inl.h",
+ "src/sandbox/external-pointer-table.h",
+ "src/sandbox/external-pointer.h",
+ "src/sandbox/sandbox.h",
+ "src/sandbox/sandboxed-pointer-inl.h",
+ "src/sandbox/sandboxed-pointer.h",
"src/snapshot/code-serializer.h",
"src/snapshot/context-deserializer.h",
"src/snapshot/context-serializer.h",
@@ -3376,7 +3442,6 @@ v8_header_set("v8_internal_headers") {
"src/utils/locked-queue.h",
"src/utils/memcopy.h",
"src/utils/ostreams.h",
- "src/utils/pointer-with-payload.h",
"src/utils/scoped-list.h",
"src/utils/utils-inl.h",
"src/utils/utils.h",
@@ -3402,6 +3467,30 @@ v8_header_set("v8_internal_headers") {
sources -= [ "//base/trace_event/common/trace_event_common.h" ]
}
+ if (v8_enable_maglev) {
+ sources += [
+ "src/maglev/maglev-basic-block.h",
+ "src/maglev/maglev-code-gen-state.h",
+ "src/maglev/maglev-code-generator.h",
+ "src/maglev/maglev-compilation-info.h",
+ "src/maglev/maglev-compilation-unit.h",
+ "src/maglev/maglev-compiler.h",
+ "src/maglev/maglev-concurrent-dispatcher.h",
+ "src/maglev/maglev-graph-builder.h",
+ "src/maglev/maglev-graph-labeller.h",
+ "src/maglev/maglev-graph-printer.h",
+ "src/maglev/maglev-graph-processor.h",
+ "src/maglev/maglev-graph.h",
+ "src/maglev/maglev-interpreter-frame-state.h",
+ "src/maglev/maglev-ir.h",
+ "src/maglev/maglev-regalloc-data.h",
+ "src/maglev/maglev-regalloc.h",
+ "src/maglev/maglev-register-frame-array.h",
+ "src/maglev/maglev-vreg-allocator.h",
+ "src/maglev/maglev.h",
+ ]
+ }
+
if (v8_enable_webassembly) {
sources += [
"src/asmjs/asm-js.h",
@@ -3413,6 +3502,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.h",
"src/compiler/wasm-inlining.h",
+ "src/compiler/wasm-loop-peeling.h",
"src/debug/debug-wasm-objects-inl.h",
"src/debug/debug-wasm-objects.h",
"src/trap-handler/trap-handler-internal.h",
@@ -3529,6 +3619,10 @@ v8_header_set("v8_internal_headers") {
]
}
+ if (v8_enable_heap_snapshot_verify) {
+ sources += [ "src/heap/reference-summarizer.h" ]
+ }
+
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/baseline/ia32/baseline-assembler-ia32-inl.h",
@@ -3539,6 +3633,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/ia32/interface-descriptors-ia32-inl.h",
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
+ "src/codegen/ia32/reglist-ia32.h",
"src/codegen/ia32/sse-instr.h",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
@@ -3558,6 +3653,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/x64/interface-descriptors-x64-inl.h",
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
+ "src/codegen/x64/reglist-x64.h",
"src/codegen/x64/sse-instr.h",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
@@ -3591,6 +3687,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/arm/interface-descriptors-arm-inl.h",
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
+ "src/codegen/arm/reglist-arm.h",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/execution/arm/frame-constants-arm.h",
@@ -3612,6 +3709,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/arm64/macro-assembler-arm64-inl.h",
"src/codegen/arm64/macro-assembler-arm64.h",
"src/codegen/arm64/register-arm64.h",
+ "src/codegen/arm64/reglist-arm64.h",
"src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
@@ -3648,6 +3746,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/mips/constants-mips.h",
"src/codegen/mips/macro-assembler-mips.h",
"src/codegen/mips/register-mips.h",
+ "src/codegen/mips/reglist-mips.h",
"src/compiler/backend/mips/instruction-codes-mips.h",
"src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.h",
@@ -3663,6 +3762,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/mips64/constants-mips64.h",
"src/codegen/mips64/macro-assembler-mips64.h",
"src/codegen/mips64/register-mips64.h",
+ "src/codegen/mips64/reglist-mips64.h",
"src/compiler/backend/mips64/instruction-codes-mips64.h",
"src/execution/mips64/frame-constants-mips64.h",
"src/execution/mips64/simulator-mips64.h",
@@ -3678,6 +3778,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/loong64/constants-loong64.h",
"src/codegen/loong64/macro-assembler-loong64.h",
"src/codegen/loong64/register-loong64.h",
+ "src/codegen/loong64/reglist-loong64.h",
"src/compiler/backend/loong64/instruction-codes-loong64.h",
"src/execution/loong64/frame-constants-loong64.h",
"src/execution/loong64/simulator-loong64.h",
@@ -3692,6 +3793,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/ppc/interface-descriptors-ppc-inl.h",
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
+ "src/codegen/ppc/reglist-ppc.h",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/execution/ppc/frame-constants-ppc.h",
@@ -3701,14 +3803,15 @@ v8_header_set("v8_internal_headers") {
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
- "src/baseline/s390/baseline-assembler-s390-inl.h",
- "src/baseline/s390/baseline-compiler-s390-inl.h",
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.h",
"src/codegen/ppc/interface-descriptors-ppc-inl.h",
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
+ "src/codegen/ppc/reglist-ppc.h",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/execution/ppc/frame-constants-ppc.h",
@@ -3726,6 +3829,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/s390/interface-descriptors-s390-inl.h",
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
+ "src/codegen/s390/reglist-s390.h",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/unwinding-info-writer-s390.h",
"src/execution/s390/frame-constants-s390.h",
@@ -3742,6 +3846,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/riscv64/constants-riscv64.h",
"src/codegen/riscv64/macro-assembler-riscv64.h",
"src/codegen/riscv64/register-riscv64.h",
+ "src/codegen/riscv64/reglist-riscv64.h",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/execution/riscv64/frame-constants-riscv64.h",
"src/execution/riscv64/simulator-riscv64.h",
@@ -3762,6 +3867,7 @@ v8_header_set("v8_internal_headers") {
":cppgc_headers",
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_heap_base_headers",
":v8_libbase",
]
}
@@ -3822,7 +3928,6 @@ v8_compiler_sources = [
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-graph.cc",
"src/compiler/js-heap-broker.cc",
- "src/compiler/js-heap-copy-reducer.cc",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining.cc",
"src/compiler/js-intrinsic-lowering.cc",
@@ -3864,6 +3969,7 @@ v8_compiler_sources = [
"src/compiler/schedule.cc",
"src/compiler/scheduler.cc",
"src/compiler/select-lowering.cc",
+ "src/compiler/simplified-lowering-verifier.cc",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-operator-reducer.cc",
"src/compiler/simplified-operator.cc",
@@ -3885,6 +3991,7 @@ if (v8_enable_webassembly) {
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-inlining.cc",
+ "src/compiler/wasm-loop-peeling.cc",
]
}
@@ -4007,8 +4114,10 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-object.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
+ "src/builtins/builtins-shadow-realms.cc",
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
+ "src/builtins/builtins-struct.cc",
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-temporal.cc",
"src/builtins/builtins-trace.cc",
@@ -4077,6 +4186,7 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/perf-jit.cc",
"src/diagnostics/unwinder.cc",
"src/execution/arguments.cc",
+ "src/execution/clobber-registers.cc",
"src/execution/embedder-state.cc",
"src/execution/encoded-c-signature.cc",
"src/execution/execution.cc",
@@ -4088,11 +4198,11 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/messages.cc",
"src/execution/microtask-queue.cc",
"src/execution/protectors.cc",
- "src/execution/runtime-profiler.cc",
"src/execution/simulator-base.cc",
"src/execution/stack-guard.cc",
"src/execution/thread-id.cc",
"src/execution/thread-local-top.cc",
+ "src/execution/tiering-manager.cc",
"src/execution/v8threads.cc",
"src/extensions/cputracemark-extension.cc",
"src/extensions/externalize-string-extension.cc",
@@ -4127,6 +4237,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/free-list.cc",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-tracer.cc",
+ "src/heap/heap-allocator.cc",
"src/heap/heap-controller.cc",
"src/heap/heap-layout-tracer.cc",
"src/heap/heap-write-barrier.cc",
@@ -4205,6 +4316,7 @@ v8_source_set("v8_base_without_compiler") {
"src/numbers/math-random.cc",
"src/objects/backing-store.cc",
"src/objects/bigint.cc",
+ "src/objects/call-site-info.cc",
"src/objects/code-kind.cc",
"src/objects/code.cc",
"src/objects/compilation-cache-table.cc",
@@ -4232,6 +4344,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-segment-iterator.cc",
"src/objects/js-segmenter.cc",
"src/objects/js-segments.cc",
+ "src/objects/js-temporal-objects.cc",
"src/objects/keys.cc",
"src/objects/literal-objects.cc",
"src/objects/lookup-cache.cc",
@@ -4250,11 +4363,11 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/scope-info.cc",
"src/objects/shared-function-info.cc",
"src/objects/source-text-module.cc",
- "src/objects/stack-frame-info.cc",
"src/objects/string-comparator.cc",
"src/objects/string-table.cc",
"src/objects/string.cc",
"src/objects/swiss-name-dictionary.cc",
+ "src/objects/symbol-table.cc",
"src/objects/synthetic-module.cc",
"src/objects/tagged-impl.cc",
"src/objects/template-objects.cc",
@@ -4340,8 +4453,8 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
- "src/security/external-pointer-table.cc",
- "src/security/vm-cage.cc",
+ "src/sandbox/external-pointer-table.cc",
+ "src/sandbox/sandbox.cc",
"src/snapshot/code-serializer.cc",
"src/snapshot/context-deserializer.cc",
"src/snapshot/context-serializer.cc",
@@ -4394,8 +4507,23 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone.cc",
]
- if (v8_enable_webassembly) {
+ if (v8_enable_maglev) {
sources += [
+ "src/maglev/maglev-code-generator.cc",
+ "src/maglev/maglev-compilation-info.cc",
+ "src/maglev/maglev-compilation-unit.cc",
+ "src/maglev/maglev-compiler.cc",
+ "src/maglev/maglev-concurrent-dispatcher.cc",
+ "src/maglev/maglev-graph-builder.cc",
+ "src/maglev/maglev-graph-printer.cc",
+ "src/maglev/maglev-ir.cc",
+ "src/maglev/maglev-regalloc.cc",
+ "src/maglev/maglev.cc",
+ ]
+ }
+
+ if (v8_enable_webassembly) {
+ sources += [ ### gcmole(all) ###
"src/asmjs/asm-js.cc",
"src/asmjs/asm-parser.cc",
"src/asmjs/asm-scanner.cc",
@@ -4468,6 +4596,10 @@ v8_source_set("v8_base_without_compiler") {
]
}
+ if (v8_enable_heap_snapshot_verify) {
+ sources += [ "src/heap/reference-summarizer.cc" ]
+ }
+
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/codegen/ia32/assembler-ia32.cc",
@@ -4732,8 +4864,8 @@ v8_source_set("v8_base_without_compiler") {
deps = [
":torque_generated_definitions",
":v8_bigint",
- ":v8_cppgc_shared",
":v8_headers",
+ ":v8_heap_base",
":v8_libbase",
":v8_shared_internal_headers",
":v8_tracing",
@@ -4830,6 +4962,8 @@ v8_source_set("torque_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ "src/numbers/integer-literal-inl.h",
+ "src/numbers/integer-literal.h",
"src/torque/ast.h",
"src/torque/cc-generator.cc",
"src/torque/cc-generator.h",
@@ -5029,6 +5163,7 @@ v8_component("v8_libbase") {
"src/base/platform/time.h",
"src/base/platform/wrappers.h",
"src/base/platform/yield-processor.h",
+ "src/base/pointer-with-payload.h",
"src/base/region-allocator.cc",
"src/base/region-allocator.h",
"src/base/ring-buffer.h",
@@ -5120,6 +5255,7 @@ v8_component("v8_libbase") {
if (host_os == "mac") {
sources += [
"src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-darwin.cc",
"src/base/platform/platform-macos.cc",
]
} else {
@@ -5139,14 +5275,24 @@ v8_component("v8_libbase") {
"src/base/debug/stack_trace_fuchsia.cc",
"src/base/platform/platform-fuchsia.cc",
]
- deps += [ "//third_party/fuchsia-sdk/sdk/pkg/zx" ]
- } else if (is_mac || is_ios) {
+ deps += [
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel",
+ "//third_party/fuchsia-sdk/sdk/pkg/fdio",
+ "//third_party/fuchsia-sdk/sdk/pkg/zx",
+ ]
+ } else if (is_mac) {
sources += [
"src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-darwin.cc",
"src/base/platform/platform-macos.cc",
]
+ } else if (is_ios) {
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-darwin.cc",
+ ]
} else if (is_win) {
- # TODO(jochen): Add support for cygwin.
+ # TODO(infra): Add support for cygwin.
sources += [
"src/base/debug/stack_trace_win.cc",
"src/base/platform/platform-win32.cc",
@@ -5195,7 +5341,7 @@ v8_component("v8_libbase") {
[ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ]
}
- # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
+ # TODO(infra): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
v8_component("v8_libplatform") {
@@ -5325,13 +5471,23 @@ v8_source_set("v8_bigint") {
configs = [ ":internal_config" ]
}
-v8_source_set("v8_cppgc_shared") {
+v8_source_set("v8_heap_base_headers") {
sources = [
- "src/heap/base/stack.cc",
+ "src/heap/base/active-system-pages.h",
"src/heap/base/stack.h",
- "src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
- "src/heap/cppgc/globals.h",
+ ]
+
+ configs = [ ":internal_config" ]
+
+ public_deps = [ ":v8_libbase" ]
+}
+
+v8_source_set("v8_heap_base") {
+ sources = [
+ "src/heap/base/active-system-pages.cc",
+ "src/heap/base/stack.cc",
+ "src/heap/base/worklist.cc",
]
if (is_clang || !is_win) {
@@ -5369,7 +5525,7 @@ v8_source_set("v8_cppgc_shared") {
configs = [ ":internal_config" ]
public_deps = [
- ":cppgc_headers",
+ ":v8_heap_base_headers",
":v8_libbase",
]
}
@@ -5404,7 +5560,6 @@ v8_header_set("cppgc_headers") {
"include/cppgc/internal/name-trait.h",
"include/cppgc/internal/persistent-node.h",
"include/cppgc/internal/pointer-policies.h",
- "include/cppgc/internal/prefinalizer-handler.h",
"include/cppgc/internal/write-barrier.h",
"include/cppgc/liveness-broker.h",
"include/cppgc/macros.h",
@@ -5459,6 +5614,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/gc-info.cc",
"src/heap/cppgc/gc-invoker.cc",
"src/heap/cppgc/gc-invoker.h",
+ "src/heap/cppgc/globals.h",
"src/heap/cppgc/heap-base.cc",
"src/heap/cppgc/heap-base.h",
"src/heap/cppgc/heap-consistency.cc",
@@ -5515,6 +5671,8 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/process-heap.h",
"src/heap/cppgc/raw-heap.cc",
"src/heap/cppgc/raw-heap.h",
+ "src/heap/cppgc/remembered-set.cc",
+ "src/heap/cppgc/remembered-set.h",
"src/heap/cppgc/source-location.cc",
"src/heap/cppgc/stats-collector.cc",
"src/heap/cppgc/stats-collector.h",
@@ -5551,7 +5709,7 @@ v8_source_set("cppgc_base") {
public_deps = [
":cppgc_headers",
- ":v8_cppgc_shared",
+ ":v8_heap_base",
":v8_libbase",
":v8_libplatform",
]
@@ -5862,6 +6020,7 @@ group("v8_clusterfuzz") {
deps = [
":d8",
":v8_simple_inspector_fuzzer",
+ "tools/clusterfuzz/trials:v8_clusterfuzz_resources",
]
if (v8_multi_arch_build) {
@@ -5870,7 +6029,7 @@ group("v8_clusterfuzz") {
":d8(//build/toolchain/linux:clang_x64_v8_arm64)",
":d8(//build/toolchain/linux:clang_x86)",
":d8(//build/toolchain/linux:clang_x86_v8_arm)",
- ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)",
+ ":d8(tools/clusterfuzz/foozzie/toolchain:clang_x64_pointer_compression)",
]
}
}
@@ -5981,10 +6140,10 @@ if (is_component_build) {
}
}
- v8_component("v8_cppgc_shared_for_testing") {
+ v8_component("v8_heap_base_for_testing") {
testonly = true
- public_deps = [ ":v8_cppgc_shared" ]
+ public_deps = [ ":v8_heap_base" ]
configs = []
public_configs = [ ":external_config" ]
@@ -6033,10 +6192,10 @@ if (is_component_build) {
}
}
- group("v8_cppgc_shared_for_testing") {
+ group("v8_heap_base_for_testing") {
testonly = true
- public_deps = [ ":v8_cppgc_shared" ]
+ public_deps = [ ":v8_heap_base" ]
public_configs = [ ":external_config" ]
}
@@ -6086,7 +6245,7 @@ v8_executable("d8") {
}
if (v8_correctness_fuzzer) {
- deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ]
+ deps += [ "tools/clusterfuzz/foozzie:v8_correctness_fuzzer_resources" ]
}
defines = []
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index add6b07ed6..39f241b3e9 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -1,16 +1,13 @@
adamk@chromium.org
ahaas@chromium.org
-bbudge@chromium.org
bikineev@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
clemensb@chromium.org
danno@chromium.org
-delphick@chromium.org
dinfuehr@chromium.org
ecmziegler@chromium.org
gdeepti@chromium.org
-gsathya@chromium.org
hablich@chromium.org
hpayer@chromium.org
ishell@chromium.org
@@ -23,16 +20,15 @@ mathias@chromium.org
marja@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
-mvstanton@chromium.org
nicohartmann@chromium.org
+nikolaos@chromium.org
omerkatz@chromium.org
pthier@chromium.org
-sigurds@chromium.org
syg@chromium.org
szuend@chromium.org
+tebbi@chromium.org
thibaudm@chromium.org
vahl@chromium.org
verwaest@chromium.org
victorgomes@chromium.org
yangguo@chromium.org
-zhin@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 102f46264b..61577d45ab 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -40,10 +40,10 @@ vars = {
'reclient_version': 're_client_version:0.40.0.40ff5a5',
# GN CIPD package version.
- 'gn_version': 'git_revision:fc295f3ac7ca4fe7acc6cb5fb052d22909ef3a8f',
+ 'gn_version': 'git_revision:bd99dbf98cbdefe18a4128189665c5761263bcfb',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:31175eb1a2712bb75d06a9bad5d4dd3f2a09cd1f',
+ 'luci_go': 'git_revision:cb424e70e75136736a86359ef070aa96425fe7a3',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -72,20 +72,20 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version
# and whatever else without interference from each other.
- 'android_sdk_sources_version': 'Yw53980aNNn0n9l58lN7u0wSVmxlY0OM1zFnGDQeJs4C',
+ 'android_sdk_sources_version': '7EcXjyZWkTu3sCA8d8eRXg_aCBCYt8ihXgxp29VXLs8C',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'Ez2NWws2SJYCF6qw2O-mSCqK6424l3ZdSTpppLyVR_cC',
+ 'android_sdk_cmdline-tools_version': 'PGPmqJtSIQ84If155ba7iTU846h5WJ-bL5d_OoUWEWYC',
}
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7f36dbc19d31e2aad895c60261ca8f726442bfbb',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd115b033c4e53666b535cbd1985ffe60badad082',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '9cfc74504f0c5093fe6799e70f15bded2423b5b4',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '3d9590754d5d23e62d15472c5baf6777ca59df20',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '075dd7e22837a69189003e4fa84499acf63188cf',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '113dd1badbcbffea108a8c95ac7c89c22bfd25f3',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'e435ad79c17b1888b34df88d6a30a094936e3836',
'buildtools/linux64': {
@@ -111,9 +111,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '89f2e82120461d34098edd216e57aa743f441107',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a897d0f3f8e8c28ac2abf848f3b695b724409298',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'c8c0ec928e46328fa284e7290c4ef052c7d285d4',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd1c7f92b8b0bff8d9f710ca40e44563a63db376e',
'buildtools/win': {
'packages': [
{
@@ -139,9 +139,7 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '1f16a6ad0edd10e774e336d8b331471b0c3bb360',
- 'test/test262/harness':
- Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f7fb969cc4934bbc5aa29a378d59325eaa84f475',
'third_party/aemu-linux-x64': {
'packages': [
{
@@ -163,11 +161,11 @@ deps = {
'dep_type': 'cipd',
},
'third_party/android_ndk': {
- 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7',
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '9644104c8cf85bf1bdce5b1c0691e9778572c3f8',
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'abc362f16dfc1a6cc082298ed54504bef11eb9e7',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '87b4b48de3c8204224d63612c287eb5a447a562d',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -209,7 +207,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '49839733a7f26070e8d666d91fae177711154e1d',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b3fe2c177912640bc676b332a2f41dc812ea5843',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -217,18 +215,18 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0a233e176044b6d9b9ff9fb30b589bfb18f9ca04',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'b199f549263a02900faef8c8c3d581c580e837c3',
'third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5e0b0d0b67e889360eaa456cc17ce47d89a92167',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '7c9c220d13ab367d49420144a257886ebfbce278',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'ab867074da2423c2d9cf225233191a01f043485d',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '5704cd4c8cea889d68f9ae29ca5aaee97ef91816',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4c5650f68866e3c2e60361d5c4c95c6f335fb64b',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ae5e06dd35c6137d335331b0815cf1f60fd7e3c5',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'edf883ad2db9c723b058a6a17a146d68d6343143',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '8a5b728e4f43b0eabdb9ea450f956d67cfb22719',
'third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52',
'third_party/ittapi': {
@@ -242,7 +240,7 @@ deps = {
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/logdog/logdog':
- Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '17ec234f823f7bff6ada6584fdbbee9d54b8fc58',
+ Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399',
'third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd',
'third_party/perfetto':
@@ -274,9 +272,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'efd9399ae01364926be2a38946127fdf463480db',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'b0676a1f52484bf53a1a49d0e48ff8abc430fafe',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '336fcfd099995c128bc93e97b8263cc6fc891cc8',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b60d34c100e5a8f4b01d838527f000faab673da3',
'tools/clang/dsymutil': {
'packages': [
{
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 61963c62f6..cb6888d32a 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -36,6 +36,9 @@ import os
import re
import sys
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
_EXCLUDED_PATHS = (
r"^test[\\\/].*",
@@ -223,7 +226,7 @@ def _CheckUnwantedDependencies(input_api, output_api):
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
- description_with_path = '%s\n %s' % (path, rule_description)
+ description_with_path = '{}\n {}'.format(path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
@@ -285,8 +288,8 @@ def _CheckHeadersHaveIncludeGuards(input_api, output_api):
break
if not file_omitted and not all(found_patterns):
- problems.append(
- '%s: Missing include guard \'%s\'' % (local_path, guard_macro))
+ problems.append('{}: Missing include guard \'{}\''.format(
+ local_path, guard_macro))
if problems:
return [output_api.PresubmitError(
@@ -320,8 +323,8 @@ def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (include_directive_pattern.search(line)):
- problems.append(
- '%s:%d\n %s' % (local_path, line_number, line.strip()))
+ problems.append('{}:{}\n {}'.format(local_path, line_number,
+ line.strip()))
if problems:
return [output_api.PresubmitError(include_error, problems)]
@@ -341,11 +344,13 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
file_inclusion_pattern = r'.+\.cc'
base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
- inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
- comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
+ inclusion_pattern = input_api.re.compile(
+ r'({})\s*\('.format(base_function_pattern))
+ comment_pattern = input_api.re.compile(
+ r'//.*({})'.format(base_function_pattern))
exclusion_pattern = input_api.re.compile(
- r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
- base_function_pattern, base_function_pattern))
+ r'::[A-Za-z0-9_]+({})|({})[^;]+'.format(base_function_pattern,
+ base_function_pattern) + '\{')
def FilterFile(affected_file):
files_to_skip = (_EXCLUDED_PATHS +
@@ -363,8 +368,8 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
if (inclusion_pattern.search(line) and
not comment_pattern.search(line) and
not exclusion_pattern.search(line)):
- problems.append(
- '%s:%d\n %s' % (local_path, line_number, line.strip()))
+ problems.append('{}:{}\n {}'.format(local_path, line_number,
+ line.strip()))
if problems:
return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
@@ -423,7 +428,7 @@ def _SkipTreeCheck(input_api, output_api):
def _CheckCommitMessageBugEntry(input_api, output_api):
"""Check that bug entries are well-formed in commit message."""
bogus_bug_msg = (
- 'Bogus BUG entry: %s. Please specify the issue tracker prefix and the '
+ 'Bogus BUG entry: {}. Please specify the issue tracker prefix and the '
'issue number, separated by a colon, e.g. v8:123 or chromium:12345.')
results = []
for bug in (input_api.change.BUG or '').split(','):
@@ -437,12 +442,13 @@ def _CheckCommitMessageBugEntry(input_api, output_api):
prefix_guess = 'chromium'
else:
prefix_guess = 'v8'
- results.append('BUG entry requires issue tracker prefix, e.g. %s:%s' %
- (prefix_guess, bug))
+ results.append(
+ 'BUG entry requires issue tracker prefix, e.g. {}:{}'.format(
+ prefix_guess, bug))
except ValueError:
- results.append(bogus_bug_msg % bug)
+ results.append(bogus_bug_msg.format(bug))
elif not re.match(r'\w+:\d+', bug):
- results.append(bogus_bug_msg % bug)
+ results.append(bogus_bug_msg.format(bug))
return [output_api.PresubmitError(r) for r in results]
@@ -459,8 +465,8 @@ def _CheckJSONFiles(input_api, output_api):
try:
json.load(j)
except Exception as e:
- results.append(
- 'JSON validation failed for %s. Error:\n%s' % (f.LocalPath(), e))
+ results.append('JSON validation failed for {}. Error:\n{}'.format(
+ f.LocalPath(), e))
return [output_api.PresubmitError(r) for r in results]
@@ -509,8 +515,7 @@ def _CheckNoexceptAnnotations(input_api, output_api):
include_deletes=False):
with open(f.LocalPath()) as fh:
for match in re.finditer(regexp, fh.read()):
- errors.append('in {}: {}'.format(f.LocalPath(),
- match.group().strip()))
+ errors.append(f'in {f.LocalPath()}: {match.group().strip()}')
if errors:
return [output_api.PresubmitPromptOrNotify(
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index b1dc86db9d..feadac3fab 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -48,12 +48,6 @@
'|include/js_protocol.pdl' \
'|include/v8-inspector*\.h',
},
- 'interpreter': {
- 'filepath': 'src/interpreter/' \
- '|src/compiler/bytecode' \
- '|test/cctest/interpreter/' \
- '|test/unittests/interpreter/',
- },
'baseline': {
'filepath': 'src/baseline/',
},
@@ -66,10 +60,6 @@
'arm': {
'filepath': '/arm/',
},
- 'csa': {
- 'filepath': 'src/codegen/code-stub-assembler\.(cc|h)$' \
- '|src/builtins/.*-gen.(cc|h)$',
- },
'merges': {
'filepath': '.',
},
@@ -80,13 +70,13 @@
'value_serializer': {
'filepath': 'src/value-serializer',
},
+ 'maglev': {
+ 'filepath': 'src/maglev/',
+ },
'parser': {
'filepath': 'src/ast/' \
'|src/parsing/',
},
- 'torque': {
- 'filepath': '.*\.tq$',
- },
'tracing': {
'filepath': 'src/tracing/',
},
@@ -114,11 +104,10 @@
},
'WATCHLISTS': {
- 'csa': [
- 'jgruber+watch@chromium.org',
- ],
- 'torque': [
+ 'maglev': [
'jgruber+watch@chromium.org',
+ 'leszeks+watch@chromium.org',
+ 'verwaest+watch@chromium.org',
],
'snapshot': [
'jgruber+watch@chromium.org',
diff --git a/deps/v8/WORKSPACE b/deps/v8/WORKSPACE
index 32fff02aab..490e973a76 100644
--- a/deps/v8/WORKSPACE
+++ b/deps/v8/WORKSPACE
@@ -5,31 +5,69 @@
workspace(name = "v8")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
http_archive(
name = "bazel_skylib",
+ sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
urls = [
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
],
- sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
)
+
load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace")
+
bazel_skylib_workspace()
-new_local_repository(
- name = "config",
- path = "bazel/config",
- build_file = "bazel/config/BUILD.bazel",
+http_archive(
+ name = "rules_python",
+ sha256 = "a30abdfc7126d497a7698c29c46ea9901c6392d6ed315171a6df5ce433aa4502",
+ strip_prefix = "rules_python-0.6.0",
+ url = "https://github.com/bazelbuild/rules_python/archive/0.6.0.tar.gz",
+)
+
+load("@rules_python//python:pip.bzl", "pip_install")
+
+pip_install(
+ name = "v8_python_deps",
+ extra_pip_args = ["--require-hashes"],
+ requirements = "//:bazel/requirements.txt",
)
new_local_repository(
- name = "zlib",
- path = "third_party/zlib",
+ name = "com_googlesource_chromium_zlib",
build_file = "bazel/BUILD.zlib",
+ path = "third_party/zlib",
+)
+
+bind(
+ name = "zlib",
+ actual = "@com_googlesource_chromium_zlib//:zlib",
+)
+
+bind(
+ name = "zlib_compression_utils",
+ actual = "@com_googlesource_chromium_zlib//:zlib_compression_utils",
)
new_local_repository(
- name = "icu",
- path = "third_party/icu",
+ name = "com_googlesource_chromium_icu",
build_file = "bazel/BUILD.icu",
+ path = "third_party/icu",
+)
+
+bind(
+ name = "icu",
+ actual = "@com_googlesource_chromium_icu//:icu",
+)
+
+new_local_repository(
+ name = "com_googlesource_chromium_base_trace_event_common",
+ build_file = "bazel/BUILD.trace_event_common",
+ path = "base/trace_event/common",
+)
+
+bind(
+ name = "base_trace_event_common",
+ actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common",
)
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 1fd2283dec..fb1ce8a053 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -259,10 +259,10 @@ template <>
perfetto::ThreadTrack BASE_EXPORT
ConvertThreadId(const ::base::PlatformThreadId& thread);
-#if defined(OS_WIN)
+#if BUILDFLAG(IS_WIN)
template <>
perfetto::ThreadTrack BASE_EXPORT ConvertThreadId(const int& thread);
-#endif // defined(OS_WIN)
+#endif // BUILDFLAG(IS_WIN)
} // namespace legacy
diff --git a/deps/v8/bazel/BUILD.icu b/deps/v8/bazel/BUILD.icu
index 662e11ec03..2ae79a5784 100644
--- a/deps/v8/bazel/BUILD.icu
+++ b/deps/v8/bazel/BUILD.icu
@@ -4,15 +4,31 @@
filegroup(
name = "icudata",
- srcs = [ "common/icudtl.dat" ]
+ srcs = ["common/icudtl.dat"],
)
cc_library(
name = "icuuc",
- srcs = glob([
+ srcs = glob([
"source/common/**/*.h",
- "source/common/**/*.cpp"
+ "source/common/**/*.cpp",
]),
+ copts = select({
+ "@platforms//os:windows": [
+ "/wd4005", # Macro redefinition.
+ "/wd4068", # Unknown pragmas.
+ "/wd4267", # Conversion from size_t on 64-bits.
+ "/utf-8", # ICU source files are in UTF-8.
+ ],
+ "//conditions:default": [
+ "-Wno-unused-function",
+ "-Wno-parentheses",
+ "-Wno-unused-function",
+ "-Wno-unused-variable",
+ "-Wno-deprecated-declarations",
+ ],
+ }),
+ data = [":icudata"],
defines = [
"U_COMMON_IMPLEMENTATION",
"U_ICUDATAENTRY_IN_COMMON",
@@ -34,35 +50,19 @@ cc_library(
],
"//conditions:default": [],
}),
- copts = select({
- "@platforms//os:windows": [
- "/wd4005", # Macro redefinition.
- "/wd4068", # Unknown pragmas.
- "/wd4267", # Conversion from size_t on 64-bits.
- "/utf-8", # ICU source files are in UTF-8.
- ],
- "//conditions:default": [
- "-Wno-unused-function",
- "-Wno-parentheses",
- "-Wno-unused-function",
- "-Wno-unused-variable",
- "-Wno-deprecated-declarations",
- ],
- }),
includes = [
"source/common",
"source/i18n",
],
tags = ["requires-rtti"],
- data = [ ":icudata" ],
alwayslink = 1,
)
cc_library(
name = "icui18n",
- srcs = glob([
+ srcs = glob([
"source/i18n/**/*.h",
- "source/i18n/**/*.cpp"
+ "source/i18n/**/*.cpp",
]),
copts = select({
"@platforms//os:windows": [
@@ -83,19 +83,19 @@ cc_library(
],
"//conditions:default": [],
}),
- deps = [ ":icuuc" ],
+ deps = [":icuuc"],
alwayslink = 1,
)
cc_library(
name = "icu",
+ srcs = [
+ "source/stubdata/stubdata.cpp",
+ ],
hdrs = glob([
"source/common/unicode/*.h",
"source/i18n/unicode/*.h",
]),
- srcs = [
- "source/stubdata/stubdata.cpp",
- ],
copts = select({
"@platforms//os:windows": [
"/wd4005", # Macro redefinition.
@@ -116,10 +116,10 @@ cc_library(
"//conditions:default": [],
}),
include_prefix = "third_party/icu",
+ visibility = ["//visibility:public"],
deps = [
+ ":icui18n",
":icuuc",
- ":icui18n"
],
- visibility = ["//visibility:public"],
alwayslink = 1,
)
diff --git a/deps/v8/bazel/BUILD.trace_event_common b/deps/v8/bazel/BUILD.trace_event_common
new file mode 100644
index 0000000000..685b284071
--- /dev/null
+++ b/deps/v8/bazel/BUILD.trace_event_common
@@ -0,0 +1,10 @@
+# Copyright 2021 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+cc_library(
+ name = "trace_event_common",
+ hdrs = ["trace_event_common.h"],
+ include_prefix = "base/trace_event/common",
+ visibility = ["//visibility:public"],
+)
diff --git a/deps/v8/bazel/BUILD.zlib b/deps/v8/bazel/BUILD.zlib
index 140f761fbb..25a2c35313 100644
--- a/deps/v8/bazel/BUILD.zlib
+++ b/deps/v8/bazel/BUILD.zlib
@@ -9,6 +9,7 @@ cc_library(
"chromeconf.h",
"compress.c",
"contrib/optimizations/insert_string.h",
+ "contrib/optimizations/slide_hash_neon.h",
"cpu_features.c",
"cpu_features.h",
"crc32.c",
@@ -35,14 +36,10 @@ cc_library(
"zlib.h",
"zutil.c",
"zutil.h",
- "google/compression_utils_portable.h",
- "google/compression_utils_portable.cc",
- ],
+ ],
hdrs = [
"zlib.h",
- "google/compression_utils_portable.h",
],
- include_prefix = "third_party/zlib",
defines = [
"CHROMIUM_ZLIB_NO_CHROMECONF",
"CPU_NO_SIMD",
@@ -52,5 +49,21 @@ cc_library(
"HAVE_HIDDEN",
],
}),
+ include_prefix = "third_party/zlib",
visibility = ["//visibility:public"],
)
+
+cc_library(
+ name = "zlib_compression_utils",
+ srcs = [
+ "google/compression_utils_portable.cc",
+ ],
+ hdrs = [
+ "google/compression_utils_portable.h",
+ ],
+ include_prefix = "third_party/zlib",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//external:zlib",
+ ],
+)
diff --git a/deps/v8/bazel/OWNERS b/deps/v8/bazel/OWNERS
index 8636f621c4..502862b9bc 100644
--- a/deps/v8/bazel/OWNERS
+++ b/deps/v8/bazel/OWNERS
@@ -1,5 +1,4 @@
# Google3 V8 owners
ahaas@chromium.org
cbruni@chromium.org
-delphick@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel
index 78a1b5debd..ac79c42808 100644
--- a/deps/v8/bazel/config/BUILD.bazel
+++ b/deps/v8/bazel/config/BUILD.bazel
@@ -16,6 +16,20 @@ package(
)
config_setting(
+ name = "is_fastbuild",
+ values = {
+ "compilation_mode": "fastbuild",
+ },
+)
+
+config_setting(
+ name = "is_debug",
+ values = {
+ "compilation_mode": "dbg",
+ },
+)
+
+config_setting(
name = "platform_cpu_x64",
constraint_values = ["@platforms//cpu:x86_64"],
)
@@ -27,7 +41,7 @@ config_setting(
config_setting(
name = "platform_cpu_arm64",
- constraint_values = ["@platforms//cpu:arm"],
+ constraint_values = ["@platforms//cpu:aarch64"],
)
config_setting(
@@ -35,6 +49,21 @@ config_setting(
constraint_values = ["@platforms//cpu:arm"],
)
+config_setting(
+ name = "platform_cpu_s390x",
+ constraint_values = ["@platforms//cpu:s390x"],
+)
+
+config_setting(
+ name = "platform_cpu_riscv64",
+ constraint_values = ["@platforms//cpu:riscv64"],
+)
+
+config_setting(
+ name = "platform_cpu_ppc64le",
+ constraint_values = ["@platforms//cpu:ppc"],
+)
+
v8_target_cpu(
name = "v8_target_cpu",
build_setting_default = "none",
@@ -58,15 +87,30 @@ v8_configure_target_cpu(
)
v8_configure_target_cpu(
- name = "arm",
+ name = "arm64",
matching_configs = [":platform_cpu_arm64"],
)
v8_configure_target_cpu(
- name = "arm64",
+ name = "arm",
matching_configs = [":platform_cpu_arm"],
)
+v8_configure_target_cpu(
+ name = "s390x",
+ matching_configs = [":platform_cpu_s390x"],
+)
+
+v8_configure_target_cpu(
+ name = "riscv64",
+ matching_configs = [":platform_cpu_riscv64"],
+)
+
+v8_configure_target_cpu(
+ name = "ppc64le",
+ matching_configs = [":platform_cpu_ppc64le"],
+)
+
selects.config_setting_group(
name = "v8_target_is_32_bits",
match_any = [
@@ -110,6 +154,9 @@ selects.config_setting_group(
":v8_target_arm64",
":is_x64",
":is_arm64",
+ ":is_s390x",
+ ":is_riscv64",
+ ":is_ppc64le",
],
)
@@ -131,44 +178,174 @@ selects.config_setting_group(
)
selects.config_setting_group(
+ name = "is_non_android_posix",
+ match_any = [
+ ":is_linux",
+ ":is_macos",
+ ],
+)
+
+selects.config_setting_group(
name = "is_posix_x64",
match_all = [
":is_posix",
":is_x64",
- ]
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_non_android_posix_x64",
+ match_all = [
+ ":is_non_android_posix",
+ ":is_x64",
+ ],
)
selects.config_setting_group(
name = "is_inline_asm_x64",
- match_all = [":is_posix", ":is_x64"],
+ match_all = [
+ ":is_posix",
+ ":is_x64",
+ ],
)
selects.config_setting_group(
name = "is_inline_asm_ia32",
- match_all = [":is_posix", ":is_ia32"],
+ match_all = [
+ ":is_posix",
+ ":is_ia32",
+ ],
)
selects.config_setting_group(
name = "is_inline_asm_arm64",
- match_all = [":is_posix", ":is_arm64"],
+ match_all = [
+ ":is_posix",
+ ":is_arm64",
+ ],
)
selects.config_setting_group(
name = "is_inline_asm_arm",
- match_all = [":is_posix", ":is_arm"],
+ match_all = [
+ ":is_posix",
+ ":is_arm",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_inline_asm_s390x",
+ match_all = [
+ ":is_posix",
+ ":is_s390x",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_inline_asm_riscv64",
+ match_all = [
+ ":is_posix",
+ ":is_riscv64",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_inline_asm_ppc64le",
+ match_all = [
+ ":is_posix",
+ ":is_ppc64le",
+ ],
)
selects.config_setting_group(
name = "is_msvc_asm_x64",
- match_all = [":is_windows", ":is_x64"],
+ match_all = [
+ ":is_windows",
+ ":is_x64",
+ ],
)
selects.config_setting_group(
name = "is_msvc_asm_ia32",
- match_all = [":is_windows", ":is_ia32"],
+ match_all = [
+ ":is_windows",
+ ":is_ia32",
+ ],
)
selects.config_setting_group(
name = "is_msvc_asm_arm64",
- match_all = [":is_windows", ":is_arm64"],
+ match_all = [
+ ":is_windows",
+ ":is_arm64",
+ ],
+)
+
+config_setting(
+ name = "is_compiler_default",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "compiler",
+ },
+)
+
+selects.config_setting_group(
+ name = "is_compiler_default_on_linux",
+ match_all = [
+ ":is_compiler_default",
+ ":is_linux",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_compiler_default_on_macos",
+ match_all = [
+ ":is_compiler_default",
+ ":is_macos",
+ ],
+)
+
+config_setting(
+ name = "is_compiler_clang",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "clang",
+ },
+)
+
+selects.config_setting_group(
+ name = "is_clang",
+ match_any = [
+ ":is_compiler_default_on_macos",
+ ":is_compiler_clang",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_clang_s390x",
+ match_all = [
+ ":is_clang",
+ ":is_s390x",
+ ],
+)
+
+config_setting(
+ name = "is_compiler_gcc",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "gcc",
+ },
+)
+
+selects.config_setting_group(
+ name = "is_gcc",
+ match_any = [
+ ":is_compiler_default_on_linux",
+ ":is_compiler_gcc",
+ ],
+)
+
+selects.config_setting_group(
+ name = "is_gcc_fastbuild",
+ match_all = [
+ ":is_gcc",
+ ":is_fastbuild",
+ ],
)
diff --git a/deps/v8/bazel/config/v8-target-cpu.bzl b/deps/v8/bazel/config/v8-target-cpu.bzl
index 2d5d241ebf..a0ce9d1cb3 100644
--- a/deps/v8/bazel/config/v8-target-cpu.bzl
+++ b/deps/v8/bazel/config/v8-target-cpu.bzl
@@ -14,7 +14,7 @@ V8CpuTypeInfo = provider(
)
def _host_target_cpu_impl(ctx):
- allowed_values = ["arm", "arm64", "ia32", "x64", "none"]
+ allowed_values = ["arm", "arm64", "ia32", "ppc64le", "riscv64", "s390x", "x64", "none"]
cpu_type = ctx.build_setting_value
if cpu_type in allowed_values:
return V8CpuTypeInfo(value = cpu_type)
diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl
index fc428ba16c..ed7dea9581 100644
--- a/deps/v8/bazel/defs.bzl
+++ b/deps/v8/bazel/defs.bzl
@@ -89,38 +89,77 @@ def _default_args():
return struct(
deps = [":define_flags"],
defines = select({
- "@config//:is_windows": [
+ "@v8//bazel/config:is_windows": [
"UNICODE",
"_UNICODE",
"_CRT_RAND_S",
- "_WIN32_WINNT=0x0602", # Override bazel default to Windows 8
+ "_WIN32_WINNT=0x0602", # Override bazel default to Windows 8
],
"//conditions:default": [],
}),
copts = select({
- "@config//:is_posix": [
+ "@v8//bazel/config:is_posix": [
"-fPIC",
+ "-fno-strict-aliasing",
"-Werror",
"-Wextra",
+ "-Wno-unknown-warning-option",
"-Wno-bitwise-instead-of-logical",
"-Wno-builtin-assume-aligned-alignment",
"-Wno-unused-parameter",
"-Wno-implicit-int-float-conversion",
"-Wno-deprecated-copy",
"-Wno-non-virtual-dtor",
- "-std=c++17",
"-isystem .",
],
"//conditions:default": [],
+ }) + select({
+ "@v8//bazel/config:is_clang": [
+ "-Wno-invalid-offsetof",
+ "-std=c++17",
+ ],
+ "@v8//bazel/config:is_gcc": [
+ "-Wno-extra",
+ "-Wno-array-bounds",
+ "-Wno-class-memaccess",
+ "-Wno-comments",
+ "-Wno-deprecated-declarations",
+ "-Wno-implicit-fallthrough",
+ "-Wno-int-in-bool-context",
+ "-Wno-maybe-uninitialized",
+ "-Wno-mismatched-new-delete",
+ "-Wno-redundant-move",
+ "-Wno-return-type",
+ "-Wno-stringop-overflow",
+ # Use GNU dialect, because GCC doesn't allow using
+ # ##__VA_ARGS__ when in standards-conforming mode.
+ "-std=gnu++17",
+ ],
+ "@v8//bazel/config:is_windows": [
+ "/std:c++17",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "@v8//bazel/config:is_gcc_fastbuild": [
+ # Non-debug builds without optimizations fail because
+ # of recursive inlining of "always_inline" functions.
+ "-O1",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "@v8//bazel/config:is_clang_s390x": [
+ "-fno-integrated-as",
+ ],
+ "//conditions:default": [],
}),
includes = ["include"],
linkopts = select({
- "@config//:is_windows": [
+ "@v8//bazel/config:is_windows": [
"Winmm.lib",
"DbgHelp.lib",
"Advapi32.lib",
],
- "@config//:is_macos": ["-pthread"],
+ "@v8//bazel/config:is_macos": ["-pthread"],
"//conditions:default": ["-Wl,--no-as-needed -ldl -pthread"],
}) + select({
":should_add_rdynamic": ["-rdynamic"],
@@ -209,6 +248,7 @@ def v8_library(
linkstatic = 1,
**kwargs
)
+
# Alias target used because of cc_library bug in bazel on windows
# https://github.com/bazelbuild/bazel/issues/14237
# TODO(victorgomes): Remove alias once bug is fixed
@@ -227,6 +267,7 @@ def v8_library(
linkstatic = 1,
**kwargs
)
+
# Alias target used because of cc_library bug in bazel on windows
# https://github.com/bazelbuild/bazel/issues/14237
# TODO(victorgomes): Remove alias once bug is fixed
@@ -248,8 +289,10 @@ def v8_library(
)
def _torque_impl(ctx):
- v8root = "."
- prefix = ctx.attr.prefix
+ if ctx.workspace_name == "v8":
+ v8root = "."
+ else:
+ v8root = "external/v8"
# Arguments
args = []
@@ -301,7 +344,6 @@ _v8_torque = rule(
cfg = "exec",
),
"args": attr.string_list(),
- "v8root": attr.label(default = ":v8_root"),
},
)
@@ -313,7 +355,7 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
args = args,
extras = extras,
tool = select({
- "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression",
+ "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
}),
)
@@ -324,32 +366,44 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
args = args,
extras = extras,
tool = select({
- "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression",
+ "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
}),
)
def _v8_target_cpu_transition_impl(settings, attr):
+ # Check for an existing v8_target_cpu flag.
+ if "@v8//bazel/config:v8_target_cpu" in settings:
+ if settings["@v8//bazel/config:v8_target_cpu"] != "none":
+ return
+
+ # Auto-detect target architecture based on the --cpu flag.
mapping = {
"haswell": "x64",
"k8": "x64",
"x86_64": "x64",
+ "darwin": "x64",
"darwin_x86_64": "x64",
+ "x64_windows": "x64",
"x86": "ia32",
- "ppc": "ppc64",
+ "aarch64": "arm64",
"arm64-v8a": "arm64",
"arm": "arm64",
+ "darwin_arm64": "arm64",
"armeabi-v7a": "arm32",
+ "s390x": "s390x",
+ "riscv64": "riscv64",
+ "ppc": "ppc64le",
}
v8_target_cpu = mapping[settings["//command_line_option:cpu"]]
- return {"@config//:v8_target_cpu": v8_target_cpu}
+ return {"@v8//bazel/config:v8_target_cpu": v8_target_cpu}
# Set the v8_target_cpu to be the correct architecture given the cpu specified
# on the command line.
v8_target_cpu_transition = transition(
implementation = _v8_target_cpu_transition_impl,
- inputs = ["//command_line_option:cpu"],
- outputs = ["@config//:v8_target_cpu"],
+ inputs = ["@v8//bazel/config:v8_target_cpu", "//command_line_option:cpu"],
+ outputs = ["@v8//bazel/config:v8_target_cpu"],
)
def _mksnapshot(ctx):
@@ -453,7 +507,8 @@ def build_config_content(cpu, icu):
("v8_enable_webassembly", "false"),
("v8_control_flow_integrity", "false"),
("v8_enable_single_generation", "false"),
- ("v8_enable_virtual_memory_cage", "false"),
+ ("v8_enable_sandbox", "false"),
+ ("v8_enable_shared_ro_heap", "false"),
("v8_target_cpu", cpu),
])
diff --git a/deps/v8/bazel/generate-inspector-files.cmd b/deps/v8/bazel/generate-inspector-files.cmd
deleted file mode 100644
index 202dd81d7c..0000000000
--- a/deps/v8/bazel/generate-inspector-files.cmd
+++ /dev/null
@@ -1,24 +0,0 @@
-REM Copyright 2021 the V8 project authors. All rights reserved.
-REM Use of this source code is governed by a BSD-style license that can be
-REM found in the LICENSE file.
-
-set BAZEL_OUT=%1
-
-REM Bazel nukes all env vars, and we need the following for gn to work
-set DEPOT_TOOLS_WIN_TOOLCHAIN=0
-set ProgramFiles(x86)=C:\Program Files (x86)
-set windir=C:\Windows
-
-REM Create a default GN output folder
-cmd.exe /S /E:ON /V:ON /D /c gn gen out/inspector
-
-REM Generate inspector files
-cmd.exe /S /E:ON /V:ON /D /c autoninja -C out/inspector gen/src/inspector/protocol/Forward.h
-
-REM Create directories in bazel output folder
-MKDIR -p %BAZEL_OUT%\include\inspector
-MKDIR -p %BAZEL_OUT%\src\inspector\protocol
-
-REM Copy generated files to bazel output folder
-COPY out\inspector\gen\include\inspector\* %BAZEL_OUT%\include\inspector\
-COPY out\inspector\gen\src\inspector\protocol\* %BAZEL_OUT%\src\inspector\protocol\ \ No newline at end of file
diff --git a/deps/v8/bazel/generate-inspector-files.sh b/deps/v8/bazel/generate-inspector-files.sh
deleted file mode 100755
index 7fd4ab2a56..0000000000
--- a/deps/v8/bazel/generate-inspector-files.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2021 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-BAZEL_OUT=$1
-
-# Create a default GN output folder
-gn gen out/inspector
-
-# Generate inspector files
-autoninja -C out/inspector src/inspector:inspector
-
-# Create directories in bazel output folder
-mkdir -p $BAZEL_OUT/include/inspector
-mkdir -p $BAZEL_OUT/src/inspector/protocol
-
-# Copy generated files to bazel output folder
-cp out/inspector/gen/include/inspector/* $BAZEL_OUT/include/inspector/
-cp out/inspector/gen/src/inspector/protocol/* $BAZEL_OUT/src/inspector/protocol/
diff --git a/deps/v8/bazel/requirements.in b/deps/v8/bazel/requirements.in
new file mode 100644
index 0000000000..7f7afbf3bf
--- /dev/null
+++ b/deps/v8/bazel/requirements.in
@@ -0,0 +1 @@
+jinja2
diff --git a/deps/v8/bazel/requirements.txt b/deps/v8/bazel/requirements.txt
new file mode 100644
index 0000000000..a9c132f688
--- /dev/null
+++ b/deps/v8/bazel/requirements.txt
@@ -0,0 +1,81 @@
+#
+# This file is autogenerated by pip-compile with python 3.9
+# To update, run:
+#
+# pip-compile --generate-hashes requirements.in
+#
+jinja2==3.0.3 \
+ --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
+ --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+ # via -r requirements.in
+markupsafe==2.0.1 \
+ --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \
+ --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \
+ --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \
+ --hash=sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194 \
+ --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \
+ --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \
+ --hash=sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724 \
+ --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \
+ --hash=sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646 \
+ --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \
+ --hash=sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6 \
+ --hash=sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a \
+ --hash=sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6 \
+ --hash=sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad \
+ --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \
+ --hash=sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38 \
+ --hash=sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac \
+ --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \
+ --hash=sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6 \
+ --hash=sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047 \
+ --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \
+ --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \
+ --hash=sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b \
+ --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \
+ --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \
+ --hash=sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a \
+ --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \
+ --hash=sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1 \
+ --hash=sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9 \
+ --hash=sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864 \
+ --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \
+ --hash=sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee \
+ --hash=sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f \
+ --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \
+ --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \
+ --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \
+ --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \
+ --hash=sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b \
+ --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \
+ --hash=sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86 \
+ --hash=sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6 \
+ --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \
+ --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \
+ --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \
+ --hash=sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28 \
+ --hash=sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e \
+ --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \
+ --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \
+ --hash=sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f \
+ --hash=sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d \
+ --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \
+ --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \
+ --hash=sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145 \
+ --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \
+ --hash=sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c \
+ --hash=sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1 \
+ --hash=sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a \
+ --hash=sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207 \
+ --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \
+ --hash=sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53 \
+ --hash=sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd \
+ --hash=sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134 \
+ --hash=sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85 \
+ --hash=sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9 \
+ --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \
+ --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \
+ --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \
+ --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \
+ --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872
+ # via jinja2
diff --git a/deps/v8/bazel/v8-non-pointer-compression.bzl b/deps/v8/bazel/v8-non-pointer-compression.bzl
index 4f1c6bc003..7bb23591ca 100644
--- a/deps/v8/bazel/v8-non-pointer-compression.bzl
+++ b/deps/v8/bazel/v8-non-pointer-compression.bzl
@@ -1,12 +1,12 @@
def _v8_disable_pointer_compression(settings, attr):
return {
- "//third_party/v8/HEAD:v8_enable_pointer_compression": "False",
+ "//:v8_enable_pointer_compression": "False",
}
v8_disable_pointer_compression = transition(
implementation = _v8_disable_pointer_compression,
inputs = [],
- outputs = ["//third_party/v8/HEAD:v8_enable_pointer_compression"],
+ outputs = ["//:v8_enable_pointer_compression"],
)
# The implementation of transition_rule: all this does is copy the
@@ -51,7 +51,7 @@ v8_binary_non_pointer_compression = rule(
# consequences for your build. The whitelist defaults to "everything".
# But you can redefine it more strictly if you feel that's prudent.
"_allowlist_function_transition": attr.label(
- default = "//tools/allowlists/function_transition_allowlist",
+ default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
# Making this executable means it works with "$ bazel run".
diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS
index cb04fa0838..e87e9c95a1 100644
--- a/deps/v8/gni/OWNERS
+++ b/deps/v8/gni/OWNERS
@@ -1 +1,5 @@
file:../INFRA_OWNERS
+
+per-file v8.cmx=victorgomes@chromium.org
+per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+per-file release_branch_toggle.gni=lutz@chromium.org \ No newline at end of file
diff --git a/deps/v8/gni/release_branch_toggle.gni b/deps/v8/gni/release_branch_toggle.gni
new file mode 100644
index 0000000000..c502c8c62e
--- /dev/null
+++ b/deps/v8/gni/release_branch_toggle.gni
@@ -0,0 +1,7 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+declare_args() {
+ is_on_release_branch = true
+} \ No newline at end of file
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index feabd079e0..39b196521c 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -70,6 +70,10 @@ if (v8_snapshot_toolchain == "") {
# therefore snapshots will need to be built using native mksnapshot
# in combination with qemu
v8_snapshot_toolchain = current_toolchain
+ } else if (host_cpu == "arm64" && current_cpu == "x64") {
+ # Cross-build from arm64 to intel (likely on an Apple Silicon mac).
+ v8_snapshot_toolchain =
+ "//build/toolchain/${host_os}:clang_arm64_v8_$v8_current_cpu"
} else if (host_cpu == "x64") {
# This is a cross-compile from an x64 host to either a non-Intel target
# cpu or a different target OS. Clang will always be used by default on the
diff --git a/deps/v8/gni/v8.cmx b/deps/v8/gni/v8.cmx
index 8cd8b75fdf..45fd74a09f 100644
--- a/deps/v8/gni/v8.cmx
+++ b/deps/v8/gni/v8.cmx
@@ -1,4 +1,11 @@
{
+ "facets": {
+ "fuchsia.test": {
+ "system-services": [
+ "fuchsia.kernel.VmexResource"
+ ]
+ }
+ },
"sandbox": {
"dev": [
"null",
@@ -18,6 +25,7 @@
"fuchsia.device.NameProvider",
"fuchsia.fonts.Provider",
"fuchsia.intl.PropertyProvider",
+ "fuchsia.kernel.VmexResource",
"fuchsia.logger.Log",
"fuchsia.logger.LogSink",
"fuchsia.media.Audio",
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index fe445307f9..7b9da1f06a 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -5,6 +5,7 @@
import("//build/config/gclient_args.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
+import("release_branch_toggle.gni")
import("split_static_library.gni")
declare_args() {
@@ -31,10 +32,6 @@ declare_args() {
# Support for backtrace_symbols on linux.
v8_enable_backtrace = ""
- # This flag is deprecated and is now available through the inspector interface
- # as an argument to profiler's method `takeHeapSnapshot`.
- v8_enable_raw_heap_snapshots = false
-
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
@@ -73,7 +70,7 @@ declare_args() {
v8_enable_webassembly = ""
# Enable runtime call stats.
- v8_enable_runtime_call_stats = true
+ v8_enable_runtime_call_stats = !is_on_release_branch
# Add fuzzilli fuzzer support.
v8_fuzzilli = false
@@ -85,6 +82,12 @@ declare_args() {
cppgc_is_standalone = false
+ # Enable object names in cppgc for debug purposes.
+ cppgc_enable_object_names = false
+
+ # Enable young generation in cppgc.
+ cppgc_enable_young_generation = false
+
# Enable advanced BigInt algorithms, costing about 10-30 KB binary size
# depending on platform. Disabled on Android to save binary size.
v8_advanced_bigint_algorithms = !is_android
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 0222513df2..535040c539 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -7,9 +7,12 @@ yangguo@chromium.org
per-file *DEPS=file:../COMMON_OWNERS
per-file v8-internal.h=file:../COMMON_OWNERS
-per-file v8-inspector.h=file:../src/inspector/OWNERS
-per-file v8-inspector-protocol.h=file:../src/inspector/OWNERS
+
+per-file v8-debug.h=file:../src/debug/OWNERS
+
per-file js_protocol.pdl=file:../src/inspector/OWNERS
+per-file v8-inspector*=file:../src/inspector/OWNERS
+per-file v8-inspector*=file:../src/inspector/OWNERS
# Needed by the auto_tag builder
per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
diff --git a/deps/v8/include/cppgc/README.md b/deps/v8/include/cppgc/README.md
index e454399853..a7d08f86b3 100644
--- a/deps/v8/include/cppgc/README.md
+++ b/deps/v8/include/cppgc/README.md
@@ -1,16 +1,133 @@
# Oilpan: C++ Garbage Collection
Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector.
+Oilpan implements mark-and-sweep garbage collection (GC) with limited compaction (for a subset of objects).
**Key properties**
+
- Trace-based garbage collection;
+- Incremental and concurrent marking;
+- Incremental and concurrent sweeping;
- Precise on-heap memory layout;
- Conservative on-stack memory layout;
- Allows for collection with and without considering stack;
-- Incremental and concurrent marking;
-- Incremental and concurrent sweeping;
- Non-incremental and non-concurrent compaction for selected spaces;
See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example on how to get started using Oilpan to manage C++ code.
Oilpan follows V8's project organization, see e.g. on how we accept [contributions](https://v8.dev/docs/contribute) and [provide a stable API](https://v8.dev/docs/api).
+
+## Threading model
+
+Oilpan features thread-local garbage collection and assumes heaps are not shared among threads.
+In other words, objects are accessed and ultimately reclaimed by the garbage collector on the same thread that allocates them.
+This allows Oilpan to run garbage collection in parallel with mutators running in other threads.
+
+References to objects belonging to another thread's heap are modeled using cross-thread roots.
+This is even true for on-heap to on-heap references.
+
+## Heap partitioning
+
+Oilpan's heaps are partitioned into spaces.
+The space for an object is chosen depending on a number of criteria, e.g.:
+
+- Objects over 64KiB are allocated in a large object space
+- Objects can be assigned to a dedicated custom space.
+ Custom spaces can also be marked as compactable.
+- Other objects are allocated in one of the normal page spaces bucketed depending on their size.
+
+## Precise and conservative garbage collection
+
+Oilpan supports two kinds of GCs:
+
+1. **Conservative GC.**
+A GC is called conservative when it is executed while the regular native stack is not empty.
+In this case, the native stack might contain references to objects in Oilpan's heap, which should be kept alive.
+The GC scans the native stack and treats the pointers discovered via the native stack as part of the root set.
+This kind of GC is considered imprecise because values on stack other than references may accidentally appear as references to on-heap object, which means these objects will be kept alive despite being in practice unreachable from the application as an actual reference.
+
+2. **Precise GC.**
+A precise GC is triggered at the end of an event loop, which is controlled by an embedder via a platform.
+At this point, it is guaranteed that there are no on-stack references pointing to Oilpan's heap.
+This means there is no risk of confusing other value types with references.
+Oilpan has precise knowledge of on-heap object layouts, and so it knows exactly where pointers lie in memory.
+Oilpan can just start marking from the regular root set and collect all garbage precisely.
+
+## Atomic, incremental and concurrent garbage collection
+
+Oilpan has three modes of operation:
+
+1. **Atomic GC.**
+The entire GC cycle, including all its phases (e.g. see [Marking](#Marking-phase) and [Sweeping](#Sweeping-phase)), are executed back to back in a single pause.
+This mode of operation is also known as Stop-The-World (STW) garbage collection.
+It results in the most jank (due to a single long pause), but is overall the most efficient (e.g. no need for write barriers).
+
+2. **Incremental GC.**
+Garbage collection work is split up into multiple steps which are interleaved with the mutator, i.e. user code chunked into tasks.
+Each step is a small chunk of work that is executed either as dedicated tasks between mutator tasks or, as needed, during mutator tasks.
+Using incremental GC introduces the need for write barriers that record changes to the object graph so that a consistent state is observed and no objects are accidentally considered dead and reclaimed.
+The incremental steps are followed by a smaller atomic pause to finalize garbage collection.
+The smaller pause times, due to smaller chunks of work, helps with reducing jank.
+
+3. **Concurrent GC.**
+This is the most common type of GC.
+It builds on top of incremental GC and offloads much of the garbage collection work away from the mutator thread and on to background threads.
+Using concurrent GC allows the mutator thread to spend less time on GC and more on the actual mutator.
+
+## Marking phase
+
+The marking phase consists of the following steps:
+
+1. Mark all objects in the root set.
+
+2. Mark all objects transitively reachable from the root set by calling `Trace()` methods defined on each object.
+
+3. Clear out all weak handles to unreachable objects and run weak callbacks.
+
+The marking phase can be executed atomically in a stop-the-world manner, in which all 3 steps are executed one after the other.
+
+Alternatively, it can also be executed incrementally/concurrently.
+With incremental/concurrent marking, step 1 is executed in a short pause after which the mutator regains control.
+Step 2 is repeatedly executed in an interleaved manner with the mutator.
+When the GC is ready to finalize, i.e. step 2 is (almost) finished, another short pause is triggered in which step 2 is finished and step 3 is performed.
+
+To prevent a user-after-free (UAF) issues it is required for Oilpan to know about all edges in the object graph.
+This means that all pointers except on-stack pointers must be wrapped with Oilpan's handles (i.e., Persistent<>, Member<>, WeakMember<>).
+Raw pointers to on-heap objects create an edge that Oilpan cannot observe and cause UAF issues
+Thus, raw pointers shall not be used to reference on-heap objects (except for raw pointers on native stacks).
+
+## Sweeping phase
+
+The sweeping phase consists of the following steps:
+
+1. Invoke pre-finalizers.
+At this point, no destructors have been invoked and no memory has been reclaimed.
+Pre-finalizers are allowed to access any other on-heap objects, even those that may get destructed.
+
+2. Sweeping invokes destructors of the dead (unreachable) objects and reclaims memory to be reused by future allocations.
+
+Assumptions should not be made about the order and the timing of their execution.
+There is no guarantee on the order in which the destructors are invoked.
+That's why destructors must not access any other on-heap objects (which might have already been destructed).
+If some destructor unavoidably needs to access other on-heap objects, it will have to be converted to a pre-finalizer.
+The pre-finalizer is allowed to access other on-heap objects.
+
+The mutator is resumed before all destructors have ran.
+For example, imagine a case where X is a client of Y, and Y holds a list of clients.
+If the code relies on X's destructor removing X from the list, there is a risk that Y iterates the list and calls some method of X which may touch other on-heap objects.
+This causes a use-after-free.
+Care must be taken to make sure that X is explicitly removed from the list before the mutator resumes its execution in a way that doesn't rely on X's destructor (e.g. a pre-finalizer).
+
+Similar to marking, sweeping can be executed in either an atomic stop-the-world manner or incrementally/concurrently.
+With incremental/concurrent sweeping, step 2 is interleaved with mutator.
+Incremental/concurrent sweeping can be atomically finalized in case it is needed to trigger another GC cycle.
+Even with concurrent sweeping, destructors are guaranteed to run on the thread the object has been allocated on to preserve C++ semantics.
+
+Notes:
+
+* Weak processing runs only when the holder object of the WeakMember outlives the pointed object.
+If the holder object and the pointed object die at the same time, weak processing doesn't run.
+It is wrong to write code assuming that the weak processing always runs.
+
+* Pre-finalizers are heavy because the thread needs to scan all pre-finalizers at each sweeping phase to determine which pre-finalizers should be invoked (the thread needs to invoke pre-finalizers of dead objects).
+Adding pre-finalizers to frequently created objects should be avoided.
diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h
index 2ccdeddd83..f9af756c39 100644
--- a/deps/v8/include/cppgc/default-platform.h
+++ b/deps/v8/include/cppgc/default-platform.h
@@ -6,7 +6,6 @@
#define INCLUDE_CPPGC_DEFAULT_PLATFORM_H_
#include <memory>
-#include <vector>
#include "cppgc/platform.h"
#include "libplatform/libplatform.h"
@@ -64,6 +63,8 @@ class V8_EXPORT DefaultPlatform : public Platform {
return v8_platform_->GetTracingController();
}
+ v8::Platform* GetV8Platform() const { return v8_platform_.get(); }
+
protected:
static constexpr v8::Isolate* kNoIsolate = nullptr;
diff --git a/deps/v8/include/cppgc/explicit-management.h b/deps/v8/include/cppgc/explicit-management.h
index cdb6af4858..0290328dcc 100644
--- a/deps/v8/include/cppgc/explicit-management.h
+++ b/deps/v8/include/cppgc/explicit-management.h
@@ -15,11 +15,27 @@ namespace cppgc {
class HeapHandle;
+namespace subtle {
+
+template <typename T>
+void FreeUnreferencedObject(HeapHandle& heap_handle, T& object);
+template <typename T>
+bool Resize(T& object, AdditionalBytes additional_bytes);
+
+} // namespace subtle
+
namespace internal {
-V8_EXPORT void FreeUnreferencedObject(HeapHandle&, void*);
-V8_EXPORT bool Resize(void*, size_t);
+class ExplicitManagementImpl final {
+ private:
+ V8_EXPORT static void FreeUnreferencedObject(HeapHandle&, void*);
+ V8_EXPORT static bool Resize(void*, size_t);
+ template <typename T>
+ friend void subtle::FreeUnreferencedObject(HeapHandle&, T&);
+ template <typename T>
+ friend bool subtle::Resize(T&, AdditionalBytes);
+};
} // namespace internal
namespace subtle {
@@ -45,7 +61,8 @@ template <typename T>
void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) {
static_assert(IsGarbageCollectedTypeV<T>,
"Object must be of type GarbageCollected.");
- internal::FreeUnreferencedObject(heap_handle, &object);
+ internal::ExplicitManagementImpl::FreeUnreferencedObject(heap_handle,
+ &object);
}
/**
@@ -73,7 +90,8 @@ template <typename T>
bool Resize(T& object, AdditionalBytes additional_bytes) {
static_assert(IsGarbageCollectedTypeV<T>,
"Object must be of type GarbageCollected.");
- return internal::Resize(&object, sizeof(T) + additional_bytes.value);
+ return internal::ExplicitManagementImpl::Resize(
+ &object, sizeof(T) + additional_bytes.value);
}
} // namespace subtle
diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h
index 75d127ee9c..6737c8be49 100644
--- a/deps/v8/include/cppgc/garbage-collected.h
+++ b/deps/v8/include/cppgc/garbage-collected.h
@@ -62,7 +62,8 @@ class GarbageCollected {
// virtual destructor requires an unambiguous, accessible 'operator delete'.
void operator delete(void*) {
#ifdef V8_ENABLE_CHECKS
- internal::Abort();
+ internal::Fatal(
+ "Manually deleting a garbage collected object is not allowed");
#endif // V8_ENABLE_CHECKS
}
void operator delete[](void*) = delete;
diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h
index 8e603d5d8a..54a4dbc21e 100644
--- a/deps/v8/include/cppgc/heap-consistency.h
+++ b/deps/v8/include/cppgc/heap-consistency.h
@@ -149,6 +149,19 @@ class HeapConsistency final {
internal::WriteBarrier::GenerationalBarrier(params, slot);
}
+ /**
+ * Generational barrier for source object that may contain outgoing pointers
+ * to objects in young generation.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param inner_pointer Pointer to the source object.
+ */
+ static V8_INLINE void GenerationalBarrierForSourceObject(
+ const WriteBarrierParams& params, const void* inner_pointer) {
+ internal::WriteBarrier::GenerationalBarrierForSourceObject(params,
+ inner_pointer);
+ }
+
private:
HeapConsistency() = delete;
};
diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h
index 136c4fb44d..aa3c6f468a 100644
--- a/deps/v8/include/cppgc/heap.h
+++ b/deps/v8/include/cppgc/heap.h
@@ -68,8 +68,8 @@ class V8_EXPORT Heap {
*/
kAtomic,
/**
- * Incremental marking, i.e. interleave marking is the rest of the
- * application on the same thread.
+ * Incremental marking interleaves marking with the rest of the application
+ * workload on the same thread.
*/
kIncremental,
/**
@@ -87,6 +87,11 @@ class V8_EXPORT Heap {
*/
kAtomic,
/**
+ * Incremental sweeping interleaves sweeping with the rest of the
+ * application workload on the same thread.
+ */
+ kIncremental,
+ /**
* Incremental and concurrent sweeping. Sweeping is split and interleaved
* with the rest of the application.
*/
diff --git a/deps/v8/include/cppgc/internal/prefinalizer-handler.h b/deps/v8/include/cppgc/internal/prefinalizer-handler.h
deleted file mode 100644
index 64b07ec911..0000000000
--- a/deps/v8/include/cppgc/internal/prefinalizer-handler.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
-#define INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
-
-#include "cppgc/heap.h"
-#include "cppgc/liveness-broker.h"
-
-namespace cppgc {
-namespace internal {
-
-class V8_EXPORT PreFinalizerRegistrationDispatcher final {
- public:
- using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*);
- struct PreFinalizer {
- void* object;
- PreFinalizerCallback callback;
-
- bool operator==(const PreFinalizer& other) const;
- };
-
- static void RegisterPrefinalizer(PreFinalizer pre_finalizer);
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index 67f039c658..cdb7ec6f9e 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -70,10 +70,6 @@ class V8_EXPORT WriteBarrier final {
// Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
- template <typename HeapHandleCallback>
- static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject(
- const void* value, Params& params, HeapHandleCallback callback);
-
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
@@ -84,9 +80,13 @@ class V8_EXPORT WriteBarrier final {
#if defined(CPPGC_YOUNG_GENERATION)
static V8_INLINE void GenerationalBarrier(const Params& params,
const void* slot);
-#else // !CPPGC_YOUNG_GENERATION
+ static V8_INLINE void GenerationalBarrierForSourceObject(
+ const Params& params, const void* inner_pointer);
+#else // !CPPGC_YOUNG_GENERATION
static V8_INLINE void GenerationalBarrier(const Params& params,
const void* slot) {}
+ static V8_INLINE void GenerationalBarrierForSourceObject(
+ const Params& params, const void* inner_pointer) {}
#endif // CPPGC_YOUNG_GENERATION
#if V8_ENABLE_CHECKS
@@ -124,8 +124,10 @@ class V8_EXPORT WriteBarrier final {
#if defined(CPPGC_YOUNG_GENERATION)
static CagedHeapLocalData& GetLocalData(HeapHandle&);
static void GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
- const AgeTable& ageTable,
+ const AgeTable& age_table,
const void* slot, uintptr_t value_offset);
+ static void GenerationalBarrierForSourceObjectSlow(
+ const CagedHeapLocalData& local_data, const void* object);
#endif // CPPGC_YOUNG_GENERATION
static AtomicEntryFlag incremental_or_concurrent_marking_flag_;
@@ -157,13 +159,6 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return GetNoSlot(value, params, callback);
}
- template <typename HeapHandleCallback>
- static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
- const void* value, WriteBarrier::Params& params,
- HeapHandleCallback callback) {
- return GetNoSlot(value, params, callback);
- }
-
private:
WriteBarrierTypeForCagedHeapPolicy() = delete;
@@ -292,15 +287,6 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
callback);
}
- template <typename HeapHandleCallback>
- static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
- const void* value, WriteBarrier::Params& params,
- HeapHandleCallback callback) {
- // The slot will never be used in `Get()` below.
- return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
- callback);
- }
-
private:
template <WriteBarrier::ValueMode value_mode>
struct ValueModeDispatch;
@@ -376,15 +362,6 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
}
// static
-template <typename HeapHandleCallback>
-WriteBarrier::Type
-WriteBarrier::GetWriteBarrierTypeForExternallyReferencedObject(
- const void* value, Params& params, HeapHandleCallback callback) {
- return WriteBarrierTypePolicy::GetForExternallyReferenced(value, params,
- callback);
-}
-
-// static
void WriteBarrier::DijkstraMarkingBarrier(const Params& params,
const void* object) {
CheckParams(Type::kMarking, params);
@@ -433,6 +410,21 @@ void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) {
GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset);
}
+// static
+void WriteBarrier::GenerationalBarrierForSourceObject(
+ const Params& params, const void* inner_pointer) {
+ CheckParams(Type::kGenerational, params);
+
+ const CagedHeapLocalData& local_data = params.caged_heap();
+ const AgeTable& age_table = local_data.age_table;
+
+ // Assume that if the first element is in young generation, the whole range is
+ // in young generation.
+ if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return;
+
+ GenerationalBarrierForSourceObjectSlow(local_data, inner_pointer);
+}
+
#endif // !CPPGC_YOUNG_GENERATION
} // namespace internal
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
index 38105b8e43..66a8cfd802 100644
--- a/deps/v8/include/cppgc/member.h
+++ b/deps/v8/include/cppgc/member.h
@@ -26,7 +26,7 @@ class MemberBase {
protected:
struct AtomicInitializerTag {};
- MemberBase() = default;
+ MemberBase() : raw_(nullptr) {}
explicit MemberBase(const void* value) : raw_(value) {}
MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); }
@@ -46,7 +46,10 @@ class MemberBase {
void ClearFromGC() const { raw_ = nullptr; }
private:
- mutable const void* raw_ = nullptr;
+ // All constructors initialize `raw_`. Do not add a default value here as it
+ // results in a non-atomic write on some builds, even when the atomic version
+ // of the constructor is used.
+ mutable const void* raw_;
};
// The basic class from which all Member classes are 'generated'.
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
index 182fb08549..244f94c819 100644
--- a/deps/v8/include/cppgc/persistent.h
+++ b/deps/v8/include/cppgc/persistent.h
@@ -118,10 +118,10 @@ class BasicPersistent final : public PersistentBase,
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicPersistent(internal::BasicMember<U, MemberBarrierPolicy,
- MemberWeaknessTag, MemberCheckingPolicy>
- member,
- const SourceLocation& loc = SourceLocation::Current())
+ BasicPersistent(
+ const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy>& member,
+ const SourceLocation& loc = SourceLocation::Current())
: BasicPersistent(member.Get(), loc) {}
~BasicPersistent() { Clear(); }
@@ -159,9 +159,8 @@ class BasicPersistent final : public PersistentBase,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent& operator=(
- internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
- MemberCheckingPolicy>
- member) {
+ const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy>& member) {
return operator=(member.Get());
}
@@ -292,12 +291,12 @@ template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
-bool operator==(const BasicPersistent<T1, PersistentWeaknessPolicy,
- PersistentLocationPolicy,
- PersistentCheckingPolicy>& p,
- BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>
- m) {
+bool operator==(
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ p,
+ const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>& m) {
return p.Get() == m.Get();
}
@@ -305,12 +304,12 @@ template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
-bool operator!=(const BasicPersistent<T1, PersistentWeaknessPolicy,
- PersistentLocationPolicy,
- PersistentCheckingPolicy>& p,
- BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>
- m) {
+bool operator!=(
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ p,
+ const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>& m) {
return !(p == m);
}
@@ -318,12 +317,12 @@ template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
-bool operator==(BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>
- m,
- const BasicPersistent<T1, PersistentWeaknessPolicy,
- PersistentLocationPolicy,
- PersistentCheckingPolicy>& p) {
+bool operator==(
+ const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>& m,
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ p) {
return m.Get() == p.Get();
}
@@ -331,12 +330,12 @@ template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
-bool operator!=(BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>
- m,
- const BasicPersistent<T1, PersistentWeaknessPolicy,
- PersistentLocationPolicy,
- PersistentCheckingPolicy>& p) {
+bool operator!=(
+ const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>& m,
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ p) {
return !(m == p);
}
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
index 3276a26b65..5d5f8796ad 100644
--- a/deps/v8/include/cppgc/platform.h
+++ b/deps/v8/include/cppgc/platform.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "cppgc/source-location.h"
#include "v8-platform.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
@@ -145,7 +146,8 @@ V8_EXPORT void ShutdownProcess();
namespace internal {
-V8_EXPORT void Abort();
+V8_EXPORT void Fatal(const std::string& reason = std::string(),
+ const SourceLocation& = SourceLocation::Current());
} // namespace internal
diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h
index 6153b37ff5..51f2eac8ed 100644
--- a/deps/v8/include/cppgc/prefinalizer.h
+++ b/deps/v8/include/cppgc/prefinalizer.h
@@ -6,23 +6,17 @@
#define INCLUDE_CPPGC_PREFINALIZER_H_
#include "cppgc/internal/compiler-specific.h"
-#include "cppgc/internal/prefinalizer-handler.h"
#include "cppgc/liveness-broker.h"
namespace cppgc {
namespace internal {
-template <typename T>
-class PrefinalizerRegistration final {
+class V8_EXPORT PrefinalizerRegistration final {
public:
- explicit PrefinalizerRegistration(T* self) {
- static_assert(sizeof(&T::InvokePreFinalizer) > 0,
- "USING_PRE_FINALIZER(T) must be defined.");
+ using Callback = bool (*)(const cppgc::LivenessBroker&, void*);
- cppgc::internal::PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- {self, T::InvokePreFinalizer});
- }
+ PrefinalizerRegistration(void*, Callback);
void* operator new(size_t, void* location) = delete;
void* operator new(size_t) = delete;
@@ -30,6 +24,35 @@ class PrefinalizerRegistration final {
} // namespace internal
+/**
+ * Macro must be used in the private section of `Class` and registers a
+ * prefinalization callback `void Class::PreFinalizer()`. The callback is
+ * invoked on garbage collection after the collector has found an object to be
+ * dead.
+ *
+ * Callback properties:
+ * - The callback is invoked before a possible destructor for the corresponding
+ * object.
+ * - The callback may access the whole object graph, irrespective of whether
+ * objects are considered dead or alive.
+ * - The callback is invoked on the same thread as the object was created on.
+ *
+ * Example:
+ * \code
+ * class WithPrefinalizer : public GarbageCollected<WithPrefinalizer> {
+ * CPPGC_USING_PRE_FINALIZER(WithPrefinalizer, Dispose);
+ *
+ * public:
+ * void Trace(Visitor*) const {}
+ * void Dispose() { prefinalizer_called = true; }
+ * ~WithPrefinalizer() {
+ * // prefinalizer_called == true
+ * }
+ * private:
+ * bool prefinalizer_called = false;
+ * };
+ * \endcode
+ */
#define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \
public: \
static bool InvokePreFinalizer(const cppgc::LivenessBroker& liveness_broker, \
@@ -43,8 +66,8 @@ class PrefinalizerRegistration final {
} \
\
private: \
- CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration<Class> \
- prefinalizer_dummy_{this}; \
+ CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \
+ prefinalizer_dummy_{this, Class::InvokePreFinalizer}; \
static_assert(true, "Force semicolon.")
} // namespace cppgc
diff --git a/deps/v8/include/cppgc/testing.h b/deps/v8/include/cppgc/testing.h
index 229ce140f9..bddd1fc163 100644
--- a/deps/v8/include/cppgc/testing.h
+++ b/deps/v8/include/cppgc/testing.h
@@ -19,8 +19,13 @@ class HeapHandle;
namespace testing {
/**
- * Overrides the state of the stack with the provided value. Takes precedence
- * over other parameters that set the stack state. Must no be nested.
+ * Overrides the state of the stack with the provided value. Parameters passed
+ * to explicit garbage collection calls still take precedence. Must not be
+ * nested.
+ *
+ * This scope is useful to make the garbage collector consider the stack when
+ * tasks that invoke garbage collection (through the provided platform) contain
+ * interesting pointers on its stack.
*/
class V8_EXPORT V8_NODISCARD OverrideEmbedderStackStateScope final {
CPPGC_STACK_ALLOCATED();
@@ -93,6 +98,8 @@ class V8_EXPORT StandaloneTestingHeap final {
HeapHandle& heap_handle_;
};
+V8_EXPORT bool IsHeapObjectOld(void*);
+
} // namespace testing
} // namespace cppgc
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index b34c8551ad..09c420e3a6 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -104,7 +104,9 @@ domain Debugger
# Location in the source code.
Location location
# JavaScript script name or url.
- string url
+ # Deprecated in favor of using the `location.scriptId` to resolve the URL via a previously
+ # sent `Debugger.scriptParsed` event.
+ deprecated string url
# Scope chain for this call frame.
array of Scope scopeChain
# `this` object for this call frame.
@@ -1550,6 +1552,18 @@ domain Runtime
parameters
string name
+ # This method tries to lookup and populate exception details for a
+ # JavaScript Error object.
+ # Note that the stackTrace portion of the resulting exceptionDetails will
+ # only be populated if the Runtime domain was enabled at the time when the
+ # Error was thrown.
+ experimental command getExceptionDetails
+ parameters
+ # The error object for which to resolve the exception details.
+ RemoteObjectId errorObjectId
+ returns
+ optional ExceptionDetails exceptionDetails
+
# Notification is issued every time when binding is called.
experimental event bindingCalled
parameters
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index fb79bcfe40..9ec60c04f9 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -90,17 +90,6 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
double idle_time_in_seconds);
/**
- * Attempts to set the tracing controller for the given platform.
- *
- * The |platform| has to be created using |NewDefaultPlatform|.
- *
- */
-V8_DEPRECATED("Access the DefaultPlatform directly")
-V8_PLATFORM_EXPORT void SetTracingController(
- v8::Platform* platform,
- v8::platform::tracing::TracingController* tracing_controller);
-
-/**
* Notifies the given platform about the Isolate getting deleted soon. Has to be
* called for all Isolates which are deleted - unless we're shutting down the
* platform.
diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h
index 0ce2b65368..e9047b79ce 100644
--- a/deps/v8/include/v8-array-buffer.h
+++ b/deps/v8/include/v8-array-buffer.h
@@ -175,8 +175,8 @@ class V8_EXPORT ArrayBuffer : public Object {
/**
* Convenience allocator.
*
- * When the virtual memory cage is enabled, this allocator will allocate its
- * backing memory inside the cage. Otherwise, it will rely on malloc/free.
+ * When the sandbox is enabled, this allocator will allocate its backing
+ * memory inside the sandbox. Otherwise, it will rely on malloc/free.
*
* Caller takes ownership, i.e. the returned object needs to be freed using
* |delete allocator| once it is no longer in use.
diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h
index b70d59dbec..70b9c2ae93 100644
--- a/deps/v8/include/v8-callbacks.h
+++ b/deps/v8/include/v8-callbacks.h
@@ -368,6 +368,20 @@ using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context,
Local<Object> meta);
/**
+ * HostCreateShadowRealmContextCallback is called each time a ShadowRealm is
+ * being constructed in the initiator_context.
+ *
+ * The method combines Context creation and implementation defined abstract
+ * operation HostInitializeShadowRealm into one.
+ *
+ * The embedder should use v8::Context::New or v8::Context:NewFromSnapshot to
+ * create a new context. If the creation fails, the embedder must propagate
+ * that exception by returning an empty MaybeLocal.
+ */
+using HostCreateShadowRealmContextCallback =
+ MaybeLocal<Context> (*)(Local<Context> initiator_context);
+
+/**
* PrepareStackTraceCallback is called when the stack property of an error is
* first accessed. The return value will be used as the stack value. If this
* callback is registed, the |Error.prepareStackTrace| API will be disabled.
diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h
index d398ac4b21..72dfbaad74 100644
--- a/deps/v8/include/v8-context.h
+++ b/deps/v8/include/v8-context.h
@@ -313,17 +313,6 @@ class V8_EXPORT Context : public Data {
explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
~BackupIncumbentScope();
- /**
- * Returns address that is comparable with JS stack address. Note that JS
- * stack may be allocated separately from the native stack. See also
- * |TryCatch::JSStackComparableAddressPrivate| for details.
- */
- V8_DEPRECATED(
- "This is private V8 information that should not be exposed in the API.")
- uintptr_t JSStackComparableAddress() const {
- return JSStackComparableAddressPrivate();
- }
-
private:
friend class internal::Isolate;
@@ -379,7 +368,7 @@ Local<Value> Context::GetEmbedderData(int index) {
}
void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
+#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
A ctx = *reinterpret_cast<const A*>(this);
@@ -387,10 +376,10 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
value_offset += I::kEmbedderDataSlotRawPayloadOffset;
#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
+ internal::Isolate* isolate = I::GetIsolateForSandbox(ctx);
return reinterpret_cast<void*>(
I::ReadExternalPointerField(isolate, embedder_data, value_offset,
internal::kEmbedderDataSlotPayloadTag));
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 8ec826a595..201773f59d 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -12,7 +12,6 @@
#include "cppgc/common.h"
#include "cppgc/custom-space.h"
#include "cppgc/heap-statistics.h"
-#include "cppgc/internal/write-barrier.h"
#include "cppgc/visitor.h"
#include "v8-internal.h" // NOLINT(build/include_directory)
#include "v8-platform.h" // NOLINT(build/include_directory)
@@ -78,9 +77,6 @@ struct WrapperDescriptor final {
};
struct V8_EXPORT CppHeapCreateParams {
- CppHeapCreateParams(const CppHeapCreateParams&) = delete;
- CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;
-
std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces;
WrapperDescriptor wrapper_descriptor;
};
@@ -148,6 +144,14 @@ class V8_EXPORT CppHeap {
*/
void CollectGarbageForTesting(cppgc::EmbedderStackState stack_state);
+ /**
+ * Performs a stop-the-world minor garbage collection for testing purposes.
+ *
+ * \param stack_state The stack state to assume for the garbage collection.
+ */
+ void CollectGarbageInYoungGenerationForTesting(
+ cppgc::EmbedderStackState stack_state);
+
private:
CppHeap() = default;
@@ -170,140 +174,6 @@ class JSVisitor : public cppgc::Visitor {
};
/**
- * **DO NOT USE: Use the appropriate managed types.**
- *
- * Consistency helpers that aid in maintaining a consistent internal state of
- * the garbage collector.
- */
-class V8_EXPORT JSHeapConsistency final {
- public:
- using WriteBarrierParams = cppgc::internal::WriteBarrier::Params;
- using WriteBarrierType = cppgc::internal::WriteBarrier::Type;
-
- /**
- * Gets the required write barrier type for a specific write.
- *
- * Note: Handling for C++ to JS references.
- *
- * \param ref The reference being written to.
- * \param params Parameters that may be used for actual write barrier calls.
- * Only filled if return value indicates that a write barrier is needed. The
- * contents of the `params` are an implementation detail.
- * \param callback Callback returning the corresponding heap handle. The
- * callback is only invoked if the heap cannot otherwise be figured out. The
- * callback must not allocate.
- * \returns whether a write barrier is needed and which barrier to invoke.
- */
- template <typename HeapHandleCallback>
- V8_DEPRECATED("Write barriers automatically emitted by TracedReference.")
- static V8_INLINE WriteBarrierType
- GetWriteBarrierType(const TracedReferenceBase& ref,
- WriteBarrierParams& params,
- HeapHandleCallback callback) {
- if (ref.IsEmpty()) return WriteBarrierType::kNone;
-
- if (V8_LIKELY(!cppgc::internal::WriteBarrier::
- IsAnyIncrementalOrConcurrentMarking())) {
- return cppgc::internal::WriteBarrier::Type::kNone;
- }
- cppgc::HeapHandle& handle = callback();
- if (!cppgc::subtle::HeapState::IsMarking(handle)) {
- return cppgc::internal::WriteBarrier::Type::kNone;
- }
- params.heap = &handle;
-#if V8_ENABLE_CHECKS
- params.type = cppgc::internal::WriteBarrier::Type::kMarking;
-#endif // !V8_ENABLE_CHECKS
- return cppgc::internal::WriteBarrier::Type::kMarking;
- }
-
- /**
- * Gets the required write barrier type for a specific write.
- *
- * Note: Handling for JS to C++ references.
- *
- * \param wrapper The wrapper that has been written into.
- * \param wrapper_index The wrapper index in `wrapper` that has been written
- * into.
- * \param wrappable The value that was written.
- * \param params Parameters that may be used for actual write barrier calls.
- * Only filled if return value indicates that a write barrier is needed. The
- * contents of the `params` are an implementation detail.
- * \param callback Callback returning the corresponding heap handle. The
- * callback is only invoked if the heap cannot otherwise be figured out. The
- * callback must not allocate.
- * \returns whether a write barrier is needed and which barrier to invoke.
- */
- template <typename HeapHandleCallback>
- V8_DEPRECATE_SOON(
- "Write barriers automatically emitted when using "
- "`SetAlignedPointerInInternalFields()`.")
- static V8_INLINE WriteBarrierType
- GetWriteBarrierType(v8::Local<v8::Object>& wrapper, int wrapper_index,
- const void* wrappable, WriteBarrierParams& params,
- HeapHandleCallback callback) {
-#if V8_ENABLE_CHECKS
- CheckWrapper(wrapper, wrapper_index, wrappable);
-#endif // V8_ENABLE_CHECKS
- return cppgc::internal::WriteBarrier::
- GetWriteBarrierTypeForExternallyReferencedObject(wrappable, params,
- callback);
- }
-
- /**
- * Conservative Dijkstra-style write barrier that processes an object if it
- * has not yet been processed.
- *
- * \param params The parameters retrieved from `GetWriteBarrierType()`.
- * \param ref The reference being written to.
- */
- V8_DEPRECATED("Write barriers automatically emitted by TracedReference.")
- static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params,
- cppgc::HeapHandle& heap_handle,
- const TracedReferenceBase& ref) {
- cppgc::internal::WriteBarrier::CheckParams(WriteBarrierType::kMarking,
- params);
- DijkstraMarkingBarrierSlow(heap_handle, ref);
- }
-
- /**
- * Conservative Dijkstra-style write barrier that processes an object if it
- * has not yet been processed.
- *
- * \param params The parameters retrieved from `GetWriteBarrierType()`.
- * \param object The pointer to the object. May be an interior pointer to a
- * an interface of the actual object.
- */
- V8_DEPRECATE_SOON(
- "Write barriers automatically emitted when using "
- "`SetAlignedPointerInInternalFields()`.")
- static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params,
- cppgc::HeapHandle& heap_handle,
- const void* object) {
- cppgc::internal::WriteBarrier::DijkstraMarkingBarrier(params, object);
- }
-
- /**
- * Generational barrier for maintaining consistency when running with multiple
- * generations.
- *
- * \param params The parameters retrieved from `GetWriteBarrierType()`.
- * \param ref The reference being written to.
- */
- V8_DEPRECATED("Write barriers automatically emitted by TracedReference.")
- static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params,
- const TracedReferenceBase& ref) {}
-
- private:
- JSHeapConsistency() = delete;
-
- static void CheckWrapper(v8::Local<v8::Object>&, int, const void*);
-
- static void DijkstraMarkingBarrierSlow(cppgc::HeapHandle&,
- const TracedReferenceBase& ref);
-};
-
-/**
* Provided as input to `CppHeap::CollectCustomSpaceStatisticsAtLastGC()`.
*
* Its method is invoked with the results of the statistic collection.
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index a13ae3f6d6..52255f3700 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -7,8 +7,8 @@
#include <stdint.h>
-#include "v8-local-handle.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-script.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -21,12 +21,17 @@ class String;
class V8_EXPORT StackFrame {
public:
/**
+ * Returns the source location, 0-based, for the associated function call.
+ */
+ Location GetLocation() const;
+
+ /**
* Returns the number, 1-based, of the line for the associate function call.
* This method will return Message::kNoLineNumberInfo if it is unable to
* retrieve the line number, or if kLineNumber was not passed as an option
* when capturing the StackTrace.
*/
- int GetLineNumber() const;
+ int GetLineNumber() const { return GetLocation().GetLineNumber() + 1; }
/**
* Returns the 1-based column offset on the line for the associated function
@@ -35,7 +40,7 @@ class V8_EXPORT StackFrame {
* the column number, or if kColumnOffset was not passed as an option when
* capturing the StackTrace.
*/
- int GetColumn() const;
+ int GetColumn() const { return GetLocation().GetColumnNumber() + 1; }
/**
* Returns the id of the script for the function for this StackFrame.
@@ -144,6 +149,18 @@ class V8_EXPORT StackTrace {
*/
static Local<StackTrace> CurrentStackTrace(
Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed);
+
+ /**
+ * Returns the first valid script name or source URL starting at the top of
+ * the JS stack. The returned string is either an empty handle if no script
+ * name/url was found or a non-zero-length string.
+ *
+ * This method is equivalent to calling StackTrace::CurrentStackTrace and
+ * walking the resulting frames from the beginning until a non-empty script
+ * name/url is found. The difference is that this method won't allocate
+ * a stack trace.
+ */
+ static Local<String> CurrentScriptNameOrSourceURL(Isolate* isolate);
};
} // namespace v8
diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h
index 81390f1a7c..43f96d7f0a 100644
--- a/deps/v8/include/v8-embedder-heap.h
+++ b/deps/v8/include/v8-embedder-heap.h
@@ -51,7 +51,11 @@ class V8_EXPORT EmbedderRootsHandler {
* being treated as roots.
*/
virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
- virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
+
+ V8_DEPRECATED("See v8::TracedGlobal class comment.")
+ virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) {
+ return true;
+ }
/**
* Used in combination with |IsRoot|. Called by V8 when an
@@ -88,6 +92,7 @@ class V8_EXPORT EmbedderHeapTracer {
class V8_EXPORT TracedGlobalHandleVisitor {
public:
virtual ~TracedGlobalHandleVisitor() = default;
+ V8_DEPRECATED("See v8::TracedGlobal class comment.")
virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
};
@@ -125,14 +130,6 @@ class V8_EXPORT EmbedderHeapTracer {
void SetStackStart(void* stack_start);
/**
- * Called by the embedder to notify V8 of an empty execution stack.
- */
- V8_DEPRECATED(
- "This call only optimized internal caches which V8 is able to figure out "
- "on its own now.")
- void NotifyEmptyEmbedderStack();
-
- /**
* Called by v8 to register internal fields of found wrappers.
*
* The embedder is expected to store them somewhere and trace reachable
@@ -197,6 +194,7 @@ class V8_EXPORT EmbedderHeapTracer {
*/
virtual bool IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
+ V8_DEPRECATED("See v8::TracedGlobal class comment.")
virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
/**
@@ -206,14 +204,6 @@ class V8_EXPORT EmbedderHeapTracer {
const v8::TracedReference<v8::Value>& handle);
/*
- * Called by the embedder to immediately perform a full garbage collection.
- *
- * Should only be used in testing code.
- */
- V8_DEPRECATE_SOON("Use Isolate::RequestGarbageCollectionForTesting instead")
- void GarbageCollectionForTesting(EmbedderStackState stack_state);
-
- /*
* Called by the embedder to signal newly allocated or freed memory. Not bound
* to tracing phases. Embedders should trade off when increments are reported
* as V8 may consult global heuristics on whether to trigger garbage
diff --git a/deps/v8/include/v8-embedder-state-scope.h b/deps/v8/include/v8-embedder-state-scope.h
index 37e1f2bacb..d8a3b08d5c 100644
--- a/deps/v8/include/v8-embedder-state-scope.h
+++ b/deps/v8/include/v8-embedder-state-scope.h
@@ -19,9 +19,10 @@ class EmbedderState;
// A StateTag represents a possible state of the embedder.
enum class EmbedderStateTag : uint8_t {
+ // reserved
EMPTY = 0,
- // embedder can define any state in between
- OTHER = UINT8_MAX,
+ OTHER = 1,
+ // embedder can define any state after
};
// A stack-allocated class that manages an embedder state on the isolate.
diff --git a/deps/v8/include/v8-exception.h b/deps/v8/include/v8-exception.h
index faa46487f8..64126c420a 100644
--- a/deps/v8/include/v8-exception.h
+++ b/deps/v8/include/v8-exception.h
@@ -169,13 +169,6 @@ class V8_EXPORT TryCatch {
*/
void SetCaptureMessage(bool value);
- V8_DEPRECATED(
- "This is private information that should not be exposed by the API")
- static void* JSStackComparableAddress(TryCatch* handler) {
- if (handler == nullptr) return nullptr;
- return reinterpret_cast<void*>(handler->JSStackComparableAddressPrivate());
- }
-
TryCatch(const TryCatch&) = delete;
void operator=(const TryCatch&) = delete;
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 0c0c1cbd5a..3403de93ec 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -532,10 +532,6 @@ class V8_EXPORT CFunction {
};
};
-struct ApiObject {
- uintptr_t address;
-};
-
/**
* A struct which may be passed to a fast call callback, like so:
* \code
@@ -660,7 +656,6 @@ struct CTypeInfoTraits {};
V(void, kVoid) \
V(v8::Local<v8::Value>, kV8Value) \
V(v8::Local<v8::Object>, kV8Value) \
- V(ApiObject, kApiObject) \
V(AnyCType, kAny)
// ApiObject was a temporary solution to wrap the pointer to the v8::Value.
diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h
index 7a2ae9316a..99022cec45 100644
--- a/deps/v8/include/v8-initialization.h
+++ b/deps/v8/include/v8-initialization.h
@@ -99,8 +99,10 @@ class V8_EXPORT V8 {
const int kBuildConfiguration =
(internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
(internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
- (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) |
- (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0);
+ (internal::SandboxedExternalPointersAreEnabled()
+ ? kSandboxedExternalPointers
+ : 0) |
+ (internal::SandboxIsEnabled() ? kSandbox : 0);
return Initialize(kBuildConfiguration);
}
@@ -181,64 +183,74 @@ class V8_EXPORT V8 {
* V8 was disposed.
*/
static void DisposePlatform();
- V8_DEPRECATE_SOON("Use DisposePlatform()")
+ V8_DEPRECATED("Use DisposePlatform()")
static void ShutdownPlatform() { DisposePlatform(); }
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_SANDBOX
//
- // Virtual Memory Cage related API.
+ // Sandbox related API.
//
// This API is not yet stable and subject to changes in the future.
//
/**
- * Initializes the virtual memory cage for V8.
+ * Initializes the V8 sandbox.
*
* This must be invoked after the platform was initialized but before V8 is
- * initialized. The virtual memory cage is torn down during platform shutdown.
+ * initialized. The sandbox is torn down during platform shutdown.
* Returns true on success, false otherwise.
*
- * TODO(saelo) Once it is no longer optional to create the virtual memory
- * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization
- * will likely happen as part of V8::Initialize, at which point this function
- * should be removed.
+ * TODO(saelo) Once it is no longer optional to initialize the sandbox when
+ * compiling with V8_SANDBOX, the sandbox initialization will likely happen
+ * as part of V8::Initialize, at which point this function should be removed.
*/
- static bool InitializeVirtualMemoryCage();
+ static bool InitializeSandbox();
+ V8_DEPRECATE_SOON("Use InitializeSandbox()")
+ static bool InitializeVirtualMemoryCage() { return InitializeSandbox(); }
/**
- * Provides access to the virtual memory cage page allocator.
+ * Provides access to the virtual address subspace backing the sandbox.
*
- * This allocator allocates pages inside the virtual memory cage. It can for
- * example be used to obtain virtual memory for ArrayBuffer backing stores,
- * which must be located inside the cage.
+ * This can be used to allocate pages inside the sandbox, for example to
+ * obtain virtual memory for ArrayBuffer backing stores, which must be
+ * located inside the sandbox.
*
- * It should be assumed that an attacker can corrupt data inside the cage,
- * and so in particular the contents of pages returned by this allocator,
- * arbitrarily and concurrently. Due to this, it is recommended to to only
- * place pure data buffers in pages obtained through this allocator.
+ * It should be assumed that an attacker can corrupt data inside the sandbox,
+ * and so in particular the contents of pages allocagted in this virtual
+ * address space, arbitrarily and concurrently. Due to this, it is
+ * recommended to to only place pure data buffers in them.
*
- * This function must only be called after initializing the virtual memory
- * cage and V8.
+ * This function must only be called after initializing the sandbox.
*/
+ static VirtualAddressSpace* GetSandboxAddressSpace();
+ V8_DEPRECATE_SOON("Use GetSandboxAddressSpace()")
static PageAllocator* GetVirtualMemoryCagePageAllocator();
/**
- * Returns the size of the virtual memory cage in bytes.
+ * Returns the size of the sandbox in bytes.
*
- * If the cage has not been initialized, or if the initialization failed,
+ * If the sandbox has not been initialized, or if the initialization failed,
* this returns zero.
*/
- static size_t GetVirtualMemoryCageSizeInBytes();
+ static size_t GetSandboxSizeInBytes();
+ V8_DEPRECATE_SOON("Use GetSandboxSizeInBytes()")
+ static size_t GetVirtualMemoryCageSizeInBytes() {
+ return GetSandboxSizeInBytes();
+ }
/**
- * Returns whether the virtual memory cage is configured securely.
+ * Returns whether the sandbox is configured securely.
*
- * If V8 cannot create a proper virtual memory cage, it will fall back to
- * creating a cage that doesn't have the desired security properties but at
- * least still allows V8 to function. This API can be used to determine if
- * such an insecure cage is being used, in which case it will return false.
+ * If V8 cannot create a proper sandbox, it will fall back to creating a
+ * sandbox that doesn't have the desired security properties but at least
+ * still allows V8 to function. This API can be used to determine if such an
+ * insecure sandbox is being used, in which case it will return false.
*/
- static bool IsUsingSecureVirtualMemoryCage();
+ static bool IsSandboxConfiguredSecurely();
+ V8_DEPRECATE_SOON("Use IsSandboxConfiguredSecurely()")
+ static bool IsUsingSecureVirtualMemoryCage() {
+ return IsSandboxConfiguredSecurely();
+ }
#endif
/**
@@ -274,8 +286,8 @@ class V8_EXPORT V8 {
enum BuildConfigurationFeatures {
kPointerCompression = 1 << 0,
k31BitSmis = 1 << 1,
- kHeapSandbox = 1 << 2,
- kVirtualMemoryCage = 1 << 3,
+ kSandboxedExternalPointers = 1 << 2,
+ kSandbox = 1 << 3,
};
/**
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 2a258d505a..edd968c766 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -23,6 +23,10 @@ class Value;
namespace v8_inspector {
+namespace internal {
+class V8DebuggerId;
+} // namespace internal
+
namespace protocol {
namespace Debugger {
namespace API {
@@ -106,6 +110,30 @@ class V8_EXPORT V8ContextInfo {
V8ContextInfo& operator=(const V8ContextInfo&) = delete;
};
+// This debugger id tries to be unique by generating two random
+// numbers, which should most likely avoid collisions.
+// Debugger id has a 1:1 mapping to context group. It is used to
+// attribute stack traces to a particular debugging, when doing any
+// cross-debugger operations (e.g. async step in).
+// See also Runtime.UniqueDebuggerId in the protocol.
+class V8_EXPORT V8DebuggerId {
+ public:
+ V8DebuggerId() = default;
+ V8DebuggerId(const V8DebuggerId&) = default;
+ V8DebuggerId& operator=(const V8DebuggerId&) = default;
+
+ std::unique_ptr<StringBuffer> toString() const;
+ bool isValid() const;
+ std::pair<int64_t, int64_t> pair() const;
+
+ private:
+ friend class internal::V8DebuggerId;
+ explicit V8DebuggerId(std::pair<int64_t, int64_t>);
+
+ int64_t m_first = 0;
+ int64_t m_second = 0;
+};
+
class V8_EXPORT V8StackTrace {
public:
virtual StringView firstNonEmptySourceURL() const = 0;
@@ -276,6 +304,7 @@ class V8_EXPORT V8Inspector {
virtual void contextDestroyed(v8::Local<v8::Context>) = 0;
virtual void resetContextGroup(int contextGroupId) = 0;
virtual v8::MaybeLocal<v8::Context> contextById(int contextId) = 0;
+ virtual V8DebuggerId uniqueDebuggerId(int contextId) = 0;
// Various instrumentation.
virtual void idleStarted() = 0;
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index f49b54557c..196518a2db 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -29,6 +29,13 @@ class Isolate;
typedef uintptr_t Address;
static const Address kNullAddress = 0;
+constexpr int KB = 1024;
+constexpr int MB = KB * 1024;
+constexpr int GB = MB * 1024;
+#ifdef V8_TARGET_ARCH_X64
+constexpr size_t TB = size_t{GB} * 1024;
+#endif
+
/**
* Configuration of tagging scheme.
*/
@@ -109,6 +116,11 @@ struct SmiTagging<8> {
};
#ifdef V8_COMPRESS_POINTERS
+// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer
+// compression.
+constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
+constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
+
static_assert(
kApiSystemPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
@@ -121,36 +133,6 @@ constexpr bool PointerCompressionIsEnabled() {
return kApiTaggedSize != kApiSystemPointerSize;
}
-constexpr bool HeapSandboxIsEnabled() {
-#ifdef V8_HEAP_SANDBOX
- return true;
-#else
- return false;
-#endif
-}
-
-using ExternalPointer_t = Address;
-
-// If the heap sandbox is enabled, these tag values will be ORed with the
-// external pointers in the external pointer table to prevent use of pointers of
-// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the
-// expected type's tag. The tags are constructed in a way that guarantees that a
-// failed type check will result in one or more of the top bits of the pointer
-// to be set, rendering the pointer inacessible. This construction allows
-// performing the type check and removing GC marking bits from the pointer at
-// the same time.
-enum ExternalPointerTag : uint64_t {
- kExternalPointerNullTag = 0x0000000000000000,
- kExternalStringResourceTag = 0x00ff000000000000, // 0b000000011111111
- kExternalStringResourceDataTag = 0x017f000000000000, // 0b000000101111111
- kForeignForeignAddressTag = 0x01bf000000000000, // 0b000000110111111
- kNativeContextMicrotaskQueueTag = 0x01df000000000000, // 0b000000111011111
- kEmbedderDataSlotPayloadTag = 0x01ef000000000000, // 0b000000111101111
- kCodeEntryPointTag = 0x01f7000000000000, // 0b000000111110111
-};
-
-constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
-
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
@@ -171,6 +153,164 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
kSmiTag;
}
+/*
+ * Sandbox related types, constants, and functions.
+ */
+constexpr bool SandboxIsEnabled() {
+#ifdef V8_SANDBOX
+ return true;
+#else
+ return false;
+#endif
+}
+
+constexpr bool SandboxedExternalPointersAreEnabled() {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ return true;
+#else
+ return false;
+#endif
+}
+
+// SandboxedPointers are guaranteed to point into the sandbox. This is achieved
+// for example by storing them as offset rather than as raw pointers.
+using SandboxedPointer_t = Address;
+
+// ExternalPointers point to objects located outside the sandbox. When sandboxed
+// external pointers are enabled, these are stored in an external pointer table
+// and referenced from HeapObjects through indices.
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+using ExternalPointer_t = uint32_t;
+#else
+using ExternalPointer_t = Address;
+#endif
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+// Size of the sandbox, excluding the guard regions surrounding it.
+constexpr size_t kSandboxSizeLog2 = 40; // 1 TB
+constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
+
+// Required alignment of the sandbox. For simplicity, we require the
+// size of the guard regions to be a multiple of this, so that this specifies
+// the alignment of the sandbox including and excluding surrounding guard
+// regions. The alignment requirement is due to the pointer compression cage
+// being located at the start of the sandbox.
+constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment;
+
+// Sandboxed pointers are stored inside the heap as offset from the sandbox
+// base shifted to the left. This way, it is guaranteed that the offset is
+// smaller than the sandbox size after shifting it to the right again. This
+// constant specifies the shift amount.
+constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;
+
+// Size of the guard regions surrounding the sandbox. This assumes a worst-case
+// scenario of a 32-bit unsigned index used to access an array of 64-bit
+// values.
+constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;
+
+static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
+ "The size of the guard regions around the sandbox must be a "
+ "multiple of its required alignment.");
+
+// Minimum size of the sandbox, excluding the guard regions surrounding it. If
+// the virtual memory reservation for the sandbox fails, its size is currently
+// halved until either the reservation succeeds or the minimum size is reached.
+// A minimum of 32GB allows the 4GB pointer compression region as well as the
+// ArrayBuffer partition and two 10GB Wasm memory cages to fit into the
+// sandbox. 32GB should also be the minimum possible size of the userspace
+// address space as there are some machine configurations with only 36 virtual
+// address bits.
+constexpr size_t kSandboxMinimumSize = 32ULL * GB;
+
+static_assert(kSandboxMinimumSize <= kSandboxSize,
+ "The minimal size of the sandbox must be smaller or equal to the "
+ "regular size.");
+
+// On OSes where reserving virtual memory is too expensive to reserve the
+// entire address space backing the sandbox, notably Windows pre 8.1, we create
+// a partially reserved sandbox that doesn't actually reserve most of the
+// memory, and so doesn't have the desired security properties as unrelated
+// memory allocations could end up inside of it, but which still ensures that
+// objects that should be located inside the sandbox are allocated within
+// kSandboxSize bytes from the start of the sandbox. The minimum size of the
+// region that is actually reserved for such a sandbox is specified by this
+// constant and should be big enough to contain the pointer compression cage as
+// well as the ArrayBuffer partition.
+constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
+
+static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
+ "The sandbox must be larger than the pointer compression cage "
+ "contained within it.");
+static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
+ "The minimum reservation size for a sandbox must be larger than "
+ "the pointer compression cage contained within it.");
+
+// For now, even if the sandbox is enabled, we still allow backing stores to be
+// allocated outside of it as fallback. This will simplify the initial rollout.
+// However, if sandboxed pointers are also enabled, we must always place
+// backing stores inside the sandbox as they will be referenced though them.
+#ifdef V8_SANDBOXED_POINTERS
+constexpr bool kAllowBackingStoresOutsideSandbox = false;
+#else
+constexpr bool kAllowBackingStoresOutsideSandbox = true;
+#endif // V8_SANDBOXED_POINTERS
+
+// The size of the virtual memory reservation for an external pointer table.
+// This determines the maximum number of entries in a table. Using a maximum
+// size allows omitting bounds checks on table accesses if the indices are
+// guaranteed (e.g. through shifting) to be below the maximum index. This
+// value must be a power of two.
+static const size_t kExternalPointerTableReservationSize = 128 * MB;
+
+// The maximum number of entries in an external pointer table.
+static const size_t kMaxSandboxedExternalPointers =
+ kExternalPointerTableReservationSize / kApiSystemPointerSize;
+
+// The external pointer table indices stored in HeapObjects as external
+// pointers are shifted to the left by this amount to guarantee that they are
+// smaller than the maximum table size.
+static const uint32_t kExternalPointerIndexShift = 8;
+static_assert((1 << (32 - kExternalPointerIndexShift)) ==
+ kMaxSandboxedExternalPointers,
+ "kExternalPointerTableReservationSize and "
+ "kExternalPointerIndexShift don't match");
+
+#endif // V8_SANDBOX_IS_AVAILABLE
+
+// If sandboxed external pointers are enabled, these tag values will be ORed
+// with the external pointers in the external pointer table to prevent use of
+// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
+// inverse of the expected type's tag. The tags are constructed in a way that
+// guarantees that a failed type check will result in one or more of the top
+// bits of the pointer to be set, rendering the pointer inacessible. Besides
+// the type tag bits (48 through 62), the tags also have the GC mark bit (63)
+// set, so that the mark bit is automatically set when a pointer is written
+// into the external pointer table (in which case it is clearly alive) and is
+// cleared when the pointer is loaded. The exception to this is the free entry
+// tag, which doesn't have the mark bit set, as the entry is not alive. This
+// construction allows performing the type check and removing GC marking bits
+// (the MSB) from the pointer at the same time.
+// Note: this scheme assumes a 48-bit address space and will likely break if
+// more virtual address bits are used.
+constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+constexpr uint64_t kExternalPointerTagShift = 48;
+#define MAKE_TAG(v) (static_cast<uint64_t>(v) << kExternalPointerTagShift)
+// clang-format off
+enum ExternalPointerTag : uint64_t {
+ kExternalPointerNullTag = MAKE_TAG(0b0000000000000000),
+ kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000),
+ kExternalStringResourceTag = MAKE_TAG(0b1000000011111111),
+ kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111),
+ kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111),
+ kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
+ kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111),
+ kCodeEntryPointTag = MAKE_TAG(0b1000000111110111),
+ kExternalObjectValueTag = MAKE_TAG(0b1000000111111011),
+};
+// clang-format on
+#undef MAKE_TAG
+
// Converts encoded external pointer to address.
V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
ExternalPointer_t pointer,
@@ -214,7 +354,7 @@ class Internals {
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
#endif
static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
@@ -250,10 +390,10 @@ class Internals {
kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
static const int kExternalPointerTableBufferOffset = 0;
- static const int kExternalPointerTableLengthOffset =
- kExternalPointerTableBufferOffset + kApiSystemPointerSize;
static const int kExternalPointerTableCapacityOffset =
- kExternalPointerTableLengthOffset + kApiInt32Size;
+ kExternalPointerTableBufferOffset + kApiSystemPointerSize;
+ static const int kExternalPointerTableFreelistHeadOffset =
+ kExternalPointerTableCapacityOffset + kApiInt32Size;
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
@@ -432,9 +572,9 @@ class Internals {
#endif
}
- V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox(
+ V8_INLINE static internal::Isolate* GetIsolateForSandbox(
internal::Address obj) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
return internal::IsolateFromNeverReadOnlySpaceObject(obj);
#else
// Not used in non-sandbox mode.
@@ -445,7 +585,7 @@ class Internals {
V8_INLINE static Address DecodeExternalPointer(
const Isolate* isolate, ExternalPointer_t encoded_pointer,
ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
#else
return encoded_pointer;
@@ -455,7 +595,7 @@ class Internals {
V8_INLINE static internal::Address ReadExternalPointerField(
internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
internal::ExternalPointer_t encoded_value =
ReadRawField<uint32_t>(heap_object_ptr, offset);
// We currently have to treat zero as nullptr in embedder slots.
@@ -467,10 +607,6 @@ class Internals {
}
#ifdef V8_COMPRESS_POINTERS
- // See v8:7703 or src/ptr-compr.* for details about pointer compression.
- static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
- static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
-
V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
internal::Address addr) {
return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
@@ -486,100 +622,6 @@ class Internals {
#endif // V8_COMPRESS_POINTERS
};
-constexpr bool VirtualMemoryCageIsEnabled() {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- return true;
-#else
- return false;
-#endif
-}
-
-// CagedPointers are guaranteed to point into the virtual memory cage. This is
-// achieved for example by storing them as offset from the cage base rather
-// than as raw pointers.
-using CagedPointer_t = Address;
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#define GB (1ULL << 30)
-#define TB (1ULL << 40)
-
-// Size of the virtual memory cage, excluding the guard regions surrounding it.
-constexpr size_t kVirtualMemoryCageSizeLog2 = 40; // 1 TB
-constexpr size_t kVirtualMemoryCageSize = 1ULL << kVirtualMemoryCageSizeLog2;
-
-// Required alignment of the virtual memory cage. For simplicity, we require the
-// size of the guard regions to be a multiple of this, so that this specifies
-// the alignment of the cage including and excluding surrounding guard regions.
-// The alignment requirement is due to the pointer compression cage being
-// located at the start of the virtual memory cage.
-constexpr size_t kVirtualMemoryCageAlignment =
- Internals::kPtrComprCageBaseAlignment;
-
-// Caged pointers are stored inside the heap as offset from the cage base
-// shifted to the left. This way, it is guaranteed that the offset is smaller
-// than the cage size after shifting it to the right again. This constant
-// specifies the shift amount.
-constexpr uint64_t kCagedPointerShift = 64 - kVirtualMemoryCageSizeLog2;
-
-// Size of the guard regions surrounding the virtual memory cage. This assumes a
-// worst-case scenario of a 32-bit unsigned index being used to access an array
-// of 64-bit values.
-constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB;
-
-static_assert((kVirtualMemoryCageGuardRegionSize %
- kVirtualMemoryCageAlignment) == 0,
- "The size of the virtual memory cage guard region must be a "
- "multiple of its required alignment.");
-
-// Minimum size of the virtual memory cage, excluding the guard regions
-// surrounding it. If the cage reservation fails, its size is currently halved
-// until either the reservation succeeds or the minimum size is reached. A
-// minimum of 32GB allows the 4GB pointer compression region as well as the
-// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
-// 32GB should also be the minimum possible size of the userspace address space
-// as there are some machine configurations with only 36 virtual address bits.
-constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB;
-
-static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize,
- "The minimal size of the virtual memory cage must be smaller or "
- "equal to the regular size.");
-
-// On OSes where reservation virtual memory is too expensive to create a real
-// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually
-// reserve most of the memory, and so doesn't have the desired security
-// properties, but still ensures that objects that should be located inside the
-// cage are allocated within kVirtualMemoryCageSize bytes from the start of the
-// cage, and so appear to be inside the cage. The minimum size of the virtual
-// memory range that is actually reserved for a fake cage is specified by this
-// constant and should be big enough to contain the pointer compression region
-// as well as the ArrayBuffer partition.
-constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB;
-
-static_assert(kVirtualMemoryCageMinimumSize >
- Internals::kPtrComprCageReservationSize,
- "The virtual memory cage must be larger than the pointer "
- "compression cage contained within it.");
-static_assert(kFakeVirtualMemoryCageMinReservationSize >
- Internals::kPtrComprCageReservationSize,
- "The reservation for a fake virtual memory cage must be larger "
- "than the pointer compression cage contained within it.");
-
-// For now, even if the virtual memory cage is enabled, we still allow backing
-// stores to be allocated outside of it as fallback. This will simplify the
-// initial rollout. However, if the heap sandbox is also enabled, we already use
-// the "enforcing mode" of the virtual memory cage. This is useful for testing.
-#ifdef V8_HEAP_SANDBOX
-constexpr bool kAllowBackingStoresOutsideCage = false;
-#else
-constexpr bool kAllowBackingStoresOutsideCage = true;
-#endif // V8_HEAP_SANDBOX
-
-#undef GB
-#undef TB
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
template <bool PerformCheck>
@@ -608,6 +650,10 @@ V8_INLINE void PerformCastCheck(T* data) {
// how static casts work with std::shared_ptr.
class BackingStoreBase {};
+// The maximum value in enum GarbageCollectionReason, defined in heap.h.
+// This is needed for histograms sampling garbage collection reasons.
+constexpr int kGarbageCollectionReasonMaxValue = 25;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index 2fc7daf40b..2849d7cae1 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -10,7 +10,6 @@
#include <memory>
#include <utility>
-#include <vector>
#include "cppgc/common.h"
#include "v8-array-buffer.h" // NOLINT(build/include_directory)
@@ -225,6 +224,7 @@ class V8_EXPORT Isolate {
/**
* Explicitly specify a startup snapshot blob. The embedder owns the blob.
+ * The embedder *must* ensure that the snapshot is from a trusted source.
*/
StartupData* snapshot_blob = nullptr;
@@ -283,6 +283,12 @@ class V8_EXPORT Isolate {
int embedder_wrapper_object_index = -1;
/**
+ * Callbacks to invoke in case of fatal or OOM errors.
+ */
+ FatalErrorCallback fatal_error_callback = nullptr;
+ OOMErrorCallback oom_error_callback = nullptr;
+
+ /**
* The following parameter is experimental and may change significantly.
* This is currently for internal testing.
*/
@@ -523,6 +529,8 @@ class V8_EXPORT Isolate {
kWasmMultiValue = 110,
kWasmExceptionHandling = 111,
kInvalidatedMegaDOMProtector = 112,
+ kFunctionPrototypeArguments = 113,
+ kFunctionPrototypeCaller = 114,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -628,7 +636,7 @@ class V8_EXPORT Isolate {
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
- V8_DEPRECATE_SOON("Use HostImportModuleDynamicallyCallback")
+ V8_DEPRECATED("Use HostImportModuleDynamicallyCallback")
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback);
void SetHostImportModuleDynamicallyCallback(
@@ -642,6 +650,13 @@ class V8_EXPORT Isolate {
HostInitializeImportMetaObjectCallback callback);
/**
+ * This specifies the callback called by the upcoming ShadowRealm
+ * construction language feature to retrieve host created globals.
+ */
+ void SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallback callback);
+
+ /**
* This specifies the callback called when the stack property of Error
* is accessed.
*/
diff --git a/deps/v8/include/v8-locker.h b/deps/v8/include/v8-locker.h
index 88ce4beb62..7ca5bf6e42 100644
--- a/deps/v8/include/v8-locker.h
+++ b/deps/v8/include/v8-locker.h
@@ -127,6 +127,7 @@ class V8_EXPORT Locker {
* The current implementation is quite confusing and leads to unexpected
* results if anybody uses v8::Locker in the current process.
*/
+ V8_DEPRECATE_SOON("This method will be removed.")
static bool WasEverUsed();
V8_DEPRECATED("Use WasEverUsed instead")
static bool IsActive();
diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h
index d771a49ff9..a13276412a 100644
--- a/deps/v8/include/v8-message.h
+++ b/deps/v8/include/v8-message.h
@@ -61,31 +61,6 @@ class ScriptOriginOptions {
*/
class V8_EXPORT ScriptOrigin {
public:
- #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATED("Use constructor with primitive C++ types")
- #endif
- ScriptOrigin(
- Local<Value> resource_name, Local<Integer> resource_line_offset,
- Local<Integer> resource_column_offset,
- Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
- Local<Integer> script_id = Local<Integer>(),
- Local<Value> source_map_url = Local<Value>(),
- Local<Boolean> resource_is_opaque = Local<Boolean>(),
- Local<Boolean> is_wasm = Local<Boolean>(),
- Local<Boolean> is_module = Local<Boolean>(),
- Local<Data> host_defined_options = Local<Data>());
- #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATED("Use constructor that takes an isolate")
- #endif
- explicit ScriptOrigin(Local<Value> resource_name,
- int resource_line_offset = 0,
- int resource_column_offset = 0,
- bool resource_is_shared_cross_origin = false,
- int script_id = -1,
- Local<Value> source_map_url = Local<Value>(),
- bool resource_is_opaque = false, bool is_wasm = false,
- bool is_module = false,
- Local<Data> host_defined_options = Local<Data>());
V8_INLINE ScriptOrigin(Isolate* isolate, Local<Value> resource_name,
int resource_line_offset = 0,
int resource_column_offset = 0,
@@ -108,12 +83,6 @@ class V8_EXPORT ScriptOrigin {
}
V8_INLINE Local<Value> ResourceName() const;
- V8_DEPRECATED("Use getter with primitive C++ types.")
- V8_INLINE Local<Integer> ResourceLineOffset() const;
- V8_DEPRECATED("Use getter with primitive C++ types.")
- V8_INLINE Local<Integer> ResourceColumnOffset() const;
- V8_DEPRECATED("Use getter with primitive C++ types.")
- V8_INLINE Local<Integer> ScriptID() const;
V8_INLINE int LineOffset() const;
V8_INLINE int ColumnOffset() const;
V8_INLINE int ScriptId() const;
@@ -220,8 +189,6 @@ class V8_EXPORT Message {
bool IsSharedCrossOrigin() const;
bool IsOpaque() const;
- V8_DEPRECATED("Use the version that takes a std::ostream&.")
- static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out);
static const int kNoLineNumberInfo = 0;
@@ -232,18 +199,6 @@ class V8_EXPORT Message {
Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
-Local<Integer> ScriptOrigin::ResourceLineOffset() const {
- return v8::Integer::New(isolate_, resource_line_offset_);
-}
-
-Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
- return v8::Integer::New(isolate_, resource_column_offset_);
-}
-
-Local<Integer> ScriptOrigin::ScriptID() const {
- return v8::Integer::New(isolate_, script_id_);
-}
-
Local<Data> ScriptOrigin::GetHostDefinedOptions() const {
return host_defined_options_;
}
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index 62738442f7..01bc538e22 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -21,6 +21,7 @@ class Isolate;
namespace metrics {
struct GarbageCollectionPhases {
+ int64_t total_wall_clock_duration_in_us = -1;
int64_t compact_wall_clock_duration_in_us = -1;
int64_t mark_wall_clock_duration_in_us = -1;
int64_t sweep_wall_clock_duration_in_us = -1;
@@ -34,6 +35,7 @@ struct GarbageCollectionSizes {
};
struct GarbageCollectionFullCycle {
+ int reason = -1;
GarbageCollectionPhases total;
GarbageCollectionPhases total_cpp;
GarbageCollectionPhases main_thread;
@@ -73,6 +75,7 @@ struct GarbageCollectionFullMainThreadBatchedIncrementalSweep {
};
struct GarbageCollectionYoungCycle {
+ int reason = -1;
int64_t total_wall_clock_duration_in_us = -1;
int64_t main_thread_wall_clock_duration_in_us = -1;
double collection_rate_in_percent;
@@ -230,6 +233,8 @@ struct V8_EXPORT LongTaskStats {
int64_t gc_full_atomic_wall_clock_duration_us = 0;
int64_t gc_full_incremental_wall_clock_duration_us = 0;
int64_t gc_young_wall_clock_duration_us = 0;
+ // Only collected with --slow-histograms
+ int64_t v8_execute_us = 0;
};
} // namespace metrics
diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h
index e047c413ac..11ff03dd20 100644
--- a/deps/v8/include/v8-object.h
+++ b/deps/v8/include/v8-object.h
@@ -594,7 +594,7 @@ class V8_EXPORT Object : public Value {
/**
* Returns the context in which the object was created.
*/
- V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
+ V8_DEPRECATED("Use MaybeLocal<Context> GetCreationContext()")
Local<Context> CreationContext();
MaybeLocal<Context> GetCreationContext();
@@ -735,7 +735,7 @@ Local<Value> Object::GetInternalField(int index) {
}
void* Object::GetAlignedPointerFromInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
+#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<A*>(this);
@@ -744,10 +744,10 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
auto instance_type = I::GetInstanceType(obj);
if (v8::internal::CanHaveInternalField(instance_type)) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
offset += I::kEmbedderDataSlotRawPayloadOffset;
#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ internal::Isolate* isolate = I::GetIsolateForSandbox(obj);
A value = I::ReadExternalPointerField(
isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
return reinterpret_cast<void*>(value);
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 9e226331f8..91b3fd9cc3 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -401,6 +401,8 @@ class PageAllocator {
// this is used to set the MAP_JIT flag on Apple Silicon.
// TODO(jkummerow): Remove this when Wasm has a platform-independent
// w^x implementation.
+ // TODO(saelo): Remove this once all JIT pages are allocated through the
+ // VirtualAddressSpace API.
kNoAccessWillJitLater
};
@@ -510,8 +512,59 @@ class PageAllocator {
virtual bool CanAllocateSharedPages() { return false; }
};
+// Opaque type representing a handle to a shared memory region.
+using PlatformSharedMemoryHandle = intptr_t;
+static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
+
+// Conversion routines from the platform-dependent shared memory identifiers
+// into the opaque PlatformSharedMemoryHandle type. These use the underlying
+// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
+// to avoid pulling in large OS header files into this header file. Instead,
+// the users of these routines are expected to include the respecitve OS
+// headers in addition to this one.
+#if V8_OS_MACOS
+// Convert between a shared memory handle and a mach_port_t referencing a memory
+// entry object.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
+ unsigned int port) {
+ return static_cast<PlatformSharedMemoryHandle>(port);
+}
+inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
+ PlatformSharedMemoryHandle handle) {
+ return static_cast<unsigned int>(handle);
+}
+#elif V8_OS_FUCHSIA
+// Convert between a shared memory handle and a zx_handle_t to a VMO.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
+ return static_cast<PlatformSharedMemoryHandle>(handle);
+}
+inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ return static_cast<uint32_t>(handle);
+}
+#elif V8_OS_WIN
+// Convert between a shared memory handle and a Windows HANDLE to a file mapping
+// object.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
+ void* handle) {
+ return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
+}
+inline void* FileMappingFromSharedMemoryHandle(
+ PlatformSharedMemoryHandle handle) {
+ return reinterpret_cast<void*>(handle);
+}
+#else
+// Convert between a shared memory handle and a file descriptor.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
+ return static_cast<PlatformSharedMemoryHandle>(fd);
+}
+inline int FileDescriptorFromSharedMemoryHandle(
+ PlatformSharedMemoryHandle handle) {
+ return static_cast<int>(handle);
+}
+#endif
+
/**
- * Page permissions.
+ * Possible permissions for memory pages.
*/
enum class PagePermissions {
kNoAccess,
@@ -528,17 +581,21 @@ enum class PagePermissions {
* sub-spaces and (private or shared) memory pages can be allocated, freed, and
* modified. This interface is meant to eventually replace the PageAllocator
* interface, and can be used as an alternative in the meantime.
+ *
+ * This API is not yet stable and may change without notice!
*/
class VirtualAddressSpace {
public:
using Address = uintptr_t;
VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
- Address base, size_t size)
+ Address base, size_t size,
+ PagePermissions max_page_permissions)
: page_size_(page_size),
allocation_granularity_(allocation_granularity),
base_(base),
- size_(size) {}
+ size_(size),
+ max_page_permissions_(max_page_permissions) {}
virtual ~VirtualAddressSpace() = default;
@@ -576,6 +633,14 @@ class VirtualAddressSpace {
size_t size() const { return size_; }
/**
+ * The maximum page permissions that pages allocated inside this space can
+ * obtain.
+ *
+ * \returns the maximum page permissions.
+ */
+ PagePermissions max_page_permissions() const { return max_page_permissions_; }
+
+ /**
* Sets the random seed so that GetRandomPageAddress() will generate
* repeatable sequences of random addresses.
*
@@ -598,6 +663,7 @@ class VirtualAddressSpace {
* given address first. If that fails, the allocation is attempted to be
* placed elsewhere, possibly nearby, but that is not guaranteed. Specifying
* zero for the hint always causes this function to choose a random address.
+ * The hint, if specified, must be aligned to the specified alignment.
*
* \param size The size of the allocation in bytes. Must be a multiple of the
* allocation_granularity().
@@ -618,16 +684,16 @@ class VirtualAddressSpace {
/**
* Frees previously allocated pages.
*
+ * This function will terminate the process on failure as this implies a bug
+ * in the client. As such, there is no return value.
+ *
* \param address The start address of the pages to free. This address must
- * have been obtains from a call to AllocatePages.
+ * have been obtained through a call to AllocatePages.
*
* \param size The size in bytes of the region to free. This must match the
* size passed to AllocatePages when the pages were allocated.
- *
- * \returns true on success, false otherwise.
*/
- virtual V8_WARN_UNUSED_RESULT bool FreePages(Address address,
- size_t size) = 0;
+ virtual void FreePages(Address address, size_t size) = 0;
/**
* Sets permissions of all allocated pages in the given range.
@@ -646,6 +712,77 @@ class VirtualAddressSpace {
Address address, size_t size, PagePermissions permissions) = 0;
/**
+ * Creates a guard region at the specified address.
+ *
+ * Guard regions are guaranteed to cause a fault when accessed and generally
+ * do not count towards any memory consumption limits. Further, allocating
+ * guard regions can usually not fail in subspaces if the region does not
+ * overlap with another region, subspace, or page allocation.
+ *
+ * \param address The start address of the guard region. Must be aligned to
+ * the allocation_granularity().
+ *
+ * \param size The size of the guard region in bytes. Must be a multiple of
+ * the allocation_granularity().
+ *
+ * \returns true on success, false otherwise.
+ */
+ virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
+ size_t size) = 0;
+
+ /**
+ * Frees an existing guard region.
+ *
+ * This function will terminate the process on failure as this implies a bug
+ * in the client. As such, there is no return value.
+ *
+ * \param address The start address of the guard region to free. This address
+ * must have previously been used as address parameter in a successful
+ * invocation of AllocateGuardRegion.
+ *
+ * \param size The size in bytes of the guard region to free. This must match
+ * the size passed to AllocateGuardRegion when the region was created.
+ */
+ virtual void FreeGuardRegion(Address address, size_t size) = 0;
+
+ /**
+ * Allocates shared memory pages with the given permissions.
+ *
+ * \param hint Placement hint. See AllocatePages.
+ *
+ * \param size The size of the allocation in bytes. Must be a multiple of the
+ * allocation_granularity().
+ *
+ * \param permissions The page permissions of the newly allocated pages.
+ *
+ * \param handle A platform-specific handle to a shared memory object. See
+ * the SharedMemoryHandleFromX routines above for ways to obtain these.
+ *
+ * \param offset The offset in the shared memory object at which the mapping
+ * should start. Must be a multiple of the allocation_granularity().
+ *
+ * \returns the start address of the allocated pages on success, zero on
+ * failure.
+ */
+ virtual V8_WARN_UNUSED_RESULT Address
+ AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
+ PlatformSharedMemoryHandle handle, uint64_t offset) = 0;
+
+ /**
+ * Frees previously allocated shared pages.
+ *
+ * This function will terminate the process on failure as this implies a bug
+ * in the client. As such, there is no return value.
+ *
+ * \param address The start address of the pages to free. This address must
+ * have been obtained through a call to AllocateSharedPages.
+ *
+ * \param size The size in bytes of the region to free. This must match the
+ * size passed to AllocateSharedPages when the pages were allocated.
+ */
+ virtual void FreeSharedPages(Address address, size_t size) = 0;
+
+ /**
* Whether this instance can allocate subspaces or not.
*
* \returns true if subspaces can be allocated, false if not.
@@ -668,14 +805,14 @@ class VirtualAddressSpace {
* \param alignment The alignment of the subspace in bytes. Must be a multiple
* of the allocation_granularity() and should be a power of two.
*
- * \param max_permissions The maximum permissions that pages allocated in the
- * subspace can obtain.
+ * \param max_page_permissions The maximum permissions that pages allocated in
+ * the subspace can obtain.
*
* \returns a new subspace or nullptr on failure.
*/
virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) = 0;
+ PagePermissions max_page_permissions) = 0;
//
// TODO(v8) maybe refactor the methods below before stabilizing the API. For
@@ -715,6 +852,7 @@ class VirtualAddressSpace {
const size_t allocation_granularity_;
const Address base_;
const size_t size_;
+ const PagePermissions max_page_permissions_;
};
/**
diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h
index 11c01876c7..1b6de16686 100644
--- a/deps/v8/include/v8-primitive.h
+++ b/deps/v8/include/v8-primitive.h
@@ -585,8 +585,6 @@ class V8_EXPORT Symbol : public Name {
/**
* Returns the description string of the symbol, or undefined if none.
*/
- V8_DEPRECATED("Use Symbol::Description(isolate)")
- Local<Value> Description() const;
Local<Value> Description(Isolate* isolate) const;
/**
@@ -787,7 +785,7 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ internal::Isolate* isolate = I::GetIsolateForSandbox(obj);
A value =
I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
internal::kExternalStringResourceTag);
@@ -811,7 +809,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
ExternalStringResourceBase* resource;
if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ internal::Isolate* isolate = I::GetIsolateForSandbox(obj);
A value =
I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
internal::kExternalStringResourceTag);
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
index 770c796396..88252ac189 100644
--- a/deps/v8/include/v8-script.h
+++ b/deps/v8/include/v8-script.h
@@ -47,7 +47,7 @@ class V8_EXPORT ScriptOrModule {
* The options that were passed by the embedder as HostDefinedOptions to
* the ScriptOrigin.
*/
- V8_DEPRECATE_SOON("Use HostDefinedOptions")
+ V8_DEPRECATED("Use HostDefinedOptions")
Local<PrimitiveArray> GetHostDefinedOptions();
Local<Data> HostDefinedOptions();
};
@@ -173,29 +173,6 @@ class V8_EXPORT Module : public Data {
Local<Value> GetException() const;
/**
- * Returns the number of modules requested by this module.
- */
- V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().")
- int GetModuleRequestsLength() const;
-
- /**
- * Returns the ith module specifier in this module.
- * i must be < GetModuleRequestsLength() and >= 0.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
- Local<String> GetModuleRequest(int i) const;
-
- /**
- * Returns the source location (line number and column number) of the ith
- * module specifier's first occurrence in this module.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
- "Module::SourceOffsetToLocation().")
- Location GetModuleRequestLocation(int i) const;
-
- /**
* Returns the ModuleRequests for this module.
*/
Local<FixedArray> GetModuleRequests() const;
@@ -211,9 +188,6 @@ class V8_EXPORT Module : public Data {
*/
int GetIdentityHash() const;
- using ResolveCallback =
- MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
- Local<Module> referrer);
using ResolveModuleCallback = MaybeLocal<Module> (*)(
Local<Context> context, Local<String> specifier,
Local<FixedArray> import_assertions, Local<Module> referrer);
@@ -225,11 +199,6 @@ class V8_EXPORT Module : public Data {
* instantiation. (In the case where the callback throws an exception, that
* exception is propagated.)
*/
- V8_DEPRECATED(
- "Use the version of InstantiateModule that takes a ResolveModuleCallback "
- "parameter")
- V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
- ResolveCallback callback);
V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
Local<Context> context, ResolveModuleCallback callback);
@@ -407,6 +376,7 @@ class V8_EXPORT ScriptCompiler {
class Source {
public:
// Source takes ownership of both CachedData and CodeCacheConsumeTask.
+ // The caller *must* ensure that the cached data is from a trusted source.
V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
CachedData* cached_data = nullptr,
ConsumeCodeCacheTask* consume_cache_task = nullptr);
@@ -473,18 +443,6 @@ class V8_EXPORT ScriptCompiler {
* V8 has parsed the data it received so far.
*/
virtual size_t GetMoreData(const uint8_t** src) = 0;
-
- /**
- * [DEPRECATED]: No longer used, will be removed soon.
- */
- V8_DEPRECATED("Not used")
- virtual bool SetBookmark() { return false; }
-
- /**
- * [DEPRECATED]: No longer used, will be removed soon.
- */
- V8_DEPRECATED("Not used")
- virtual void ResetToBookmark() {}
};
/**
@@ -692,6 +650,7 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
+ V8_DEPRECATED("Use CompileFunction")
static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
Local<Context> context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h
index ed02598c36..2400357cf6 100644
--- a/deps/v8/include/v8-snapshot.h
+++ b/deps/v8/include/v8-snapshot.h
@@ -5,8 +5,6 @@
#ifndef INCLUDE_V8_SNAPSHOT_H_
#define INCLUDE_V8_SNAPSHOT_H_
-#include <vector>
-
#include "v8-internal.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h
index 7db34a970c..2dcd1d1fb9 100644
--- a/deps/v8/include/v8-traced-handle.h
+++ b/deps/v8/include/v8-traced-handle.h
@@ -11,10 +11,8 @@
#include <atomic>
#include <memory>
-#include <string>
#include <type_traits>
#include <utility>
-#include <vector>
#include "v8-internal.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
@@ -199,6 +197,21 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* A traced handle with destructor that clears the handle. For more details see
* BasicTracedReference.
+ *
+ * This type is being deprecated and embedders are encouraged to use
+ * `v8::TracedReference` in combination with `v8::CppHeap`. If this is not
+ * possible, the following provides feature parity:
+ *
+ * \code
+ * template <typename T>
+ * struct TracedGlobalPolyfill {
+ * v8::TracedReference<T> traced_reference;
+ * v8::Global<T> weak_reference_for_callback;
+ * };
+ * \endcode
+ *
+ * In this example, `weak_reference_for_callback` can be used to emulate
+ * `SetFinalizationCallback()`.
*/
template <typename T>
class TracedGlobal : public BasicTracedReference<T> {
@@ -213,6 +226,7 @@ class TracedGlobal : public BasicTracedReference<T> {
/**
* An empty TracedGlobal without storage cell.
*/
+ V8_DEPRECATED("See class comment.")
TracedGlobal() : BasicTracedReference<T>() {}
/**
@@ -222,6 +236,7 @@ class TracedGlobal : public BasicTracedReference<T> {
* pointing to the same object.
*/
template <class S>
+ V8_DEPRECATED("See class comment.")
TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
this->val_ =
this->New(isolate, that.val_, &this->val_,
@@ -492,18 +507,20 @@ V8_INLINE bool operator!=(const v8::Local<U>& lhs,
template <class T>
template <class S>
-void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
+void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
+ this->Reset();
if (other.IsEmpty()) return;
- this->val_ = this->New(isolate, other.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithDestructor,
- internal::GlobalHandleStoreMode::kAssigningStore);
+ this->SetSlotThreadSafe(
+ this->New(isolate, other.val_, &this->val_,
+ internal::GlobalHandleDestructionMode::kWithoutDestructor,
+ internal::GlobalHandleStoreMode::kAssigningStore));
}
template <class T>
template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) noexcept {
+TracedReference<T>& TracedReference<T>::operator=(
+ TracedReference<S>&& rhs) noexcept {
static_assert(std::is_base_of<T, S>::value, "type check");
*this = std::move(rhs.template As<T>());
return *this;
@@ -511,14 +528,16 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) noexcept {
template <class T>
template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
+TracedReference<T>& TracedReference<T>::operator=(
+ const TracedReference<S>& rhs) {
static_assert(std::is_base_of<T, S>::value, "type check");
*this = rhs.template As<T>();
return *this;
}
template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) noexcept {
+TracedReference<T>& TracedReference<T>::operator=(
+ TracedReference&& rhs) noexcept {
if (this != &rhs) {
internal::MoveTracedGlobalReference(
reinterpret_cast<internal::Address**>(&rhs.val_),
@@ -528,7 +547,7 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) noexcept {
}
template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
+TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
if (this != &rhs) {
this->Reset();
if (rhs.val_ != nullptr) {
@@ -540,22 +559,36 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
return *this;
}
+void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
+ using I = internal::Internals;
+ if (IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+uint16_t TracedReferenceBase::WrapperClassId() const {
+ using I = internal::Internals;
+ if (IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
template <class T>
template <class S>
-void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
+void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
static_assert(std::is_base_of<T, S>::value, "type check");
- this->Reset();
+ Reset();
if (other.IsEmpty()) return;
- this->SetSlotThreadSafe(
- this->New(isolate, other.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithoutDestructor,
- internal::GlobalHandleStoreMode::kAssigningStore));
+ this->val_ = this->New(isolate, other.val_, &this->val_,
+ internal::GlobalHandleDestructionMode::kWithDestructor,
+ internal::GlobalHandleStoreMode::kAssigningStore);
}
template <class T>
template <class S>
-TracedReference<T>& TracedReference<T>::operator=(
- TracedReference<S>&& rhs) noexcept {
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) noexcept {
static_assert(std::is_base_of<T, S>::value, "type check");
*this = std::move(rhs.template As<T>());
return *this;
@@ -563,16 +596,14 @@ TracedReference<T>& TracedReference<T>::operator=(
template <class T>
template <class S>
-TracedReference<T>& TracedReference<T>::operator=(
- const TracedReference<S>& rhs) {
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
static_assert(std::is_base_of<T, S>::value, "type check");
*this = rhs.template As<T>();
return *this;
}
template <class T>
-TracedReference<T>& TracedReference<T>::operator=(
- TracedReference&& rhs) noexcept {
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) noexcept {
if (this != &rhs) {
internal::MoveTracedGlobalReference(
reinterpret_cast<internal::Address**>(&rhs.val_),
@@ -582,7 +613,7 @@ TracedReference<T>& TracedReference<T>::operator=(
}
template <class T>
-TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
if (this != &rhs) {
this->Reset();
if (rhs.val_ != nullptr) {
@@ -594,22 +625,6 @@ TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
return *this;
}
-void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
- using I = internal::Internals;
- if (IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- *reinterpret_cast<uint16_t*>(addr) = class_id;
-}
-
-uint16_t TracedReferenceBase::WrapperClassId() const {
- using I = internal::Internals;
- if (IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- return *reinterpret_cast<uint16_t*>(addr);
-}
-
template <class T>
void TracedGlobal<T>::SetFinalizationCallback(
void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
diff --git a/deps/v8/include/v8-value-serializer-version.h b/deps/v8/include/v8-value-serializer-version.h
index c72911c64d..25eb19ca75 100644
--- a/deps/v8/include/v8-value-serializer-version.h
+++ b/deps/v8/include/v8-value-serializer-version.h
@@ -17,7 +17,7 @@
namespace v8 {
-constexpr uint32_t CurrentValueSerializerFormatVersion() { return 13; }
+constexpr uint32_t CurrentValueSerializerFormatVersion() { return 15; }
} // namespace v8
diff --git a/deps/v8/include/v8-value-serializer.h b/deps/v8/include/v8-value-serializer.h
index 574567bd5a..078f367c64 100644
--- a/deps/v8/include/v8-value-serializer.h
+++ b/deps/v8/include/v8-value-serializer.h
@@ -67,6 +67,23 @@ class V8_EXPORT ValueSerializer {
virtual Maybe<uint32_t> GetWasmModuleTransferId(
Isolate* isolate, Local<WasmModuleObject> module);
+
+ /**
+ * Returns whether shared values are supported. GetSharedValueId is only
+ * called if SupportsSharedValues() returns true.
+ */
+ virtual bool SupportsSharedValues() const;
+
+ /**
+ * Called when the ValueSerializer serializes a value that is shared across
+ * Isolates. The embedder must return an ID for the object. This function
+ * must be idempotent for the same object. When deserializing, the ID will
+ * be passed to ValueDeserializer::Delegate::GetSharedValueFromId as
+ * |shared_value_id|.
+ */
+ virtual Maybe<uint32_t> GetSharedValueId(Isolate* isolate,
+ Local<Value> shared_value);
+
/**
* Allocates memory for the buffer of at least the size provided. The actual
* size (which may be greater or equal) is written to |actual_size|. If no
@@ -166,17 +183,30 @@ class V8_EXPORT ValueDeserializer {
/**
* Get a WasmModuleObject given a transfer_id previously provided
- * by ValueSerializer::GetWasmModuleTransferId
+ * by ValueSerializer::Delegate::GetWasmModuleTransferId
*/
virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id);
/**
* Get a SharedArrayBuffer given a clone_id previously provided
- * by ValueSerializer::GetSharedArrayBufferId
+ * by ValueSerializer::Delegate::GetSharedArrayBufferId
*/
virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
Isolate* isolate, uint32_t clone_id);
+
+ /**
+ * Returns whether shared values are supported. GetSharedValueFromId is only
+ * called if SupportsSharedValues() returns true.
+ */
+ virtual bool SupportsSharedValues() const;
+
+ /**
+ * Get a value shared across Isolates given a shared_value_id provided by
+ * ValueSerializer::Delegate::GetSharedValueId.
+ */
+ virtual MaybeLocal<Value> GetSharedValueFromId(Isolate* isolate,
+ uint32_t shared_value_id);
};
ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 1b2795a877..4e3fa99ea6 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 177
-#define V8_PATCH_LEVEL 9
+#define V8_MAJOR_VERSION 10
+#define V8_MINOR_VERSION 1
+#define V8_BUILD_NUMBER 124
+#define V8_PATCH_LEVEL 6
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8-weak-callback-info.h b/deps/v8/include/v8-weak-callback-info.h
index ff3c08238e..f1677e9da0 100644
--- a/deps/v8/include/v8-weak-callback-info.h
+++ b/deps/v8/include/v8-weak-callback-info.h
@@ -51,12 +51,26 @@ class WeakCallbackInfo {
void* embedder_fields_[kEmbedderFieldsInWeakCallback];
};
-// kParameter will pass a void* parameter back to the callback, kInternalFields
-// will pass the first two internal fields back to the callback, kFinalizer
-// will pass a void* parameter back, but is invoked before the object is
-// actually collected, so it can be resurrected. In the last case, it is not
-// possible to request a second pass callback.
-enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
+/**
+ * Weakness type for weak handles.
+ */
+enum class WeakCallbackType {
+ /**
+ * Passes a user-defined void* parameter back to the callback.
+ */
+ kParameter,
+ /**
+ * Passes the first two internal fields of the object back to the callback.
+ */
+ kInternalFields,
+ /**
+ * Passes a user-defined void* parameter back to the callback. Will do so
+ * before the object is actually reclaimed, allowing it to be resurrected. In
+ * this case it is not possible to set a second-pass callback.
+ */
+ kFinalizer V8_ENUM_DEPRECATE_SOON("Resurrecting finalizers are deprecated "
+ "and will not be supported going forward.")
+};
template <class T>
void* WeakCallbackInfo<T>::GetInternalField(int index) const {
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index dd91f880b7..1067d3eb24 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -19,7 +19,6 @@
#include <stdint.h>
#include <memory>
-#include <vector>
#include "cppgc/common.h"
#include "v8-array-buffer.h" // NOLINT(build/include_directory)
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 1242d4289c..77fd65c6c5 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -65,13 +65,14 @@ path. Add it with -I<path> to the command line
// Operating system detection (host)
//
// V8_OS_ANDROID - Android
-// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD)
+// V8_OS_BSD - BSDish (macOS, Net/Free/Open/DragonFlyBSD)
// V8_OS_CYGWIN - Cygwin
// V8_OS_DRAGONFLYBSD - DragonFlyBSD
// V8_OS_FREEBSD - FreeBSD
// V8_OS_FUCHSIA - Fuchsia
-// V8_OS_LINUX - Linux
-// V8_OS_MACOSX - Mac OS X
+// V8_OS_LINUX - Linux (Android, ChromeOS, Linux, ...)
+// V8_OS_DARWIN - Darwin (macOS, iOS)
+// V8_OS_MACOS - macOS
// V8_OS_IOS - iOS
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
@@ -89,13 +90,14 @@ path. Add it with -I<path> to the command line
# define V8_OS_STRING "android"
#elif defined(__APPLE__)
-# define V8_OS_BSD 1
-# define V8_OS_MACOSX 1
# define V8_OS_POSIX 1
+# define V8_OS_BSD 1
+# define V8_OS_DARWIN 1
# if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
# define V8_OS_IOS 1
# define V8_OS_STRING "ios"
# else
+# define V8_OS_MACOS 1
# define V8_OS_STRING "macos"
# endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
@@ -169,7 +171,7 @@ path. Add it with -I<path> to the command line
// V8_TARGET_OS_FUCHSIA
// V8_TARGET_OS_IOS
// V8_TARGET_OS_LINUX
-// V8_TARGET_OS_MACOSX
+// V8_TARGET_OS_MACOS
// V8_TARGET_OS_WIN
//
// If not set explicitly, these fall back to corresponding V8_OS_ values.
@@ -181,7 +183,7 @@ path. Add it with -I<path> to the command line
&& !defined(V8_TARGET_OS_FUCHSIA) \
&& !defined(V8_TARGET_OS_IOS) \
&& !defined(V8_TARGET_OS_LINUX) \
- && !defined(V8_TARGET_OS_MACOSX) \
+ && !defined(V8_TARGET_OS_MACOS) \
&& !defined(V8_TARGET_OS_WIN)
# error No known target OS defined.
# endif
@@ -192,7 +194,7 @@ path. Add it with -I<path> to the command line
|| defined(V8_TARGET_OS_FUCHSIA) \
|| defined(V8_TARGET_OS_IOS) \
|| defined(V8_TARGET_OS_LINUX) \
- || defined(V8_TARGET_OS_MACOSX) \
+ || defined(V8_TARGET_OS_MACOS) \
|| defined(V8_TARGET_OS_WIN)
# error A target OS is defined but V8_HAVE_TARGET_OS is unset.
# endif
@@ -214,8 +216,8 @@ path. Add it with -I<path> to the command line
# define V8_TARGET_OS_LINUX
#endif
-#ifdef V8_OS_MACOSX
-# define V8_TARGET_OS_MACOSX
+#ifdef V8_OS_MACOS
+# define V8_TARGET_OS_MACOS
#endif
#ifdef V8_OS_WIN
@@ -232,7 +234,7 @@ path. Add it with -I<path> to the command line
# define V8_TARGET_OS_STRING "ios"
#elif defined(V8_TARGET_OS_LINUX)
# define V8_TARGET_OS_STRING "linux"
-#elif defined(V8_TARGET_OS_MACOSX)
+#elif defined(V8_TARGET_OS_MACOS)
# define V8_TARGET_OS_STRING "macos"
#elif defined(V8_TARGET_OS_WINDOWS)
# define V8_TARGET_OS_STRING "windows"
@@ -578,17 +580,15 @@ V8 shared library set USING_V8_SHARED.
#endif // V8_OS_WIN
-// The virtual memory cage is available (i.e. defined) when pointer compression
-// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as
-// well. This allows better test coverage of the cage.
+// The sandbox is available (i.e. defined) when pointer compression
+// is enabled, but it is only used when V8_SANDBOX is enabled as
+// well. This allows better test coverage of the sandbox.
#if defined(V8_COMPRESS_POINTERS)
-#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
+#define V8_SANDBOX_IS_AVAILABLE
#endif
-// CagedPointers are currently only used if the heap sandbox is enabled.
-// In the future, they will be enabled when the virtual memory cage is enabled.
-#if defined(V8_HEAP_SANDBOX)
-#define V8_CAGED_POINTERS
+#if defined(V8_SANDBOX) && !defined(V8_SANDBOX_IS_AVAILABLE)
+#error Inconsistent configuration: sandbox is enabled but not available
#endif
// From C++17 onwards, static constexpr member variables are defined to be
diff --git a/deps/v8/infra/mb/PRESUBMIT.py b/deps/v8/infra/mb/PRESUBMIT.py
index 39d15e80b4..85e257c2d4 100644
--- a/deps/v8/infra/mb/PRESUBMIT.py
+++ b/deps/v8/infra/mb/PRESUBMIT.py
@@ -7,6 +7,10 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
def _CommonChecks(input_api, output_api):
results = []
diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl
index 6aa0de2358..a5031f1a20 100644
--- a/deps/v8/infra/mb/gn_isolate_map.pyl
+++ b/deps/v8/infra/mb/gn_isolate_map.pyl
@@ -60,7 +60,7 @@
"type": "script",
},
"run-gcmole": {
- "label": "//tools/gcmole:v8_run_gcmole",
+ "label": "//tools/gcmole:v8_gcmole_files",
"type": "script",
},
"run-num-fuzzer": {
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 82964dd7d4..e20cd6e7bd 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -116,6 +116,7 @@
'V8 Linux64 - pointer compression - builder': 'release_x64_pointer_compression',
'V8 Linux64 - pointer compression without dchecks':
'release_x64_pointer_compression_without_dchecks',
+ 'V8 Linux64 - python3 - builder': 'release_x64',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
'V8 Linux64 gcc - debug builder': 'debug_x64_gcc',
@@ -169,7 +170,6 @@
},
'client.v8.perf' : {
'V8 Arm - builder - perf': 'official_arm',
- 'V8 Arm64 - builder - perf': 'official_arm64',
'V8 Android Arm - builder - perf': 'official_android_arm',
'V8 Android Arm64 - builder - perf': 'official_android_arm64',
'V8 Linux - builder - perf': 'official_x86',
@@ -238,6 +238,7 @@
'v8_linux64_nodcheck_rel_ng': 'release_x64',
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
+ 'v8_linux64_python3_rel_ng': 'release_x64',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation',
@@ -470,8 +471,6 @@
# Official configs for arm
'official_arm': [
'release_bot', 'arm', 'hard_float', 'official', 'disable_pgo'],
- 'official_arm64': [
- 'release_bot', 'arm64', 'hard_float', 'official', 'disable_pgo'],
'official_android_arm': [
'release_bot', 'arm', 'android', 'minimal_symbols',
'android_strip_outputs', 'official', 'disable_pgo'],
@@ -525,7 +524,7 @@
'release_x64_minimal_symbols': [
'release_bot', 'x64', 'minimal_symbols'],
'release_x64_pointer_compression': [
- 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
+ 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
'v8_disable_pointer_compression'],
'release_x64_pointer_compression_without_dchecks': [
'release_bot', 'x64', 'v8_disable_pointer_compression'],
@@ -574,13 +573,14 @@
'debug_x64_fuchsia': [
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
- 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'v8_check_header_includes'],
+ 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx',
+ 'v8_check_header_includes'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_heap_sandbox': [
- 'debug_bot', 'x64', 'v8_enable_heap_sandbox'],
+ 'debug_bot', 'x64', 'v8_enable_sandbox_future'],
'debug_x64_heap_sandbox_arm64_sim': [
- 'debug_bot', 'simulate_arm64', 'v8_enable_heap_sandbox'],
+ 'debug_bot', 'simulate_arm64', 'v8_enable_sandbox_future'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_non_default_cppgc': [
@@ -619,10 +619,11 @@
'release_bot', 'x86', 'asan', 'lsan', 'symbolized',
'v8_verify_heap'],
'release_x86_gcc': [
- 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'v8_check_header_includes'],
- 'release_x86_gcc_minimal_symbols': [
- 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'minimal_symbols',
+ 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'no_custom_libcxx',
'v8_check_header_includes'],
+ 'release_x86_gcc_minimal_symbols': [
+ 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'no_custom_libcxx',
+ 'minimal_symbols', 'v8_check_header_includes'],
'release_x86_gcmole': [
'release_bot', 'x86', 'gcmole'],
'release_x86_gcmole_trybot': [
@@ -720,7 +721,7 @@
'disable_concurrent_marking': {
# Disable concurrent marking and atomic object field writes in order to
- # increase the TSAN coverage for background tasks. We need to keep the
+ # increase the TSAN coverage for background tasks. We need to keep the
# atomic marking state enabled because that is needed for the concurrent
# write-barrier used by background compilation.
'gn_args': 'v8_enable_concurrent_marking=false '
@@ -922,8 +923,8 @@
'gn_args': 'v8_enable_runtime_call_stats=false',
},
- 'v8_enable_heap_sandbox': {
- 'gn_args': 'v8_enable_heap_sandbox=true',
+ 'v8_enable_sandbox_future': {
+ 'gn_args': 'v8_enable_sandbox_future=true',
},
'v8_enable_lite_mode': {
@@ -934,6 +935,10 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
+ 'v8_enable_javascript_promise_hooks': {
+ 'gn_args': 'v8_enable_javascript_promise_hooks=true',
+ },
+
'v8_enable_google_benchmark': {
'gn_args': 'v8_enable_google_benchmark=true',
},
@@ -950,7 +955,7 @@
'gn_args': 'v8_enable_pointer_compression=false',
},
'v8_enable_single_generation': {
- 'gn_args': 'v8_enable_single_generation=true '
+ 'gn_args': 'v8_enable_single_generation=true '
'v8_disable_write_barriers=true',
},
'v8_enable_test_features': {
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index 46ae05163d..2e93155d81 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -11,10 +11,9 @@ For simplicity, we check all pyl files on any changes in this folder.
import ast
import os
-try:
- basestring # Python 2
-except NameError: # Python 3
- basestring = str
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
SUPPORTED_BUILDER_SPEC_KEYS = [
'swarming_dimensions',
@@ -58,14 +57,14 @@ def check_keys(error_msg, src_dict, supported_keys):
errors = []
for key in src_dict.keys():
if key not in supported_keys:
- errors += error_msg('Key "%s" must be one of %s' % (key, supported_keys))
+ errors += error_msg(f'Key "{key}" must be one of {supported_keys}')
return errors
def _check_properties(error_msg, src_dict, prop_name, supported_keys):
properties = src_dict.get(prop_name, {})
if not isinstance(properties, dict):
- return error_msg('Value for %s must be a dict' % prop_name)
+ return error_msg(f'Value for {prop_name} must be a dict')
return check_keys(error_msg, properties, supported_keys)
@@ -77,11 +76,11 @@ def _check_int_range(error_msg, src_dict, prop_name, lower_bound=None,
try:
value = int(src_dict[prop_name])
except ValueError:
- return error_msg('If specified, %s must be an int' % prop_name)
+ return error_msg(f'If specified, {prop_name} must be an int')
if lower_bound is not None and value < lower_bound:
- return error_msg('If specified, %s must be >=%d' % (prop_name, lower_bound))
+ return error_msg(f'If specified, {prop_name} must be >={lower_bound}')
if upper_bound is not None and value > upper_bound:
- return error_msg('If specified, %s must be <=%d' % (prop_name, upper_bound))
+ return error_msg(f'If specified, {prop_name} must be <={upper_bound}')
return []
@@ -120,7 +119,7 @@ def _check_test(error_msg, test):
test_args = test.get('test_args', [])
if not isinstance(test_args, list):
errors += error_msg('If specified, test_args must be a list of arguments')
- if not all(isinstance(x, basestring) for x in test_args):
+ if not all(isinstance(x, str) for x in test_args):
errors += error_msg('If specified, all test_args must be strings')
# Limit shards to 14 to avoid erroneous resource exhaustion.
@@ -128,7 +127,7 @@ def _check_test(error_msg, test):
error_msg, test, 'shards', lower_bound=1, upper_bound=14)
variant = test.get('variant', 'default')
- if not variant or not isinstance(variant, basestring):
+ if not variant or not isinstance(variant, str):
errors += error_msg('If specified, variant must be a non-empty string')
return errors
@@ -136,23 +135,23 @@ def _check_test(error_msg, test):
def _check_test_spec(file_path, raw_pyl):
def error_msg(msg):
- return ['Error in %s:\n%s' % (file_path, msg)]
+ return [f'Error in {file_path}:\n{msg}']
try:
# Eval python literal file.
full_test_spec = ast.literal_eval(raw_pyl)
except SyntaxError as e:
- return error_msg('Pyl parsing failed with:\n%s' % e)
+ return error_msg(f'Pyl parsing failed with:\n{e}')
if not isinstance(full_test_spec, dict):
return error_msg('Test spec must be a dict')
errors = []
- for buildername, builder_spec in full_test_spec.iteritems():
+ for buildername, builder_spec in full_test_spec.items():
def error_msg(msg):
- return ['Error in %s for builder %s:\n%s' % (file_path, buildername, msg)]
+ return [f'Error in {file_path} for builder {buildername}:\n{msg}']
- if not isinstance(buildername, basestring) or not buildername:
+ if not isinstance(buildername, str) or not buildername:
errors += error_msg('Buildername must be a non-empty string')
if not isinstance(builder_spec, dict) or not builder_spec:
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index db7566addd..519adbdb07 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -361,7 +361,6 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
- {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_dict_tracking_dbg_ng_triggered': {
@@ -474,7 +473,6 @@
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
- {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_perfetto_dbg_ng_triggered': {
@@ -493,6 +491,15 @@
{'name': 'v8testing', 'shards': 3},
],
},
+ 'v8_linux64_python3_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ {'name': 'gcmole'},
+ ],
+ },
'v8_linux64_single_generation_dbg_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -520,7 +527,6 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
- {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_tsan_rel_ng_triggered': {
@@ -822,7 +828,6 @@
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -833,7 +838,6 @@
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -844,7 +848,6 @@
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -882,7 +885,7 @@
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'shards': 2},
- {'name': 'test262', 'variant': 'extra'},
+ {'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8initializers'},
{'name': 'v8testing'},
{'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates']},
@@ -1148,7 +1151,7 @@
{'name': 'perf_integration'},
{'name': 'test262', 'shards': 2},
{'name': 'test262', 'variant': 'assert_types'},
- {'name': 'test262', 'variant': 'extra'},
+ {'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8initializers'},
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'assert_types'},
@@ -1156,7 +1159,6 @@
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
- {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1208,7 +1210,7 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262', 'shards': 5},
+ {'name': 'test262', 'shards': 7},
{'name': 'test262', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
@@ -1218,7 +1220,6 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
- {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1368,6 +1369,15 @@
{'name': 'v8testing', 'shards': 2},
],
},
+ 'V8 Linux64 - python3': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ {'name': 'gcmole'},
+ ],
+ },
'V8 Linux64 - shared': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1391,7 +1401,7 @@
'os': 'Ubuntu-18.04',
},
'tests': [
- {'name': 'test262', 'shards': 5},
+ {'name': 'test262', 'shards': 7},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
@@ -1571,23 +1581,21 @@
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'swarming_task_attrs': {
'expiration': 14400,
- 'hard_timeout': 3600,
+ 'hard_timeout': 7200,
'priority': 35,
},
'tests': [
- {'name': 'v8testing', 'shards': 8},
- {'name': 'v8testing', 'variant': 'future', 'shards': 2},
+ {'name': 'v8testing', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 4},
],
},
'V8 Mac - arm64 - sim - release': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1841,7 +1849,8 @@
'name': 'test262',
'suffix': 'novfp3',
'variant': 'default',
- 'test_args': ['--novfp3']
+ 'test_args': ['--novfp3'],
+ 'shards': 2
},
{
'name': 'v8testing',
diff --git a/deps/v8/samples/cppgc/hello-world.cc b/deps/v8/samples/cppgc/hello-world.cc
index 86b0afe92f..fe0d002ab4 100644
--- a/deps/v8/samples/cppgc/hello-world.cc
+++ b/deps/v8/samples/cppgc/hello-world.cc
@@ -2,17 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <cppgc/allocation.h>
-#include <cppgc/default-platform.h>
-#include <cppgc/garbage-collected.h>
-#include <cppgc/heap.h>
-#include <cppgc/member.h>
-#include <cppgc/visitor.h>
-
#include <iostream>
#include <memory>
#include <string>
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/default-platform.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/heap.h"
+#include "include/cppgc/member.h"
+#include "include/cppgc/visitor.h"
+
+#if !CPPGC_IS_STANDALONE
+#include "include/v8-initialization.h"
+#endif // !CPPGC_IS_STANDALONE
+
/**
* This sample program shows how to set up a stand-alone cppgc heap.
*/
@@ -45,6 +49,12 @@ int main(int argc, char* argv[]) {
// Create a default platform that is used by cppgc::Heap for execution and
// backend allocation.
auto cppgc_platform = std::make_shared<cppgc::DefaultPlatform>();
+#if !CPPGC_IS_STANDALONE
+ // When initializing a stand-alone cppgc heap in a regular V8 build, the
+ // internal V8 platform will be reused. Reusing the V8 platform requires
+ // initializing it properly.
+ v8::V8::InitializePlatform(cppgc_platform->GetV8Platform());
+#endif // !CPPGC_IS_STANDALONE
// Initialize the process. This must happen before any cppgc::Heap::Create()
// calls.
cppgc::DefaultPlatform::InitializeProcess(cppgc_platform.get());
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index 557ba63e0f..3ca9ff6802 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -20,6 +20,12 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeExternalStartupData(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ fprintf(stderr, "Error initializing the V8 sandbox\n");
+ return 1;
+ }
+#endif
v8::V8::Initialize();
// Create a new Isolate and make it the current one.
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 28b6f119c3..1e94980388 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -703,6 +703,12 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeExternalStartupData(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ fprintf(stderr, "Error initializing the V8 sandbox\n");
+ return 1;
+ }
+#endif
v8::V8::Initialize();
map<string, string> options;
string file;
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 9a2c8c3f54..7e6be6f872 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -73,6 +73,12 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeExternalStartupData(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ fprintf(stderr, "Error initializing the V8 sandbox\n");
+ return 1;
+ }
+#endif
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate::CreateParams create_params;
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 341435e28d..9846e7695c 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -36,6 +36,7 @@ include_rules = [
"+src/heap/parked-scope.h",
"+src/heap/read-only-heap-inl.h",
"+src/heap/read-only-heap.h",
+ "+src/heap/reference-summarizer.h",
"+src/heap/safepoint.h",
"+src/heap/base/stack.h",
"+src/heap/conservative-stack-visitor.h",
@@ -50,6 +51,9 @@ include_rules = [
"+src/interpreter/interpreter.h",
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
+ "-src/maglev",
+ "+src/maglev/maglev.h",
+ "+src/maglev/maglev-concurrent-dispatcher.h",
"-src/regexp",
"+src/regexp/regexp.h",
"+src/regexp/regexp-flags.h",
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
index 519588070b..eb4a924271 100644
--- a/deps/v8/src/api/OWNERS
+++ b/deps/v8/src/api/OWNERS
@@ -6,3 +6,6 @@ leszeks@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
verwaest@chromium.org
+
+# For v8-debug.h implementations.
+per-file api.cc=file:../debug/OWNERS
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 17f8bd94bc..975976ae09 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -15,7 +15,6 @@
#include "src/objects/foreign-inl.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/objects-inl.h"
-#include "src/objects/stack-frame-info.h"
namespace v8 {
@@ -317,6 +316,22 @@ inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local<Array> src,
namespace internal {
+void HandleScopeImplementer::EnterContext(Context context) {
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
+ DCHECK(context.IsNativeContext());
+ entered_contexts_.push_back(context);
+ is_microtask_context_.push_back(0);
+}
+
+void HandleScopeImplementer::EnterMicrotaskContext(Context context) {
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
+ DCHECK(context.IsNativeContext());
+ entered_contexts_.push_back(context);
+ is_microtask_context_.push_back(1);
+}
+
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h
index 9b339321e7..07b2e2d0f2 100644
--- a/deps/v8/src/api/api-macros.h
+++ b/deps/v8/src/api/api-macros.h
@@ -22,12 +22,17 @@
* Exceptions should be handled either by invoking one of the
* RETURN_ON_FAILED_EXECUTION* macros.
*
+ * API methods that are part of the debug interface should use
+ *
+ * PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE
+ *
+ * in a similar fashion to ENTER_V8.
+ *
* Don't use macros with DO_NOT_USE in their name.
*
- * TODO(jochen): Document debugger specific macros.
- * TODO(jochen): Document LOG_API and other RuntimeCallStats macros.
- * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros.
- * TODO(jochen): Remove calls form API methods to DO_NOT_USE macros.
+ * TODO(cbruni): Document LOG_API and other RuntimeCallStats macros.
+ * TODO(verwaest): All API methods should invoke one of the ENTER_V8* macros.
+ * TODO(verwaest): Remove calls form API methods to DO_NOT_USE macros.
*/
#define LOG_API(isolate, class_name, function_name) \
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index a71dd1670c..f0f1355f59 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -44,6 +44,7 @@
#include "src/common/globals.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/date/date.h"
+#include "src/objects/primitive-heap-object.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/debug/debug-wasm-objects.h"
#endif // V8_ENABLE_WEBASSEMBLY
@@ -56,7 +57,6 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
#include "src/execution/microtask-queue.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
@@ -100,7 +100,6 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/templates.h"
#include "src/objects/value-serializer.h"
@@ -115,8 +114,8 @@
#include "src/profiler/tick-sample.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
-#include "src/security/external-pointer.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/external-pointer.h"
+#include "src/sandbox/sandbox.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
@@ -140,7 +139,7 @@
#include "src/wasm/wasm-serialization.h"
#endif // V8_ENABLE_WEBASSEMBLY
-#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
+#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
#include <signal.h>
#include "include/v8-wasm-trap-handler-posix.h"
#include "src/trap-handler/handler-inside-posix.h"
@@ -193,37 +192,6 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
return origin;
}
-ScriptOrigin::ScriptOrigin(
- Local<Value> resource_name, Local<Integer> line_offset,
- Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
- Local<Integer> script_id, Local<Value> source_map_url,
- Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
- Local<Data> host_defined_options)
- : ScriptOrigin(
- Isolate::GetCurrent(), resource_name,
- line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
- column_offset.IsEmpty() ? 0
- : static_cast<int>(column_offset->Value()),
- !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
- static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
- source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
- !is_wasm.IsEmpty() && is_wasm->IsTrue(),
- !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
-
-ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
- int column_offset, bool is_shared_cross_origin,
- int script_id, Local<Value> source_map_url,
- bool is_opaque, bool is_wasm, bool is_module,
- Local<Data> host_defined_options)
- : isolate_(Isolate::GetCurrent()),
- resource_name_(resource_name),
- resource_line_offset_(line_offset),
- resource_column_offset_(column_offset),
- options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
- script_id_(script_id),
- source_map_url_(source_map_url),
- host_defined_options_(host_defined_options) {}
-
Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
// TODO(cbruni, chromium:1244145): remove once migrated to the context.
Utils::ApiCheck(!host_defined_options_->IsFixedArray(),
@@ -389,11 +357,11 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
-#ifdef V8_HEAP_SANDBOX
-// ArrayBufferAllocator to use when the heap sandbox is enabled, in which case
-// all ArrayBuffer backing stores need to be allocated inside the virtual
-// memory cage. Note, the current implementation is extremely inefficient as it
-// uses the BoundedPageAllocator. In the future, we'll need a proper allocator
+#ifdef V8_SANDBOXED_POINTERS
+// ArrayBufferAllocator to use when sandboxed pointers are used in which case
+// all ArrayBuffer backing stores need to be allocated inside the sandbox.
+// Note, the current implementation is extremely inefficient as it uses the
+// BoundedPageAllocator. In the future, we'll need a proper allocator
// implementation.
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
@@ -461,7 +429,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return new_data;
}
};
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_POINTERS
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
@@ -961,7 +929,7 @@ void HandleScope::Initialize(Isolate* isolate) {
// We make an exception if the serializer is enabled, which means that the
// Isolate is exclusively used to create a snapshot.
Utils::ApiCheck(
- !v8::Locker::WasEverUsed() ||
+ !internal_isolate->was_locker_ever_used() ||
internal_isolate->thread_manager()->IsLockedByCurrentThread() ||
internal_isolate->serializer_enabled(),
"HandleScope::HandleScope",
@@ -2088,8 +2056,8 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
ENTER_V8(isolate, context, Script, Run, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
i::AggregatingHistogramTimerScope histogram_timer(
isolate->counters()->compile_lazy());
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
@@ -2118,8 +2086,10 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
handle(fun->shared().script(), isolate);
if (maybe_script->IsScript() &&
i::Script::cast(*maybe_script).type() == i::Script::TYPE_WEB_SNAPSHOT) {
- i::WebSnapshotDeserializer deserializer(v8_isolate);
- deserializer.UseWebSnapshot(i::Handle<i::Script>::cast(maybe_script));
+ i::WebSnapshotDeserializer deserializer(
+ reinterpret_cast<i::Isolate*>(v8_isolate),
+ i::Handle<i::Script>::cast(maybe_script));
+ deserializer.Deserialize();
RETURN_ON_FAILED_EXECUTION(Value);
Local<Value> result = v8::Undefined(v8_isolate);
RETURN_ESCAPED(result);
@@ -2292,56 +2262,6 @@ Local<Value> Module::GetException() const {
return ToApiHandle<Value>(i::handle(self->GetException(), isolate));
}
-int Module::GetModuleRequestsLength() const {
- i::Module self = *Utils::OpenHandle(this);
- if (self.IsSyntheticModule()) return 0;
- ASSERT_NO_SCRIPT_NO_EXCEPTION(self.GetIsolate());
- return i::SourceTextModule::cast(self).info().module_requests().length();
-}
-
-Local<String> Module::GetModuleRequest(int i) const {
- Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest",
- "index must be positive");
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::GetModuleRequest",
- "Expected SourceTextModule");
- i::Isolate* isolate = self->GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::FixedArray> module_requests(
- i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
- isolate);
- Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest",
- "index is out of bounds");
- i::Handle<i::ModuleRequest> module_request(
- i::ModuleRequest::cast(module_requests->get(i)), isolate);
- return ToApiHandle<String>(i::handle(module_request->specifier(), isolate));
-}
-
-Location Module::GetModuleRequestLocation(int i) const {
- Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest",
- "index must be positive");
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::HandleScope scope(isolate);
- Utils::ApiCheck(self->IsSourceTextModule(),
- "Module::GetModuleRequestLocation",
- "Expected SourceTextModule");
- i::Handle<i::FixedArray> module_requests(
- i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
- isolate);
- Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest",
- "index is out of bounds");
- i::Handle<i::ModuleRequest> module_request(
- i::ModuleRequest::cast(module_requests->get(i)), isolate);
- int position = module_request->position();
- i::Handle<i::Script> script(
- i::Handle<i::SourceTextModule>::cast(self)->GetScript(), isolate);
- i::Script::PositionInfo info;
- i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET);
- return v8::Location(info.line, info.column);
-}
-
Local<FixedArray> Module::GetModuleRequests() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
if (self->IsSyntheticModule()) {
@@ -2426,19 +2346,6 @@ bool Module::IsSyntheticModule() const {
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
Maybe<bool> Module::InstantiateModule(Local<Context> context,
- Module::ResolveCallback callback) {
- auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Module, InstantiateModule, Nothing<bool>(),
- i::HandleScope);
- ResolveModuleCallback callback_with_import_assertions = nullptr;
- has_pending_exception =
- !i::Module::Instantiate(isolate, Utils::OpenHandle(this), context,
- callback_with_import_assertions, callback);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
-}
-
-Maybe<bool> Module::InstantiateModule(Local<Context> context,
Module::ResolveModuleCallback callback) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Module, InstantiateModule, Nothing<bool>(),
@@ -2455,8 +2362,8 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::Handle<i::Module> self = Utils::OpenHandle(this);
@@ -2987,7 +2894,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context,
PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value);
auto obj = i::Handle<i::JSObject>::cast(i_exception);
i::Handle<i::String> name = isolate->factory()->stack_string();
- Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
+ Maybe<bool> maybe = i::JSReceiver::HasProperty(isolate, obj, name);
has_pending_exception = maybe.IsNothing();
RETURN_ON_FAILED_EXECUTION(Value);
if (!maybe.FromJust()) return v8::Local<Value>();
@@ -3064,6 +2971,7 @@ ScriptOrigin Message::GetScriptOrigin() const {
void ScriptOrigin::VerifyHostDefinedOptions() const {
// TODO(cbruni, chromium:1244145): Remove checks once we allow arbitrary
// host-defined options.
+ USE(isolate_);
if (host_defined_options_.IsEmpty()) return;
Utils::ApiCheck(host_defined_options_->IsFixedArray(), "ScriptOrigin()",
"Host-defined options has to be a PrimitiveArray");
@@ -3210,14 +3118,6 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
RETURN_ESCAPED(Utils::ToLocal(self->GetSourceLine()));
}
-void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- std::ostringstream stack_trace_stream;
- i_isolate->PrintCurrentStackTrace(stack_trace_stream);
- i::PrintF(out, "%s", stack_trace_stream.str().c_str());
-}
-
void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -3229,9 +3129,9 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) {
Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
uint32_t index) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::Handle<i::StackFrameInfo> frame(
+ i::Handle<i::StackFrameInfo> info(
i::StackFrameInfo::cast(Utils::OpenHandle(this)->get(index)), isolate);
- return Utils::StackFrameToLocal(frame);
+ return Utils::StackFrameToLocal(info);
}
int StackTrace::GetFrameCount() const {
@@ -3244,89 +3144,97 @@ Local<StackTrace> StackTrace::CurrentStackTrace(Isolate* isolate,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::FixedArray> stackTrace =
- i_isolate->CaptureCurrentStackTrace(frame_limit, options);
+ i_isolate->CaptureDetailedStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
-// --- S t a c k F r a m e ---
-
-int StackFrame::GetLineNumber() const {
- return i::StackFrameInfo::GetLineNumber(Utils::OpenHandle(this));
+Local<String> StackTrace::CurrentScriptNameOrSourceURL(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::Handle<i::String> name_or_source_url =
+ isolate->CurrentScriptNameOrSourceURL();
+ return Utils::ToLocal(name_or_source_url);
}
-int StackFrame::GetColumn() const {
- return i::StackFrameInfo::GetColumnNumber(Utils::OpenHandle(this));
+// --- S t a c k F r a m e ---
+
+Location StackFrame::GetLocation() const {
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::Script> script(self->script(), isolate);
+ i::Script::PositionInfo info;
+ CHECK(i::Script::GetPositionInfo(script,
+ i::StackFrameInfo::GetSourcePosition(self),
+ &info, i::Script::WITH_OFFSET));
+ if (script->HasSourceURLComment()) {
+ info.line -= script->line_offset();
+ if (info.line == 0) {
+ info.column -= script->column_offset();
+ }
+ }
+ return {info.line, info.column};
}
int StackFrame::GetScriptId() const {
- return Utils::OpenHandle(this)->GetScriptId();
+ return Utils::OpenHandle(this)->script().id();
}
Local<String> StackFrame::GetScriptName() const {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- i::Handle<i::Object> name(self->GetScriptName(), isolate);
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::Object> name(self->script().name(), isolate);
if (!name->IsString()) return {};
- return Local<String>::Cast(Utils::ToLocal(name));
+ return Utils::ToLocal(i::Handle<i::String>::cast(name));
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- i::Handle<i::Object> name_or_url(self->GetScriptNameOrSourceURL(), isolate);
- if (!name_or_url->IsString()) return {};
- return Local<String>::Cast(Utils::ToLocal(name_or_url));
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::Object> name_or_source_url(self->script().GetNameOrSourceURL(),
+ isolate);
+ if (!name_or_source_url->IsString()) return {};
+ return Utils::ToLocal(i::Handle<i::String>::cast(name_or_source_url));
}
Local<String> StackFrame::GetScriptSource() const {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- i::Handle<i::Object> source(self->GetScriptSource(), isolate);
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ if (!self->script().HasValidSource()) return {};
+ i::Handle<i::PrimitiveHeapObject> source(self->script().source(), isolate);
if (!source->IsString()) return {};
- return Local<String>::Cast(Utils::ToLocal(source));
+ return Utils::ToLocal(i::Handle<i::String>::cast(source));
}
Local<String> StackFrame::GetScriptSourceMappingURL() const {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- i::Handle<i::Object> sourceMappingURL(self->GetScriptSourceMappingURL(),
- isolate);
- if (!sourceMappingURL->IsString()) return {};
- return Local<String>::Cast(Utils::ToLocal(sourceMappingURL));
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::Object> source_mapping_url(self->script().source_mapping_url(),
+ isolate);
+ if (!source_mapping_url->IsString()) return {};
+ return Utils::ToLocal(i::Handle<i::String>::cast(source_mapping_url));
}
Local<String> StackFrame::GetFunctionName() const {
- auto self = Utils::OpenHandle(this);
-#if V8_ENABLE_WEBASSEMBLY
- if (self->IsWasm()) {
- auto isolate = self->GetIsolate();
- auto instance = handle(self->GetWasmInstance(), isolate);
- auto func_index = self->GetWasmFunctionIndex();
- return Utils::ToLocal(
- i::GetWasmFunctionDebugName(isolate, instance, func_index));
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- auto name = i::StackFrameInfo::GetFunctionName(self);
- if (!name->IsString()) return {};
- return Local<String>::Cast(Utils::ToLocal(name));
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::String> name(self->function_name(), isolate);
+ if (name->length() == 0) return {};
+ return Utils::ToLocal(name);
}
-bool StackFrame::IsEval() const { return Utils::OpenHandle(this)->IsEval(); }
+bool StackFrame::IsEval() const {
+ i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
+ return self->script().compilation_type() == i::Script::COMPILATION_TYPE_EVAL;
+}
bool StackFrame::IsConstructor() const {
- return Utils::OpenHandle(this)->IsConstructor();
+ return Utils::OpenHandle(this)->is_constructor();
}
-bool StackFrame::IsWasm() const {
-#if V8_ENABLE_WEBASSEMBLY
- return Utils::OpenHandle(this)->IsWasm();
-#else
- return false;
-#endif // V8_ENABLE_WEBASSEMBLY
-}
+bool StackFrame::IsWasm() const { return !IsUserJavaScript(); }
bool StackFrame::IsUserJavaScript() const {
- return Utils::OpenHandle(this)->IsUserJavaScript();
+ return Utils::OpenHandle(this)->script().IsUserJavaScript();
}
// --- J S O N ---
@@ -3391,6 +3299,17 @@ Maybe<uint32_t> ValueSerializer::Delegate::GetWasmModuleTransferId(
return Nothing<uint32_t>();
}
+bool ValueSerializer::Delegate::SupportsSharedValues() const { return false; }
+
+Maybe<uint32_t> ValueSerializer::Delegate::GetSharedValueId(
+ Isolate* v8_isolate, Local<Value> shared_value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(), i::MessageTemplate::kDataCloneError,
+ Utils::OpenHandle(*shared_value)));
+ return Nothing<uint32_t>();
+}
+
void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer,
size_t size,
size_t* actual_size) {
@@ -3480,6 +3399,17 @@ MaybeLocal<WasmModuleObject> ValueDeserializer::Delegate::GetWasmModuleFromId(
return MaybeLocal<WasmModuleObject>();
}
+bool ValueDeserializer::Delegate::SupportsSharedValues() const { return false; }
+
+MaybeLocal<Value> ValueDeserializer::Delegate::GetSharedValueFromId(
+ Isolate* v8_isolate, uint32_t shared_value_id) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(),
+ i::MessageTemplate::kDataCloneDeserializationError));
+ return MaybeLocal<Value>();
+}
+
MaybeLocal<SharedArrayBuffer>
ValueDeserializer::Delegate::GetSharedArrayBufferFromId(Isolate* v8_isolate,
uint32_t id) {
@@ -3703,15 +3633,7 @@ bool Value::IsBoolean() const { return Utils::OpenHandle(this)->IsBoolean(); }
bool Value::IsExternal() const {
i::Object obj = *Utils::OpenHandle(this);
- if (!obj.IsHeapObject()) return false;
- i::HeapObject heap_obj = i::HeapObject::cast(obj);
- // Check the instance type is JS_OBJECT (instance type of Externals) before
- // attempting to get the Isolate since that guarantees the object is writable
- // and GetIsolate will work.
- if (heap_obj.map().instance_type() != i::JS_OBJECT_TYPE) return false;
- i::Isolate* isolate = i::JSObject::cast(heap_obj).GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- return heap_obj.IsExternal(isolate);
+ return obj.IsJSExternalObject();
}
bool Value::IsInt32() const {
@@ -4652,14 +4574,16 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result = i::JSProxy::SetPrototype(i::Handle<i::JSProxy>::cast(self),
- value_obj, false, i::kThrowOnError);
+ auto result =
+ i::JSProxy::SetPrototype(isolate, i::Handle<i::JSProxy>::cast(self),
+ value_obj, false, i::kThrowOnError);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
} else {
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto result = i::JSObject::SetPrototype(i::Handle<i::JSObject>::cast(self),
- value_obj, false, i::kThrowOnError);
+ auto result =
+ i::JSObject::SetPrototype(isolate, i::Handle<i::JSObject>::cast(self),
+ value_obj, false, i::kThrowOnError);
if (result.IsNothing()) {
isolate->clear_pending_exception();
return Nothing<bool>();
@@ -4739,7 +4663,10 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
- i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
+ // TODO(v8:12547): Support shared objects.
+ DCHECK(!self->InSharedHeap());
+ i::Handle<i::String> name =
+ i::JSReceiver::GetConstructorName(self->GetIsolate(), self);
return Utils::ToLocal(name);
}
@@ -4807,12 +4734,12 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
// Check if the given key is an array index.
uint32_t index = 0;
if (key_obj->ToArrayIndex(&index)) {
- maybe = i::JSReceiver::HasElement(self, index);
+ maybe = i::JSReceiver::HasElement(isolate, self, index);
} else {
// Convert the key to a name - possibly by calling back into JavaScript.
i::Handle<i::Name> name;
if (i::Object::ToName(isolate, key_obj).ToHandle(&name)) {
- maybe = i::JSReceiver::HasProperty(self, name);
+ maybe = i::JSReceiver::HasProperty(isolate, self, name);
}
}
has_pending_exception = maybe.IsNothing();
@@ -4838,7 +4765,7 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
- auto maybe = i::JSReceiver::HasElement(self, index);
+ auto maybe = i::JSReceiver::HasElement(isolate, self, index);
has_pending_exception = maybe.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return maybe;
@@ -4941,7 +4868,7 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_val = Utils::OpenHandle(*key);
- auto result = i::JSReceiver::HasOwnProperty(self, key_val);
+ auto result = i::JSReceiver::HasOwnProperty(isolate, self, key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4952,7 +4879,7 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
- auto result = i::JSReceiver::HasOwnProperty(self, index);
+ auto result = i::JSReceiver::HasOwnProperty(isolate, self, index);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4967,7 +4894,7 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
auto result = i::JSObject::HasRealNamedProperty(
- i::Handle<i::JSObject>::cast(self), key_val);
+ isolate, i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4981,7 +4908,7 @@ Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto result = i::JSObject::HasRealElementProperty(
- i::Handle<i::JSObject>::cast(self), index);
+ isolate, i::Handle<i::JSObject>::cast(self), index);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4996,7 +4923,7 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
auto result = i::JSObject::HasRealNamedCallbackProperty(
- i::Handle<i::JSObject>::cast(self), key_val);
+ isolate, i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -5165,7 +5092,8 @@ bool v8::Object::IsConstructor() const {
bool v8::Object::IsApiWrapper() const {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- return self->IsApiWrapper();
+ // Objects with embedder fields can wrap API objects.
+ return self->MayHaveEmbedderFields();
}
bool v8::Object::IsUndetectable() const {
@@ -5181,8 +5109,8 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
@@ -5201,8 +5129,8 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -5240,8 +5168,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal<Object>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
bool should_set_has_no_side_effect =
@@ -5291,8 +5219,8 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
ENTER_V8(isolate, context, Function, Call, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::NestedTimedHistogramScope execute_timer(
- isolate->counters()->execute_precise());
+ i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(),
+ isolate);
auto self = Utils::OpenHandle(this);
Utils::ApiCheck(!self.is_null(), "v8::Function::Call",
"Function to be called is a null pointer");
@@ -5843,7 +5771,7 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
}
if (i::StringShape(str).IsExternalTwoByte()) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(str.ptr());
+ internal::Isolate* isolate = I::GetIsolateForSandbox(str.ptr());
internal::Address value = I::ReadExternalPointerField(
isolate, str.ptr(), I::kStringResourceOffset,
internal::kExternalStringResourceTag);
@@ -5887,7 +5815,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
if (i::StringShape(str).IsExternalOneByte() ||
i::StringShape(str).IsExternalTwoByte()) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(string);
+ internal::Isolate* isolate = I::GetIsolateForSandbox(string);
internal::Address value =
I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset,
internal::kExternalStringResourceTag);
@@ -5911,28 +5839,6 @@ v8::String::GetExternalOneByteStringResource() const {
return nullptr;
}
-Local<Value> Symbol::Description() const {
- i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
-
- i::Isolate* isolate;
- if (!i::GetIsolateFromHeapObject(*sym, &isolate)) {
- // Symbol is in RO_SPACE, which means that its description is also in
- // RO_SPACE. Since RO_SPACE objects are immovable we can use the
- // Handle(Address*) constructor with the address of the description
- // field in the Symbol object without needing an isolate.
- DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
-#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- i::Handle<i::HeapObject> ro_description(reinterpret_cast<i::Address*>(
- sym->GetFieldAddress(i::Symbol::kDescriptionOffset)));
- return Utils::ToLocal(ro_description);
-#else
- isolate = reinterpret_cast<i::Isolate*>(Isolate::GetCurrent());
-#endif
- }
-
- return Description(reinterpret_cast<Isolate*>(isolate));
-}
-
Local<Value> Symbol::Description(Isolate* isolate) const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Handle<i::Object> description(sym->description(),
@@ -6032,18 +5938,36 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
+
+ i::DisallowGarbageCollection no_gc;
+
+ // There's no need to invalidate slots as embedder fields are always
+ // tagged.
+ obj->GetHeap()->NotifyObjectLayoutChange(*obj, no_gc,
+ i::InvalidateRecordedSlots::kNo);
+
Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index)
.store_aligned_pointer(obj->GetIsolate(), value),
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj));
+
+#ifdef VERIFY_HEAP
+ obj->GetHeap()->VerifyObjectLayoutChange(*obj, obj->map());
+#endif // VERIFY_HEAP
}
void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
void* values[]) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
+
i::DisallowGarbageCollection no_gc;
+ // There's no need to invalidate slots as embedder fields are always
+ // tagged.
+ obj->GetHeap()->NotifyObjectLayoutChange(*obj, no_gc,
+ i::InvalidateRecordedSlots::kNo);
+
+ const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
i::JSObject js_obj = i::JSObject::cast(*obj);
int nof_embedder_fields = js_obj.GetEmbedderFieldCount();
for (int i = 0; i < argc; i++) {
@@ -6059,15 +5983,10 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
internal::WriteBarrier::MarkingFromInternalFields(js_obj);
-}
-static void* ExternalValue(i::Object obj) {
- // Obscure semantics for undefined, but somehow checked in our unit tests...
- if (obj.IsUndefined()) {
- return nullptr;
- }
- i::Object foreign = i::JSObject::cast(obj).GetEmbedderField(0);
- return reinterpret_cast<void*>(i::Foreign::cast(foreign).foreign_address());
+#ifdef VERIFY_HEAP
+ obj->GetHeap()->VerifyObjectLayoutChange(*obj, obj->map());
+#endif // VERIFY_HEAP
}
// --- E n v i r o n m e n t ---
@@ -6076,10 +5995,8 @@ void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-bool v8::V8::InitializeVirtualMemoryCage() {
- return i::V8::InitializeVirtualMemoryCage();
-}
+#ifdef V8_SANDBOX
+bool v8::V8::InitializeSandbox() { return i::V8::InitializeSandbox(); }
#endif
void v8::V8::DisposePlatform() { i::V8::DisposePlatform(); }
@@ -6103,30 +6020,31 @@ bool v8::V8::Initialize(const int build_config) {
kEmbedderSmiValueSize, internal::kSmiValueSize);
}
- const bool kEmbedderHeapSandbox = (build_config & kHeapSandbox) != 0;
- if (kEmbedderHeapSandbox != V8_HEAP_SANDBOX_BOOL) {
+ const bool kEmbedderSandboxedExternalPointers =
+ (build_config & kSandboxedExternalPointers) != 0;
+ if (kEmbedderSandboxedExternalPointers !=
+ V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) {
FATAL(
"Embedder-vs-V8 build configuration mismatch. On embedder side "
- "heap sandbox is %s while on V8 side it's %s.",
- kEmbedderHeapSandbox ? "ENABLED" : "DISABLED",
- V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
+ "sandboxed external pointers is %s while on V8 side it's %s.",
+ kEmbedderSandboxedExternalPointers ? "ENABLED" : "DISABLED",
+ V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ? "ENABLED" : "DISABLED");
}
- const bool kEmbedderVirtualMemoryCage =
- (build_config & kVirtualMemoryCage) != 0;
- if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) {
+ const bool kEmbedderSandbox = (build_config & kSandbox) != 0;
+ if (kEmbedderSandbox != V8_SANDBOX_BOOL) {
FATAL(
"Embedder-vs-V8 build configuration mismatch. On embedder side "
- "virtual memory cage is %s while on V8 side it's %s.",
- kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED",
- V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED");
+ "sandbox is %s while on V8 side it's %s.",
+ kEmbedderSandbox ? "ENABLED" : "DISABLED",
+ V8_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
}
i::V8::Initialize();
return true;
}
-#if V8_OS_LINUX || V8_OS_MACOSX
+#if V8_OS_LINUX || V8_OS_DARWIN
bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info,
void* context) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
@@ -6239,31 +6157,38 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_SANDBOX
+VirtualAddressSpace* v8::V8::GetSandboxAddressSpace() {
+ Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(),
+ "v8::V8::GetSandboxAddressSpace",
+ "The sandbox must be initialized first.");
+ return i::GetProcessWideSandbox()->address_space();
+}
+
PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() {
- Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(),
+ Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(),
"v8::V8::GetVirtualMemoryCagePageAllocator",
- "The virtual memory cage must be initialized first.");
- return i::GetProcessWideVirtualMemoryCage()->page_allocator();
+ "The sandbox must be initialized first.");
+ return i::GetProcessWideSandbox()->page_allocator();
}
-size_t v8::V8::GetVirtualMemoryCageSizeInBytes() {
- if (!i::GetProcessWideVirtualMemoryCage()->is_initialized()) {
+size_t v8::V8::GetSandboxSizeInBytes() {
+ if (!i::GetProcessWideSandbox()->is_initialized()) {
return 0;
} else {
- return i::GetProcessWideVirtualMemoryCage()->size();
+ return i::GetProcessWideSandbox()->size();
}
}
-bool v8::V8::IsUsingSecureVirtualMemoryCage() {
- Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(),
- "v8::V8::IsUsingSecureVirtualMemoryCage",
- "The virtual memory cage must be initialized first.");
- // TODO(saelo) For now, we only treat a fake cage as insecure. Once we use
- // caged pointers that assume that the cage has a constant size, we'll also
- // treat cages smaller than the default size as insecure because caged
- // pointers can then access memory outside of them.
- return !i::GetProcessWideVirtualMemoryCage()->is_fake_cage();
+bool v8::V8::IsSandboxConfiguredSecurely() {
+ Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(),
+ "v8::V8::IsSandoxConfiguredSecurely",
+ "The sandbox must be initialized first.");
+ // TODO(saelo) For now, we only treat a partially reserved sandbox as
+ // insecure. Once we use sandboxed pointers, which assume that the sandbox
+ // has a fixed size, we'll also treat sandboxes with a smaller size as
+ // insecure because these pointers can then access memory outside of them.
+ return !i::GetProcessWideSandbox()->is_partially_reserved();
}
#endif
@@ -6418,7 +6343,7 @@ Local<Context> NewContext(
// TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
// fail.
// Sanity-check that the isolate is initialized and usable.
- CHECK(isolate->builtins()->code(i::Builtin::kIllegal).IsCode());
+ CHECK(isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT());
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
LOG_API(isolate, Context, New);
@@ -6602,6 +6527,7 @@ void v8::Context::SetPromiseHooks(Local<Function> init_hook,
Local<Function> before_hook,
Local<Function> after_hook,
Local<Function> resolve_hook) {
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6635,6 +6561,10 @@ void v8::Context::SetPromiseHooks(Local<Function> init_hook,
context->native_context().set_promise_hook_before_function(*before);
context->native_context().set_promise_hook_after_function(*after);
context->native_context().set_promise_hook_resolve_function(*resolve);
+#else // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
+ Utils::ApiCheck(false, "v8::Context::SetPromiseHook",
+ "V8 was compiled without JavaScript Promise hooks");
+#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
}
MaybeLocal<Context> metrics::Recorder::GetContext(
@@ -6786,6 +6716,11 @@ bool FunctionTemplate::IsLeafTemplateForApiObject(
Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
+ // Nullptr is not allowed here because serialization/deserialization of
+ // nullptr external api references is not possible as nullptr is used as an
+ // external_references table terminator, see v8::SnapshotCreator()
+ // constructors.
+ DCHECK_NOT_NULL(value);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, External, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -6794,7 +6729,8 @@ Local<External> v8::External::New(Isolate* isolate, void* value) {
}
void* External::Value() const {
- return ExternalValue(*Utils::OpenHandle(this));
+ auto self = Utils::OpenHandle(this);
+ return i::JSExternalObject::cast(*self).value();
}
// anonymous namespace for string creation helper functions
@@ -8585,10 +8521,11 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
void Isolate::RequestGarbageCollectionForTesting(
GarbageCollectionType type,
EmbedderHeapTracer::EmbedderStackState stack_state) {
+ base::Optional<i::EmbedderStackStateScope> stack_scope;
if (type == kFullGarbageCollection) {
- reinterpret_cast<i::Isolate*>(this)
- ->heap()
- ->SetEmbedderStackStateForNextFinalization(stack_state);
+ stack_scope.emplace(reinterpret_cast<i::Isolate*>(this)->heap(),
+ i::EmbedderStackStateScope::kExplicitInvocation,
+ stack_state);
}
RequestGarbageCollectionForTesting(type);
}
@@ -8636,6 +8573,15 @@ void Isolate::Initialize(Isolate* isolate,
} else {
i_isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob());
}
+
+ if (params.fatal_error_callback) {
+ isolate->SetFatalErrorHandler(params.fatal_error_callback);
+ }
+
+ if (params.oom_error_callback) {
+ isolate->SetOOMErrorHandler(params.oom_error_callback);
+ }
+
if (params.counter_lookup_callback) {
isolate->SetCounterFunction(params.counter_lookup_callback);
}
@@ -8664,7 +8610,7 @@ void Isolate::Initialize(Isolate* isolate,
params.experimental_attach_to_shared_isolate));
}
- // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
+ // TODO(v8:2487): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(isolate);
if (i_isolate->snapshot_blob() == nullptr) {
FATAL(
@@ -8775,6 +8721,12 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
isolate->SetHostInitializeImportMetaObjectCallback(callback);
}
+void Isolate::SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetHostCreateShadowRealmContextCallback(callback);
+}
+
void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetPrepareStackTraceCallback(callback);
@@ -9265,7 +9217,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
bool on_isolate_thread =
- v8::Locker::WasEverUsed()
+ isolate->was_locker_ever_used()
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current() == isolate->thread_id();
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
@@ -9346,7 +9298,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() {
{i::Builtin::kJSRunMicrotasksEntry,
&entry_stubs.js_run_microtasks_entry_stub}}};
for (auto& pair : stubs) {
- i::Code js_entry = isolate->builtins()->code(pair.first);
+ i::Code js_entry = FromCodeT(isolate->builtins()->code(pair.first));
pair.second->code.start =
reinterpret_cast<const void*>(js_entry.InstructionStart());
pair.second->code.length_in_bytes = js_entry.InstructionSize();
@@ -10272,14 +10224,6 @@ void EmbedderHeapTracer::SetStackStart(void* stack_start) {
stack_start);
}
-void EmbedderHeapTracer::NotifyEmptyEmbedderStack() {
- CHECK(isolate_);
- reinterpret_cast<i::Isolate*>(isolate_)
- ->heap()
- ->local_embedder_heap_tracer()
- ->NotifyEmptyEmbedderStack();
-}
-
void EmbedderHeapTracer::FinalizeTracing() {
if (isolate_) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
@@ -10290,19 +10234,6 @@ void EmbedderHeapTracer::FinalizeTracing() {
}
}
-void EmbedderHeapTracer::GarbageCollectionForTesting(
- EmbedderStackState stack_state) {
- CHECK(isolate_);
- Utils::ApiCheck(i::FLAG_expose_gc,
- "v8::EmbedderHeapTracer::GarbageCollectionForTesting",
- "Must use --expose-gc");
- i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
- heap->SetEmbedderStackStateForNextFinalization(stack_state);
- heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting,
- kGCCallbackFlagForced);
-}
-
void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
if (isolate_) {
i::LocalEmbedderHeapTracer* const tracer =
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 320346b22f..c238ffb153 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -41,6 +41,7 @@ class JSFinalizationRegistry;
namespace debug {
class AccessorPair;
class GeneratorObject;
+class ScriptSource;
class Script;
class EphemeronTable;
} // namespace debug
@@ -134,6 +135,7 @@ class RegisteredExtension {
V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
+ V(debug::ScriptSource, HeapObject) \
V(debug::Script, Script) \
V(debug::EphemeronTable, EphemeronHashTable) \
V(debug::AccessorPair, AccessorPair) \
@@ -467,13 +469,6 @@ bool HandleScopeImplementer::HasSavedContexts() {
return !saved_contexts_.empty();
}
-void HandleScopeImplementer::EnterContext(Context context) {
- DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
- DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
- entered_contexts_.push_back(context);
- is_microtask_context_.push_back(0);
-}
-
void HandleScopeImplementer::LeaveContext() {
DCHECK(!entered_contexts_.empty());
DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
@@ -486,13 +481,6 @@ bool HandleScopeImplementer::LastEnteredContextWas(Context context) {
return !entered_contexts_.empty() && entered_contexts_.back() == context;
}
-void HandleScopeImplementer::EnterMicrotaskContext(Context context) {
- DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
- DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
- entered_contexts_.push_back(context);
- is_microtask_context_.push_back(1);
-}
-
// If there's a spare block, use it for growing the current scope.
internal::Address* HandleScopeImplementer::GetSpareOrNewBlock() {
internal::Address* block =
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 8791e4eae2..a1b58f2d43 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -42,10 +42,11 @@ Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Name> name) {
Handle<Name> math_name(
isolate->factory()->InternalizeString(base::StaticCharVector("Math")));
- Handle<Object> math = JSReceiver::GetDataProperty(stdlib, math_name);
+ Handle<Object> math = JSReceiver::GetDataProperty(isolate, stdlib, math_name);
if (!math->IsJSReceiver()) return isolate->factory()->undefined_value();
Handle<JSReceiver> math_receiver = Handle<JSReceiver>::cast(math);
- Handle<Object> value = JSReceiver::GetDataProperty(math_receiver, name);
+ Handle<Object> value =
+ JSReceiver::GetDataProperty(isolate, math_receiver, name);
return value;
}
@@ -55,13 +56,13 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
if (members.contains(wasm::AsmJsParser::StandardMember::kInfinity)) {
members.Remove(wasm::AsmJsParser::StandardMember::kInfinity);
Handle<Name> name = isolate->factory()->Infinity_string();
- Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
+ Handle<Object> value = JSReceiver::GetDataProperty(isolate, stdlib, name);
if (!value->IsNumber() || !std::isinf(value->Number())) return false;
}
if (members.contains(wasm::AsmJsParser::StandardMember::kNaN)) {
members.Remove(wasm::AsmJsParser::StandardMember::kNaN);
Handle<Name> name = isolate->factory()->NaN_string();
- Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
+ Handle<Object> value = JSReceiver::GetDataProperty(isolate, stdlib, name);
if (!value->IsNaN()) return false;
}
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
@@ -77,7 +78,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
return false; \
} \
DCHECK_EQ(shared.GetCode(), \
- isolate->builtins()->codet(Builtin::kMath##FName)); \
+ isolate->builtins()->code(Builtin::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
@@ -91,16 +92,16 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
}
STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
#undef STDLIB_MATH_CONST
-#define STDLIB_ARRAY_TYPE(fname, FName) \
- if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
- *is_typed_array = true; \
- Handle<Name> name(isolate->factory()->InternalizeString( \
- base::StaticCharVector(#FName))); \
- Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
- if (!func.is_identical_to(isolate->fname())) return false; \
+#define STDLIB_ARRAY_TYPE(fname, FName) \
+ if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
+ *is_typed_array = true; \
+ Handle<Name> name(isolate->factory()->InternalizeString( \
+ base::StaticCharVector(#FName))); \
+ Handle<Object> value = JSReceiver::GetDataProperty(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
+ if (!func.is_identical_to(isolate->fname())) return false; \
}
STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array)
STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array)
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 3ff2a44201..6849c9ea5d 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -760,7 +760,7 @@ void AsmJsParser::ValidateFunction() {
ValidateFunctionParams(&params);
// Check against limit on number of parameters.
- if (params.size() >= kV8MaxWasmFunctionParams) {
+ if (params.size() > kV8MaxWasmFunctionParams) {
FAIL("Number of parameters exceeds internal limit");
}
@@ -2246,6 +2246,9 @@ AsmType* AsmJsParser::ValidateCall() {
// also determined the complete function type and can perform checking against
// the expected type or update the expected type in case of first occurrence.
if (function_info->kind == VarKind::kImportedFunction) {
+ if (param_types.size() > kV8MaxWasmFunctionParams) {
+ FAILn("Number of parameters exceeds internal limit");
+ }
for (auto t : param_specific_types) {
if (!t->IsA(AsmType::Extern())) {
FAILn("Imported function args must be type extern");
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 1aa6365817..05105be91d 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -6,7 +6,6 @@
#define V8_ASMJS_ASM_PARSER_H_
#include <memory>
-#include <string>
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 13586e139c..069e31491c 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -1,4 +1,3 @@
-gsathya@chromium.org
leszeks@chromium.org
marja@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 4dab59fdae..a93c7fd091 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -84,7 +84,7 @@ template EXPORT_TEMPLATE_DEFINE(
bool AstRawString::AsArrayIndex(uint32_t* index) const {
// The StringHasher will set up the hash. Bail out early if we know it
// can't be convertible to an array index.
- if ((raw_hash_field_ & Name::kIsNotIntegerIndexMask) != 0) return false;
+ if (!IsIntegerIndex()) return false;
if (length() <= Name::kMaxCachedArrayIndexLength) {
*index = Name::ArrayIndexValueBits::decode(raw_hash_field_);
return true;
@@ -97,7 +97,7 @@ bool AstRawString::AsArrayIndex(uint32_t* index) const {
}
bool AstRawString::IsIntegerIndex() const {
- return (raw_hash_field_ & Name::kIsNotIntegerIndexMask) == 0;
+ return Name::IsIntegerIndex(raw_hash_field_);
}
bool AstRawString::IsOneByteEqualTo(const char* data) const {
@@ -353,16 +353,18 @@ const AstRawString* AstValueFactory::GetString(
}
AstConsString* AstValueFactory::NewConsString() {
- return zone()->New<AstConsString>();
+ return single_parse_zone()->New<AstConsString>();
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str) {
- return NewConsString()->AddString(zone(), str);
+ return NewConsString()->AddString(single_parse_zone(), str);
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
const AstRawString* str2) {
- return NewConsString()->AddString(zone(), str1)->AddString(zone(), str2);
+ return NewConsString()
+ ->AddString(single_parse_zone(), str1)
+ ->AddString(single_parse_zone(), str2);
}
template <typename IsolateT>
@@ -395,9 +397,9 @@ const AstRawString* AstValueFactory::GetString(
[&]() {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
- byte* new_literal_bytes = zone()->NewArray<byte>(length);
+ byte* new_literal_bytes = ast_raw_string_zone()->NewArray<byte>(length);
memcpy(new_literal_bytes, literal_bytes.begin(), length);
- AstRawString* new_string = zone()->New<AstRawString>(
+ AstRawString* new_string = ast_raw_string_zone()->New<AstRawString>(
is_one_byte, base::Vector<const byte>(new_literal_bytes, length),
raw_hash_field);
CHECK_NOT_NULL(new_string);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index d036d99604..b0c380ee60 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -80,7 +80,7 @@ class AstRawString final : public ZoneObject {
uint32_t Hash() const {
// Hash field must be computed.
DCHECK_EQ(raw_hash_field_ & Name::kHashNotComputedMask, 0);
- return raw_hash_field_ >> Name::kHashShift;
+ return Name::HashBits::decode(raw_hash_field_);
}
// This function can be called after internalizing.
@@ -311,24 +311,40 @@ class AstValueFactory {
public:
AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
uint64_t hash_seed)
+ : AstValueFactory(zone, zone, string_constants, hash_seed) {}
+
+ AstValueFactory(Zone* ast_raw_string_zone, Zone* single_parse_zone,
+ const AstStringConstants* string_constants,
+ uint64_t hash_seed)
: string_table_(string_constants->string_table()),
strings_(nullptr),
strings_end_(&strings_),
string_constants_(string_constants),
empty_cons_string_(nullptr),
- zone_(zone),
+ ast_raw_string_zone_(ast_raw_string_zone),
+ single_parse_zone_(single_parse_zone),
hash_seed_(hash_seed) {
- DCHECK_NOT_NULL(zone_);
+ DCHECK_NOT_NULL(ast_raw_string_zone_);
+ DCHECK_NOT_NULL(single_parse_zone_);
DCHECK_EQ(hash_seed, string_constants->hash_seed());
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
- empty_cons_string_ = NewConsString();
+
+ // Allocate the empty ConsString in the AstRawString Zone instead of the
+ // single parse Zone like other ConsStrings, because unlike those it can be
+ // reused across parses.
+ empty_cons_string_ = ast_raw_string_zone_->New<AstConsString>();
+ }
+
+ Zone* ast_raw_string_zone() const {
+ DCHECK_NOT_NULL(ast_raw_string_zone_);
+ return ast_raw_string_zone_;
}
- Zone* zone() const {
- DCHECK_NOT_NULL(zone_);
- return zone_;
+ Zone* single_parse_zone() const {
+ DCHECK_NOT_NULL(single_parse_zone_);
+ return single_parse_zone_;
}
const AstRawString* GetOneByteString(base::Vector<const uint8_t> literal) {
@@ -394,7 +410,8 @@ class AstValueFactory {
static const int kMaxOneCharStringValue = 128;
const AstRawString* one_character_strings_[kMaxOneCharStringValue];
- Zone* zone_;
+ Zone* ast_raw_string_zone_;
+ Zone* single_parse_zone_;
uint64_t hash_seed_;
};
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index ac89df574d..804a6840c1 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -268,6 +268,14 @@ bool FunctionLiteral::private_name_lookup_skips_outer_class() const {
return scope()->private_name_lookup_skips_outer_class();
}
+bool FunctionLiteral::class_scope_has_private_brand() const {
+ return scope()->class_scope_has_private_brand();
+}
+
+void FunctionLiteral::set_class_scope_has_private_brand(bool value) {
+ return scope()->set_class_scope_has_private_brand(value);
+}
+
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
@@ -365,7 +373,14 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
}
}
-void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) {
+int ObjectLiteralBoilerplateBuilder::ComputeFlags(bool disable_mementos) const {
+ int flags = LiteralBoilerplateBuilder::ComputeFlags(disable_mementos);
+ if (fast_elements()) flags |= ObjectLiteral::kFastElements;
+ if (has_null_prototype()) flags |= ObjectLiteral::kHasNullPrototype;
+ return flags;
+}
+
+void ObjectLiteralBoilerplateBuilder::InitFlagsForPendingNullPrototype(int i) {
// We still check for __proto__:null after computed property names.
for (; i < properties()->length(); i++) {
if (properties()->at(i)->IsNullPrototype()) {
@@ -375,12 +390,19 @@ void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) {
}
}
-int ObjectLiteral::InitDepthAndFlags() {
- if (is_initialized()) return depth();
+int ObjectLiteralBoilerplateBuilder::EncodeLiteralType() {
+ int flags = AggregateLiteral::kNoFlags;
+ if (fast_elements()) flags |= ObjectLiteral::kFastElements;
+ if (has_null_prototype()) flags |= ObjectLiteral::kHasNullPrototype;
+ return flags;
+}
+
+void ObjectLiteralBoilerplateBuilder::InitDepthAndFlags() {
+ if (is_initialized()) return;
bool is_simple = true;
bool has_seen_prototype = false;
bool needs_initial_allocation_site = false;
- int depth_acc = 1;
+ DepthKind depth_acc = kShallow;
uint32_t nof_properties = 0;
uint32_t elements = 0;
uint32_t max_element_index = 0;
@@ -408,8 +430,8 @@ int ObjectLiteral::InitDepthAndFlags() {
MaterializedLiteral* literal = property->value()->AsMaterializedLiteral();
if (literal != nullptr) {
- int subliteral_depth = literal->InitDepthAndFlags() + 1;
- if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
+ LiteralBoilerplateBuilder::InitDepthAndFlags(literal);
+ depth_acc = kNotShallow;
needs_initial_allocation_site |= literal->NeedsInitialAllocationSite();
}
@@ -440,11 +462,11 @@ int ObjectLiteral::InitDepthAndFlags() {
set_has_elements(elements > 0);
set_fast_elements((max_element_index <= 32) ||
((2 * elements) >= max_element_index));
- return depth_acc;
}
template <typename IsolateT>
-void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
+void ObjectLiteralBoilerplateBuilder::BuildBoilerplateDescription(
+ IsolateT* isolate) {
if (!boilerplate_description_.is_null()) return;
int index_keys = 0;
@@ -479,7 +501,7 @@ void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != nullptr) {
- m_literal->BuildConstants(isolate);
+ BuildConstants(isolate, m_literal);
}
// Add CONSTANT and COMPUTED properties to boilerplate. Use the
@@ -501,12 +523,14 @@ void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
boilerplate_description_ = boilerplate_description;
}
-template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral::
+template EXPORT_TEMPLATE_DEFINE(
+ V8_BASE_EXPORT) void ObjectLiteralBoilerplateBuilder::
BuildBoilerplateDescription(Isolate* isolate);
-template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral::
+template EXPORT_TEMPLATE_DEFINE(
+ V8_BASE_EXPORT) void ObjectLiteralBoilerplateBuilder::
BuildBoilerplateDescription(LocalIsolate* isolate);
-bool ObjectLiteral::IsFastCloningSupported() const {
+bool ObjectLiteralBoilerplateBuilder::IsFastCloningSupported() const {
// The CreateShallowObjectLiteratal builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
@@ -515,25 +539,53 @@ bool ObjectLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
-int ArrayLiteral::InitDepthAndFlags() {
- if (is_initialized()) return depth();
+// static
+template <typename IsolateT>
+Handle<Object> LiteralBoilerplateBuilder::GetBoilerplateValue(
+ Expression* expression, IsolateT* isolate) {
+ if (expression->IsLiteral()) {
+ return expression->AsLiteral()->BuildValue(isolate);
+ }
+ if (expression->IsCompileTimeValue()) {
+ if (expression->IsObjectLiteral()) {
+ ObjectLiteral* object_literal = expression->AsObjectLiteral();
+ DCHECK(object_literal->builder()->is_simple());
+ return object_literal->builder()->boilerplate_description();
+ } else {
+ DCHECK(expression->IsArrayLiteral());
+ ArrayLiteral* array_literal = expression->AsArrayLiteral();
+ DCHECK(array_literal->builder()->is_simple());
+ return array_literal->builder()->boilerplate_description();
+ }
+ }
+ return isolate->factory()->uninitialized_value();
+}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> LiteralBoilerplateBuilder::GetBoilerplateValue(
+ Expression* expression, Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> LiteralBoilerplateBuilder::GetBoilerplateValue(
+ Expression* expression, LocalIsolate* isolate);
+
+void ArrayLiteralBoilerplateBuilder::InitDepthAndFlags() {
+ if (is_initialized()) return;
int constants_length =
- first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
+ first_spread_index_ >= 0 ? first_spread_index_ : values_->length();
// Fill in the literals.
bool is_simple = first_spread_index_ < 0;
bool is_holey = false;
ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
- int depth_acc = 1;
+ DepthKind depth_acc = kShallow;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
- Expression* element = values()->at(array_index);
+ Expression* element = values_->at(array_index);
MaterializedLiteral* materialized_literal =
element->AsMaterializedLiteral();
if (materialized_literal != nullptr) {
- int subliteral_depth = materialized_literal->InitDepthAndFlags() + 1;
- if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
+ LiteralBoilerplateBuilder::InitDepthAndFlags(materialized_literal);
+ depth_acc = kNotShallow;
}
if (!element->IsCompileTimeValue()) {
@@ -592,15 +644,15 @@ int ArrayLiteral::InitDepthAndFlags() {
// Array literals always need an initial allocation site to properly track
// elements transitions.
set_needs_initial_allocation_site(true);
- return depth_acc;
}
template <typename IsolateT>
-void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
+void ArrayLiteralBoilerplateBuilder::BuildBoilerplateDescription(
+ IsolateT* isolate) {
if (!boilerplate_description_.is_null()) return;
int constants_length =
- first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
+ first_spread_index_ >= 0 ? first_spread_index_ : values_->length();
ElementsKind kind = boilerplate_descriptor_kind();
bool use_doubles = IsDoubleElementsKind(kind);
@@ -616,7 +668,7 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
// Fill in the literals.
int array_index = 0;
for (; array_index < constants_length; array_index++) {
- Expression* element = values()->at(array_index);
+ Expression* element = values_->at(array_index);
DCHECK(!element->IsSpread());
if (use_doubles) {
Literal* literal = element->AsLiteral();
@@ -636,7 +688,7 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
} else {
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != nullptr) {
- m_literal->BuildConstants(isolate);
+ BuildConstants(isolate, m_literal);
}
// New handle scope here, needs to be after BuildContants().
@@ -655,11 +707,9 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
boilerplate_value = Smi::zero();
}
- DCHECK_EQ(
- boilerplate_descriptor_kind(),
- GetMoreGeneralElementsKind(boilerplate_descriptor_kind(),
- boilerplate_value.OptimalElementsKind(
- GetPtrComprCageBase(*elements))));
+ DCHECK_EQ(kind, GetMoreGeneralElementsKind(
+ kind, boilerplate_value.OptimalElementsKind(
+ GetPtrComprCageBase(*elements))));
FixedArray::cast(*elements).set(array_index, boilerplate_value);
}
@@ -667,130 +717,120 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
- if (is_simple() && depth() == 1 && array_index > 0 &&
+ if (is_simple() && depth() == kShallow && array_index > 0 &&
IsSmiOrObjectElementsKind(kind)) {
- elements->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
+ elements->set_map_safe_transition(
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
}
boilerplate_description_ =
isolate->factory()->NewArrayBoilerplateDescription(kind, elements);
}
template EXPORT_TEMPLATE_DEFINE(
- V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(Isolate*
- isolate);
+ V8_BASE_EXPORT) void ArrayLiteralBoilerplateBuilder::
+ BuildBoilerplateDescription(Isolate* isolate);
template EXPORT_TEMPLATE_DEFINE(
- V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate*
- isolate);
+ V8_BASE_EXPORT) void ArrayLiteralBoilerplateBuilder::
+ BuildBoilerplateDescription(LocalIsolate*
-bool ArrayLiteral::IsFastCloningSupported() const {
- return depth() <= 1 &&
- values_.length() <=
+ isolate);
+
+bool ArrayLiteralBoilerplateBuilder::IsFastCloningSupported() const {
+ return depth() <= kShallow &&
+ values_->length() <=
ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}
bool MaterializedLiteral::IsSimple() const {
- if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
- if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
+ if (IsArrayLiteral()) return AsArrayLiteral()->builder()->is_simple();
+ if (IsObjectLiteral()) return AsObjectLiteral()->builder()->is_simple();
DCHECK(IsRegExpLiteral());
return false;
}
-template <typename IsolateT>
-Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
- IsolateT* isolate) {
- if (expression->IsLiteral()) {
- return expression->AsLiteral()->BuildValue(isolate);
+// static
+void LiteralBoilerplateBuilder::InitDepthAndFlags(MaterializedLiteral* expr) {
+ if (expr->IsArrayLiteral()) {
+ return expr->AsArrayLiteral()->builder()->InitDepthAndFlags();
}
- if (expression->IsCompileTimeValue()) {
- if (expression->IsObjectLiteral()) {
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- DCHECK(object_literal->is_simple());
- return object_literal->boilerplate_description();
- } else {
- DCHECK(expression->IsArrayLiteral());
- ArrayLiteral* array_literal = expression->AsArrayLiteral();
- DCHECK(array_literal->is_simple());
- return array_literal->boilerplate_description();
- }
+ if (expr->IsObjectLiteral()) {
+ return expr->AsObjectLiteral()->builder()->InitDepthAndFlags();
}
- return isolate->factory()->uninitialized_value();
+ DCHECK(expr->IsRegExpLiteral());
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<Object> MaterializedLiteral::GetBoilerplateValue(
- Expression* expression, Isolate* isolate);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<Object> MaterializedLiteral::GetBoilerplateValue(
- Expression* expression, LocalIsolate* isolate);
-int MaterializedLiteral::InitDepthAndFlags() {
- if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags();
- if (IsObjectLiteral()) return AsObjectLiteral()->InitDepthAndFlags();
- DCHECK(IsRegExpLiteral());
- return 1;
-}
+bool MaterializedLiteral::NeedsInitialAllocationSite(
-bool MaterializedLiteral::NeedsInitialAllocationSite() {
+) {
if (IsArrayLiteral()) {
- return AsArrayLiteral()->needs_initial_allocation_site();
+ return AsArrayLiteral()->builder()->needs_initial_allocation_site();
}
if (IsObjectLiteral()) {
- return AsObjectLiteral()->needs_initial_allocation_site();
+ return AsObjectLiteral()->builder()->needs_initial_allocation_site();
}
DCHECK(IsRegExpLiteral());
return false;
}
template <typename IsolateT>
-void MaterializedLiteral::BuildConstants(IsolateT* isolate) {
- if (IsArrayLiteral()) {
- AsArrayLiteral()->BuildBoilerplateDescription(isolate);
+void LiteralBoilerplateBuilder::BuildConstants(IsolateT* isolate,
+ MaterializedLiteral* expr) {
+ if (expr->IsArrayLiteral()) {
+ expr->AsArrayLiteral()->builder()->BuildBoilerplateDescription(isolate);
return;
}
- if (IsObjectLiteral()) {
- AsObjectLiteral()->BuildBoilerplateDescription(isolate);
+ if (expr->IsObjectLiteral()) {
+ expr->AsObjectLiteral()->builder()->BuildBoilerplateDescription(isolate);
return;
}
- DCHECK(IsRegExpLiteral());
+ DCHECK(expr->IsRegExpLiteral());
}
-template EXPORT_TEMPLATE_DEFINE(
- V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(Isolate* isolate);
-template EXPORT_TEMPLATE_DEFINE(
- V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(LocalIsolate*
- isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void LiteralBoilerplateBuilder::
+ BuildConstants(Isolate* isolate, MaterializedLiteral* expr);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void LiteralBoilerplateBuilder::
+ BuildConstants(LocalIsolate* isolate, MaterializedLiteral* expr);
template <typename IsolateT>
Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
IsolateT* isolate) {
- Handle<FixedArray> raw_strings = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> raw_strings_handle = isolate->factory()->NewFixedArray(
this->raw_strings()->length(), AllocationType::kOld);
bool raw_and_cooked_match = true;
- for (int i = 0; i < raw_strings->length(); ++i) {
- if (this->raw_strings()->at(i) != this->cooked_strings()->at(i)) {
- // If the AstRawStrings don't match, then neither should the allocated
- // Strings, since the AstValueFactory should have deduplicated them
- // already.
- DCHECK_IMPLIES(this->cooked_strings()->at(i) != nullptr,
- *this->cooked_strings()->at(i)->string() !=
- *this->raw_strings()->at(i)->string());
-
- raw_and_cooked_match = false;
+ {
+ DisallowGarbageCollection no_gc;
+ FixedArray raw_strings = *raw_strings_handle;
+
+ for (int i = 0; i < raw_strings.length(); ++i) {
+ if (this->raw_strings()->at(i) != this->cooked_strings()->at(i)) {
+ // If the AstRawStrings don't match, then neither should the allocated
+ // Strings, since the AstValueFactory should have deduplicated them
+ // already.
+ DCHECK_IMPLIES(this->cooked_strings()->at(i) != nullptr,
+ *this->cooked_strings()->at(i)->string() !=
+ *this->raw_strings()->at(i)->string());
+
+ raw_and_cooked_match = false;
+ }
+ raw_strings.set(i, *this->raw_strings()->at(i)->string());
}
- raw_strings->set(i, *this->raw_strings()->at(i)->string());
}
- Handle<FixedArray> cooked_strings = raw_strings;
+ Handle<FixedArray> cooked_strings_handle = raw_strings_handle;
if (!raw_and_cooked_match) {
- cooked_strings = isolate->factory()->NewFixedArray(
+ cooked_strings_handle = isolate->factory()->NewFixedArray(
this->cooked_strings()->length(), AllocationType::kOld);
- for (int i = 0; i < cooked_strings->length(); ++i) {
+ DisallowGarbageCollection no_gc;
+ FixedArray cooked_strings = *cooked_strings_handle;
+ ReadOnlyRoots roots(isolate);
+ for (int i = 0; i < cooked_strings.length(); ++i) {
if (this->cooked_strings()->at(i) != nullptr) {
- cooked_strings->set(i, *this->cooked_strings()->at(i)->string());
+ cooked_strings.set(i, *this->cooked_strings()->at(i)->string());
} else {
- cooked_strings->set(i, ReadOnlyRoots(isolate).undefined_value());
+ cooked_strings.set_undefined(roots, i);
}
}
}
- return isolate->factory()->NewTemplateObjectDescription(raw_strings,
- cooked_strings);
+ return isolate->factory()->NewTemplateObjectDescription(
+ raw_strings_handle, cooked_strings_handle);
}
template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index f7b3f247f7..1fb5abdf8f 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -10,6 +10,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
+#include "src/base/pointer-with-payload.h"
#include "src/base/threaded-list.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/label.h"
@@ -1048,26 +1049,13 @@ class MaterializedLiteral : public Expression {
protected:
MaterializedLiteral(int pos, NodeType type) : Expression(pos, type) {}
- friend class CompileTimeValue;
- friend class ArrayLiteral;
- friend class ObjectLiteral;
-
- // Populate the depth field and any flags the literal has, returns the depth.
- int InitDepthAndFlags();
-
bool NeedsInitialAllocationSite();
- // Populate the constant properties/elements fixed array.
- template <typename IsolateT>
- void BuildConstants(IsolateT* isolate);
+ friend class CompileTimeValue;
- // If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is_simple
- // then return an Array or Object Boilerplate Description
- // Otherwise, return undefined literal as the placeholder
- // in the object literal boilerplate.
- template <typename IsolateT>
- Handle<Object> GetBoilerplateValue(Expression* expression, IsolateT* isolate);
+ friend class LiteralBoilerplateBuilder;
+ friend class ArrayLiteralBoilerplateBuilder;
+ friend class ObjectLiteralBoilerplateBuilder;
};
// Node for capturing a regexp literal.
@@ -1090,8 +1078,7 @@ class RegExpLiteral final : public MaterializedLiteral {
const AstRawString* const pattern_;
};
-// Base class for Array and Object literals, providing common code for handling
-// nested subliterals.
+// Base class for Array and Object literals
class AggregateLiteral : public MaterializedLiteral {
public:
enum Flags {
@@ -1102,22 +1089,47 @@ class AggregateLiteral : public MaterializedLiteral {
kIsShallowAndDisableMementos = kIsShallow | kDisableMementos,
};
- bool is_initialized() const { return 0 < depth_; }
- int depth() const {
+ protected:
+ AggregateLiteral(int pos, NodeType type) : MaterializedLiteral(pos, type) {}
+};
+
+// Base class for build literal boilerplate, providing common code for handling
+// nested subliterals.
+class LiteralBoilerplateBuilder {
+ public:
+ enum DepthKind { kUninitialized, kShallow, kNotShallow };
+
+ static constexpr int kDepthKindBits = 2;
+ STATIC_ASSERT((1 << kDepthKindBits) > kNotShallow);
+
+ bool is_initialized() const {
+ return kUninitialized != DepthField::decode(bit_field_);
+ }
+ DepthKind depth() const {
DCHECK(is_initialized());
- return depth_;
+ return DepthField::decode(bit_field_);
}
- bool is_shallow() const { return depth() == 1; }
+ // If the expression is a literal, return the literal value;
+ // if the expression is a materialized literal and is_simple
+ // then return an Array or Object Boilerplate Description
+ // Otherwise, return undefined literal as the placeholder
+ // in the object literal boilerplate.
+ template <typename IsolateT>
+ static Handle<Object> GetBoilerplateValue(Expression* expression,
+ IsolateT* isolate);
+
+ bool is_shallow() const { return depth() == kShallow; }
bool needs_initial_allocation_site() const {
return NeedsInitialAllocationSiteField::decode(bit_field_);
}
int ComputeFlags(bool disable_mementos = false) const {
- int flags = kNoFlags;
- if (is_shallow()) flags |= kIsShallow;
- if (disable_mementos) flags |= kDisableMementos;
- if (needs_initial_allocation_site()) flags |= kNeedsInitialAllocationSite;
+ int flags = AggregateLiteral::kNoFlags;
+ if (is_shallow()) flags |= AggregateLiteral::kIsShallow;
+ if (disable_mementos) flags |= AggregateLiteral::kDisableMementos;
+ if (needs_initial_allocation_site())
+ flags |= AggregateLiteral::kNeedsInitialAllocationSite;
return flags;
}
@@ -1130,19 +1142,22 @@ class AggregateLiteral : public MaterializedLiteral {
}
private:
- int depth_ : 31;
- using NeedsInitialAllocationSiteField =
- MaterializedLiteral::NextBitField<bool, 1>;
+ // we actually only care three conditions for depth
+ // - depth == kUninitialized, DCHECK(!is_initialized())
+ // - depth == kShallow, which means depth = 1
+ // - depth == kNotShallow, which means depth > 1
+ using DepthField = base::BitField<DepthKind, 0, kDepthKindBits>;
+ using NeedsInitialAllocationSiteField = DepthField::Next<bool, 1>;
using IsSimpleField = NeedsInitialAllocationSiteField::Next<bool, 1>;
using BoilerplateDescriptorKindField =
IsSimpleField::Next<ElementsKind, kFastElementsKindBits>;
protected:
- friend class AstNodeFactory;
- friend Zone;
- AggregateLiteral(int pos, NodeType type)
- : MaterializedLiteral(pos, type), depth_(0) {
- bit_field_ |=
+ uint32_t bit_field_;
+
+ LiteralBoilerplateBuilder() {
+ bit_field_ =
+ DepthField::encode(kUninitialized) |
NeedsInitialAllocationSiteField::encode(false) |
IsSimpleField::encode(false) |
BoilerplateDescriptorKindField::encode(FIRST_FAST_ELEMENTS_KIND);
@@ -1157,15 +1172,22 @@ class AggregateLiteral : public MaterializedLiteral {
bit_field_ = BoilerplateDescriptorKindField::update(bit_field_, kind);
}
- void set_depth(int depth) {
+ void set_depth(DepthKind depth) {
DCHECK(!is_initialized());
- depth_ = depth;
+ bit_field_ = DepthField::update(bit_field_, depth);
}
void set_needs_initial_allocation_site(bool required) {
bit_field_ = NeedsInitialAllocationSiteField::update(bit_field_, required);
}
+ // Populate the depth field and any flags the literal builder has
+ static void InitDepthAndFlags(MaterializedLiteral* expr);
+
+ // Populate the constant properties/elements fixed array.
+ template <typename IsolateT>
+ void BuildConstants(IsolateT* isolate, MaterializedLiteral* expr);
+
template <class T, int size>
using NextBitField = BoilerplateDescriptorKindField::Next<T, size>;
};
@@ -1185,7 +1207,7 @@ class LiteralProperty : public ZoneObject {
LiteralProperty(Expression* key, Expression* value, bool is_computed_name)
: key_and_is_computed_name_(key, is_computed_name), value_(value) {}
- PointerWithPayload<Expression, bool, 1> key_and_is_computed_name_;
+ base::PointerWithPayload<Expression, bool, 1> key_and_is_computed_name_;
Expression* value_;
};
@@ -1229,18 +1251,30 @@ class ObjectLiteralProperty final : public LiteralProperty {
bool emit_store_;
};
-// An object literal has a boilerplate object that is used
-// for minimizing the work when constructing it at runtime.
-class ObjectLiteral final : public AggregateLiteral {
+// class for build object boilerplate
+class ObjectLiteralBoilerplateBuilder final : public LiteralBoilerplateBuilder {
public:
using Property = ObjectLiteralProperty;
+ ObjectLiteralBoilerplateBuilder(ZoneList<Property*>* properties,
+ uint32_t boilerplate_properties,
+ bool has_rest_property)
+ : properties_(properties),
+ boilerplate_properties_(boilerplate_properties) {
+ bit_field_ |= HasElementsField::encode(false) |
+ HasRestPropertyField::encode(has_rest_property) |
+ FastElementsField::encode(false) |
+ HasNullPrototypeField::encode(false);
+ }
Handle<ObjectBoilerplateDescription> boilerplate_description() const {
DCHECK(!boilerplate_description_.is_null());
return boilerplate_description_;
}
+ // Determines whether the {CreateShallowArrayLiteral} builtin can be used.
+ bool IsFastCloningSupported() const;
+
int properties_count() const { return boilerplate_properties_; }
- const ZonePtrList<Property>* properties() const { return &properties_; }
+ const ZonePtrList<Property>* properties() const { return properties_; }
bool has_elements() const { return HasElementsField::decode(bit_field_); }
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
@@ -1250,18 +1284,9 @@ class ObjectLiteral final : public AggregateLiteral {
return HasNullPrototypeField::decode(bit_field_);
}
- bool is_empty() const {
- DCHECK(is_initialized());
- return !has_elements() && properties_count() == 0 &&
- properties()->length() == 0;
- }
-
- bool IsEmptyObjectLiteral() const {
- return is_empty() && !has_null_prototype();
- }
-
- // Populate the depth field and flags, returns the depth.
- int InitDepthAndFlags();
+ // Populate the boilerplate description.
+ template <typename IsolateT>
+ void BuildBoilerplateDescription(IsolateT* isolate);
// Get the boilerplate description, populating it if necessary.
template <typename IsolateT>
@@ -1270,37 +1295,53 @@ class ObjectLiteral final : public AggregateLiteral {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
- return boilerplate_description();
+ return boilerplate_description_;
}
- // Populate the boilerplate description.
- template <typename IsolateT>
- void BuildBoilerplateDescription(IsolateT* isolate);
+ bool is_empty() const {
+ DCHECK(is_initialized());
+ return !has_elements() && properties_count() == 0 &&
+ properties()->length() == 0;
+ }
+ // Assemble bitfield of flags for the CreateObjectLiteral helper.
+ int ComputeFlags(bool disable_mementos = false) const;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- void CalculateEmitStore(Zone* zone);
+ bool IsEmptyObjectLiteral() const {
+ return is_empty() && !has_null_prototype();
+ }
- // Determines whether the {CreateShallowObjectLiteratal} builtin can be used.
- bool IsFastCloningSupported() const;
+ int EncodeLiteralType();
- // Assemble bitfield of flags for the CreateObjectLiteral helper.
- int ComputeFlags(bool disable_mementos = false) const {
- int flags = AggregateLiteral::ComputeFlags(disable_mementos);
- if (fast_elements()) flags |= kFastElements;
- if (has_null_prototype()) flags |= kHasNullPrototype;
- return flags;
- }
+ // Populate the depth field and flags, returns the depth.
+ void InitDepthAndFlags();
- int EncodeLiteralType() {
- int flags = kNoFlags;
- if (fast_elements()) flags |= kFastElements;
- if (has_null_prototype()) flags |= kHasNullPrototype;
- return flags;
+ private:
+ void InitFlagsForPendingNullPrototype(int i);
+
+ void set_has_elements(bool has_elements) {
+ bit_field_ = HasElementsField::update(bit_field_, has_elements);
}
+ void set_fast_elements(bool fast_elements) {
+ bit_field_ = FastElementsField::update(bit_field_, fast_elements);
+ }
+ void set_has_null_protoype(bool has_null_prototype) {
+ bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype);
+ }
+ ZoneList<Property*>* properties_;
+ uint32_t boilerplate_properties_;
+ Handle<ObjectBoilerplateDescription> boilerplate_description_;
- Variable* home_object() const { return home_object_; }
+ using HasElementsField = LiteralBoilerplateBuilder::NextBitField<bool, 1>;
+ using HasRestPropertyField = HasElementsField::Next<bool, 1>;
+ using FastElementsField = HasRestPropertyField::Next<bool, 1>;
+ using HasNullPrototypeField = FastElementsField::Next<bool, 1>;
+};
+
+// An object literal has a boilerplate object that is used
+// for minimizing the work when constructing it at runtime.
+class ObjectLiteral final : public AggregateLiteral {
+ public:
+ using Property = ObjectLiteralProperty;
enum Flags {
kFastElements = 1 << 3,
@@ -1310,6 +1351,19 @@ class ObjectLiteral final : public AggregateLiteral {
static_cast<int>(AggregateLiteral::kNeedsInitialAllocationSite) <
static_cast<int>(kFastElements));
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ void CalculateEmitStore(Zone* zone);
+
+ ZoneList<Property*>* properties() { return &properties_; }
+
+ const ObjectLiteralBoilerplateBuilder* builder() const { return &builder_; }
+
+ ObjectLiteralBoilerplateBuilder* builder() { return &builder_; }
+
+ Variable* home_object() const { return home_object_; }
+
private:
friend class AstNodeFactory;
friend Zone;
@@ -1318,51 +1372,38 @@ class ObjectLiteral final : public AggregateLiteral {
uint32_t boilerplate_properties, int pos,
bool has_rest_property, Variable* home_object)
: AggregateLiteral(pos, kObjectLiteral),
- boilerplate_properties_(boilerplate_properties),
properties_(properties.ToConstVector(), zone),
- home_object_(home_object) {
- bit_field_ |= HasElementsField::encode(false) |
- HasRestPropertyField::encode(has_rest_property) |
- FastElementsField::encode(false) |
- HasNullPrototypeField::encode(false);
- }
-
- void InitFlagsForPendingNullPrototype(int i);
+ home_object_(home_object),
+ builder_(&properties_, boilerplate_properties, has_rest_property) {}
- void set_has_elements(bool has_elements) {
- bit_field_ = HasElementsField::update(bit_field_, has_elements);
- }
- void set_fast_elements(bool fast_elements) {
- bit_field_ = FastElementsField::update(bit_field_, fast_elements);
- }
- void set_has_null_protoype(bool has_null_prototype) {
- bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype);
- }
- uint32_t boilerplate_properties_;
- Handle<ObjectBoilerplateDescription> boilerplate_description_;
ZoneList<Property*> properties_;
Variable* home_object_;
-
- using HasElementsField = AggregateLiteral::NextBitField<bool, 1>;
- using HasRestPropertyField = HasElementsField::Next<bool, 1>;
- using FastElementsField = HasRestPropertyField::Next<bool, 1>;
- using HasNullPrototypeField = FastElementsField::Next<bool, 1>;
+ ObjectLiteralBoilerplateBuilder builder_;
};
-// An array literal has a literals object that is used
-// for minimizing the work when constructing it at runtime.
-class ArrayLiteral final : public AggregateLiteral {
+// class for build boilerplate for array literal, including
+// array_literal, spread call elements
+class ArrayLiteralBoilerplateBuilder final : public LiteralBoilerplateBuilder {
public:
+ ArrayLiteralBoilerplateBuilder(const ZonePtrList<Expression>* values,
+ int first_spread_index)
+ : values_(values), first_spread_index_(first_spread_index) {}
Handle<ArrayBoilerplateDescription> boilerplate_description() const {
return boilerplate_description_;
}
- const ZonePtrList<Expression>* values() const { return &values_; }
+ // Determines whether the {CreateShallowArrayLiteral} builtin can be used.
+ bool IsFastCloningSupported() const;
+
+ // Assemble bitfield of flags for the CreateArrayLiteral helper.
+ int ComputeFlags(bool disable_mementos = false) const {
+ return LiteralBoilerplateBuilder::ComputeFlags(disable_mementos);
+ }
int first_spread_index() const { return first_spread_index_; }
- // Populate the depth field and flags, returns the depth.
- int InitDepthAndFlags();
+ // Populate the depth field and flags
+ void InitDepthAndFlags();
// Get the boilerplate description, populating it if necessary.
template <typename IsolateT>
@@ -1378,13 +1419,19 @@ class ArrayLiteral final : public AggregateLiteral {
template <typename IsolateT>
void BuildBoilerplateDescription(IsolateT* isolate);
- // Determines whether the {CreateShallowArrayLiteral} builtin can be used.
- bool IsFastCloningSupported() const;
+ const ZonePtrList<Expression>* values_;
+ int first_spread_index_;
+ Handle<ArrayBoilerplateDescription> boilerplate_description_;
+};
- // Assemble bitfield of flags for the CreateArrayLiteral helper.
- int ComputeFlags(bool disable_mementos = false) const {
- return AggregateLiteral::ComputeFlags(disable_mementos);
- }
+// An array literal has a literals object that is used
+// for minimizing the work when constructing it at runtime.
+class ArrayLiteral final : public AggregateLiteral {
+ public:
+ const ZonePtrList<Expression>* values() const { return &values_; }
+
+ const ArrayLiteralBoilerplateBuilder* builder() const { return &builder_; }
+ ArrayLiteralBoilerplateBuilder* builder() { return &builder_; }
private:
friend class AstNodeFactory;
@@ -1393,12 +1440,11 @@ class ArrayLiteral final : public AggregateLiteral {
ArrayLiteral(Zone* zone, const ScopedPtrList<Expression>& values,
int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
- first_spread_index_(first_spread_index),
- values_(values.ToConstVector(), zone) {}
+ values_(values.ToConstVector(), zone),
+ builder_(&values_, first_spread_index) {}
- int first_spread_index_;
- Handle<ArrayBoilerplateDescription> boilerplate_description_;
ZonePtrList<Expression> values_;
+ ArrayLiteralBoilerplateBuilder builder_;
};
enum class HoleCheckMode { kRequired, kElided };
@@ -2245,12 +2291,8 @@ class FunctionLiteral final : public Expression {
return HasStaticPrivateMethodsOrAccessorsField::decode(bit_field_);
}
- void set_class_scope_has_private_brand(bool value) {
- bit_field_ = ClassScopeHasPrivateBrandField::update(bit_field_, value);
- }
- bool class_scope_has_private_brand() const {
- return ClassScopeHasPrivateBrandField::decode(bit_field_);
- }
+ void set_class_scope_has_private_brand(bool value);
+ bool class_scope_has_private_brand() const;
bool private_name_lookup_skips_outer_class() const;
@@ -2299,10 +2341,8 @@ class FunctionLiteral final : public Expression {
using HasDuplicateParameters = Pretenure::Next<bool, 1>;
using RequiresInstanceMembersInitializer =
HasDuplicateParameters::Next<bool, 1>;
- using ClassScopeHasPrivateBrandField =
- RequiresInstanceMembersInitializer::Next<bool, 1>;
using HasStaticPrivateMethodsOrAccessorsField =
- ClassScopeHasPrivateBrandField::Next<bool, 1>;
+ RequiresInstanceMembersInitializer::Next<bool, 1>;
using HasBracesField = HasStaticPrivateMethodsOrAccessorsField::Next<bool, 1>;
using ShouldParallelCompileField = HasBracesField::Next<bool, 1>;
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 6758079823..679472c7c6 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -166,17 +166,19 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* avfactory)
- : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE,
- FunctionKind::kModule),
- module_descriptor_(avfactory->zone()->New<SourceTextModuleDescriptor>(
- avfactory->zone())) {
+ : DeclarationScope(avfactory->single_parse_zone(), script_scope,
+ MODULE_SCOPE, FunctionKind::kModule),
+ module_descriptor_(
+ avfactory->single_parse_zone()->New<SourceTextModuleDescriptor>(
+ avfactory->single_parse_zone())) {
set_language_mode(LanguageMode::kStrict);
DeclareThis(avfactory);
}
ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
- : DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info),
+ : DeclarationScope(avfactory->single_parse_zone(), MODULE_SCOPE, avfactory,
+ scope_info),
module_descriptor_(nullptr) {
set_language_mode(LanguageMode::kStrict);
}
@@ -195,7 +197,7 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
: Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info),
rare_data_and_is_parsing_heritage_(nullptr) {
set_language_mode(LanguageMode::kStrict);
- if (scope_info->HasClassBrand()) {
+ if (scope_info->ClassScopeHasPrivateBrand()) {
Variable* brand =
LookupInScopeInfo(ast_value_factory->dot_brand_string(), this);
DCHECK_NOT_NULL(brand);
@@ -204,11 +206,10 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
// If the class variable is context-allocated and its index is
// saved for deserialization, deserialize it.
- if (scope_info->HasSavedClassVariableIndex()) {
- int index = scope_info->SavedClassVariableContextLocalIndex();
- DCHECK_GE(index, 0);
- DCHECK_LT(index, scope_info->ContextLocalCount());
- String name = scope_info->ContextLocalName(index);
+ if (scope_info->HasSavedClassVariable()) {
+ String name;
+ int index;
+ std::tie(name, index) = scope_info->SavedClassVariable();
DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst);
DCHECK_EQ(scope_info->ContextLocalInitFlag(index),
InitializationFlag::kNeedsInitialization);
@@ -222,6 +223,10 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
var->AllocateTo(VariableLocation::CONTEXT,
Context::MIN_CONTEXT_SLOTS + index);
}
+
+ DCHECK(scope_info->HasPositionInfo());
+ set_start_position(scope_info->StartPosition());
+ set_end_position(scope_info->EndPosition());
}
template ClassScope::ClassScope(Isolate* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
@@ -252,11 +257,9 @@ Scope::Scope(Zone* zone, ScopeType scope_type,
if (scope_type == BLOCK_SCOPE) {
// Set is_block_scope_for_object_literal_ based on the existince of the home
// object variable (we don't store it explicitly).
- VariableLookupResult lookup_result;
DCHECK_NOT_NULL(ast_value_factory);
- int home_object_index = ScopeInfo::ContextSlotIndex(
- *scope_info, *(ast_value_factory->dot_home_object_string()->string()),
- &lookup_result);
+ int home_object_index = scope_info->ContextSlotIndex(
+ ast_value_factory->dot_home_object_string()->string());
DCHECK_IMPLIES(home_object_index >= 0,
scope_type == CLASS_SCOPE || scope_type == BLOCK_SCOPE);
if (home_object_index >= 0) {
@@ -277,6 +280,10 @@ DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
DCHECK(!is_eval_scope());
sloppy_eval_can_extend_vars_ = true;
}
+ if (scope_info->ClassScopeHasPrivateBrand()) {
+ DCHECK(IsClassConstructor(function_kind()));
+ class_scope_has_private_brand_ = true;
+ }
}
Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
@@ -324,6 +331,7 @@ void DeclarationScope::SetDefaults() {
was_lazily_parsed_ = false;
is_skipped_function_ = false;
preparse_data_builder_ = nullptr;
+ class_scope_has_private_brand_ = false;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@@ -469,7 +477,8 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
DCHECK_EQ(scope_info.ContextLocalCount(), 1);
DCHECK_EQ(scope_info.ContextLocalMode(0), VariableMode::kVar);
DCHECK_EQ(scope_info.ContextLocalInitFlag(0), kCreatedInitialized);
- String name = scope_info.ContextLocalName(0);
+ DCHECK(scope_info.HasInlinedLocalNames());
+ String name = scope_info.ContextInlinedLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info.ContextLocalMaybeAssignedFlag(0);
outer_scope =
@@ -499,10 +508,8 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
: ScopeInfo();
}
- if (deserialization_mode == DeserializationMode::kIncludingVariables &&
- script_scope->scope_info_.is_null()) {
- script_scope->SetScriptScopeInfo(
- ReadOnlyRoots(isolate).global_this_binding_scope_info_handle());
+ if (deserialization_mode == DeserializationMode::kIncludingVariables) {
+ SetScriptScopeInfo(isolate, script_scope);
}
if (innermost_scope == nullptr) return script_scope;
@@ -510,6 +517,24 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
return innermost_scope;
}
+template <typename IsolateT>
+void Scope::SetScriptScopeInfo(IsolateT* isolate,
+ DeclarationScope* script_scope) {
+ if (script_scope->scope_info_.is_null()) {
+ script_scope->SetScriptScopeInfo(
+ ReadOnlyRoots(isolate).global_this_binding_scope_info_handle());
+ }
+}
+
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) void Scope::SetScriptScopeInfo(Isolate* isolate,
+ DeclarationScope*
+ script_scope);
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) void Scope::SetScriptScopeInfo(LocalIsolate* isolate,
+ DeclarationScope*
+ script_scope);
+
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Scope* Scope::DeserializeScopeChain(
Isolate* isolate, Zone* zone, ScopeInfo scope_info,
@@ -722,14 +747,28 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
DCHECK(is_function_scope());
DCHECK(!is_arrow_scope());
+ // Because when arguments_ is not nullptr, we already declared
+ // "arguments exotic object" to add it into parameters before
+ // impl()->InsertShadowingVarBindingInitializers, so here
+ // only declare "arguments exotic object" when arguments_
+ // is nullptr
+ if (arguments_ != nullptr) {
+ return;
+ }
+
// Declare 'arguments' variable which exists in all non arrow functions. Note
// that it might never be accessed, in which case it won't be allocated during
// variable allocation.
- bool was_added;
+ bool was_added = false;
+
arguments_ =
Declare(zone(), ast_value_factory->arguments_string(), VariableMode::kVar,
NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added);
- if (!was_added && IsLexicalVariableMode(arguments_->mode())) {
+ // According to ES#sec-functiondeclarationinstantiation step 18
+ // we should set argumentsObjectNeeded to false if has lexical
+ // declared arguments only when hasParameterExpressions is false
+ if (!was_added && IsLexicalVariableMode(arguments_->mode()) &&
+ has_simple_parameters_) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
arguments_ = nullptr;
@@ -939,8 +978,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
{
location = VariableLocation::CONTEXT;
- index =
- ScopeInfo::ContextSlotIndex(scope_info, name_handle, &lookup_result);
+ index = scope_info.ContextSlotIndex(name->string(), &lookup_result);
found = index >= 0;
}
@@ -1446,7 +1484,7 @@ bool Scope::NeedsScopeInfo() const {
DCHECK(!already_resolved_);
DCHECK(GetClosureScope()->ShouldEagerCompile());
// The debugger expects all functions to have scope infos.
- // TODO(jochen|yangguo): Remove this requirement.
+ // TODO(yangguo): Remove this requirement.
if (is_function_scope()) return true;
return NeedsContext();
}
@@ -1465,6 +1503,18 @@ DeclarationScope* Scope::GetReceiverScope() {
return scope->AsDeclarationScope();
}
+DeclarationScope* Scope::GetConstructorScope() {
+ Scope* scope = this;
+ while (scope != nullptr && !scope->IsConstructorScope()) {
+ scope = scope->outer_scope();
+ }
+ if (scope == nullptr) {
+ return nullptr;
+ }
+ DCHECK(scope->IsConstructorScope());
+ return scope->AsDeclarationScope();
+}
+
Scope* Scope::GetHomeObjectScope() {
Scope* scope = this;
while (scope != nullptr && !scope->is_home_object_scope()) {
@@ -1532,6 +1582,11 @@ void Scope::ForEach(FunctionType callback) {
}
}
+bool Scope::IsConstructorScope() const {
+ return is_declaration_scope() &&
+ IsClassConstructor(AsDeclarationScope()->function_kind());
+}
+
bool Scope::IsOuterScopeOf(Scope* other) const {
Scope* scope = other;
while (scope) {
@@ -1634,18 +1689,18 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
has_rest_ = false;
function_ = nullptr;
- DCHECK_NE(zone(), ast_value_factory->zone());
+ DCHECK_NE(zone(), ast_value_factory->single_parse_zone());
// Make sure this scope and zone aren't used for allocation anymore.
{
// Get the zone, while variables_ is still valid
Zone* zone = this->zone();
variables_.Invalidate();
- zone->ReleaseMemory();
+ zone->Reset();
}
if (aborted) {
// Prepare scope for use in the outer zone.
- variables_ = VariableMap(ast_value_factory->zone());
+ variables_ = VariableMap(ast_value_factory->single_parse_zone());
if (!IsArrowFunction(function_kind_)) {
has_simple_parameters_ = true;
DeclareDefaultFunctionVariables(ast_value_factory);
@@ -1906,6 +1961,9 @@ void Scope::Print(int n) {
}
Indent(n1, "// ");
PrintF("%s\n", FunctionKind2String(scope->function_kind()));
+ if (scope->class_scope_has_private_brand()) {
+ Indent(n1, "// class scope has private brand\n");
+ }
}
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
@@ -2656,7 +2714,7 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, IsolateT* isolate) {
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
// it has one, even if it doesn't need a scope info.
- // TODO(jochen|yangguo): Remove this requirement.
+ // TODO(yangguo): Remove this requirement.
if (scope->scope_info_.is_null()) {
scope->scope_info_ =
ScopeInfo::Create(isolate, scope->zone(), scope, outer_scope);
@@ -2684,6 +2742,48 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
+VariableProxy* Scope::NewHomeObjectVariableProxy(AstNodeFactory* factory,
+ const AstRawString* name,
+ int start_pos) {
+ // VariableProxies of the home object cannot be resolved like a normal
+ // variable. Consider the case of a super.property usage in heritage position:
+ //
+ // class C extends super.foo { m() { super.bar(); } }
+ //
+ // The super.foo property access is logically nested under C's class scope,
+ // which also has a home object due to its own method m's usage of
+ // super.bar(). However, super.foo must resolve super in C's outer scope.
+ //
+ // Because of the above, home object VariableProxies are always made directly
+ // on the Scope that needs the home object instead of the innermost scope.
+ DCHECK(needs_home_object());
+ if (!scope_info_.is_null()) {
+ // This is a lazy compile, so the home object's context slot is already
+ // known.
+ Variable* home_object = variables_.Lookup(name);
+ if (home_object == nullptr) {
+ VariableLookupResult lookup_result;
+ int index = scope_info_->ContextSlotIndex(name->string(), &lookup_result);
+ DCHECK_GE(index, 0);
+ bool was_added;
+ home_object = variables_.Declare(zone(), this, name, lookup_result.mode,
+ NORMAL_VARIABLE, lookup_result.init_flag,
+ lookup_result.maybe_assigned_flag,
+ IsStaticFlag::kNotStatic, &was_added);
+ DCHECK(was_added);
+ home_object->AllocateTo(VariableLocation::CONTEXT, index);
+ }
+ return factory->NewVariableProxy(home_object, start_pos);
+ }
+ // This is not a lazy compile. Add the unresolved home object VariableProxy to
+ // the unresolved list of the home object scope, which is not necessarily the
+ // innermost scope.
+ VariableProxy* proxy =
+ factory->NewVariableProxy(name, NORMAL_VARIABLE, start_pos);
+ AddUnresolved(proxy);
+ return proxy;
+}
+
bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) {
switch (a) {
case VariableMode::kPrivateGetterOnly:
@@ -2695,53 +2795,42 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) {
}
}
-void ClassScope::ReplaceReparsedClassScope(Isolate* isolate,
- AstValueFactory* ast_value_factory,
- ClassScope* old_scope) {
- DCHECK_EQ(outer_scope_, old_scope->outer_scope());
- Scope* outer = outer_scope_;
-
- outer->RemoveInnerScope(old_scope);
- // The outer scope should only have this deserialized inner scope,
- // otherwise we have to update the sibling scopes.
- DCHECK_EQ(outer->inner_scope_, this);
- DCHECK_NULL(sibling_);
-
- DCHECK_NULL(old_scope->inner_scope_);
+void ClassScope::FinalizeReparsedClassScope(
+ Isolate* isolate, MaybeHandle<ScopeInfo> maybe_scope_info,
+ AstValueFactory* ast_value_factory, bool needs_allocation_fixup) {
+ // Set this bit so that DelcarationScope::Analyze recognizes
+ // the reparsed instance member initializer scope.
+#ifdef DEBUG
+ is_reparsed_class_scope_ = true;
+#endif
- Handle<ScopeInfo> scope_info = old_scope->scope_info_;
- DCHECK(!scope_info.is_null());
- DCHECK(!scope_info->IsEmpty());
+ if (!needs_allocation_fixup) {
+ return;
+ }
// Restore variable allocation results for context-allocated variables in
// the class scope from ScopeInfo, so that we don't need to run
// resolution and allocation on these variables again when generating
// code for the initializer function.
- int context_local_count = scope_info->ContextLocalCount();
+ DCHECK(!maybe_scope_info.is_null());
+ Handle<ScopeInfo> scope_info = maybe_scope_info.ToHandleChecked();
+ DCHECK_EQ(scope_info->scope_type(), CLASS_SCOPE);
+ DCHECK_EQ(scope_info->StartPosition(), start_position_);
+
int context_header_length = scope_info->ContextHeaderLength();
DisallowGarbageCollection no_gc;
- for (int i = 0; i < context_local_count; ++i) {
- int slot_index = context_header_length + i;
+ for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
+ int slot_index = context_header_length + it->index();
DCHECK_LT(slot_index, scope_info->ContextLength());
- String name = scope_info->ContextLocalName(i);
const AstRawString* string = ast_value_factory->GetString(
- name, SharedStringAccessGuardIfNeeded(isolate));
- Variable* var = nullptr;
-
- var = string->IsPrivateName() ? LookupLocalPrivateName(string)
- : LookupLocal(string);
+ it->name(), SharedStringAccessGuardIfNeeded(isolate));
+ Variable* var = string->IsPrivateName() ? LookupLocalPrivateName(string)
+ : LookupLocal(string);
DCHECK_NOT_NULL(var);
var->AllocateTo(VariableLocation::CONTEXT, slot_index);
}
-
scope_info_ = scope_info;
-
- // Set this bit so that DelcarationScope::Analyze recognizes
- // the reparsed instance member initializer scope.
-#ifdef DEBUG
- is_reparsed_class_scope_ = true;
-#endif
}
Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
@@ -2833,10 +2922,8 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
DCHECK_NULL(LookupLocalPrivateName(name));
DisallowGarbageCollection no_gc;
- String name_handle = *name->string();
VariableLookupResult lookup_result;
- int index =
- ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &lookup_result);
+ int index = scope_info_->ContextSlotIndex(name->string(), &lookup_result);
if (index < 0) {
return nullptr;
}
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index c04d99b4b0..6f701ead0b 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -10,16 +10,28 @@
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
+#include "src/base/pointer-with-payload.h"
#include "src/base/threaded-list.h"
#include "src/common/globals.h"
#include "src/objects/function-kind.h"
#include "src/objects/objects.h"
-#include "src/utils/pointer-with-payload.h"
#include "src/utils/utils.h"
#include "src/zone/zone-hashmap.h"
#include "src/zone/zone.h"
namespace v8 {
+
+namespace internal {
+class Scope;
+} // namespace internal
+
+namespace base {
+template <>
+struct PointerWithPayloadTraits<v8::internal::Scope> {
+ static constexpr int kAvailableBits = 1;
+};
+} // namespace base
+
namespace internal {
class AstNodeFactory;
@@ -64,13 +76,6 @@ class VariableMap : public ZoneHashMap {
Zone* zone() const { return allocator().zone(); }
};
-class Scope;
-
-template <>
-struct PointerWithPayloadTraits<Scope> {
- static constexpr int value = 1;
-};
-
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@@ -155,7 +160,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Upon move assignment we store whether the new inner scope calls eval into
// the move target calls_eval bit, and restore calls eval on the outer
// scope.
- PointerWithPayload<Scope, bool, 1> outer_scope_and_calls_eval_;
+ base::PointerWithPayload<Scope, bool, 1> outer_scope_and_calls_eval_;
Scope* top_inner_scope_;
UnresolvedList::Iterator top_unresolved_;
base::ThreadedList<Variable>::Iterator top_local_;
@@ -171,6 +176,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
+ template <typename IsolateT>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ static void SetScriptScopeInfo(IsolateT* isolate,
+ DeclarationScope* script_scope);
+
// Checks if the block scope is redundant, i.e. it does not contain any
// block scoped declarations. In that case it is removed from the scope
// tree and its children are reparented.
@@ -454,6 +464,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
kDescend
};
+ bool IsConstructorScope() const;
+
// Check is this scope is an outer scope of the given scope.
bool IsOuterScopeOf(Scope* other) const;
@@ -549,6 +561,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// 'this' is bound, and what determines the function kind.
DeclarationScope* GetReceiverScope();
+ // Find the first constructor scope. Its outer scope is where the instance
+ // members that should be initialized right after super() is called
+ // are declared.
+ DeclarationScope* GetConstructorScope();
+
// Find the first class scope or object literal block scope. This is where
// 'super' is bound.
Scope* GetHomeObjectScope();
@@ -608,6 +625,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
needs_home_object_ = true;
}
+ VariableProxy* NewHomeObjectVariableProxy(AstNodeFactory* factory,
+ const AstRawString* name,
+ int start_pos);
+
bool RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
if (inner_scope == inner_scope_) {
@@ -866,7 +887,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
FunctionKind function_kind() const { return function_kind_; }
// Inform the scope that the corresponding code uses "super".
- void RecordSuperPropertyUsage() {
+ Scope* RecordSuperPropertyUsage() {
DCHECK(IsConciseMethod(function_kind()) ||
IsAccessorFunction(function_kind()) ||
IsClassConstructor(function_kind()));
@@ -874,6 +895,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Scope* home_object_scope = GetHomeObjectScope();
DCHECK_NOT_NULL(home_object_scope);
home_object_scope->set_needs_home_object();
+ return home_object_scope;
}
bool uses_super_property() const { return uses_super_property_; }
@@ -1229,6 +1251,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// to REPL_GLOBAL. Should only be called on REPL scripts.
void RewriteReplGlobalVariables();
+ void set_class_scope_has_private_brand(bool value) {
+ class_scope_has_private_brand_ = value;
+ }
+ bool class_scope_has_private_brand() const {
+ return class_scope_has_private_brand_;
+ }
+
private:
V8_INLINE void AllocateParameter(Variable* var, int index);
@@ -1276,7 +1305,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool has_this_reference_ : 1;
bool has_this_declaration_ : 1;
bool needs_private_name_context_chain_recalc_ : 1;
-
+ bool class_scope_has_private_brand_ : 1;
// If the scope is a function scope, this is the function kind.
FunctionKind function_kind_;
@@ -1477,9 +1506,14 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
should_save_class_variable_index_ = true;
}
- void ReplaceReparsedClassScope(Isolate* isolate,
- AstValueFactory* ast_value_factory,
- ClassScope* old_scope);
+ // Finalize the reparsed class scope, called when reparsing the
+ // class scope for the initializer member function.
+ // If the reparsed scope declares any variable that needs allocation
+ // fixup using the scope info, needs_allocation_fixup is true.
+ void FinalizeReparsedClassScope(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope_info,
+ AstValueFactory* ast_value_factory,
+ bool needs_allocation_fixup);
#ifdef DEBUG
bool is_reparsed_class_scope() const { return is_reparsed_class_scope_; }
#endif
@@ -1519,7 +1553,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
rare_data_and_is_parsing_heritage_.SetPayload(v);
}
- PointerWithPayload<RareData, bool, 1> rare_data_and_is_parsing_heritage_;
+ base::PointerWithPayload<RareData, bool, 1>
+ rare_data_and_is_parsing_heritage_;
Variable* class_variable_ = nullptr;
// These are only maintained when the scope is parsed, not when the
// scope is deserialized.
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 84015af362..be045b0e68 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -67,6 +67,13 @@ class AsAtomicImpl {
using AtomicStorageType = TAtomicStorageType;
template <typename T>
+ static T SeqCst_Load(T* addr) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return cast_helper<T>::to_return_type(
+ base::SeqCst_Load(to_storage_addr(addr)));
+ }
+
+ template <typename T>
static T Acquire_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
return cast_helper<T>::to_return_type(
@@ -81,6 +88,14 @@ class AsAtomicImpl {
}
template <typename T>
+ static void SeqCst_Store(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ base::SeqCst_Store(to_storage_addr(addr),
+ cast_helper<T>::to_storage_type(new_value));
+ }
+
+ template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 56fd5f3094..f6b516ad9e 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -241,6 +241,16 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
std::memory_order_acquire);
}
+inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_seq_cst);
+}
+
+inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_seq_cst);
+}
+
#if defined(V8_HOST_ARCH_64_BIT)
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
@@ -314,6 +324,11 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
std::memory_order_acquire);
}
+inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_seq_cst);
+}
+
#endif // defined(V8_HOST_ARCH_64_BIT)
inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
@@ -441,7 +456,7 @@ inline int Relaxed_Memcmp(volatile const Atomic8* s1,
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
-#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
+#if defined(V8_OS_DARWIN) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h
index 7b2796e3df..63142a20fa 100644
--- a/deps/v8/src/base/bit-field.h
+++ b/deps/v8/src/base/bit-field.h
@@ -16,7 +16,7 @@ namespace base {
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
// Instantiate them via 'using', which is cheaper than deriving a new class:
-// using MyBitField = base::BitField<int, 4, 2, MyEnum>;
+// using MyBitField = base::BitField<MyEnum, 4, 2>;
// The BitField class is final to enforce this style over derivation.
template <class T, int shift, int size, class U = uint32_t>
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index a51206aec6..37924a6d67 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -118,8 +118,7 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
MutexGuard guard(&mutex_);
Address address = reinterpret_cast<Address>(raw_address);
- size_t freed_size = region_allocator_.FreeRegion(address);
- if (freed_size != size) return false;
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
if (page_initialization_mode_ ==
PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
// When we are required to return zero-initialized pages, we decommit the
@@ -167,15 +166,15 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
if (page_initialization_mode_ ==
PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
// See comment in FreePages().
- return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
- free_size);
+ CHECK(page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+ free_size));
} else {
DCHECK_EQ(page_initialization_mode_,
PageInitializationMode::kAllocatedPagesCanBeUninitialized);
- return page_allocator_->SetPermissions(
- reinterpret_cast<void*>(free_address), free_size,
- PageAllocator::kNoAccess);
+ CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
+ free_size, PageAllocator::kNoAccess));
}
+ return true;
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index 07c5cda307..ade9aa2d34 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -27,7 +27,6 @@ enum class PageInitializationMode {
// pre-reserved region of virtual space. This class requires the virtual space
// to be kept reserved during the lifetime of this object.
// The main application of bounded page allocator are
-// - the V8 virtual memory cage
// - V8 heap pointer compression which requires the whole V8 heap to be
// allocated within a contiguous range of virtual address space,
// - executable page allocation, which allows to use PC-relative 32-bit code
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 3303916776..3befde51e7 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -154,8 +154,8 @@
#error Target architecture ia32 is only supported on ia32 host
#endif
#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \
- !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_64_BIT))
-#error Target architecture x64 is only supported on x64 host
+ !((V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64) && V8_HOST_ARCH_64_BIT))
+#error Target architecture x64 is only supported on x64 and arm64 host
#endif
#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \
!(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT))
@@ -222,7 +222,7 @@
#endif
// pthread_jit_write_protect is only available on arm64 Mac.
-#if defined(V8_OS_MACOSX) && !defined(V8_OS_IOS) && defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1
#else
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0
@@ -237,8 +237,9 @@ constexpr int kReturnAddressStackSlotCount =
V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
// Number of bits to represent the page size for paged spaces.
-#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
-// PPC has large (64KB) physical pages.
+#if (defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_PPC64)) && !defined(_AIX)
+// Native PPC linux has large (64KB) physical pages.
+// Simulator (and Aix) need to use the same value as x64.
const int kPageSizeBits = 19;
#elif defined(ENABLE_HUGEPAGE)
// When enabling huge pages, adjust V8 page size to take up exactly one huge
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index ab263c7e77..dc61f4bf11 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -50,6 +50,8 @@
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
#if V8_OS_WIN
+#include <windows.h>
+
#include "src/base/win32-headers.h"
#endif
@@ -85,7 +87,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \
- V8_HOST_ARCH_MIPS64
+ V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
#if V8_OS_LINUX
@@ -354,7 +356,7 @@ static bool HasListItem(const char* list, const char* item) {
#endif // V8_OS_LINUX
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 ||
- // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+ // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
#if defined(V8_OS_STARBOARD)
@@ -444,7 +446,8 @@ CPU::CPU()
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
is_running_in_vm_(false),
- has_msa_(false) {
+ has_msa_(false),
+ has_rvv_(false) {
memcpy(vendor_, "Unknown", 8);
#if defined(V8_OS_STARBOARD)
@@ -498,6 +501,9 @@ CPU::CPU()
has_avx_ = (cpu_info[2] & 0x10000000) != 0;
has_avx2_ = (cpu_info7[1] & 0x00000020) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
+ // CET shadow stack feature flag. See
+ // https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features
+ has_cetss_ = (cpu_info7[2] & 0x00000080) != 0;
// "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
// See https://lwn.net/Articles/301888/
// This is checking for any hypervisor. Hypervisors may choose not to
@@ -758,6 +764,13 @@ CPU::CPU()
// user-space.
has_non_stop_time_stamp_counter_ = true;
+ // Defined in winnt.h, but in a newer version of the Windows SDK than the one
+ // that V8 requires, so we must copy the value here.
+ constexpr int PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44;
+
+ has_jscvt_ =
+ IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE);
+
#elif V8_OS_LINUX
// Try to extract the list of CPU features from ELF hwcaps.
uint32_t hwcaps = ReadELFHWCaps();
@@ -770,7 +783,7 @@ CPU::CPU()
has_jscvt_ = HasListItem(features, "jscvt");
delete[] features;
}
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
// ARM64 Macs always have JSCVT.
has_jscvt_ = true;
#endif // V8_OS_WIN
@@ -854,7 +867,19 @@ CPU::CPU()
}
#endif // V8_OS_AIX
#endif // !USE_SIMULATOR
-#endif // V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
+
+#elif V8_HOST_ARCH_RISCV64
+ CPUInfo cpu_info;
+ char* features = cpu_info.ExtractField("isa");
+
+ if (HasListItem(features, "rv64imafdc")) {
+ has_fpu_ = true;
+ }
+ if (HasListItem(features, "rv64imafdcv")) {
+ has_fpu_ = true;
+ has_rvv_ = true;
+ }
+#endif // V8_HOST_ARCH_RISCV64
}
} // namespace base
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 9fcf90b3bc..3050f2c466 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -101,6 +101,7 @@ class V8_BASE_EXPORT CPU final {
bool has_lzcnt() const { return has_lzcnt_; }
bool has_popcnt() const { return has_popcnt_; }
bool is_atom() const { return is_atom_; }
+ bool has_cetss() const { return has_cetss_; }
bool has_non_stop_time_stamp_counter() const {
return has_non_stop_time_stamp_counter_;
}
@@ -127,6 +128,9 @@ class V8_BASE_EXPORT CPU final {
bool is_fp64_mode() const { return is_fp64_mode_; }
bool has_msa() const { return has_msa_; }
+ // riscv features
+ bool has_rvv() const { return has_rvv_; }
+
private:
#if defined(V8_OS_STARBOARD)
bool StarboardDetectCPU();
@@ -156,6 +160,7 @@ class V8_BASE_EXPORT CPU final {
bool has_sse41_;
bool has_sse42_;
bool is_atom_;
+ bool has_cetss_;
bool has_osxsave_;
bool has_avx_;
bool has_avx2_;
@@ -175,6 +180,7 @@ class V8_BASE_EXPORT CPU final {
bool has_non_stop_time_stamp_counter_;
bool is_running_in_vm_;
bool has_msa_;
+ bool has_rvv_;
};
} // namespace base
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 270f1ca4e0..b76c098d88 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -33,7 +33,7 @@
#include <cxxabi.h>
#include <execinfo.h>
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <AvailabilityMacros.h>
#endif
diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.cc b/deps/v8/src/base/emulated-virtual-address-subspace.cc
index fbfb125569..ae07a3cd96 100644
--- a/deps/v8/src/base/emulated-virtual-address-subspace.cc
+++ b/deps/v8/src/base/emulated-virtual-address-subspace.cc
@@ -16,7 +16,7 @@ EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
size_t total_size)
: VirtualAddressSpace(parent_space->page_size(),
parent_space->allocation_granularity(), base,
- total_size),
+ total_size, parent_space->max_page_permissions()),
mapped_size_(mapped_size),
parent_space_(parent_space),
region_allocator_(base, mapped_size, parent_space_->page_size()) {
@@ -30,7 +30,7 @@ EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
}
EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() {
- CHECK(parent_space_->FreePages(base(), mapped_size_));
+ parent_space_->FreePages(base(), mapped_size_);
}
void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) {
@@ -40,7 +40,7 @@ void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) {
Address EmulatedVirtualAddressSubspace::RandomPageAddress() {
MutexGuard guard(&mutex_);
- Address addr = base() + (rng_.NextInt64() % size());
+ Address addr = base() + (static_cast<uint64_t>(rng_.NextInt64()) % size());
return RoundDown(addr, allocation_granularity());
}
@@ -64,26 +64,27 @@ Address EmulatedVirtualAddressSubspace::AllocatePages(
// No luck or hint is outside of the mapped region. Try to allocate pages in
// the unmapped space using page allocation hints instead.
-
- // Somewhat arbitrary size limitation to ensure that the loop below for
- // finding a fitting base address hint terminates quickly.
- if (size >= (unmapped_size() / 2)) return kNullAddress;
+ if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress;
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
- // If the hint wouldn't result in the entire allocation being inside the
- // managed region, simply retry. There is at least a 50% chance of
- // getting a usable address due to the size restriction above.
+ // If an unmapped region exists, it must cover at least 50% of the whole
+ // space (unmapped + mapped region). Since we limit the size of allocation
+ // to 50% of the unmapped region (see IsUsableSizeForUnmappedRegion), a
+ // random page address has at least a 25% chance of being a usable base. As
+ // such, this loop should usually terminate quickly.
+ DCHECK_GE(unmapped_size(), mapped_size());
while (!UnmappedRegionContains(hint, size)) {
hint = RandomPageAddress();
}
+ hint = RoundDown(hint, alignment);
- Address region =
+ const Address result =
parent_space_->AllocatePages(hint, size, alignment, permissions);
- if (region && UnmappedRegionContains(region, size)) {
- return region;
- } else if (region) {
- CHECK(parent_space_->FreePages(region, size));
+ if (UnmappedRegionContains(result, size)) {
+ return result;
+ } else if (result) {
+ parent_space_->FreePages(result, size);
}
// Retry at a different address.
@@ -93,15 +94,49 @@ Address EmulatedVirtualAddressSubspace::AllocatePages(
return kNullAddress;
}
-bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
+void EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
if (MappedRegionContains(address, size)) {
MutexGuard guard(&mutex_);
- if (region_allocator_.FreeRegion(address) != size) return false;
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
CHECK(parent_space_->DecommitPages(address, size));
- return true;
+ } else {
+ DCHECK(UnmappedRegionContains(address, size));
+ parent_space_->FreePages(address, size);
}
- if (!UnmappedRegionContains(address, size)) return false;
- return parent_space_->FreePages(address, size);
+}
+
+Address EmulatedVirtualAddressSubspace::AllocateSharedPages(
+ Address hint, size_t size, PagePermissions permissions,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ // Can only allocate shared pages in the unmapped region.
+ if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress;
+
+ static constexpr int kMaxAttempts = 10;
+ for (int i = 0; i < kMaxAttempts; i++) {
+ // See AllocatePages() for why this loop usually terminates quickly.
+ DCHECK_GE(unmapped_size(), mapped_size());
+ while (!UnmappedRegionContains(hint, size)) {
+ hint = RandomPageAddress();
+ }
+
+ Address region = parent_space_->AllocateSharedPages(hint, size, permissions,
+ handle, offset);
+ if (UnmappedRegionContains(region, size)) {
+ return region;
+ } else if (region) {
+ parent_space_->FreeSharedPages(region, size);
+ }
+
+ hint = RandomPageAddress();
+ }
+
+ return kNullAddress;
+}
+
+void EmulatedVirtualAddressSubspace::FreeSharedPages(Address address,
+ size_t size) {
+ DCHECK(UnmappedRegionContains(address, size));
+ parent_space_->FreeSharedPages(address, size);
}
bool EmulatedVirtualAddressSubspace::SetPagePermissions(
@@ -110,6 +145,27 @@ bool EmulatedVirtualAddressSubspace::SetPagePermissions(
return parent_space_->SetPagePermissions(address, size, permissions);
}
+bool EmulatedVirtualAddressSubspace::AllocateGuardRegion(Address address,
+ size_t size) {
+ if (MappedRegionContains(address, size)) {
+ MutexGuard guard(&mutex_);
+ return region_allocator_.AllocateRegionAt(address, size);
+ }
+ if (!UnmappedRegionContains(address, size)) return false;
+ return parent_space_->AllocateGuardRegion(address, size);
+}
+
+void EmulatedVirtualAddressSubspace::FreeGuardRegion(Address address,
+ size_t size) {
+ if (MappedRegionContains(address, size)) {
+ MutexGuard guard(&mutex_);
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ } else {
+ DCHECK(UnmappedRegionContains(address, size));
+ parent_space_->FreeGuardRegion(address, size);
+ }
+}
+
bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
// This is not supported, mostly because it's not (yet) needed in practice.
return false;
@@ -118,7 +174,7 @@ bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
std::unique_ptr<v8::VirtualAddressSpace>
EmulatedVirtualAddressSubspace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) {
+ PagePermissions max_page_permissions) {
UNREACHABLE();
}
diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.h b/deps/v8/src/base/emulated-virtual-address-subspace.h
index 480c3e1ae0..c507835550 100644
--- a/deps/v8/src/base/emulated-virtual-address-subspace.h
+++ b/deps/v8/src/base/emulated-virtual-address-subspace.h
@@ -48,16 +48,27 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
- bool FreePages(Address address, size_t size) override;
+ void FreePages(Address address, size_t size) override;
+
+ Address AllocateSharedPages(Address hint, size_t size,
+ PagePermissions permissions,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) override;
+
+ void FreeSharedPages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
+ bool AllocateGuardRegion(Address address, size_t size) override;
+
+ void FreeGuardRegion(Address address, size_t size) override;
+
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) override;
+ PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
@@ -88,6 +99,13 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
return Contains(unmapped_base(), unmapped_size(), addr, length);
}
+ // Helper function to define a limit for the size of allocations in the
+ // unmapped region. This limit makes it possible to estimate the expected
+ // runtime of some loops in the Allocate methods.
+ bool IsUsableSizeForUnmappedRegion(size_t size) const {
+ return size <= (unmapped_size() / 2);
+ }
+
// Size of the mapped region located at the beginning of this address space.
const size_t mapped_size_;
diff --git a/deps/v8/src/base/immediate-crash.h b/deps/v8/src/base/immediate-crash.h
index ef1f922317..770cb273f9 100644
--- a/deps/v8/src/base/immediate-crash.h
+++ b/deps/v8/src/base/immediate-crash.h
@@ -42,7 +42,7 @@
#if V8_CC_GNU
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32
// TODO(https://crbug.com/958675): In theory, it should be possible to use just
// int3. However, there are a number of crashes with SIGILL as the exception
@@ -50,13 +50,13 @@
// to continue after SIGTRAP.
#define TRAP_SEQUENCE1_() asm volatile("int3")
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// Intentionally empty: __builtin_unreachable() is always part of the sequence
// (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
#define TRAP_SEQUENCE2_() asm volatile("")
#else
#define TRAP_SEQUENCE2_() asm volatile("ud2")
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
#elif V8_HOST_ARCH_ARM
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 3a73afc1ce..fcb9f8756f 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -313,7 +313,7 @@ V8_INLINE A implicit_cast(A x) {
#endif
// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#undef V8PRIdPTR
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 2956bf1475..d2a8621b5f 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -7,7 +7,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <sys/mman.h> // For MAP_JIT.
#endif
@@ -132,13 +132,15 @@ void* PageAllocator::RemapShared(void* old_address, void* new_address,
}
bool PageAllocator::FreePages(void* address, size_t size) {
- return base::OS::Free(address, size);
+ base::OS::Free(address, size);
+ return true;
}
bool PageAllocator::ReleasePages(void* address, size_t size, size_t new_size) {
DCHECK_LT(new_size, size);
- return base::OS::Release(reinterpret_cast<uint8_t*>(address) + new_size,
- size - new_size);
+ base::OS::Release(reinterpret_cast<uint8_t*>(address) + new_size,
+ size - new_size);
+ return true;
}
bool PageAllocator::SetPermissions(void* address, size_t size,
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 5ab66d39a4..b7b21c9947 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -40,7 +40,7 @@ ConditionVariable::ConditionVariable() {
ConditionVariable::~ConditionVariable() {
-#if defined(V8_OS_MACOSX)
+#if defined(V8_OS_DARWIN)
// This hack is necessary to avoid a fatal pthreads subsystem bug in the
// Darwin kernel. http://crbug.com/517681.
{
@@ -86,7 +86,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
struct timespec ts;
int result;
mutex->AssertHeldAndUnmark();
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// Mac OS X provides pthread_cond_timedwait_relative_np(), which does
// not depend on the real time clock, which is what you really WANT here!
ts = rel_time.ToTimespec();
@@ -111,7 +111,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
ts = end_time.ToTimespec();
result = pthread_cond_timedwait(
&native_handle_, &mutex->native_handle(), &ts);
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
mutex->AssertUnheldAndMark();
if (result == ETIMEDOUT) {
return false;
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index 2947c31237..c5ac56043d 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -116,7 +116,7 @@ class ElapsedTimer final {
private:
static V8_INLINE TimeTicks Now() {
- TimeTicks now = TimeTicks::HighResolutionNow();
+ TimeTicks now = TimeTicks::Now();
DCHECK(!now.IsNull());
return now;
}
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index 7bf60996ee..423ab0d98a 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -222,7 +222,7 @@ bool RecursiveMutex::TryLock() {
return true;
}
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
SharedMutex::SharedMutex() { InitializeNativeHandle(&native_handle_); }
@@ -251,7 +251,7 @@ bool SharedMutex::TryLockExclusive() {
return true;
}
-#else // !V8_OS_MACOSX
+#else // !V8_OS_DARWIN
SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); }
@@ -301,7 +301,7 @@ bool SharedMutex::TryLockExclusive() {
return result;
}
-#endif // !V8_OS_MACOSX
+#endif // !V8_OS_DARWIN
#elif V8_OS_WIN
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 5fefa25ab6..ce13d8d763 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -265,7 +265,7 @@ class V8_BASE_EXPORT SharedMutex final {
private:
// The implementation-defined native handle type.
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// pthread_rwlock_t is broken on MacOS when signals are being sent to the
// process (see https://crbug.com/v8/11399). Until Apple fixes that in the OS,
// we have to fall back to a non-shared mutex.
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index b27bfbc8bc..9c9adda389 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -164,5 +164,43 @@ Stack::StackSlot Stack::GetStackStart() {
return reinterpret_cast<void*>(buf.__pi_stackend);
}
+// static
+bool OS::DecommitPages(void* address, size_t size) {
+ // The difference between this implementation and the alternative under
+ // platform-posix.cc is that on AIX, calling mmap on a pre-designated address
+ // with MAP_FIXED will fail and return -1 unless the application has requested
+ // SPEC1170 compliant behaviour:
+ // https://www.ibm.com/docs/en/aix/7.3?topic=m-mmap-mmap64-subroutine
+ // Therefore in case if failure we need to unmap the address before trying to
+ // map it again. The downside is another thread could place another mapping at
+ // the same address after the munmap but before the mmap, therefore a CHECK is
+ // also added to assure the address is mapped successfully. Refer to the
+ // comments under https://crrev.com/c/3010195 for more details.
+#define MMAP() \
+ mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ void* ptr;
+ // Try without mapping first.
+ ptr = MMAP();
+ if (ptr != address) {
+ DCHECK_EQ(ptr, MAP_FAILED);
+ // Returns 0 when successful.
+ if (munmap(address, size)) {
+ return false;
+ }
+ // Try again after unmap.
+ ptr = MMAP();
+ // If this check fails it's most likely due to a racing condition where
+ // another thread has mapped the same address right before we do.
+ // Since this could cause hard-to-debug issues, potentially with security
+ // impact, and we can't recover from this, the best we can do is abort the
+ // process.
+ CHECK_EQ(ptr, address);
+ }
+#undef MMAP
+ return true;
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 5aae01c9c4..0875bf263c 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -118,7 +118,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
- CHECK(Free(base, size));
+ Free(base, size);
// Clear the hint. It's unlikely we can allocate at this address.
hint = nullptr;
@@ -134,7 +134,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
- CHECK(Free(base, padded_size));
+ Free(base, padded_size);
aligned_base = RoundUp(base, alignment);
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
@@ -147,18 +147,18 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
}
// static
-bool OS::Free(void* address, const size_t size) {
+void OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, static_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
USE(size);
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
+ CHECK_NE(0, VirtualFree(address, 0, MEM_RELEASE));
}
// static
-bool OS::Release(void* address, size_t size) {
+void OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ CHECK_NE(0, VirtualFree(address, size, MEM_DECOMMIT));
}
// static
diff --git a/deps/v8/src/base/platform/platform-darwin.cc b/deps/v8/src/base/platform/platform-darwin.cc
new file mode 100644
index 0000000000..bf360e3136
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-darwin.cc
@@ -0,0 +1,107 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code shared between macOS and iOS goes here. The macOS
+// specific part is in platform-macos.cc, the POSIX-compatible parts in
+// platform-posix.cc.
+
+#include <AvailabilityMacros.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+#include <mach/mach.h>
+#include <mach/mach_init.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/vm_statistics.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
+#include "src/base/platform/platform-posix.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ unsigned int images_count = _dyld_image_count();
+ for (unsigned int i = 0; i < images_count; ++i) {
+ const mach_header* header = _dyld_get_image_header(i);
+ if (header == nullptr) continue;
+#if V8_HOST_ARCH_I32
+ unsigned int size;
+ char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#else
+ uint64_t size;
+ char* code_ptr = getsectdatafromheader_64(
+ reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
+ &size);
+#endif
+ if (code_ptr == nullptr) continue;
+ const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
+ const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+ result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+ start + size, slide));
+ }
+ return result;
+}
+
+void OS::SignalCodeMovingGC() {}
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
+
+void OS::AdjustSchedulingParams() {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ {
+ // Check availability of scheduling params.
+ uint32_t val = 0;
+ size_t valSize = sizeof(val);
+ int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0);
+ if (rc < 0 || !val) return;
+ }
+
+ {
+ // Adjust scheduling params.
+ uint32_t val = 1;
+ int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val));
+ DCHECK_GE(rc, 0);
+ USE(rc);
+ }
+#endif
+}
+
+std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
+ OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
+ size_t alignment) {
+ return {};
+}
+
+// static
+Stack::StackSlot Stack::GetStackStart() {
+ return pthread_get_stackaddr_np(pthread_self());
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index f090ea5b6a..1f4c35cca4 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <fuchsia/kernel/cpp/fidl.h>
+#include <lib/fdio/directory.h>
#include <lib/zx/resource.h>
#include <lib/zx/thread.h>
#include <lib/zx/vmar.h>
@@ -18,6 +20,27 @@ namespace base {
namespace {
+static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID;
+
+static void* g_root_vmar_base = nullptr;
+
+#ifdef V8_USE_VMEX_RESOURCE
+void SetVmexResource() {
+ DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID);
+ zx::resource vmex_resource;
+ fuchsia::kernel::VmexResourceSyncPtr vmex_resource_svc;
+ zx_status_t status = fdio_service_connect(
+ "/svc/fuchsia.kernel.VmexResource",
+ vmex_resource_svc.NewRequest().TakeChannel().release());
+ DCHECK_EQ(status, ZX_OK);
+ status = vmex_resource_svc->Get(&vmex_resource);
+ USE(status);
+ DCHECK_EQ(status, ZX_OK);
+ DCHECK(vmex_resource.is_valid());
+ g_vmex_resource = vmex_resource.release();
+}
+#endif
+
zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
@@ -56,29 +79,22 @@ zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
return alignment_log2 << ZX_VM_ALIGN_BASE;
}
-void* AllocateInternal(const zx::vmar& vmar, size_t page_size,
- size_t vmar_offset, bool vmar_offset_is_hint,
- size_t size, size_t alignment,
- OS::MemoryPermission access) {
+enum class PlacementMode {
+ // Attempt to place the object at the provided address, otherwise elsewhere.
+ kUseHint,
+ // Place the object anywhere it fits.
+ kAnywhere,
+ // Place the object at the provided address, otherwise fail.
+ kFixed
+};
+
+void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
+ void* address, const zx::vmo& vmo, uint64_t offset,
+ PlacementMode placement, size_t size, size_t alignment,
+ OS::MemoryPermission access) {
DCHECK_EQ(0, size % page_size);
- DCHECK_EQ(0, alignment % page_size);
- DCHECK_EQ(0, vmar_offset % page_size);
-
- zx::vmo vmo;
- if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
- return nullptr;
- }
- static const char kVirtualMemoryName[] = "v8-virtualmem";
- vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
- strlen(kVirtualMemoryName));
-
- // Always call zx_vmo_replace_as_executable() in case the memory will need
- // to be marked as executable in the future.
- // TOOD(https://crbug.com/v8/8899): Only call this when we know that the
- // region will need to be marked as executable in the future.
- if (vmo.replace_as_executable(zx::resource(), &vmo) != ZX_OK) {
- return nullptr;
- }
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
+ DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
@@ -86,30 +102,60 @@ void* AllocateInternal(const zx::vmar& vmar, size_t page_size,
CHECK_NE(0, alignment_option); // Invalid alignment specified
options |= alignment_option;
- if (vmar_offset != 0) {
+ size_t vmar_offset = 0;
+ if (placement != PlacementMode::kAnywhere) {
+ // Try placing the mapping at the specified address.
+ uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
+ uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
+ DCHECK_GE(target_addr, base);
+ vmar_offset = target_addr - base;
options |= ZX_VM_SPECIFIC;
}
- zx_vaddr_t address;
- zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &address);
+ zx_vaddr_t result;
+ zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &result);
- if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
- // If a vmar_offset was specified and the allocation failed (for example,
- // because the offset overlapped another mapping), then we should retry
- // again without a vmar_offset if that offset was just meant to be a hint.
+ if (status != ZX_OK && placement == PlacementMode::kUseHint) {
+ // If a placement hint was specified but couldn't be used (for example,
+ // because the offset overlapped another mapping), then retry again without
+ // a vmar_offset to let the kernel pick another location.
options &= ~(ZX_VM_SPECIFIC);
- status = vmar.map(options, 0, vmo, 0, size, &address);
+ status = vmar.map(options, 0, vmo, 0, size, &result);
}
if (status != ZX_OK) {
return nullptr;
}
- return reinterpret_cast<void*>(address);
+ return reinterpret_cast<void*>(result);
+}
+
+void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
+ void* address, PlacementMode placement, size_t size,
+ size_t alignment, OS::MemoryPermission access) {
+ zx::vmo vmo;
+ if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
+ return nullptr;
+ }
+ static const char kVirtualMemoryName[] = "v8-virtualmem";
+ vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
+ strlen(kVirtualMemoryName));
+
+ // Always call zx_vmo_replace_as_executable() in case the memory will need
+ // to be marked as executable in the future.
+ // TOOD(https://crbug.com/v8/8899): Only call this when we know that the
+ // region will need to be marked as executable in the future.
+ zx::unowned_resource vmex(g_vmex_resource);
+ if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
+ return nullptr;
+ }
+
+ return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size,
+ alignment, access);
}
-bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address,
- const size_t size) {
+bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address,
+ size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_EQ(0, size % page_size);
return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
@@ -135,13 +181,14 @@ bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
}
zx_status_t CreateAddressSpaceReservationInternal(
- const zx::vmar& vmar, size_t page_size, size_t vmar_offset,
- bool vmar_offset_is_hint, size_t size, size_t alignment,
+ const zx::vmar& vmar, void* vmar_base, size_t page_size, void* address,
+ PlacementMode placement, size_t size, size_t alignment,
OS::MemoryPermission max_permission, zx::vmar* child,
zx_vaddr_t* child_addr) {
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
- DCHECK_EQ(0, vmar_offset % page_size);
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % alignment);
+ DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
// TODO(v8) determine these based on max_permission.
zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
@@ -151,16 +198,22 @@ zx_status_t CreateAddressSpaceReservationInternal(
CHECK_NE(0, alignment_option); // Invalid alignment specified
options |= alignment_option;
- if (vmar_offset != 0) {
+ size_t vmar_offset = 0;
+ if (placement != PlacementMode::kAnywhere) {
+ // Try placing the mapping at the specified address.
+ uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
+ uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
+ DCHECK_GE(target_addr, base);
+ vmar_offset = target_addr - base;
options |= ZX_VM_SPECIFIC;
}
zx_status_t status =
vmar.allocate(options, vmar_offset, size, child, child_addr);
- if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
- // If a vmar_offset was specified and the allocation failed (for example,
- // because the offset overlapped another mapping), then we should retry
- // again without a vmar_offset if that offset was just meant to be a hint.
+ if (status != ZX_OK && placement == PlacementMode::kUseHint) {
+ // If a placement hint was specified but couldn't be used (for example,
+ // because the offset overlapped another mapping), then retry again without
+ // a vmar_offset to let the kernel pick another location.
options &= ~(ZX_VM_SPECIFIC);
status = vmar.allocate(options, 0, size, child, child_addr);
}
@@ -175,23 +228,55 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
// static
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
+ PosixInitializeCommon(hard_abort, gc_fake_mmap);
+
+ // Determine base address of root VMAR.
+ zx_info_vmar_t info;
+ zx_status_t status = zx::vmar::root_self()->get_info(
+ ZX_INFO_VMAR, &info, sizeof(info), nullptr, nullptr);
+ CHECK_EQ(ZX_OK, status);
+ g_root_vmar_base = reinterpret_cast<void*>(info.base);
+
+#ifdef V8_USE_VMEX_RESOURCE
+ SetVmexResource();
+#endif
+}
+
+// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
- constexpr bool vmar_offset_is_hint = true;
- DCHECK_EQ(0, reinterpret_cast<Address>(address) % alignment);
- return AllocateInternal(*zx::vmar::root_self(), AllocatePageSize(),
- reinterpret_cast<uint64_t>(address),
- vmar_offset_is_hint, size, alignment, access);
+ PlacementMode placement =
+ address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
+ return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base,
+ AllocatePageSize(), address, placement, size,
+ alignment, access);
}
// static
-bool OS::Free(void* address, const size_t size) {
- return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address,
- size);
+void OS::Free(void* address, size_t size) {
+ CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
}
// static
-bool OS::Release(void* address, size_t size) { return Free(address, size); }
+void* OS::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ PlacementMode placement =
+ address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
+ zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
+ return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(),
+ address, *vmo, offset, placement, size, AllocatePageSize(),
+ access);
+}
+
+// static
+void OS::FreeShared(void* address, size_t size) {
+ CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
+}
+
+// static
+void OS::Release(void* address, size_t size) { Free(address, size); }
// static
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
@@ -224,22 +309,37 @@ Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
DCHECK_EQ(0, reinterpret_cast<Address>(hint) % alignment);
zx::vmar child;
zx_vaddr_t child_addr;
- uint64_t vmar_offset = reinterpret_cast<uint64_t>(hint);
- constexpr bool vmar_offset_is_hint = true;
+ PlacementMode placement =
+ hint != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
zx_status_t status = CreateAddressSpaceReservationInternal(
- *zx::vmar::root_self(), AllocatePageSize(), vmar_offset,
- vmar_offset_is_hint, size, alignment, max_permission, &child,
- &child_addr);
+ *zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), hint,
+ placement, size, alignment, max_permission, &child, &child_addr);
if (status != ZX_OK) return {};
return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
child.release());
}
// static
-bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
// Destroy the vmar and release the handle.
zx::vmar vmar(reservation.vmar_);
- return vmar.destroy() == ZX_OK;
+ CHECK_EQ(ZX_OK, vmar.destroy());
+}
+
+// static
+PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+ zx::vmo vmo;
+ if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
+ return kInvalidSharedMemoryHandle;
+ }
+ return SharedMemoryHandleFromVMO(vmo.release());
+}
+
+// static
+void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ DCHECK_NE(kInvalidSharedMemoryHandle, handle);
+ zx_handle_t vmo = VMOFromSharedMemoryHandle(handle);
+ zx_handle_close(vmo);
}
// static
@@ -287,16 +387,10 @@ Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
zx::vmar child;
zx_vaddr_t child_addr;
- size_t vmar_offset = 0;
- if (address != 0) {
- vmar_offset =
- reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
- }
- constexpr bool vmar_offset_is_hint = false;
zx_status_t status = CreateAddressSpaceReservationInternal(
- *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
- vmar_offset_is_hint, size, OS::AllocatePageSize(), max_permission, &child,
- &child_addr);
+ *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
+ PlacementMode::kFixed, size, OS::AllocatePageSize(), max_permission,
+ &child, &child_addr);
if (status != ZX_OK) return {};
DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
@@ -305,29 +399,41 @@ Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
bool AddressSpaceReservation::FreeSubReservation(
AddressSpaceReservation reservation) {
- return OS::FreeAddressSpaceReservation(reservation);
+ OS::FreeAddressSpaceReservation(reservation);
+ return true;
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
- size_t vmar_offset = 0;
- if (address != 0) {
- vmar_offset =
- reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
- }
- constexpr bool vmar_offset_is_hint = false;
- void* allocation = AllocateInternal(
- *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
- vmar_offset_is_hint, size, OS::AllocatePageSize(), access);
+ void* allocation = CreateAndMapVmo(
+ *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
+ PlacementMode::kFixed, size, OS::AllocatePageSize(), access);
DCHECK(!allocation || allocation == address);
return allocation != nullptr;
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
DCHECK(Contains(address, size));
- return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
- size);
+ return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
+ size);
+}
+
+bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) {
+ DCHECK(Contains(address, size));
+ zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
+ return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(),
+ address, *vmo, offset, PlacementMode::kFixed, size,
+ OS::AllocatePageSize(), access);
+}
+
+bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
+ size);
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 3ab88060f5..370facf141 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -138,7 +138,7 @@ void OS::SignalCodeMovingGC() {
void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
- CHECK(Free(addr, size));
+ Free(addr, size);
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index d1675bdc44..bba8d3c699 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -2,106 +2,104 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Platform-specific code for MacOS goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
+// Platform-specific code for MacOS goes here. Code shared between iOS and
+// macOS is in platform-darwin.cc, while the POSIX-compatible are in in
+// platform-posix.cc.
-#include <dlfcn.h>
-#include <mach/mach_init.h>
-#include <mach-o/dyld.h>
-#include <mach-o/getsect.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <AvailabilityMacros.h>
-
-#include <errno.h>
-#include <libkern/OSAtomic.h>
#include <mach/mach.h>
-#include <mach/semaphore.h>
-#include <mach/task.h>
-#include <mach/vm_statistics.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/resource.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/base/macros.h"
-#include "src/base/platform/platform-posix-time.h"
-#include "src/base/platform/platform-posix.h"
+#include <mach/mach_vm.h>
+#include <mach/vm_map.h>
+
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- unsigned int images_count = _dyld_image_count();
- for (unsigned int i = 0; i < images_count; ++i) {
- const mach_header* header = _dyld_get_image_header(i);
- if (header == nullptr) continue;
-#if V8_HOST_ARCH_I32
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#else
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
- &size);
-#endif
- if (code_ptr == nullptr) continue;
- const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
- const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
- start + size, slide));
+namespace {
+
+vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ case OS::MemoryPermission::kNoAccessWillJitLater:
+ return VM_PROT_NONE;
+ case OS::MemoryPermission::kRead:
+ return VM_PROT_READ;
+ case OS::MemoryPermission::kReadWrite:
+ return VM_PROT_READ | VM_PROT_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ case OS::MemoryPermission::kReadExecute:
+ return VM_PROT_READ | VM_PROT_EXECUTE;
}
- return result;
+ UNREACHABLE();
}
-void OS::SignalCodeMovingGC() {}
-
-TimezoneCache* OS::CreateTimezoneCache() {
- return new PosixDefaultTimezoneCache();
+kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
+ mach_vm_size_t size, int flags,
+ mach_port_t port,
+ memory_object_offset_t offset,
+ vm_prot_t prot) {
+ vm_prot_t current_prot = prot;
+ vm_prot_t maximum_prot = current_prot;
+ return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
+ FALSE, current_prot, maximum_prot, VM_INHERIT_NONE);
}
-void OS::AdjustSchedulingParams() {
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
- {
- // Check availability of scheduling params.
- uint32_t val = 0;
- size_t valSize = sizeof(val);
- int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0);
- if (rc < 0 || !val) return;
- }
+} // namespace
- {
- // Adjust scheduling params.
- uint32_t val = 1;
- int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val));
- DCHECK_GE(rc, 0);
- USE(rc);
- }
-#endif
+// static
+PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+ mach_vm_size_t vm_size = size;
+ mach_port_t port;
+ kern_return_t kr = mach_make_memory_entry_64(
+ mach_task_self(), &vm_size, 0,
+ MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
+ MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
+ return SharedMemoryHandleFromMachMemoryEntry(port);
}
-std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
- OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
- size_t alignment) {
- return {};
+// static
+void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ DCHECK_NE(kInvalidSharedMemoryHandle, handle);
+ mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
}
// static
-Stack::StackSlot Stack::GetStackStart() {
- return pthread_get_stackaddr_np(pthread_self());
+void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK_EQ(0, size % AllocatePageSize());
+
+ mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
+ vm_prot_t prot = GetVMProtFromMemoryPermission(access);
+ mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
+ shared_mem_port, offset, prot);
+
+ if (kr != KERN_SUCCESS) {
+ // Retry without hint.
+ kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
+ offset, prot);
+ }
+
+ if (kr != KERN_SUCCESS) return nullptr;
+ return reinterpret_cast<void*>(addr);
+}
+
+bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) {
+ DCHECK(Contains(address, size));
+
+ vm_prot_t prot = GetVMProtFromMemoryPermission(access);
+ mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
+ mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ kern_return_t kr =
+ mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ shared_mem_port, offset, prot);
+ return kr == KERN_SUCCESS;
}
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index f15800aa87..325c40aec8 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -116,7 +116,7 @@ void OS::SignalCodeMovingGC() {
void* addr =
mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
DCHECK(addr != MAP_FAILED);
- CHECK(OS::Free(addr, size));
+ OS::Free(addr, size);
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 155af37155..280d7f88f8 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -12,15 +12,15 @@
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h> // for pthread_set_name_np
#endif
+#include <fcntl.h>
#include <sched.h> // for sched_yield
#include <stdio.h>
-#include <time.h>
-#include <unistd.h>
-
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/sysctl.h> // for sysctl
@@ -46,8 +46,11 @@
#include <atomic>
#endif
-#if V8_OS_MACOSX
-#include <dlfcn.h>
+#if V8_OS_DARWIN || V8_OS_LINUX
+#include <dlfcn.h> // for dlsym
+#endif
+
+#if V8_OS_DARWIN
#include <mach/mach.h>
#endif
@@ -65,7 +68,7 @@
#include <sys/syscall.h>
#endif
-#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
+#if V8_OS_FREEBSD || V8_OS_DARWIN || V8_OS_OPENBSD || V8_OS_SOLARIS
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -102,16 +105,16 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
#if !V8_OS_FUCHSIA
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
-#else // !V8_OS_MACOSX
+#else // !V8_OS_DARWIN
const int kMmapFd = -1;
-#endif // !V8_OS_MACOSX
+#endif // !V8_OS_DARWIN
-#if defined(V8_TARGET_OS_MACOSX) && V8_HOST_ARCH_ARM64
+#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
// During snapshot generation in cross builds, sysconf() runs on the Intel
// host and returns host page size, while the snapshot needs to use the
// target page size.
@@ -153,7 +156,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
flags |= MAP_LAZY;
#endif // V8_OS_QNX
}
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// MAP_JIT is required to obtain writable and executable pages when the
// hardened runtime/memory protection is enabled, which is optional (via code
// signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
@@ -161,7 +164,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
flags |= MAP_JIT;
}
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
return flags;
}
@@ -237,11 +240,17 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
#endif
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
+void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
+#if !V8_OS_FUCHSIA
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
+ PosixInitializeCommon(hard_abort, gc_fake_mmap);
+}
+#endif // !V8_OS_FUCHSIA
+
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -263,7 +272,7 @@ int OS::ActivationFrameAlignment() {
// static
size_t OS::AllocatePageSize() {
-#if defined(V8_TARGET_OS_MACOSX) && V8_HOST_ARCH_ARM64
+#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
return kAppleArmPageSize;
#else
static size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
@@ -293,7 +302,7 @@ void* OS::GetRandomMmapAddr() {
GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
}
#if V8_HOST_ARCH_ARM64
-#if defined(V8_TARGET_OS_MACOSX)
+#if defined(V8_TARGET_OS_MACOS)
DCHECK_EQ(1 << 14, AllocatePageSize());
#endif
// Keep the address page-aligned, AArch64 supports 4K, 16K and 64K
@@ -400,14 +409,14 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
- CHECK(Free(base, prefix_size));
+ Free(base, prefix_size);
request_size -= prefix_size;
}
// Unmap memory allocated after the potentially unaligned end.
if (size != request_size) {
DCHECK_LT(size, request_size);
size_t suffix_size = request_size - size;
- CHECK(Free(aligned_base + size, suffix_size));
+ Free(aligned_base + size, suffix_size);
request_size -= suffix_size;
}
@@ -422,17 +431,37 @@ void* OS::AllocateShared(size_t size, MemoryPermission access) {
}
// static
-bool OS::Free(void* address, const size_t size) {
+void OS::Free(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
- return munmap(address, size) == 0;
+ CHECK_EQ(0, munmap(address, size));
}
+// macOS specific implementation in platform-macos.cc.
+#if !defined(V8_OS_MACOS)
// static
-bool OS::Release(void* address, size_t size) {
+void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK_EQ(0, size % AllocatePageSize());
+ int prot = GetProtectionFromMemoryPermission(access);
+ int fd = FileDescriptorFromSharedMemoryHandle(handle);
+ void* result = mmap(hint, size, prot, MAP_SHARED, fd, offset);
+ if (result == MAP_FAILED) return nullptr;
+ return result;
+}
+#endif // !defined(V8_OS_MACOS)
+
+// static
+void OS::FreeShared(void* address, size_t size) {
+ DCHECK_EQ(0, size % AllocatePageSize());
+ CHECK_EQ(0, munmap(address, size));
+}
+
+// static
+void OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
- return munmap(address, size) == 0;
+ CHECK_EQ(0, munmap(address, size));
}
// static
@@ -445,7 +474,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// MacOS 11.2 on Apple Silicon refuses to switch permissions from
// rwx to none. Just use madvise instead.
-#if defined(V8_OS_MACOSX)
+#if defined(V8_OS_DARWIN)
if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
ret = madvise(address, size, MADV_FREE_REUSABLE);
return ret == 0;
@@ -463,7 +492,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// The cost is a syscall that effectively no-ops.
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
-#if defined(V8_OS_MACOSX)
+#if defined(V8_OS_DARWIN)
if (access != OS::MemoryPermission::kNoAccess)
madvise(address, size, MADV_FREE_REUSE);
#endif
@@ -473,33 +502,33 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
+ // Roughly based on PartitionAlloc's DiscardSystemPagesInternal
+ // (base/allocator/partition_allocator/page_allocator_internals_posix.h)
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
-#if defined(V8_OS_MACOSX)
+#if defined(V8_OS_DARWIN)
// On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
// marks the pages with the reusable bit, which allows both Activity Monitor
// and memory-infra to correctly track the pages.
int ret = madvise(address, size, MADV_FREE_REUSABLE);
+ if (ret) {
+ // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
+ ret = madvise(address, size, MADV_DONTNEED);
+ }
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
-#else
- int ret = madvise(address, size, MADV_FREE);
-#endif
if (ret != 0 && errno == ENOSYS)
return true; // madvise is not available on all systems.
- if (ret != 0 && errno == EINVAL) {
-// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
-// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
-// imply runtime support.
-#if defined(_AIX) || defined(V8_OS_SOLARIS)
+ if (ret != 0 && errno == EINVAL)
ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
- ret = madvise(address, size, MADV_DONTNEED);
+ int ret = madvise(address, size, MADV_DONTNEED);
#endif
- }
return ret == 0;
}
+#if !defined(_AIX)
+// See AIX version for details.
// static
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
@@ -514,6 +543,7 @@ bool OS::DecommitPages(void* address, size_t size) {
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
return ptr == address;
}
+#endif // !defined(_AIX)
// static
bool OS::CanReserveAddressSpace() { return true; }
@@ -541,13 +571,48 @@ Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
}
// static
-bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
- return Free(reservation.base(), reservation.size());
+void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+ Free(reservation.base(), reservation.size());
+}
+
+// macOS specific implementation in platform-macos.cc.
+#if !defined(V8_OS_MACOS)
+// static
+// Need to disable CFI_ICALL due to the indirect call to memfd_create.
+DISABLE_CFI_ICALL
+PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+#if V8_OS_LINUX && !V8_OS_ANDROID
+ // Use memfd_create if available, otherwise mkstemp.
+ using memfd_create_t = int (*)(const char*, unsigned int);
+ memfd_create_t memfd_create =
+ reinterpret_cast<memfd_create_t>(dlsym(RTLD_DEFAULT, "memfd_create"));
+ int fd = -1;
+ if (memfd_create) {
+ fd = memfd_create("V8MemFDForTesting", MFD_CLOEXEC);
+ } else {
+ char filename[] = "/tmp/v8_tmp_file_for_testing_XXXXXX";
+ fd = mkstemp(filename);
+ if (fd != -1) CHECK_EQ(0, unlink(filename));
+ }
+ if (fd == -1) return kInvalidSharedMemoryHandle;
+ CHECK_EQ(0, ftruncate(fd, size));
+ return SharedMemoryHandleFromFileDescriptor(fd);
+#else
+ return kInvalidSharedMemoryHandle;
+#endif
+}
+
+// static
+void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ DCHECK_NE(kInvalidSharedMemoryHandle, handle);
+ int fd = FileDescriptorFromSharedMemoryHandle(handle);
+ CHECK_EQ(0, close(fd));
}
+#endif // !defined(V8_OS_MACOS)
// static
bool OS::HasLazyCommits() {
-#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
+#if V8_OS_AIX || V8_OS_LINUX || V8_OS_DARWIN
return true;
#else
// TODO(bbudge) Return true for all POSIX platforms.
@@ -669,7 +734,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())));
+ if (memory_) OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize()));
fclose(file_);
}
@@ -680,7 +745,7 @@ int OS::GetCurrentProcessId() {
int OS::GetCurrentThreadId() {
-#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
+#if V8_OS_DARWIN || (V8_OS_ANDROID && defined(__APPLE__))
return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
return static_cast<int>(syscall(__NR_gettid));
@@ -885,6 +950,12 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
// The region is already mmap'ed, so it just has to be made accessible now.
DCHECK(Contains(address, size));
+ if (access == OS::MemoryPermission::kNoAccess) {
+ // Nothing to do. We don't want to call SetPermissions with kNoAccess here
+ // as that will for example mark the pages as discardable, which is
+ // probably not desired here.
+ return true;
+ }
return OS::SetPermissions(address, size, access);
}
@@ -893,6 +964,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) {
return OS::DecommitPages(address, size);
}
+// macOS specific implementation in platform-macos.cc.
+#if !defined(V8_OS_MACOS)
+bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) {
+ DCHECK(Contains(address, size));
+ int prot = GetProtectionFromMemoryPermission(access);
+ int fd = FileDescriptorFromSharedMemoryHandle(handle);
+ return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) !=
+ MAP_FAILED;
+}
+#endif // !defined(V8_OS_MACOS)
+
+bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0) == address;
+}
+
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
@@ -944,7 +1035,7 @@ static void SetThreadName(const char* name) {
#elif V8_OS_NETBSD
STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
pthread_setname_np(pthread_self(), "%s", name);
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
// pthread_setname_np is only available in 10.6 or later, so test
// for it at runtime.
int (*dynamic_pthread_setname_np)(const char*);
@@ -990,7 +1081,7 @@ bool Thread::Start() {
if (result != 0) return false;
size_t stack_size = stack_size_;
if (stack_size == 0) {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// Default on Mac OS X is 512kB -- bump up to 1MB
stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
@@ -1139,7 +1230,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
// pthread_getattr_np used below is non portable (hence the _np suffix). We
// keep this version in POSIX as most Linux-compatible derivatives will
// support it. MacOS and FreeBSD are different here.
-#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \
+#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \
!defined(V8_OS_SOLARIS)
// static
@@ -1166,7 +1257,7 @@ Stack::StackSlot Stack::GetStackStart() {
#endif // !defined(V8_LIBC_GLIBC)
}
-#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) &&
+#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) &&
// !defined(_AIX) && !defined(V8_OS_SOLARIS)
// static
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index 7d732b4a8f..38db244144 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -11,6 +11,8 @@
namespace v8 {
namespace base {
+void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap);
+
class PosixTimezoneCache : public TimezoneCache {
public:
double DaylightSavingsOffset(double time_ms) override;
diff --git a/deps/v8/src/base/platform/platform-starboard.cc b/deps/v8/src/base/platform/platform-starboard.cc
index a688c70692..c0cccbe122 100644
--- a/deps/v8/src/base/platform/platform-starboard.cc
+++ b/deps/v8/src/base/platform/platform-starboard.cc
@@ -172,14 +172,14 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
- CHECK(Free(base, prefix_size));
+ Free(base, prefix_size);
request_size -= prefix_size;
}
// Unmap memory allocated after the potentially unaligned end.
if (size != request_size) {
DCHECK_LT(size, request_size);
size_t suffix_size = request_size - size;
- CHECK(Free(aligned_base + size, suffix_size));
+ Free(aligned_base + size, suffix_size);
request_size -= suffix_size;
}
@@ -188,13 +188,13 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
}
// static
-bool OS::Free(void* address, const size_t size) {
- return SbMemoryUnmap(address, size);
+void OS::Free(void* address, const size_t size) {
+ CHECK(SbMemoryUnmap(address, size));
}
// static
-bool OS::Release(void* address, size_t size) {
- return SbMemoryUnmap(address, size);
+void OS::Release(void* address, size_t size) {
+ CHECK(SbMemoryUnmap(address, size));
}
// static
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index d00c4f5ebb..b696669142 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -722,9 +722,17 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
}
-typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
- MEM_EXTENDED_PARAMETER*, ULONG);
-VirtualAlloc2_t VirtualAlloc2;
+typedef PVOID(__stdcall* VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
+ MEM_EXTENDED_PARAMETER*, ULONG);
+VirtualAlloc2_t VirtualAlloc2 = nullptr;
+
+typedef PVOID(__stdcall* MapViewOfFile3_t)(HANDLE, HANDLE, PVOID, ULONG64,
+ SIZE_T, ULONG, ULONG,
+ MEM_EXTENDED_PARAMETER*, ULONG);
+MapViewOfFile3_t MapViewOfFile3 = nullptr;
+
+typedef PVOID(__stdcall* UnmapViewOfFile2_t)(HANDLE, PVOID, ULONG);
+UnmapViewOfFile2_t UnmapViewOfFile2 = nullptr;
void OS::EnsureWin32MemoryAPILoaded() {
static bool loaded = false;
@@ -732,6 +740,12 @@ void OS::EnsureWin32MemoryAPILoaded() {
VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2");
+ MapViewOfFile3 = (MapViewOfFile3_t)GetProcAddress(
+ GetModuleHandle(L"kernelbase.dll"), "MapViewOfFile3");
+
+ UnmapViewOfFile2 = (UnmapViewOfFile2_t)GetProcAddress(
+ GetModuleHandle(L"kernelbase.dll"), "UnmapViewOfFile2");
+
loaded = true;
}
}
@@ -815,43 +829,47 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
-void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) {
+// Desired access parameter for MapViewOfFile
+DWORD GetFileViewAccessFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ case OS::MemoryPermission::kNoAccessWillJitLater:
+ case OS::MemoryPermission::kRead:
+ return FILE_MAP_READ;
+ case OS::MemoryPermission::kReadWrite:
+ return FILE_MAP_READ | FILE_MAP_WRITE;
+ default:
+ // Execute access is not supported
+ break;
+ }
+ UNREACHABLE();
+}
+
+void* VirtualAllocWrapper(void* address, size_t size, DWORD flags,
+ DWORD protect) {
if (VirtualAlloc2) {
- return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0);
+ return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
} else {
- return VirtualAlloc(hint, size, flags, protect);
+ return VirtualAlloc(address, size, flags, protect);
}
}
-uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
- void* hint) {
- LPVOID base = nullptr;
- static BOOL use_aslr = -1;
-#ifdef V8_HOST_ARCH_32_BIT
- // Don't bother randomizing on 32-bit hosts, because they lack the room and
- // don't have viable ASLR anyway.
- if (use_aslr == -1 && !IsWow64Process(GetCurrentProcess(), &use_aslr))
- use_aslr = FALSE;
-#else
- use_aslr = TRUE;
-#endif
-
- if (use_aslr && protect != PAGE_READWRITE) {
- // For executable or reserved pages try to randomize the allocation address.
- base = VirtualAllocWrapper(hint, size, flags, protect);
- }
+uint8_t* VirtualAllocWithHint(size_t size, DWORD flags, DWORD protect,
+ void* hint) {
+ LPVOID base = VirtualAllocWrapper(hint, size, flags, protect);
// On failure, let the OS find an address to use.
- if (base == nullptr) {
+ if (hint && base == nullptr) {
base = VirtualAllocWrapper(nullptr, size, flags, protect);
}
+
return reinterpret_cast<uint8_t*>(base);
}
void* AllocateInternal(void* hint, size_t size, size_t alignment,
size_t page_size, DWORD flags, DWORD protect) {
// First, try an exact size aligned allocation.
- uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
+ uint8_t* base = VirtualAllocWithHint(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
@@ -871,7 +889,7 @@ void* AllocateInternal(void* hint, size_t size, size_t alignment,
const int kMaxAttempts = 3;
aligned_base = nullptr;
for (int i = 0; i < kMaxAttempts; ++i) {
- base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
+ base = VirtualAllocWithHint(padded_size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// Try to trim the allocation by freeing the padded allocation and then
@@ -909,18 +927,46 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
}
// static
-bool OS::Free(void* address, const size_t size) {
+void OS::Free(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
USE(size);
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
+ CHECK_NE(0, VirtualFree(address, 0, MEM_RELEASE));
+}
+
+// static
+void* OS::AllocateShared(void* hint, size_t size, MemoryPermission permission,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(hint) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ DCHECK_EQ(0, offset % AllocatePageSize());
+
+ DWORD off_hi = static_cast<DWORD>(offset >> 32);
+ DWORD off_lo = static_cast<DWORD>(offset);
+ DWORD access = GetFileViewAccessFromMemoryPermission(permission);
+
+ HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
+ void* result =
+ MapViewOfFileEx(file_mapping, access, off_hi, off_lo, size, hint);
+
+ if (!result) {
+ // Retry without hint.
+ result = MapViewOfFile(file_mapping, access, off_hi, off_lo, size);
+ }
+
+ return result;
+}
+
+// static
+void OS::FreeShared(void* address, size_t size) {
+ CHECK(UnmapViewOfFile(address));
}
// static
-bool OS::Release(void* address, size_t size) {
+void OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ CHECK_NE(0, VirtualFree(address, size, MEM_DECOMMIT));
}
// static
@@ -977,7 +1023,10 @@ bool OS::DecommitPages(void* address, size_t size) {
}
// static
-bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; }
+bool OS::CanReserveAddressSpace() {
+ return VirtualAlloc2 != nullptr && MapViewOfFile3 != nullptr &&
+ UnmapViewOfFile2 != nullptr;
+}
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
@@ -1001,8 +1050,23 @@ Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
}
// static
-bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
- return OS::Free(reservation.base(), reservation.size());
+void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+ OS::Free(reservation.base(), reservation.size());
+}
+
+// static
+PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+ HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr,
+ PAGE_READWRITE, 0, size, nullptr);
+ if (!handle) return kInvalidSharedMemoryHandle;
+ return SharedMemoryHandleFromFileMapping(handle);
+}
+
+// static
+void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ DCHECK_NE(kInvalidSharedMemoryHandle, handle);
+ HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
+ CHECK(CloseHandle(file_mapping));
}
// static
@@ -1159,7 +1223,7 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size,
? MEM_RESERVE | MEM_REPLACE_PLACEHOLDER
: MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER;
DWORD protect = GetProtectionFromMemoryPermission(access);
- return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
+ return VirtualAlloc2(nullptr, address, size, flags, protect, nullptr, 0);
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
@@ -1167,6 +1231,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) {
return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
+bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) {
+ DCHECK(Contains(address, size));
+ CHECK(MapViewOfFile3);
+
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
+ return MapViewOfFile3(file_mapping, nullptr, address, offset, size,
+ MEM_REPLACE_PLACEHOLDER, protect, nullptr, 0);
+}
+
+bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ CHECK(UnmapViewOfFile2);
+
+ return UnmapViewOfFile2(nullptr, address, MEM_PRESERVE_PLACEHOLDER);
+}
+
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 53a7267889..0a359ad211 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -26,6 +26,7 @@
#include <string>
#include <vector>
+#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
@@ -84,7 +85,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
__readfsdword(kTibInlineTlsOffset + kSystemPointerSize * index));
}
intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- DCHECK_NE(extra, 0);
+ if (!extra) return 0;
return *reinterpret_cast<intptr_t*>(extra + kSystemPointerSize *
(index - kMaxInlineSlots));
}
@@ -144,10 +145,10 @@ class V8_BASE_EXPORT OS {
// On Windows, ensure the newer memory API is loaded if available. This
// includes function like VirtualAlloc2 and MapViewOfFile3.
// TODO(chromium:1218005) this should probably happen as part of Initialize,
- // but that is currently invoked too late, after the virtual memory cage
- // is initialized. However, eventually the virtual memory cage initialization
- // will happen as part of V8::Initialize, at which point this function can
- // probably be merged into OS::Initialize.
+ // but that is currently invoked too late, after the sandbox is initialized.
+ // However, eventually the sandbox initialization will probably happen as
+ // part of V8::Initialize, at which point this function can probably be
+ // merged into OS::Initialize.
static void EnsureWin32MemoryAPILoaded();
#endif
@@ -196,12 +197,11 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
// Memory permissions. These should be kept in sync with the ones in
- // v8::PageAllocator.
+ // v8::PageAllocator and v8::PagePermissions.
enum class MemoryPermission {
kNoAccess,
kRead,
kReadWrite,
- // TODO(hpayer): Remove this flag. Memory should never be rwx.
kReadWriteExecute,
kReadExecute,
// TODO(jkummerow): Remove this when Wasm has a platform-independent
@@ -209,6 +209,11 @@ class V8_BASE_EXPORT OS {
kNoAccessWillJitLater
};
+ // Helpers to create shared memory objects. Currently only used for testing.
+ static PlatformSharedMemoryHandle CreateSharedMemoryHandleForTesting(
+ size_t size);
+ static void DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle);
+
static bool HasLazyCommits();
// Sleep for a specified time interval.
@@ -336,9 +341,15 @@ class V8_BASE_EXPORT OS {
void* new_address,
size_t size);
- V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
+ static void Free(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT static void* AllocateShared(
+ void* address, size_t size, OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle, uint64_t offset);
- V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
+ static void FreeShared(void* address, size_t size);
+
+ static void Release(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
MemoryPermission access);
@@ -354,8 +365,7 @@ class V8_BASE_EXPORT OS {
CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment,
MemoryPermission max_permission);
- V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation(
- AddressSpaceReservation reservation);
+ static void FreeAddressSpaceReservation(AddressSpaceReservation reservation);
static const int msPerSecond = 1000;
@@ -383,6 +393,10 @@ inline void EnsureConsoleOutput() {
//
// This class provides the same memory management functions as OS but operates
// inside a previously reserved contiguous region of virtual address space.
+//
+// Reserved address space in which no pages have been allocated is guaranteed
+// to be inaccessible and cause a fault on access. As such, creating guard
+// regions requires no further action.
class V8_BASE_EXPORT AddressSpaceReservation {
public:
using Address = uintptr_t;
@@ -402,6 +416,13 @@ class V8_BASE_EXPORT AddressSpaceReservation {
V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size);
+ V8_WARN_UNUSED_RESULT bool AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset);
+
+ V8_WARN_UNUSED_RESULT bool FreeShared(void* address, size_t size);
+
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
OS::MemoryPermission access);
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 2fc748da87..3e9f6334d9 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -4,7 +4,7 @@
#include "src/base/platform/semaphore.h"
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <dispatch/dispatch.h>
#elif V8_OS_WIN
#include <windows.h>
@@ -19,7 +19,7 @@
namespace v8 {
namespace base {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
Semaphore::Semaphore(int count) {
native_handle_ = dispatch_semaphore_create(count);
@@ -174,7 +174,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
return native_handle_.TakeWait(microseconds);
}
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index ec107bd290..2d5b50bca9 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -11,7 +11,7 @@
#include "src/base/win32-headers.h"
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <dispatch/dispatch.h>
#elif V8_OS_POSIX
#include <semaphore.h>
@@ -55,7 +55,7 @@ class V8_BASE_EXPORT Semaphore final {
// the semaphore counter is decremented and true is returned.
bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
using NativeHandle = dispatch_semaphore_t;
#elif V8_OS_POSIX
using NativeHandle = sem_t;
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index af214f0a6d..5efa998795 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -9,7 +9,7 @@
#include <sys/time.h>
#include <unistd.h>
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
@@ -39,7 +39,7 @@
namespace {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
int64_t ComputeThreadTicks() {
mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t thread_info_data;
@@ -111,23 +111,37 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#endif
}
-V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
- // Limit duration of timer resolution measurement to 100 ms. If we cannot
- // measure timer resoltuion within this time, we assume a low resolution
- // timer.
- int64_t end =
- ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
- int64_t start, delta;
- do {
- start = ClockNow(clk_id);
- // Loop until we can detect that the clock has changed. Non-HighRes timers
- // will increment in chunks, i.e. 15ms. By spinning until we see a clock
- // change, we detect the minimum time between measurements.
- do {
- delta = ClockNow(clk_id) - start;
- } while (delta == 0);
- } while (delta > 1 && start < end);
- return delta <= 1;
+V8_INLINE int64_t NanosecondsNow() {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return int64_t{ts.tv_sec} * v8::base::Time::kNanosecondsPerSecond +
+ ts.tv_nsec;
+}
+
+inline bool IsHighResolutionTimer(clockid_t clk_id) {
+ // Currently this is only needed for CLOCK_MONOTONIC. If other clocks need
+ // to be checked, care must be taken to support all platforms correctly;
+ // see ClockNow() above for precedent.
+ DCHECK_EQ(clk_id, CLOCK_MONOTONIC);
+ int64_t previous = NanosecondsNow();
+ // There should be enough attempts to make the loop run for more than one
+ // microsecond if the early return is not taken -- the elapsed time can't
+ // be measured in that situation, so we have to estimate it offline.
+ constexpr int kAttempts = 100;
+ for (int i = 0; i < kAttempts; i++) {
+ int64_t next = NanosecondsNow();
+ int64_t delta = next - previous;
+ if (delta == 0) continue;
+ // We expect most systems to take this branch on the first iteration.
+ if (delta <= v8::base::Time::kNanosecondsPerMicrosecond) {
+ return true;
+ }
+ previous = next;
+ }
+ // As of 2022, we expect that the loop above has taken at least 2 μs (on
+ // a fast desktop). If we still haven't seen a non-zero clock increment
+ // in sub-microsecond range, assume a low resolution timer.
+ return false;
}
#elif V8_OS_WIN
@@ -142,8 +156,7 @@ V8_INLINE uint64_t QPCNowRaw() {
USE(result);
return perf_counter_now.QuadPart;
}
-#endif // V8_OS_MACOSX
-
+#endif // V8_OS_DARWIN
} // namespace
@@ -231,8 +244,7 @@ int64_t TimeDelta::InNanoseconds() const {
return delta_ * Time::kNanosecondsPerMicrosecond;
}
-
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
DCHECK_GE(ts.tv_nsec, 0);
@@ -252,8 +264,7 @@ struct mach_timespec TimeDelta::ToMachTimespec() const {
return ts;
}
-#endif // V8_OS_MACOSX
-
+#endif // V8_OS_DARWIN
#if V8_OS_POSIX
@@ -463,16 +474,6 @@ Time Time::NowFromSystemTime() { return Now(); }
#endif // V8_OS_STARBOARD
-// static
-TimeTicks TimeTicks::HighResolutionNow() {
- // a DCHECK of TimeTicks::IsHighResolution() was removed from here
- // as it turns out this path is used in the wild for logs and counters.
- //
- // TODO(hpayer) We may eventually want to split TimedHistograms based
- // on low resolution clocks to avoid polluting metrics
- return TimeTicks::Now();
-}
-
Time Time::FromJsTime(double ms_since_epoch) {
// The epoch is a valid time, so this constructor doesn't interpret
// 0 as the null time.
@@ -709,7 +710,7 @@ bool TimeTicks::IsHighResolution() {
TimeTicks TimeTicks::Now() {
int64_t ticks;
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
static struct mach_timebase_info info;
if (info.denom == 0) {
kern_return_t result = mach_timebase_info(&info);
@@ -725,18 +726,18 @@ TimeTicks TimeTicks::Now() {
#elif V8_OS_STARBOARD
ticks = SbTimeGetMonotonicNow();
#else
-#error platform does not implement TimeTicks::HighResolutionNow.
-#endif // V8_OS_MACOSX
+#error platform does not implement TimeTicks::Now.
+#endif // V8_OS_DARWIN
// Make sure we never return 0 here.
return TimeTicks(ticks + 1);
}
// static
bool TimeTicks::IsHighResolution() {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
return true;
#elif V8_OS_POSIX
- static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
+ static const bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
return is_high_resolution;
#else
return true;
@@ -759,7 +760,7 @@ bool ThreadTicks::IsSupported() {
// Thread CPU time accounting is unavailable in PASE
return false;
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
- defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
+ defined(V8_OS_DARWIN) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
return true;
#elif defined(V8_OS_WIN)
return IsSupportedWin();
@@ -780,7 +781,7 @@ ThreadTicks ThreadTicks::Now() {
#else
UNREACHABLE();
#endif
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
return ThreadTicks(ComputeThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
defined(V8_OS_ANDROID)
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 2fc7859dd7..d4be4109f9 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -433,11 +433,6 @@ class V8_BASE_EXPORT TimeTicks final
// This method never returns a null TimeTicks.
static TimeTicks Now();
- // This is equivalent to Now() but DCHECKs that IsHighResolution(). Useful for
- // test frameworks that rely on high resolution clocks (in practice all
- // platforms but low-end Windows devices have high resolution clocks).
- static TimeTicks HighResolutionNow();
-
// Returns true if the high-resolution clock is working on this system.
static bool IsHighResolution();
diff --git a/deps/v8/src/utils/pointer-with-payload.h b/deps/v8/src/base/pointer-with-payload.h
index 6200f41077..94801a9af7 100644
--- a/deps/v8/src/utils/pointer-with-payload.h
+++ b/deps/v8/src/base/pointer-with-payload.h
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UTILS_POINTER_WITH_PAYLOAD_H_
-#define V8_UTILS_POINTER_WITH_PAYLOAD_H_
+#ifndef V8_BASE_POINTER_WITH_PAYLOAD_H_
+#define V8_BASE_POINTER_WITH_PAYLOAD_H_
#include <cstdint>
#include <type_traits>
-#include "include/v8config.h"
#include "src/base/logging.h"
namespace v8 {
-namespace internal {
+namespace base {
template <typename PointerType>
struct PointerWithPayloadTraits {
- static constexpr int value =
+ static constexpr int kAvailableBits =
alignof(PointerType) >= 8 ? 3 : alignof(PointerType) >= 4 ? 2 : 1;
};
@@ -37,82 +36,83 @@ struct PointerWithPayloadTraits<void> : public PointerWithPayloadTraits<void*> {
//
// Here we store a bool that needs 1 bit of storage state into the lower bits
// of int *, which points to some int data;
-
template <typename PointerType, typename PayloadType, int NumPayloadBits>
class PointerWithPayload {
- // We have log2(ptr alignment) kAvailBits free to use
- static constexpr int kAvailBits = PointerWithPayloadTraits<
- typename std::remove_const<PointerType>::type>::value;
- static_assert(
- kAvailBits >= NumPayloadBits,
- "Ptr does not have sufficient alignment for the selected amount of "
- "storage bits.");
-
- static constexpr uintptr_t kPayloadMask =
- (uintptr_t{1} << NumPayloadBits) - 1;
- static constexpr uintptr_t kPointerMask = ~kPayloadMask;
-
public:
PointerWithPayload() = default;
explicit PointerWithPayload(PointerType* pointer)
- : pointer_(reinterpret_cast<uintptr_t>(pointer)) {
+ : pointer_with_payload_(reinterpret_cast<uintptr_t>(pointer)) {
DCHECK_EQ(GetPointer(), pointer);
DCHECK_EQ(GetPayload(), static_cast<PayloadType>(0));
}
explicit PointerWithPayload(PayloadType payload)
- : pointer_(static_cast<uintptr_t>(payload)) {
+ : pointer_with_payload_(static_cast<uintptr_t>(payload)) {
DCHECK_EQ(GetPointer(), nullptr);
DCHECK_EQ(GetPayload(), payload);
}
PointerWithPayload(PointerType* pointer, PayloadType payload) {
- update(pointer, payload);
+ Update(pointer, payload);
}
V8_INLINE PointerType* GetPointer() const {
- return reinterpret_cast<PointerType*>(pointer_ & kPointerMask);
+ return reinterpret_cast<PointerType*>(pointer_with_payload_ & kPointerMask);
}
// An optimized version of GetPointer for when we know the payload value.
V8_INLINE PointerType* GetPointerWithKnownPayload(PayloadType payload) const {
DCHECK_EQ(GetPayload(), payload);
- return reinterpret_cast<PointerType*>(pointer_ -
+ return reinterpret_cast<PointerType*>(pointer_with_payload_ -
static_cast<uintptr_t>(payload));
}
V8_INLINE PointerType* operator->() const { return GetPointer(); }
- V8_INLINE void update(PointerType* new_pointer, PayloadType new_payload) {
- pointer_ = reinterpret_cast<uintptr_t>(new_pointer) |
- static_cast<uintptr_t>(new_payload);
+ V8_INLINE void Update(PointerType* new_pointer, PayloadType new_payload) {
+ pointer_with_payload_ = reinterpret_cast<uintptr_t>(new_pointer) |
+ static_cast<uintptr_t>(new_payload);
DCHECK_EQ(GetPayload(), new_payload);
DCHECK_EQ(GetPointer(), new_pointer);
}
V8_INLINE void SetPointer(PointerType* newptr) {
DCHECK_EQ(reinterpret_cast<uintptr_t>(newptr) & kPayloadMask, 0);
- pointer_ = reinterpret_cast<uintptr_t>(newptr) | (pointer_ & kPayloadMask);
+ pointer_with_payload_ = reinterpret_cast<uintptr_t>(newptr) |
+ (pointer_with_payload_ & kPayloadMask);
DCHECK_EQ(GetPointer(), newptr);
}
V8_INLINE PayloadType GetPayload() const {
- return static_cast<PayloadType>(pointer_ & kPayloadMask);
+ return static_cast<PayloadType>(pointer_with_payload_ & kPayloadMask);
}
V8_INLINE void SetPayload(PayloadType new_payload) {
uintptr_t new_payload_ptr = static_cast<uintptr_t>(new_payload);
DCHECK_EQ(new_payload_ptr & kPayloadMask, new_payload_ptr);
- pointer_ = (pointer_ & kPointerMask) | new_payload_ptr;
+ pointer_with_payload_ =
+ (pointer_with_payload_ & kPointerMask) | new_payload_ptr;
DCHECK_EQ(GetPayload(), new_payload);
}
private:
- uintptr_t pointer_ = 0;
+ static constexpr int kAvailableBits = PointerWithPayloadTraits<
+ typename std::remove_const<PointerType>::type>::kAvailableBits;
+ static_assert(
+ kAvailableBits >= NumPayloadBits,
+ "Ptr does not have sufficient alignment for the selected amount of "
+ "storage bits. Override PointerWithPayloadTraits to guarantee available "
+ "bits manually.");
+
+ static constexpr uintptr_t kPayloadMask =
+ (uintptr_t{1} << NumPayloadBits) - 1;
+ static constexpr uintptr_t kPointerMask = ~kPayloadMask;
+
+ uintptr_t pointer_with_payload_ = 0;
};
-} // namespace internal
+} // namespace base
} // namespace v8
-#endif // V8_UTILS_POINTER_WITH_PAYLOAD_H_
+#endif // V8_BASE_POINTER_WITH_PAYLOAD_H_
diff --git a/deps/v8/src/base/safe_conversions_impl.h b/deps/v8/src/base/safe_conversions_impl.h
index 5d9277df24..89a41740b0 100644
--- a/deps/v8/src/base/safe_conversions_impl.h
+++ b/deps/v8/src/base/safe_conversions_impl.h
@@ -12,6 +12,7 @@
#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#include <stddef.h>
#include <stdint.h>
#include <limits>
@@ -195,7 +196,7 @@ class RangeCheck {
public:
constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
: is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
- constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {}
constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
diff --git a/deps/v8/src/base/sanitizer/lsan-page-allocator.cc b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc
index bb52eb368f..c50bb4611b 100644
--- a/deps/v8/src/base/sanitizer/lsan-page-allocator.cc
+++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc
@@ -50,25 +50,21 @@ bool LsanPageAllocator::CanAllocateSharedPages() {
}
bool LsanPageAllocator::FreePages(void* address, size_t size) {
- bool result = page_allocator_->FreePages(address, size);
+ CHECK(page_allocator_->FreePages(address, size));
#if defined(LEAK_SANITIZER)
- if (result) {
- __lsan_unregister_root_region(address, size);
- }
+ __lsan_unregister_root_region(address, size);
#endif
- return result;
+ return true;
}
bool LsanPageAllocator::ReleasePages(void* address, size_t size,
size_t new_size) {
- bool result = page_allocator_->ReleasePages(address, size, new_size);
+ CHECK(page_allocator_->ReleasePages(address, size, new_size));
#if defined(LEAK_SANITIZER)
- if (result) {
- __lsan_unregister_root_region(address, size);
- __lsan_register_root_region(address, new_size);
- }
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
#endif
- return result;
+ return true;
}
} // namespace base
diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
index 1877c44b7b..cd8d0decae 100644
--- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
+++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
@@ -17,7 +17,8 @@ namespace base {
LsanVirtualAddressSpace::LsanVirtualAddressSpace(
std::unique_ptr<v8::VirtualAddressSpace> vas)
: VirtualAddressSpace(vas->page_size(), vas->allocation_granularity(),
- vas->base(), vas->size()),
+ vas->base(), vas->size(),
+ vas->max_page_permissions()),
vas_(std::move(vas)) {
DCHECK_NOT_NULL(vas_);
}
@@ -27,28 +28,45 @@ Address LsanVirtualAddressSpace::AllocatePages(Address hint, size_t size,
PagePermissions permissions) {
Address result = vas_->AllocatePages(hint, size, alignment, permissions);
#if defined(LEAK_SANITIZER)
- if (result != 0) {
+ if (result) {
__lsan_register_root_region(reinterpret_cast<void*>(result), size);
}
#endif // defined(LEAK_SANITIZER)
return result;
}
-bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) {
- bool result = vas_->FreePages(address, size);
+void LsanVirtualAddressSpace::FreePages(Address address, size_t size) {
+ vas_->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(reinterpret_cast<void*>(address), size);
+#endif // defined(LEAK_SANITIZER)
+}
+
+Address LsanVirtualAddressSpace::AllocateSharedPages(
+ Address hint, size_t size, PagePermissions permissions,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ Address result =
+ vas_->AllocateSharedPages(hint, size, permissions, handle, offset);
#if defined(LEAK_SANITIZER)
if (result) {
- __lsan_unregister_root_region(reinterpret_cast<void*>(address), size);
+ __lsan_register_root_region(reinterpret_cast<void*>(result), size);
}
#endif // defined(LEAK_SANITIZER)
return result;
}
+void LsanVirtualAddressSpace::FreeSharedPages(Address address, size_t size) {
+ vas_->FreeSharedPages(address, size);
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(reinterpret_cast<void*>(address), size);
+#endif // defined(LEAK_SANITIZER)
+}
+
std::unique_ptr<VirtualAddressSpace> LsanVirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) {
+ PagePermissions max_page_permissions) {
auto subspace =
- vas_->AllocateSubspace(hint, size, alignment, max_permissions);
+ vas_->AllocateSubspace(hint, size, alignment, max_page_permissions);
#if defined(LEAK_SANITIZER)
if (subspace) {
subspace = std::make_unique<LsanVirtualAddressSpace>(std::move(subspace));
diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
index cc16561710..00cd32a39f 100644
--- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
+++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
@@ -33,18 +33,33 @@ class V8_BASE_EXPORT LsanVirtualAddressSpace final
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
- bool FreePages(Address address, size_t size) override;
+ void FreePages(Address address, size_t size) override;
+
+ Address AllocateSharedPages(Address hint, size_t size,
+ PagePermissions permissions,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) override;
+
+ void FreeSharedPages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override {
return vas_->SetPagePermissions(address, size, permissions);
}
+ bool AllocateGuardRegion(Address address, size_t size) override {
+ return vas_->AllocateGuardRegion(address, size);
+ }
+
+ void FreeGuardRegion(Address address, size_t size) override {
+ vas_->FreeGuardRegion(address, size);
+ }
+
bool CanAllocateSubspaces() override { return vas_->CanAllocateSubspaces(); }
std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) override;
+ PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override {
return vas_->DiscardSystemPages(address, size);
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 6f69e2aa9c..143aa4ae89 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -63,7 +63,7 @@ int SysInfo::NumberOfProcessors() {
// static
int64_t SysInfo::AmountOfPhysicalMemory() {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
int mib[2] = {CTL_HW, HW_MEMSIZE};
int64_t memsize = 0;
size_t len = sizeof(memsize);
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index 91c726474e..807ff4f2a8 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -160,6 +160,15 @@ class ThreadedListBase final : public BaseClass {
return *this;
}
+ bool is_null() { return entry_ == nullptr; }
+
+ void InsertBefore(T* value) {
+ T* old_entry_value = *entry_;
+ *entry_ = value;
+ entry_ = TLTraits::next(value);
+ *entry_ = old_entry_value;
+ }
+
Iterator() : entry_(nullptr) {}
private:
@@ -178,6 +187,10 @@ class ThreadedListBase final : public BaseClass {
using reference = const value_type;
using pointer = const value_type*;
+ // Allow implicit conversion to const iterator.
+ // NOLINTNEXTLINE
+ ConstIterator(Iterator& iterator) : entry_(iterator.entry_) {}
+
public:
ConstIterator& operator++() {
entry_ = TLTraits::next(*entry_);
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index f6f9dcfef2..f6dc62893c 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -56,7 +56,7 @@ RandomNumberGenerator::RandomNumberGenerator() {
DCHECK_EQ(0, result);
USE(result);
SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
-#elif V8_OS_MACOSX || V8_OS_FREEBSD || V8_OS_OPENBSD
+#elif V8_OS_DARWIN || V8_OS_FREEBSD || V8_OS_OPENBSD
// Despite its prefix suggests it is not RC4 algorithm anymore.
// It always succeeds while having decent performance and
// no file descriptor involved.
@@ -87,8 +87,7 @@ RandomNumberGenerator::RandomNumberGenerator() {
// which provides reasonable entropy, see:
// https://code.google.com/p/v8/issues/detail?id=2905
int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
- seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
- seed ^= TimeTicks::Now().ToInternalValue() << 8;
+ seed ^= TimeTicks::Now().ToInternalValue();
SetSeed(seed);
#endif // V8_OS_CYGWIN || V8_OS_WIN
}
diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.cc b/deps/v8/src/base/virtual-address-space-page-allocator.cc
index 297b9adbf9..f88afdcc19 100644
--- a/deps/v8/src/base/virtual-address-space-page-allocator.cc
+++ b/deps/v8/src/base/virtual-address-space-page-allocator.cc
@@ -28,7 +28,8 @@ bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
size = result->second;
resized_allocations_.erase(result);
}
- return vas_->FreePages(address, size);
+ vas_->FreePages(address, size);
+ return true;
}
bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
@@ -46,7 +47,8 @@ bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
// Will fail if the allocation was resized previously, which is desired.
Address address = reinterpret_cast<Address>(ptr);
resized_allocations_.insert({address, size});
- return vas_->DecommitPages(address + new_size, size - new_size);
+ CHECK(vas_->DecommitPages(address + new_size, size - new_size));
+ return true;
}
bool VirtualAddressSpacePageAllocator::SetPermissions(
diff --git a/deps/v8/src/base/virtual-address-space.cc b/deps/v8/src/base/virtual-address-space.cc
index 9907facb57..6ef95f5ca8 100644
--- a/deps/v8/src/base/virtual-address-space.cc
+++ b/deps/v8/src/base/virtual-address-space.cc
@@ -26,10 +26,34 @@ STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
#undef STATIC_ASSERT_ENUM
+namespace {
+uint8_t PagePermissionsToBitset(PagePermissions permissions) {
+ switch (permissions) {
+ case PagePermissions::kNoAccess:
+ return 0b000;
+ case PagePermissions::kRead:
+ return 0b100;
+ case PagePermissions::kReadWrite:
+ return 0b110;
+ case PagePermissions::kReadWriteExecute:
+ return 0b111;
+ case PagePermissions::kReadExecute:
+ return 0b101;
+ }
+}
+} // namespace
+
+bool IsSubset(PagePermissions lhs, PagePermissions rhs) {
+ uint8_t lhs_bits = PagePermissionsToBitset(lhs);
+ uint8_t rhs_bits = PagePermissionsToBitset(rhs);
+ return (lhs_bits & rhs_bits) == lhs_bits;
+}
+
VirtualAddressSpace::VirtualAddressSpace()
: VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(),
kNullAddress,
- std::numeric_limits<uintptr_t>::max()) {
+ std::numeric_limits<uintptr_t>::max(),
+ PagePermissions::kReadWriteExecute) {
#if V8_OS_WIN
// On Windows, this additional step is required to lookup the VirtualAlloc2
// and friends functions.
@@ -61,11 +85,11 @@ Address VirtualAddressSpace::AllocatePages(Address hint, size_t size,
static_cast<OS::MemoryPermission>(permissions)));
}
-bool VirtualAddressSpace::FreePages(Address address, size_t size) {
+void VirtualAddressSpace::FreePages(Address address, size_t size) {
DCHECK(IsAligned(address, allocation_granularity()));
DCHECK(IsAligned(size, allocation_granularity()));
- return OS::Free(reinterpret_cast<void*>(address), size);
+ OS::Free(reinterpret_cast<void*>(address), size);
}
bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
@@ -77,13 +101,52 @@ bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
static_cast<OS::MemoryPermission>(permissions));
}
+bool VirtualAddressSpace::AllocateGuardRegion(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ void* hint = reinterpret_cast<void*>(address);
+ void* result = OS::Allocate(hint, size, allocation_granularity(),
+ OS::MemoryPermission::kNoAccess);
+ if (result && result != hint) {
+ OS::Free(result, size);
+ }
+ return result == hint;
+}
+
+void VirtualAddressSpace::FreeGuardRegion(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ OS::Free(reinterpret_cast<void*>(address), size);
+}
+
bool VirtualAddressSpace::CanAllocateSubspaces() {
return OS::CanReserveAddressSpace();
}
+Address VirtualAddressSpace::AllocateSharedPages(
+ Address hint, size_t size, PagePermissions permissions,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK(IsAligned(hint, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+ DCHECK(IsAligned(offset, allocation_granularity()));
+
+ return reinterpret_cast<Address>(OS::AllocateShared(
+ reinterpret_cast<void*>(hint), size,
+ static_cast<OS::MemoryPermission>(permissions), handle, offset));
+}
+
+void VirtualAddressSpace::FreeSharedPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ OS::FreeShared(reinterpret_cast<void*>(address), size);
+}
+
std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) {
+ PagePermissions max_page_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
@@ -91,11 +154,11 @@ std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
base::Optional<AddressSpaceReservation> reservation =
OS::CreateAddressSpaceReservation(
reinterpret_cast<void*>(hint), size, alignment,
- static_cast<OS::MemoryPermission>(max_permissions));
+ static_cast<OS::MemoryPermission>(max_page_permissions));
if (!reservation.has_value())
return std::unique_ptr<v8::VirtualAddressSpace>();
return std::unique_ptr<v8::VirtualAddressSpace>(
- new VirtualAddressSubspace(*reservation, this));
+ new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}
bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
@@ -112,15 +175,17 @@ bool VirtualAddressSpace::DecommitPages(Address address, size_t size) {
return OS::DecommitPages(reinterpret_cast<void*>(address), size);
}
-bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
- return OS::FreeAddressSpaceReservation(subspace->reservation_);
+void VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
+ OS::FreeAddressSpaceReservation(subspace->reservation_);
}
VirtualAddressSubspace::VirtualAddressSubspace(
- AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space)
- : VirtualAddressSpaceBase(
- parent_space->page_size(), parent_space->allocation_granularity(),
- reinterpret_cast<Address>(reservation.base()), reservation.size()),
+ AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space,
+ PagePermissions max_page_permissions)
+ : VirtualAddressSpaceBase(parent_space->page_size(),
+ parent_space->allocation_granularity(),
+ reinterpret_cast<Address>(reservation.base()),
+ reservation.size(), max_page_permissions),
reservation_(reservation),
region_allocator_(reinterpret_cast<Address>(reservation.base()),
reservation.size(),
@@ -141,7 +206,7 @@ VirtualAddressSubspace::VirtualAddressSubspace(
}
VirtualAddressSubspace::~VirtualAddressSubspace() {
- CHECK(parent_space_->FreeSubspace(this));
+ parent_space_->FreeSubspace(this);
}
void VirtualAddressSubspace::SetRandomSeed(int64_t seed) {
@@ -153,7 +218,7 @@ Address VirtualAddressSubspace::RandomPageAddress() {
MutexGuard guard(&mutex_);
// Note: the random numbers generated here aren't uniformly distributed if the
// size isn't a power of two.
- Address addr = base() + (rng_.NextInt64() % size());
+ Address addr = base() + (static_cast<uint64_t>(rng_.NextInt64()) % size());
return RoundDown(addr, allocation_granularity());
}
@@ -163,6 +228,7 @@ Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
+ DCHECK(IsSubset(permissions, max_page_permissions()));
MutexGuard guard(&mutex_);
@@ -179,38 +245,91 @@ Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
return address;
}
-bool VirtualAddressSubspace::FreePages(Address address, size_t size) {
+void VirtualAddressSubspace::FreePages(Address address, size_t size) {
DCHECK(IsAligned(address, allocation_granularity()));
DCHECK(IsAligned(size, allocation_granularity()));
MutexGuard guard(&mutex_);
- if (region_allocator_.CheckRegion(address) != size) return false;
-
// The order here is important: on Windows, the allocation first has to be
// freed to a placeholder before the placeholder can be merged (during the
// merge_callback) with any surrounding placeholder mappings.
CHECK(reservation_.Free(reinterpret_cast<void*>(address), size));
CHECK_EQ(size, region_allocator_.FreeRegion(address));
- return true;
}
bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size,
PagePermissions permissions) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
+ DCHECK(IsSubset(permissions, max_page_permissions()));
return reservation_.SetPermissions(
reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(permissions));
}
+bool VirtualAddressSubspace::AllocateGuardRegion(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+
+ // It is guaranteed that reserved address space is inaccessible, so we just
+ // need to mark the region as in-use in the region allocator.
+ return region_allocator_.AllocateRegionAt(address, size);
+}
+
+void VirtualAddressSubspace::FreeGuardRegion(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+}
+
+Address VirtualAddressSubspace::AllocateSharedPages(
+ Address hint, size_t size, PagePermissions permissions,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK(IsAligned(hint, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+ DCHECK(IsAligned(offset, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+
+ Address address =
+ region_allocator_.AllocateRegion(hint, size, allocation_granularity());
+ if (address == RegionAllocator::kAllocationFailure) return kNullAddress;
+
+ if (!reservation_.AllocateShared(
+ reinterpret_cast<void*>(address), size,
+ static_cast<OS::MemoryPermission>(permissions), handle, offset)) {
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ return kNullAddress;
+ }
+
+ return address;
+}
+
+void VirtualAddressSubspace::FreeSharedPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+ // The order here is important: on Windows, the allocation first has to be
+ // freed to a placeholder before the placeholder can be merged (during the
+ // merge_callback) with any surrounding placeholder mappings.
+ CHECK(reservation_.FreeShared(reinterpret_cast<void*>(address), size));
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+}
+
std::unique_ptr<v8::VirtualAddressSpace>
VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
size_t alignment,
- PagePermissions max_permissions) {
+ PagePermissions max_page_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
+ DCHECK(IsSubset(max_page_permissions, this->max_page_permissions()));
MutexGuard guard(&mutex_);
@@ -222,13 +341,13 @@ VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
base::Optional<AddressSpaceReservation> reservation =
reservation_.CreateSubReservation(
reinterpret_cast<void*>(address), size,
- static_cast<OS::MemoryPermission>(max_permissions));
+ static_cast<OS::MemoryPermission>(max_page_permissions));
if (!reservation.has_value()) {
CHECK_EQ(size, region_allocator_.FreeRegion(address));
return nullptr;
}
return std::unique_ptr<v8::VirtualAddressSpace>(
- new VirtualAddressSubspace(*reservation, this));
+ new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}
bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) {
@@ -246,16 +365,13 @@ bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) {
return reservation_.DecommitPages(reinterpret_cast<void*>(address), size);
}
-bool VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) {
+void VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) {
MutexGuard guard(&mutex_);
AddressSpaceReservation reservation = subspace->reservation_;
Address base = reinterpret_cast<Address>(reservation.base());
- if (region_allocator_.FreeRegion(base) != reservation.size()) {
- return false;
- }
-
- return reservation_.FreeSubReservation(reservation);
+ CHECK_EQ(reservation.size(), region_allocator_.FreeRegion(base));
+ CHECK(reservation_.FreeSubReservation(reservation));
}
} // namespace base
diff --git a/deps/v8/src/base/virtual-address-space.h b/deps/v8/src/base/virtual-address-space.h
index 5cfe462079..3681367777 100644
--- a/deps/v8/src/base/virtual-address-space.h
+++ b/deps/v8/src/base/virtual-address-space.h
@@ -32,10 +32,16 @@ class VirtualAddressSpaceBase
// Called by a subspace during destruction. Responsible for freeing the
// address space reservation and any other data associated with the subspace
// in the parent space.
- virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0;
+ virtual void FreeSubspace(VirtualAddressSubspace* subspace) = 0;
};
/*
+ * Helper routine to determine whether one set of page permissions (the lhs) is
+ * a subset of another one (the rhs).
+ */
+V8_BASE_EXPORT bool IsSubset(PagePermissions lhs, PagePermissions rhs);
+
+/*
* The virtual address space of the current process. Conceptionally, there
* should only be one such "root" instance. However, in practice there is no
* issue with having multiple instances as the actual resources are managed by
@@ -53,23 +59,34 @@ class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions access) override;
- bool FreePages(Address address, size_t size) override;
+ void FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions access) override;
+ bool AllocateGuardRegion(Address address, size_t size) override;
+
+ void FreeGuardRegion(Address address, size_t size) override;
+
+ Address AllocateSharedPages(Address hint, size_t size,
+ PagePermissions permissions,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) override;
+
+ void FreeSharedPages(Address address, size_t size) override;
+
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) override;
+ PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
- bool FreeSubspace(VirtualAddressSubspace* subspace) override;
+ void FreeSubspace(VirtualAddressSubspace* subspace) override;
};
/*
@@ -87,16 +104,27 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
- bool FreePages(Address address, size_t size) override;
+ void FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
+ bool AllocateGuardRegion(Address address, size_t size) override;
+
+ void FreeGuardRegion(Address address, size_t size) override;
+
+ Address AllocateSharedPages(Address hint, size_t size,
+ PagePermissions permissions,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) override;
+
+ void FreeSharedPages(Address address, size_t size) override;
+
bool CanAllocateSubspaces() override { return true; }
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
- PagePermissions max_permissions) override;
+ PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
@@ -107,10 +135,11 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
// allocating sub spaces.
friend class v8::base::VirtualAddressSpace;
- bool FreeSubspace(VirtualAddressSubspace* subspace) override;
+ void FreeSubspace(VirtualAddressSubspace* subspace) override;
VirtualAddressSubspace(AddressSpaceReservation reservation,
- VirtualAddressSpaceBase* parent_space);
+ VirtualAddressSpaceBase* parent_space,
+ PagePermissions max_page_permissions);
// The address space reservation backing this subspace.
AddressSpaceReservation reservation_;
diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h
index 5d8633798b..1a06750d08 100644
--- a/deps/v8/src/base/vlq-base64.h
+++ b/deps/v8/src/base/vlq-base64.h
@@ -5,7 +5,8 @@
#ifndef V8_BASE_VLQ_BASE64_H_
#define V8_BASE_VLQ_BASE64_H_
-#include <string>
+#include <stddef.h>
+#include <stdint.h>
#include "src/base/base-export.h"
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index db3c05ce18..f77b85e2ef 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -83,6 +83,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ add(rscratch, fp,
+ Operand(interpreter_register.ToOperand() * kSystemPointerSize));
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -474,7 +479,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ Push(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
@@ -502,9 +507,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 7824f92c2a..b08ac0d7ac 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -79,6 +79,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ Add(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -557,7 +562,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->PushArgument(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
@@ -583,10 +588,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index b8c876a8d3..e0fe720bc4 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -26,6 +26,8 @@ class BaselineAssembler {
explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
inline static MemOperand RegisterFrameOperand(
interpreter::Register interpreter_register);
+ inline void RegisterFrameAddress(interpreter::Register interpreter_register,
+ Register rscratch);
inline MemOperand ContextOperand();
inline MemOperand FunctionOperand();
inline MemOperand FeedbackVectorOperand();
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index fe0e9d84cc..e0f528bcbe 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -27,6 +27,12 @@ namespace v8 {
namespace internal {
namespace baseline {
+static bool CanCompileWithConcurrentBaseline(SharedFunctionInfo shared,
+ Isolate* isolate) {
+ return !shared.is_compiled() || shared.HasBaselineCode() ||
+ !CanCompileWithBaseline(isolate, shared);
+}
+
class BaselineCompilerTask {
public:
BaselineCompilerTask(Isolate* isolate, PersistentHandles* handles,
@@ -60,8 +66,7 @@ class BaselineCompilerTask {
}
// Don't install the code if the bytecode has been flushed or has
// already some baseline code installed.
- if (!shared_function_info_->is_compiled() ||
- shared_function_info_->HasBaselineCode()) {
+ if (!CanCompileWithConcurrentBaseline(*shared_function_info_, isolate)) {
return;
}
shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
@@ -90,8 +95,7 @@ class BaselineCompilerTask {
class BaselineBatchCompilerJob {
public:
BaselineBatchCompilerJob(Isolate* isolate, Handle<WeakFixedArray> task_queue,
- int batch_size)
- : isolate_for_local_isolate_(isolate) {
+ int batch_size) {
handles_ = isolate->NewPersistentHandles();
tasks_.reserve(batch_size);
for (int i = 0; i < batch_size; i++) {
@@ -103,7 +107,7 @@ class BaselineBatchCompilerJob {
if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue;
// Skip functions where the bytecode has been flushed.
SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
- if (ShouldSkipFunction(shared)) continue;
+ if (CanCompileWithConcurrentBaseline(shared, isolate)) continue;
tasks_.emplace_back(isolate, handles_.get(), shared);
}
if (FLAG_trace_baseline_concurrent_compilation) {
@@ -113,34 +117,14 @@ class BaselineBatchCompilerJob {
}
}
- bool ShouldSkipFunction(SharedFunctionInfo shared) {
- return !shared.is_compiled() || shared.HasBaselineCode() ||
- !CanCompileWithBaseline(isolate_for_local_isolate_, shared);
- }
-
// Executed in the background thread.
- void Compile() {
-#ifdef V8_RUNTIME_CALL_STATS
- WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
- isolate_for_local_isolate_->counters()
- ->worker_thread_runtime_call_stats());
- LocalIsolate local_isolate(isolate_for_local_isolate_,
- ThreadKind::kBackground,
- runtime_call_stats_scope.Get());
-#else
- LocalIsolate local_isolate(isolate_for_local_isolate_,
- ThreadKind::kBackground);
-#endif
- local_isolate.heap()->AttachPersistentHandles(std::move(handles_));
- UnparkedScope unparked_scope(&local_isolate);
- LocalHandleScope handle_scope(&local_isolate);
-
+ void Compile(LocalIsolate* local_isolate) {
+ local_isolate->heap()->AttachPersistentHandles(std::move(handles_));
for (auto& task : tasks_) {
- task.Compile(&local_isolate);
+ task.Compile(local_isolate);
}
-
// Get the handle back since we'd need them to install the code later.
- handles_ = local_isolate.heap()->DetachPersistentHandles();
+ handles_ = local_isolate->heap()->DetachPersistentHandles();
}
// Executed in the main thread.
@@ -151,7 +135,6 @@ class BaselineBatchCompilerJob {
}
private:
- Isolate* isolate_for_local_isolate_;
std::vector<BaselineCompilerTask> tasks_;
std::unique_ptr<PersistentHandles> handles_;
};
@@ -169,14 +152,19 @@ class ConcurrentBaselineCompiler {
outgoing_queue_(outcoming_queue) {}
void Run(JobDelegate* delegate) override {
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_isolate);
+ LocalHandleScope handle_scope(&local_isolate);
+
+ // Since we're going to compile an entire batch, this guarantees that
+ // we only switch back the memory chunks to RX at the end.
+ CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap());
+
while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) {
- // Since we're going to compile an entire batch, this guarantees that
- // we only switch back the memory chunks to RX at the end.
- CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap());
std::unique_ptr<BaselineBatchCompilerJob> job;
if (!incoming_queue_->Dequeue(&job)) break;
DCHECK_NOT_NULL(job);
- job->Compile();
+ job->Compile(&local_isolate);
outgoing_queue_->Enqueue(std::move(job));
}
isolate_->stack_guard()->RequestInstallBaselineCode();
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index 3ef0c68727..e057535020 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -586,7 +586,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
if (weight < 0) {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode,
+ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck,
__ FunctionOperand());
}
}
@@ -841,13 +841,13 @@ void BaselineCompiler::VisitMov() {
StoreRegister(1, scratch);
}
-void BaselineCompiler::VisitLdaNamedProperty() {
+void BaselineCompiler::VisitGetNamedProperty() {
CallBuiltin<Builtin::kLoadICBaseline>(RegisterOperand(0), // object
Constant<Name>(1), // name
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
+void BaselineCompiler::VisitGetNamedPropertyFromSuper() {
__ LoadPrototype(
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
kInterpreterAccumulatorRegister);
@@ -860,7 +860,7 @@ void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitLdaKeyedProperty() {
+void BaselineCompiler::VisitGetKeyedProperty() {
CallBuiltin<Builtin::kKeyedLoadICBaseline>(
RegisterOperand(0), // object
kInterpreterAccumulatorRegister, // key
@@ -921,7 +921,12 @@ void BaselineCompiler::VisitStaModuleVariable() {
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value);
}
-void BaselineCompiler::VisitStaNamedProperty() {
+void BaselineCompiler::VisitSetNamedProperty() {
+ // StoreIC is currently a base class for multiple property store operations
+ // and contains mixed logic for named and keyed, set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can be
+ // called here.
CallBuiltin<Builtin::kStoreICBaseline>(
RegisterOperand(0), // object
Constant<Name>(1), // name
@@ -929,15 +934,20 @@ void BaselineCompiler::VisitStaNamedProperty() {
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitStaNamedOwnProperty() {
- CallBuiltin<Builtin::kStoreOwnICBaseline>(
+void BaselineCompiler::VisitDefineNamedOwnProperty() {
+ CallBuiltin<Builtin::kDefineNamedOwnICBaseline>(
RegisterOperand(0), // object
Constant<Name>(1), // name
kInterpreterAccumulatorRegister, // value
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitStaKeyedProperty() {
+void BaselineCompiler::VisitSetKeyedProperty() {
+ // KeyedStoreIC is currently a base class for multiple keyed property store
+ // operations and contains mixed logic for set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetKeyedIC as a subclass of KeyedStoreIC, which
+ // can be called here.
CallBuiltin<Builtin::kKeyedStoreICBaseline>(
RegisterOperand(0), // object
RegisterOperand(1), // key
@@ -945,8 +955,8 @@ void BaselineCompiler::VisitStaKeyedProperty() {
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitStaKeyedPropertyAsDefine() {
- CallBuiltin<Builtin::kKeyedDefineOwnICBaseline>(
+void BaselineCompiler::VisitDefineKeyedOwnProperty() {
+ CallBuiltin<Builtin::kDefineKeyedOwnICBaseline>(
RegisterOperand(0), // object
RegisterOperand(1), // key
kInterpreterAccumulatorRegister, // value
@@ -961,11 +971,12 @@ void BaselineCompiler::VisitStaInArrayLiteral() {
IndexAsTagged(2)); // slot
}
-void BaselineCompiler::VisitStaDataPropertyInLiteral() {
- // Here we should save the accumulator, since StaDataPropertyInLiteral doesn't
- // write the accumulator, but Runtime::kDefineDataPropertyInLiteral returns
- // the value that we got from the accumulator so this still works.
- CallRuntime(Runtime::kDefineDataPropertyInLiteral,
+void BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral() {
+ // Here we should save the accumulator, since
+ // DefineKeyedOwnPropertyInLiteral doesn't write the accumulator, but
+ // Runtime::kDefineKeyedOwnPropertyInLiteral returns the value that we got
+ // from the accumulator so this still works.
+ CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral,
RegisterOperand(0), // object
RegisterOperand(1), // name
kInterpreterAccumulatorRegister, // value
@@ -1211,14 +1222,12 @@ void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
args);
}
@@ -1271,7 +1280,6 @@ void BaselineCompiler::VisitCallWithSpread() {
args = args.Truncate(args.register_count() - 1);
uint32_t arg_count = args.register_count();
- if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
CallBuiltin<Builtin::kCallWithSpread_Baseline>(
RegisterOperand(0), // kFunction
@@ -1329,6 +1337,19 @@ void BaselineCompiler::VisitIntrinsicCopyDataProperties(
CallBuiltin<Builtin::kCopyDataProperties>(args);
}
+void BaselineCompiler::
+ VisitIntrinsicCopyDataPropertiesWithExcludedPropertiesOnStack(
+ interpreter::RegisterList args) {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register rscratch = scratch_scope.AcquireScratch();
+ // Use an offset from args[0] instead of args[1] to pass a valid "end of"
+ // pointer in the case where args.register_count() == 1.
+ basm_.RegisterFrameAddress(interpreter::Register(args[0].index() + 1),
+ rscratch);
+ CallBuiltin<Builtin::kCopyDataPropertiesWithExcludedPropertiesOnStack>(
+ args[0], args.register_count() - 1, rscratch);
+}
+
void BaselineCompiler::VisitIntrinsicCreateIterResultObject(
interpreter::RegisterList args) {
CallBuiltin<Builtin::kCreateIterResultObject>(args);
@@ -2121,15 +2142,8 @@ void BaselineCompiler::VisitReturn() {
iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
- if (kJSArgcIncludesReceiver) {
- TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
- -profiling_weight);
-
- } else {
- int parameter_count_without_receiver = parameter_count - 1;
- TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
- parameter_count_without_receiver, -profiling_weight);
- }
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
+ -profiling_weight);
}
void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index e280bee3da..6c36c7e8ba 100644
--- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -84,6 +84,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(ebp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ lea(rscratch, MemOperand(ebp, interpreter_register.ToOperand() *
+ kSystemPointerSize));
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -428,7 +433,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
@@ -457,10 +462,9 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(
- params_size, scratch, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ masm()->DropArguments(params_size, scratch,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
index 185bb349c2..25b279ff8e 100644
--- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ wrapped_scope_.Include({t0, t1, t2, t3});
}
assembler_->scratch_register_scope_ = this;
}
@@ -78,6 +78,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ Add_d(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -449,7 +454,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->Push(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
@@ -475,10 +480,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
-
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 9cc0e749bd..c33ff88024 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
- wrapped_scope_.Include(t4.bit() | t5.bit() | t6.bit() | t7.bit());
+ wrapped_scope_.Include({t4, t5, t6, t7});
}
assembler_->scratch_register_scope_ = this;
}
@@ -80,6 +80,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ Addu(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -461,7 +466,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->Push(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
@@ -488,9 +493,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index 3f4dd6d455..8aa9122f51 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ wrapped_scope_.Include({t0, t1, t2, t3});
}
assembler_->scratch_register_scope_ = this;
}
@@ -78,6 +78,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ Daddu(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -459,7 +464,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->Push(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
@@ -486,9 +491,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
index 110f7b7465..2058cd7ff3 100644
--- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
+++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
@@ -13,56 +13,99 @@ namespace v8 {
namespace internal {
namespace baseline {
+namespace detail {
+
+static constexpr Register kScratchRegisters[] = {r9, r10, ip};
+static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.rb() == target || op.rx() == target;
+}
+#endif
+} // namespace detail
+
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
- wrapped_scope_(assembler->masm()) {
- if (!assembler_->scratch_register_scope_) {
- // If we haven't opened a scratch scope yet, for the first one add a
- // couple of extra registers.
- DCHECK(wrapped_scope_.CanAcquire());
- wrapped_scope_.Include(r8, r9);
- wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
- }
+ registers_used_(prev_scope_ == nullptr ? 0
+ : prev_scope_->registers_used_) {
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
- Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+ Register AcquireScratch() {
+ DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
+ return detail::kScratchRegisters[registers_used_++];
+ }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
- UseScratchRegisterScope wrapped_scope_;
+ int registers_used_;
};
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
- kEqual = static_cast<uint32_t>(eq),
- kNotEqual = static_cast<uint32_t>(ne),
+ kEqual,
+ kNotEqual,
- kLessThan = static_cast<uint32_t>(lt),
- kGreaterThan = static_cast<uint32_t>(gt),
- kLessThanEqual = static_cast<uint32_t>(le),
- kGreaterThanEqual = static_cast<uint32_t>(ge),
+ kLessThan,
+ kGreaterThan,
+ kLessThanEqual,
+ kGreaterThanEqual,
- kUnsignedLessThan = static_cast<uint32_t>(lo),
- kUnsignedGreaterThan = static_cast<uint32_t>(hi),
- kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
- kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+ kUnsignedLessThan,
+ kUnsignedGreaterThan,
+ kUnsignedLessThanEqual,
+ kUnsignedGreaterThanEqual,
- kOverflow = static_cast<uint32_t>(vs),
- kNoOverflow = static_cast<uint32_t>(vc),
+ kOverflow,
+ kNoOverflow,
- kZero = static_cast<uint32_t>(eq),
- kNotZero = static_cast<uint32_t>(ne),
+ kZero,
+ kNotZero
};
inline internal::Condition AsMasmCondition(Condition cond) {
- UNIMPLEMENTED();
- return static_cast<internal::Condition>(cond);
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ switch (cond) {
+ case Condition::kEqual:
+ return eq;
+ case Condition::kNotEqual:
+ return ne;
+ case Condition::kLessThan:
+ return lt;
+ case Condition::kGreaterThan:
+ return gt;
+ case Condition::kLessThanEqual:
+ return le;
+ case Condition::kGreaterThanEqual:
+ return ge;
+
+ case Condition::kUnsignedLessThan:
+ return lt;
+ case Condition::kUnsignedGreaterThan:
+ return gt;
+ case Condition::kUnsignedLessThanEqual:
+ return le;
+ case Condition::kUnsignedGreaterThanEqual:
+ return ge;
+
+ case Condition::kOverflow:
+ return overflow;
+ case Condition::kNoOverflow:
+ return nooverflow;
+
+ case Condition::kZero:
+ return eq;
+ case Condition::kNotZero:
+ return ne;
+ default:
+ UNREACHABLE();
+ }
}
namespace detail {
@@ -83,6 +126,10 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
UNIMPLEMENTED();
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ UNIMPLEMENTED();
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
UNIMPLEMENTED();
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 96420093d1..7aef7d138e 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -76,6 +76,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ Add64(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -478,7 +483,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->Push(kJSFunctionRegister);
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
@@ -505,9 +510,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver);
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
index ce7afbf4ea..705e7bbd85 100644
--- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
+++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -76,7 +76,6 @@ inline internal::Condition AsMasmCondition(Condition cond) {
return eq;
case Condition::kNotEqual:
return ne;
-
case Condition::kLessThan:
return lt;
case Condition::kGreaterThan:
@@ -134,10 +133,10 @@ inline bool IsSignedCondition(Condition cond) {
}
}
-#define __ assm->masm()->
+#define __ assm->
// s390x helper
-void JumpIfHelper(BaselineAssembler* assm, Condition cc, Register lhs,
- Register rhs, Label* target) {
+static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
+ Register rhs, Label* target) {
if (IsSignedCondition(cc)) {
__ CmpS64(lhs, rhs);
} else {
@@ -154,6 +153,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ AddS64(rscratch, fp,
+ interpreter_register.ToOperand() * kSystemPointerSize);
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -256,32 +260,32 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ LoadU64(tmp, operand);
- JumpIfHelper(this, cc, value, tmp, target);
+ JumpIfHelper(masm_, cc, value, tmp, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
__ AssertSmi(value);
__ LoadSmiLiteral(r0, smi);
- JumpIfHelper(this, cc, value, r0, target);
+ JumpIfHelper(masm_, cc, value, r0, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- JumpIfHelper(this, cc, lhs, rhs, target);
+ JumpIfHelper(masm_, cc, lhs, rhs, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
__ LoadU64(r0, operand);
- JumpIfHelper(this, cc, value, r0, target);
+ JumpIfHelper(masm_, cc, value, r0, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
__ LoadU64(r0, operand);
- JumpIfHelper(this, cc, r0, value, target);
+ JumpIfHelper(masm_, cc, r0, value, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -289,28 +293,28 @@ void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
- UNIMPLEMENTED();
+ Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
- UNIMPLEMENTED();
+ __ mov(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
- UNIMPLEMENTED();
+ __ StoreU64(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
- UNIMPLEMENTED();
+ __ Move(output, reference);
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
- UNIMPLEMENTED();
+ __ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
- UNIMPLEMENTED();
+ __ mov(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
- UNIMPLEMENTED();
+ __ mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
- UNIMPLEMENTED();
+ __ mov(output, source);
}
namespace detail {
@@ -319,7 +323,8 @@ template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Arg arg) {
- UNIMPLEMENTED();
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
@@ -424,63 +429,188 @@ void BaselineAssembler::Pop(T... registers) {
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ __ LoadU8(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ LoadSmiLiteral(tmp, value);
+ __ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ Register scratch = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(target, value, scratch));
+ __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
+ __ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
- UNIMPLEMENTED();
+ __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
int32_t weight, Label* skip_interrupt_label) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ LoadU32(
+ interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ AddS32(interrupt_budget, Operand(weight));
+ __ StoreU32(
+ interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
+ if (skip_interrupt_label) {
+ // Use compare flags set by add
+ DCHECK_LT(weight, 0);
+ __ b(ge, skip_interrupt_label);
+ }
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
Register weight, Label* skip_interrupt_label) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ LoadU32(
+ interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ AddS32(interrupt_budget, interrupt_budget, weight);
+ __ StoreU32(
+ interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (rhs.value() == 0) return;
+ __ LoadSmiLiteral(r0, rhs);
+ if (SmiValuesAre31Bits()) {
+ __ AddS32(lhs, lhs, r0);
+ } else {
+ __ AddS64(lhs, lhs, r0);
+ }
}
-void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { UNIMPLEMENTED(); }
-
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ Label fallthrough, jump_table;
+ if (case_value_base != 0) {
+ __ AddS64(reg, Operand(-case_value_base));
+ }
+
+ // Mostly copied from code-generator-arm.cc
+ ScratchRegisterScope scope(this);
+ JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
+ &fallthrough);
+ // Ensure to emit the constant pool first if necessary.
+ int entry_size_log2 = 3;
+ __ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
+ __ larl(r1, &jump_table);
+ __ lay(reg, MemOperand(reg, r1));
+ __ b(reg);
+ __ b(&fallthrough);
+ __ bind(&jump_table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ b(labels[i], Label::kFar);
+ __ nop();
+ }
+ __ bind(&fallthrough);
}
#undef __
#define __ basm.
-void BaselineAssembler::EmitReturn(MacroAssembler* masm) { UNIMPLEMENTED(); }
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ ASM_CODE_COMMENT(masm);
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
+
+ Label skip_interrupt_label;
+ __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
+
+ __ Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+
+ __ Bind(&skip_interrupt_label);
+ }
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size,
+ actual_params_size, &corrected_args_count);
+ __ masm()->mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
+ __ masm()->Ret();
+}
#undef __
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
Register reg) {
- UNIMPLEMENTED();
+ assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
}
} // namespace baseline
diff --git a/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h
index c481c54940..0d42949fca 100644
--- a/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h
+++ b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h
@@ -14,11 +14,82 @@ namespace baseline {
#define __ basm_.
-void BaselineCompiler::Prologue() { UNIMPLEMENTED(); }
+void BaselineCompiler::Prologue() {
+ // Enter the frame here, since CallBuiltin will override lr.
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
+ CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
-void BaselineCompiler::PrologueFillFrame() { UNIMPLEMENTED(); }
+ PrologueFillFrame();
+}
-void BaselineCompiler::VerifyFrameSize() { UNIMPLEMENTED(); }
+void BaselineCompiler::PrologueFillFrame() {
+ ASM_CODE_COMMENT(&masm_);
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ if (FLAG_debug_code) {
+ __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
+ RootIndex::kUndefinedValue);
+ __ masm()->Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ for (int i = 0; i < new_target_index; i++) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+
+ } else {
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ __ masm()->SubS64(scratch, Operand(1));
+ __ masm()->b(gt, &loop);
+ }
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ __ masm()->AddS64(scratch, sp,
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->CmpU64(scratch, fp);
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
+}
} // namespace baseline
} // namespace internal
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index aa9564dcea..594b794672 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -86,6 +86,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
}
+void BaselineAssembler::RegisterFrameAddress(
+ interpreter::Register interpreter_register, Register rscratch) {
+ return __ leaq(rscratch, MemOperand(rbp, interpreter_register.ToOperand() *
+ kSystemPointerSize));
+}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -440,7 +445,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ LoadContext(kContextRegister);
__ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
@@ -468,10 +473,9 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(
- params_size, scratch, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ masm()->DropArguments(params_size, scratch,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 300229c97d..f366952762 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -301,6 +301,10 @@ class Processor {
// Z := the contents of {accumulator}.
// Assume that this leaves {accumulator} in unusable state.
Status FromString(RWDigits Z, FromStringAccumulator* accumulator);
+
+ protected:
+ // Use {Destroy} or {Destroyer} instead of the destructor directly.
+ ~Processor() = default;
};
inline int AddResultLength(int x_length, int y_length) {
@@ -418,13 +422,13 @@ class FromStringAccumulator {
: max_digits_(std::max(max_digits, kStackParts)) {}
// Step 2: Call this method to read all characters.
- // {Char} should be a character type, such as uint8_t or uint16_t.
- // {end} should be one past the last character (i.e. {start == end} would
- // indicate an empty string).
- // Returns the current position when an invalid character is encountered.
- template <class Char>
- ALWAYS_INLINE const Char* Parse(const Char* start, const Char* end,
- digit_t radix);
+ // {CharIt} should be a forward iterator and
+ // std::iterator_traits<CharIt>::value_type shall be a character type, such as
+ // uint8_t or uint16_t. {end} should be one past the last character (i.e.
+ // {start == end} would indicate an empty string). Returns the current
+ // position when an invalid character is encountered.
+ template <class CharIt>
+ ALWAYS_INLINE CharIt Parse(CharIt start, CharIt end, digit_t radix);
// Step 3: Check if a result is available, and determine its required
// allocation size (guaranteed to be <= max_digits passed to the constructor).
@@ -434,14 +438,13 @@ class FromStringAccumulator {
}
// Step 4: Use BigIntProcessor::FromString() to retrieve the result into an
- // {RWDigits} struct allocated for the size returned by step 2.
+ // {RWDigits} struct allocated for the size returned by step 3.
private:
friend class ProcessorImpl;
- template <class Char>
- ALWAYS_INLINE const Char* ParsePowerTwo(const Char* start, const Char* end,
- digit_t radix);
+ template <class CharIt>
+ ALWAYS_INLINE CharIt ParsePowerTwo(CharIt start, CharIt end, digit_t radix);
ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, bool is_last);
ALWAYS_INLINE bool AddPart(digit_t part);
@@ -491,10 +494,9 @@ static constexpr uint8_t kCharValue[] = {
// A space- and time-efficient way to map {2,4,8,16,32} to {1,2,3,4,5}.
static constexpr uint8_t kCharBits[] = {1, 2, 3, 0, 4, 0, 0, 0, 5};
-template <class Char>
-const Char* FromStringAccumulator::ParsePowerTwo(const Char* current,
- const Char* end,
- digit_t radix) {
+template <class CharIt>
+CharIt FromStringAccumulator::ParsePowerTwo(CharIt current, CharIt end,
+ digit_t radix) {
radix_ = static_cast<uint8_t>(radix);
const int char_bits = kCharBits[radix >> 2];
int bits_left;
@@ -528,11 +530,10 @@ const Char* FromStringAccumulator::ParsePowerTwo(const Char* current,
return current;
}
-template <class Char>
-const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
- digit_t radix) {
+template <class CharIt>
+CharIt FromStringAccumulator::Parse(CharIt start, CharIt end, digit_t radix) {
BIGINT_H_DCHECK(2 <= radix && radix <= 36);
- const Char* current = start;
+ CharIt current = start;
#if !HAVE_BUILTIN_MUL_OVERFLOW
const digit_t kMaxMultiplier = (~digit_t{0}) / radix;
#endif
diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc
index 0447ce0c22..3f1a277c3d 100644
--- a/deps/v8/src/bigint/tostring.cc
+++ b/deps/v8/src/bigint/tostring.cc
@@ -127,6 +127,7 @@ class ToStringFormatter {
out_end_(out + chars_available),
out_(out_end_),
processor_(processor) {
+ digits_.Normalize();
DCHECK(chars_available >= ToStringResultLength(digits_, radix_, sign_));
}
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 0d994d2d03..5aeac1f179 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -516,6 +516,7 @@ Handle<JSObject> Accessors::FunctionGetArguments(JavaScriptFrame* frame,
void Accessors::FunctionArgumentsGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ isolate->CountUsage(v8::Isolate::kFunctionPrototypeArguments);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -690,6 +691,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
void Accessors::FunctionCallerGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ isolate->CountUsage(v8::Isolate::kFunctionPrototypeCaller);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -767,75 +769,28 @@ Handle<AccessorInfo> Accessors::MakeBoundFunctionNameInfo(Isolate* isolate) {
void Accessors::ErrorStackGetter(
v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> holder =
+ Handle<Object> formatted_stack;
+ Handle<JSObject> error_object =
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
-
- // Retrieve the stack trace. It can either be structured data in the form of
- // a FixedArray of StackFrameInfo objects, an already formatted stack trace
- // (string) or whatever the "prepareStackTrace" callback produced.
-
- Handle<Object> stack_trace;
- Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
- MaybeHandle<Object> maybe_stack_trace =
- JSObject::GetProperty(isolate, holder, stack_trace_symbol);
- if (!maybe_stack_trace.ToHandle(&stack_trace) ||
- stack_trace->IsUndefined(isolate)) {
- Handle<Object> result = isolate->factory()->undefined_value();
- info.GetReturnValue().Set(Utils::ToLocal(result));
- return;
- }
-
- // Only format the stack-trace the first time around. The check for a
- // FixedArray is sufficient as the user callback can not create plain
- // FixedArrays and the result is a String in case we format the stack
- // trace ourselves.
-
- if (!stack_trace->IsFixedArray()) {
- info.GetReturnValue().Set(Utils::ToLocal(stack_trace));
- return;
- }
-
- Handle<Object> formatted_stack_trace;
- if (!ErrorUtils::FormatStackTrace(isolate, holder, stack_trace)
- .ToHandle(&formatted_stack_trace)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
- // Replace the structured stack-trace with the formatting result.
- MaybeHandle<Object> result = Object::SetProperty(
- isolate, holder, isolate->factory()->stack_trace_symbol(),
- formatted_stack_trace, StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError));
- if (result.is_null()) {
+ if (!ErrorUtils::GetFormattedStack(isolate, error_object)
+ .ToHandle(&formatted_stack)) {
isolate->OptionalRescheduleException(false);
return;
}
-
- v8::Local<v8::Value> value = Utils::ToLocal(formatted_stack_trace);
- info.GetReturnValue().Set(value);
+ info.GetReturnValue().Set(Utils::ToLocal(formatted_stack));
}
void Accessors::ErrorStackSetter(
- v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ v8::Local<v8::Name> name, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> obj = Handle<JSObject>::cast(
- Utils::OpenHandle(*v8::Local<v8::Value>(info.This())));
- Handle<Object> value = Handle<Object>::cast(Utils::OpenHandle(*val));
-
- // Store the value in the internal symbol to avoid reconfiguration to
- // a data property.
- MaybeHandle<Object> result = Object::SetProperty(
- isolate, obj, isolate->factory()->stack_trace_symbol(), value,
- StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError));
- if (result.is_null()) {
- isolate->OptionalRescheduleException(false);
- return;
- }
+ Handle<JSObject> error_object =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+ ErrorUtils::SetFormattedStack(isolate, error_object,
+ Utils::OpenHandle(*value));
}
Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) {
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 00f1009610..fe2536fa0a 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -89,11 +89,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
Register counter = scratch;
Register value = temps.Acquire();
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mov(counter, argc);
- }
+ __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
__ b(&entry);
__ bind(&loop);
__ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
@@ -162,9 +158,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -314,9 +308,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r1, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -441,9 +433,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldrh(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
- }
+ __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -565,7 +555,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// r1: microtask_queue
// Preserve all but r0 and pass them to entry_trampoline.
Label invoke, handler_entry, exit;
- const RegList kCalleeSavedWithoutFp = kCalleeSaved & ~fp.bit();
+ const RegList kCalleeSavedWithoutFp = kCalleeSaved - fp;
// Update |pushed_stack_space| when we manipulate the stack.
int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
@@ -599,7 +589,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
masm->isolate()));
__ ldr(r5, MemOperand(r4));
- __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r5, r6, r7, fp, lr});
pushed_stack_space += 5 * kPointerSize /* r5, r6, r7, fp, lr */;
// Clear c_entry_fp, now we've pushed its previous value to the stack.
@@ -700,7 +690,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Operand(-EntryFrameConstants::kCallerFPOffset -
kSystemPointerSize /* already popped one */));
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, {fp, lr});
// Restore callee-saved vfp registers.
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
@@ -767,11 +757,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments + receiver.
// Clobbers r5.
Label enough_stack_space, stack_overflow;
- if (kJSArgcIncludesReceiver) {
- __ mov(r6, r0);
- } else {
- __ add(r6, r0, Operand(1)); // Add one for receiver.
- }
+ __ mov(r6, r0);
__ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -870,9 +856,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
- if (!kJSArgcIncludesReceiver) {
- __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -953,22 +936,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1077,9 +1054,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ tst(
- optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ tst(optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedMarker));
__ b(eq, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
@@ -1226,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
temps.Exclude(optimization_state);
// Drop the frame created by the baseline call.
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, {fp, lr});
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
@@ -1520,12 +1496,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r0, r0, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ sub(r3, r0, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ add(r3, r0, Operand(1));
} else {
__ mov(r3, r0);
}
@@ -1584,11 +1556,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ sub(r0, r0, Operand(1));
}
- Register argc_without_receiver = r0;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = r6;
- __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = r6;
+ __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
// Push the arguments. r4 and r5 will be modified.
GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);
@@ -1927,10 +1896,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ DropArgumentsAndPushNewReceiver(
- r0, r5, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2006,10 +1973,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ DropArgumentsAndPushNewReceiver(
- r0, r5, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2051,10 +2016,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ DropArgumentsAndPushNewReceiver(
- r0, r4, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2103,11 +2066,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Label loop, done;
__ bind(&loop);
__ cmp(old_sp, end);
- if (kJSArgcIncludesReceiver) {
- __ b(ge, &done);
- } else {
- __ b(gt, &done);
- }
+ __ b(ge, &done);
__ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
__ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
__ b(&loop);
@@ -2220,9 +2179,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
- }
+ __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
@@ -2283,13 +2240,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(r1);
+ __ AssertCallableFunction(r1);
- Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2364,14 +2317,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldrh(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(r1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2512,6 +2457,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ cmp(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ cmp(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
@@ -2677,9 +2628,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0];
DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1];
@@ -2688,10 +2639,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
fp_param_reg.code() <= highest_fp_reg.code());
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
arraysize(wasm::kFpParamRegisters));
- CHECK_EQ(NumRegs(gp_regs),
+ CHECK_EQ(gp_regs.Count(),
WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs);
CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs);
@@ -2724,20 +2675,19 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
- constexpr uint32_t last =
- 31 - base::bits::CountLeadingZeros32(
- WasmDebugBreakFrameConstants::kPushedFpRegs);
- constexpr uint32_t first = base::bits::CountTrailingZeros32(
- WasmDebugBreakFrameConstants::kPushedFpRegs);
+ constexpr DwVfpRegister last =
+ WasmDebugBreakFrameConstants::kPushedFpRegs.last();
+ constexpr DwVfpRegister first =
+ WasmDebugBreakFrameConstants::kPushedFpRegs.first();
static_assert(
- base::bits::CountPopulation(
- WasmDebugBreakFrameConstants::kPushedFpRegs) == last - first + 1,
+ WasmDebugBreakFrameConstants::kPushedFpRegs.Count() ==
+ last.code() - first.code() + 1,
"All registers in the range from first to last have to be set");
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
- constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(first);
- constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(last);
+ constexpr DwVfpRegister lowest_fp_reg = first;
+ constexpr DwVfpRegister highest_fp_reg = last;
// Store gp parameter registers.
__ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
@@ -2766,6 +2716,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3379,12 +3339,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
STATIC_ASSERT(kNumberOfRegisters == 16);
// Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved | RegList{ip};
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(v8:1588): Note that using pc with stm is deprecated, so we should
// perhaps handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+ __ stm(db_w, sp, restored_regs | RegList{sp, lr, pc});
{
UseScratchRegisterScope temps(masm);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index b75ffcc065..e6321c614c 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -112,12 +112,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(x11, argc);
__ Push(x11, padreg);
- // Add a slot for the receiver (if not already included), and round up to
- // maintain alignment.
+ // Round up to maintain alignment.
Register slot_count = x2;
Register slot_count_without_rounding = x12;
- constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
- __ Add(slot_count_without_rounding, argc, additional_slots);
+ __ Add(slot_count_without_rounding, argc, 1);
__ Bic(slot_count, slot_count_without_rounding, 1);
__ Claim(slot_count);
@@ -130,8 +128,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
- __ Str(padreg,
- MemOperand(x2, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
+ __ Str(padreg, MemOperand(x2));
__ Bind(&already_aligned);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
@@ -151,11 +148,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
- if (kJSArgcIncludesReceiver) {
- __ Sub(count, argc, kJSArgcReceiverSlots);
- } else {
- __ Mov(count, argc);
- }
+ __ Sub(count, argc, kJSArgcReceiverSlots);
__ CopyDoubleWords(dst, src, count);
}
@@ -197,9 +190,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@@ -322,11 +313,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Round the number of arguments down to the next even number, and claim
// slots for the arguments. If the number of arguments was odd, the last
// argument will overwrite one of the receivers pushed above.
- Register argc_without_receiver = x12;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = x11;
- __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
- }
+ Register argc_without_receiver = x11;
+ __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
__ Bic(x10, x12, 1);
// Check if we have enough stack space to push all arguments.
@@ -390,9 +378,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@@ -432,19 +418,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
-static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
- Register code, Register scratch) {
+static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
// Verify that the code kind is baseline code via the CodeKind.
- __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(scratch);
+ __ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
+ __ DecodeField<CodeT::KindField>(scratch);
__ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
-static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
- Register scratch) {
+static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
DCHECK(!AreAliased(code, scratch));
- return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+ return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@@ -459,12 +445,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
if (FLAG_debug_code) {
Label not_baseline;
__ B(ne, &not_baseline);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
- AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
- } else {
- AssertCodeIsBaseline(masm, sfi_data, scratch1);
- }
+ AssertCodeTIsBaseline(masm, sfi_data, scratch1);
__ B(eq, is_baseline);
__ Bind(&not_baseline);
} else {
@@ -531,9 +512,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ Sub(x10, x10, kJSArgcReceiverSlots);
- }
+ __ Sub(x10, x10, kJSArgcReceiverSlots);
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
__ Bic(x11, x11, 1);
@@ -809,7 +788,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
//
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
- Handle<Code> trampoline_code =
+ Handle<CodeT> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -904,10 +883,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
masm->isolate()));
__ Ldr(cp, MemOperand(scratch));
- // Claim enough space for the arguments, the function and the receiver (if
- // it is not included in argc already), including an optional slot of
- // padding.
- constexpr int additional_slots = kJSArgcIncludesReceiver ? 2 : 3;
+ // Claim enough space for the arguments and the function, including an
+ // optional slot of padding.
+ constexpr int additional_slots = 2;
__ Add(slots_to_claim, argc, additional_slots);
__ Bic(slots_to_claim, slots_to_claim, 1);
@@ -931,9 +909,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
- __ Str(
- function,
- MemOperand(scratch, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
+ __ Str(function, MemOperand(scratch));
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -941,12 +917,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, done;
// Skip the argument set up if we have no arguments.
- if (kJSArgcIncludesReceiver) {
- __ Cmp(argc, JSParameterCount(0));
- __ B(eq, &done);
- } else {
- __ Cbz(argc, &done);
- }
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(eq, &done);
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
@@ -960,11 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
- if (kJSArgcIncludesReceiver) {
- __ B(lt, &loop);
- } else {
- __ B(le, &loop);
- }
+ __ B(lt, &loop);
__ Bind(&done);
@@ -994,9 +962,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// x28 : pointer cage base register (kPtrComprCageBaseRegister).
// x29 : frame pointer (fp).
- Handle<Code> builtin = is_construct
- ? BUILTIN_CODE(masm->isolate(), Construct)
- : masm->isolate()->builtins()->Call();
+ Handle<CodeT> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the JS internal frame and remove the parameters (except function),
@@ -1054,9 +1022,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
- if (!kJSArgcIncludesReceiver) {
- __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1157,22 +1122,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ Unreachable();
}
@@ -1277,10 +1236,9 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ TestAndBranchIfAllClear(
- optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
- &maybe_has_optimized_code);
+ __ TestAndBranchIfAllClear(optimization_state,
+ FeedbackVector::kHasCompileOptimizedMarker,
+ &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
@@ -1719,10 +1677,8 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Sub(num_args, num_args, 1);
}
- // Add receiver (if not already included in argc) and round up to an even
- // number of slots.
- constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
- __ Add(slots_to_claim, num_args, additional_slots);
+ // Round up to an even number of slots.
+ __ Add(slots_to_claim, num_args, 1);
__ Bic(slots_to_claim, slots_to_claim, 1);
// Add a stack check before pushing arguments.
@@ -1746,10 +1702,8 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
const bool skip_receiver =
receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (skip_receiver) {
__ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ Add(slots_to_copy, num_args, 1);
} else {
__ Mov(slots_to_copy, num_args);
}
@@ -1835,8 +1789,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -2144,6 +2098,10 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
__ LeaveFrame(StackFrame::STUB);
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
@@ -2210,9 +2168,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@@ -2259,12 +2215,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label non_zero;
Register scratch = x10;
- if (kJSArgcIncludesReceiver) {
- __ Cmp(argc, JSParameterCount(0));
- __ B(gt, &non_zero);
- } else {
- __ Cbnz(argc, &non_zero);
- }
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(gt, &non_zero);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Overwrite receiver with undefined, which will be the new receiver.
// We do not need to overwrite the padding slot above it with anything.
@@ -2283,11 +2235,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register copy_to = x11;
Register count = x12;
UseScratchRegisterScope temps(masm);
- Register argc_without_receiver = argc;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = temps.AcquireX();
- __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
- }
+ Register argc_without_receiver = temps.AcquireX();
+ __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
+
// CopyDoubleWords changes the count argument.
__ Mov(count, argc_without_receiver);
__ Tbz(argc_without_receiver, 0, &even);
@@ -2355,9 +2305,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@@ -2415,9 +2363,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
}
- __ DropArguments(argc, kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
@@ -2453,11 +2399,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register slots_to_copy = x10;
Register slots_to_claim = x12;
- if (kJSArgcIncludesReceiver) {
- __ Mov(slots_to_copy, argc);
- } else {
- __ Add(slots_to_copy, argc, 1); // Copy with receiver.
- }
+ __ Mov(slots_to_copy, argc);
__ Mov(slots_to_claim, len);
__ Tbz(slots_to_claim, 0, &even);
@@ -2469,9 +2411,6 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register scratch = x11;
__ Add(slots_to_claim, len, 1);
__ And(scratch, argc, 1);
- if (!kJSArgcIncludesReceiver) {
- __ Eor(scratch, scratch, 1);
- }
__ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2495,7 +2434,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+ Handle<CodeT> code) {
// ----------- S t a t e -------------
// -- x1 : target
// -- x0 : number of parameters on the stack
@@ -2549,12 +2488,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
Register dst = x16;
- if (kJSArgcIncludesReceiver) {
- __ SlotAddress(dst, argc);
- } else {
- __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
- __ SlotAddress(dst, dst);
- }
+ __ SlotAddress(dst, argc);
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
@@ -2575,7 +2509,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<Code> code) {
+ Handle<CodeT> code) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
@@ -2608,9 +2542,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register len = x6;
Label stack_done, stack_overflow;
__ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ Subs(len, len, kJSArgcReceiverSlots);
- }
+ __ Subs(len, len, kJSArgcReceiverSlots);
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
@@ -2628,12 +2560,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ lsl(start_index, start_index, kSystemPointerSizeLog2);
__ Add(args_fp, args_fp, start_index);
// Point to the position to copy to.
- if (kJSArgcIncludesReceiver) {
- __ SlotAddress(dst, argc);
- } else {
- __ Add(x10, argc, 1);
- __ SlotAddress(dst, x10);
- }
+ __ SlotAddress(dst, argc);
// Update total number of arguments.
__ Add(argc, argc, len);
__ CopyDoubleWords(dst, args_fp, len);
@@ -2655,14 +2582,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(x1);
+ __ AssertCallableFunction(x1);
- Label class_constructor;
__ LoadTaggedPointerField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
- &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2738,15 +2661,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ldrh(x2,
FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ Bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ PushArgument(x1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- __ Unreachable();
- }
}
namespace {
@@ -2802,9 +2716,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
Register receiver = x14;
- if (kJSArgcIncludesReceiver) {
- __ Sub(argc, argc, kJSArgcReceiverSlots);
- }
+ __ Sub(argc, argc, kJSArgcReceiverSlots);
__ Add(total_argc, argc, bound_argc);
__ Peek(receiver, 0);
@@ -2873,11 +2785,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbnz(counter, &loop);
}
// Update argc.
- if (kJSArgcIncludesReceiver) {
- __ Add(argc, total_argc, kJSArgcReceiverSlots);
- } else {
- __ Mov(argc, total_argc);
- }
+ __ Add(argc, total_argc, kJSArgcReceiverSlots);
}
__ Bind(&no_bound_arguments);
}
@@ -2944,6 +2852,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Cmp(instance_type, JS_PROXY_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
@@ -3123,27 +3037,27 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
// Also push x1, because we must push multiples of 16 bytes (see
// {TurboAssembler::PushCPURegList}.
- CHECK_EQ(1, NumRegs(gp_regs) % 2);
- gp_regs |= x1.bit();
- CHECK_EQ(0, NumRegs(gp_regs) % 2);
+ CHECK_EQ(1, gp_regs.Count() % 2);
+ gp_regs.set(x1);
+ CHECK_EQ(0, gp_regs.Count() % 2);
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters) + 1);
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ PushXRegList(gp_regs);
__ PushQRegList(fp_regs);
@@ -3202,6 +3116,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3944,8 +3868,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Save all allocatable double registers.
CPURegList saved_double_registers(
- CPURegister::kVRegister, kDRegSizeInBits,
- RegisterConfiguration::Default()->allocatable_double_codes_mask());
+ kDRegSizeInBits,
+ DoubleRegList::FromBits(
+ RegisterConfiguration::Default()->allocatable_double_codes_mask()));
DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
@@ -4196,12 +4121,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
+ if (FLAG_debug_code) {
+ AssertCodeTIsBaseline(masm, code_obj, x3);
+ }
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
- if (FLAG_debug_code) {
- AssertCodeIsBaseline(masm, code_obj, x3);
- }
// Load the feedback vector.
Register feedback_vector = x2;
@@ -4340,7 +4265,7 @@ void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
+ MacroAssembler* masm, Handle<CodeT> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -4349,7 +4274,9 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
RegList registers = descriptor.allocatable_registers();
// FLAG_debug_code is enabled CSA checks will call C function and so we need
// to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kCallerSaved.list();
+ if (FLAG_debug_code) {
+ registers |= RegList::FromBits(static_cast<uint32_t>(kCallerSaved.bits()));
+ }
__ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
@@ -4396,14 +4323,14 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
+ Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ Bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
+ Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index a4bf6f002d..c88a0c2800 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -55,7 +55,8 @@ LoadJoinElement<array::FastDoubleElements>(
builtin LoadJoinTypedElement<T : type extends ElementsKind>(
context: Context, receiver: JSReceiver, k: uintptr): JSAny {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- dcheck(!IsDetachedBuffer(typedArray.buffer));
+ dcheck(!typed_array::IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean(
+ typedArray));
return typed_array::LoadFixedTypedArrayElementAsTagged(
typedArray.data_ptr, k, typed_array::KindForArrayType<T>());
}
@@ -103,7 +104,19 @@ CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
_loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
_initialLen: Number): bool {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- return IsDetachedBuffer(typedArray.buffer);
+ // When this is called from toLocaleString(), the underlying buffer might get
+ // detached / resized (in the case of RAB / GSAB) during iterating the
+ // elements. When this is called from join(), it can happen only before the
+ // first element (during parameter conversion). The code below doesn't
+ // differentiate between these two cases, but does the checks in both cases.
+ if (IsDetachedBuffer(typedArray.buffer)) {
+ return true;
+ }
+ if (IsVariableLengthJSArrayBufferView(typedArray)) {
+ // TODO(v8:11111): Add a fast(er) path here.
+ return true;
+ }
+ return false;
}
// Calculates the running total length of the resulting string. If the
@@ -387,6 +400,28 @@ transitioning ArrayJoin<JSTypedArray>(implicit context: Context)(
loadFn = LoadJoinTypedElement<typed_array::BigUint64Elements>;
} else if (kind == ElementsKind::BIGINT64_ELEMENTS) {
loadFn = LoadJoinTypedElement<typed_array::BigInt64Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_UINT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint8Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_INT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int8Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_UINT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint16Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_INT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int16Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_UINT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint32Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_INT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int32Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_FLOAT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Float32Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_FLOAT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Float64Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_UINT8_CLAMPED_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint8ClampedElements>;
+ } else if (kind == ElementsKind::RAB_GSAB_BIGUINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::BigUint64Elements>;
+ } else if (kind == ElementsKind::RAB_GSAB_BIGINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::BigInt64Elements>;
} else {
unreachable;
}
@@ -513,7 +548,7 @@ macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver):
// Builtin call was not nested (receiver is the first entry) and
// did not contain other nested arrays that expanded the stack.
if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
- StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER);
+ stack.objects[0] = TheHole;
} else
deferred {
JoinStackPop(stack, receiver);
@@ -616,12 +651,13 @@ transitioning javascript builtin TypedArrayPrototypeJoin(
// Spec: ValidateTypedArray is applied to the this value prior to evaluating
// the algorithm.
- const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ const length = typed_array::ValidateTypedArrayAndGetLength(
context, receiver, '%TypedArray%.prototype.join');
- const length = Convert<Number>(typedArray.length);
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
return CycleProtectedArrayJoin<JSTypedArray>(
- false, typedArray, length, separator, Undefined, Undefined);
+ false, typedArray, Convert<Number>(length), separator, Undefined,
+ Undefined);
}
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
@@ -632,11 +668,11 @@ transitioning javascript builtin TypedArrayPrototypeToLocaleString(
// Spec: ValidateTypedArray is applied to the this value prior to evaluating
// the algorithm.
- const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ const length = typed_array::ValidateTypedArrayAndGetLength(
context, receiver, '%TypedArray%.prototype.toLocaleString');
- const length = Convert<Number>(typedArray.length);
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
return CycleProtectedArrayJoin<JSTypedArray>(
- true, typedArray, length, ',', locales, options);
+ true, typedArray, Convert<Number>(length), ',', locales, options);
}
}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 69e9faef53..dbcc05de28 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -9,16 +9,18 @@
#include 'src/heap/factory-inl.h'
#include 'src/objects/arguments.h'
#include 'src/objects/bigint.h'
+#include 'src/objects/call-site-info.h'
#include 'src/objects/elements-kind.h'
#include 'src/objects/free-space.h'
#include 'src/objects/js-function.h'
#include 'src/objects/js-generator.h'
#include 'src/objects/js-promise.h'
#include 'src/objects/js-regexp-string-iterator.h'
+#include 'src/objects/js-shadow-realms.h'
+#include 'src/objects/js-struct.h'
#include 'src/objects/js-weak-refs.h'
#include 'src/objects/objects.h'
#include 'src/objects/source-text-module.h'
-#include 'src/objects/stack-frame-info.h'
#include 'src/objects/synthetic-module.h'
#include 'src/objects/template-objects.h'
#include 'src/torque/runtime-support.h'
@@ -26,6 +28,8 @@
type void;
type never;
+type IntegerLiteral constexpr 'IntegerLiteral';
+
type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
generates 'TNode<Object>' constexpr 'Object';
@@ -209,6 +213,8 @@ extern class HashTable extends FixedArray generates 'TNode<FixedArray>';
extern class OrderedHashMap extends HashTable;
extern class OrderedHashSet extends HashTable;
extern class OrderedNameDictionary extends HashTable;
+extern class NameToIndexHashTable extends HashTable;
+extern class RegisteredSymbolTable extends HashTable;
extern class NameDictionary extends HashTable;
extern class GlobalDictionary extends HashTable;
extern class SimpleNumberDictionary extends HashTable;
@@ -252,7 +258,8 @@ type CallableApiObject extends JSObject;
// A JSProxy with the callable bit set.
type CallableJSProxy extends JSProxy;
-type Callable = JSFunction|JSBoundFunction|CallableJSProxy|CallableApiObject;
+type Callable = JSFunction|JSBoundFunction|JSWrappedFunction|CallableJSProxy|
+ CallableApiObject;
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
@@ -306,6 +313,16 @@ extern enum ElementsKind extends int32 {
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
RAB_GSAB_UINT8_ELEMENTS,
+ RAB_GSAB_INT8_ELEMENTS,
+ RAB_GSAB_UINT16_ELEMENTS,
+ RAB_GSAB_INT16_ELEMENTS,
+ RAB_GSAB_UINT32_ELEMENTS,
+ RAB_GSAB_INT32_ELEMENTS,
+ RAB_GSAB_FLOAT32_ELEMENTS,
+ RAB_GSAB_FLOAT64_ELEMENTS,
+ RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ RAB_GSAB_BIGUINT64_ELEMENTS,
+ RAB_GSAB_BIGINT64_ELEMENTS,
// TODO(torque): Allow duplicate enum values.
// FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
// FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
@@ -412,7 +429,7 @@ extern enum MessageTemplate {
kWasmTrapRemByZero,
kWasmTrapFloatUnrepresentable,
kWasmTrapFuncSigMismatch,
- kWasmTrapDataSegmentDropped,
+ kWasmTrapDataSegmentOutOfBounds,
kWasmTrapElemSegmentDropped,
kWasmTrapTableOutOfBounds,
kWasmTrapRethrowNull,
@@ -485,9 +502,6 @@ const kWasmArrayHeaderSize:
const kHeapObjectHeaderSize:
constexpr int32 generates 'HeapObject::kHeaderSize';
-const kDictModePrototypes:
- constexpr bool generates 'V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL';
-
type TheHole extends Oddball;
type Null extends Oddball;
type Undefined extends Oddball;
@@ -561,7 +575,8 @@ extern class Filler extends HeapObject generates 'TNode<HeapObject>';
// Like JSObject, but created from API function.
@apiExposedInstanceTypeValue(0x422)
@doNotGenerateCast
-extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
+extern class JSApiObject extends JSObjectWithEmbedderSlots
+ generates 'TNode<JSObject>';
// TODO(gsathya): This only exists to make JSApiObject instance type into a
// range.
@@ -604,7 +619,7 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
if (Float64IsNaN(value)) return SmiConstant(0);
value = math::Float64Trunc(value);
// ToInteger normalizes -0 to +0.
- if (value == 0.0) return SmiConstant(0);
+ if (value == 0) return SmiConstant(0);
const result = ChangeFloat64ToTagged(value);
dcheck(IsNumberNormalized(result));
return result;
@@ -889,8 +904,12 @@ macro Float64IsNaN(n: float64): bool {
}
// The type of all tagged values that can safely be compared with TaggedEqual.
-type TaggedWithIdentity =
- JSReceiver|FixedArrayBase|Oddball|Map|WeakCell|Context|EmptyString;
+@if(V8_ENABLE_WEBASSEMBLY)
+type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map |
+ WeakCell | Context | EmptyString | WasmInternalFunction;
+@ifnot(V8_ENABLE_WEBASSEMBLY)
+type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map |
+ WeakCell | Context | EmptyString;
extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool;
extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool;
@@ -976,6 +995,38 @@ extern operator '==' macro ConstexprInt32Equal(
extern operator '!=' macro ConstexprInt32NotEqual(
constexpr int32, constexpr int32): constexpr bool;
+// IntegerLiteral overloads
+extern macro ConstexprIntegerLiteralToInt31(constexpr IntegerLiteral):
+ constexpr int31;
+extern macro ConstexprIntegerLiteralToInt32(constexpr IntegerLiteral):
+ constexpr int32;
+extern macro ConstexprIntegerLiteralToUint32(constexpr IntegerLiteral):
+ constexpr uint32;
+extern macro ConstexprIntegerLiteralToUint64(constexpr IntegerLiteral):
+ constexpr uint64;
+extern macro ConstexprIntegerLiteralToIntptr(constexpr IntegerLiteral):
+ constexpr intptr;
+extern macro ConstexprIntegerLiteralToUintptr(constexpr IntegerLiteral):
+ constexpr uintptr;
+extern macro ConstexprIntegerLiteralToInt8(constexpr IntegerLiteral):
+ constexpr int8;
+extern macro ConstexprIntegerLiteralToUint8(constexpr IntegerLiteral):
+ constexpr uint8;
+extern macro ConstexprIntegerLiteralToFloat64(constexpr IntegerLiteral):
+ constexpr float64;
+
+extern operator '==' macro ConstexprIntegerLiteralEqual(
+ constexpr IntegerLiteral, constexpr IntegerLiteral): constexpr bool;
+extern operator '+' macro ConstexprIntegerLiteralAdd(
+ constexpr IntegerLiteral,
+ constexpr IntegerLiteral): constexpr IntegerLiteral;
+extern operator '<<' macro ConstexprIntegerLiteralLeftShift(
+ constexpr IntegerLiteral,
+ constexpr IntegerLiteral): constexpr IntegerLiteral;
+extern operator '|' macro ConstexprIntegerLiteralBitwiseOr(
+ constexpr IntegerLiteral,
+ constexpr IntegerLiteral): constexpr IntegerLiteral;
+
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '==' macro Word32Equal(uint32, uint32): bool;
extern operator '!=' macro Word32NotEqual(int32, int32): bool;
@@ -1165,19 +1216,29 @@ extern macro IntPtrConstant(constexpr int32): intptr;
extern macro Uint16Constant(constexpr uint16): uint16;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
+macro Int32Constant(i: constexpr IntegerLiteral): int32 {
+ return Int32Constant(ConstexprIntegerLiteralToInt32(i));
+}
extern macro Int64Constant(constexpr int64): int64;
extern macro Uint64Constant(constexpr uint64): uint64;
extern macro Float64Constant(constexpr int32): float64;
extern macro Float64Constant(constexpr float64): float64;
+extern macro Float64Constant(constexpr IntegerLiteral): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro SmiConstant(constexpr Smi): Smi;
extern macro SmiConstant(constexpr MessageTemplate): Smi;
extern macro SmiConstant(constexpr bool): Smi;
extern macro SmiConstant(constexpr uint32): Smi;
+macro SmiConstant(il: constexpr IntegerLiteral): Smi {
+ return SmiConstant(ConstexprIntegerLiteralToInt31(il));
+}
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot;
extern macro IntPtrConstant(constexpr intptr): intptr;
+macro IntPtrConstant(il: constexpr IntegerLiteral): intptr {
+ return IntPtrConstant(ConstexprIntegerLiteralToIntptr(il));
+}
extern macro PointerConstant(constexpr RawPtr): RawPtr;
extern macro SingleCharacterStringConstant(constexpr string): String;
extern macro Float64SilenceNaN(float64): float64;
@@ -1872,6 +1933,18 @@ extern operator '[]' macro LoadWeakFixedArrayElement(
extern operator '[]' macro LoadUint8Ptr(RawPtr<uint8>, intptr): uint8;
+extern enum HashFieldType extends uint32 constexpr 'Name::HashFieldType' {
+ kHash,
+ kIntegerIndex,
+ kForwardingIndex,
+ kEmpty
+}
+
+operator '==' macro HashFieldTypeEquals(
+ s1: HashFieldType, s2: HashFieldType): bool {
+ return Word32Equal(s1, s2);
+}
+
const kNoHashSentinel:
constexpr int32 generates 'PropertyArray::kNoHashSentinel';
extern macro LoadNameHash(Name): uint32;
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index e5a3d44686..914f032acd 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -128,11 +128,8 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
TNode<JSTypedArray> typed_array = CAST(receiver_);
o_ = typed_array;
- // TODO(v8:11111): Support RAB / GSAB.
- TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(typed_array);
- ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
-
- len_ = LoadJSTypedArrayLength(typed_array);
+ Label throw_detached(this, Label::kDeferred);
+ len_ = LoadJSTypedArrayLengthAndCheckDetached(typed_array, &throw_detached);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -146,13 +143,16 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
BIND(&throw_not_callable);
ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
+ BIND(&throw_detached);
+ ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
+
Label unexpected_instance_type(this);
BIND(&unexpected_instance_type);
Unreachable();
std::vector<int32_t> elements_kinds = {
#define ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
- TYPED_ARRAYS(ELEMENTS_KIND)
+ TYPED_ARRAYS(ELEMENTS_KIND) RAB_GSAB_TYPED_ARRAYS(ELEMENTS_KIND)
#undef ELEMENTS_KIND
};
std::list<Label> labels;
@@ -168,6 +168,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
generator(this);
+ TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(typed_array);
TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map);
Switch(elements_kind, &unexpected_instance_type, elements_kinds.data(),
label_ptrs.data(), labels.size());
@@ -176,15 +177,25 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
BIND(&*it);
source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
- VisitAllTypedArrayElements(array_buffer, processor, direction, typed_array);
+ // TODO(v8:11111): Only RAB-backed TAs need special handling here since the
+ // backing store can shrink mid-iteration. This implementation has an
+ // overzealous check for GSAB-backed length-tracking TAs. Then again, the
+ // non-RAB/GSAB code also has an overzealous detached check for SABs.
+ bool is_rab_gsab = IsRabGsabTypedArrayElementsKind(source_elements_kind_);
+ if (is_rab_gsab) {
+ source_elements_kind_ =
+ GetCorrespondingNonRabGsabElementsKind(source_elements_kind_);
+ }
+ VisitAllTypedArrayElements(array_buffer, processor, direction, typed_array,
+ is_rab_gsab);
ReturnFromBuiltin(a_.value());
}
}
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
- ForEachDirection direction, TNode<JSTypedArray> typed_array) {
- // TODO(v8:11111): Support RAB / GSAB.
+ ForEachDirection direction, TNode<JSTypedArray> typed_array,
+ bool can_shrink) {
VariableList list({&a_, &k_}, zone());
TNode<UintPtrT> start = UintPtrConstant(0);
@@ -203,7 +214,12 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TVARIABLE(Object, value);
Label detached(this, Label::kDeferred);
Label process(this);
- GotoIf(IsDetachedBuffer(array_buffer), &detached);
+ if (can_shrink) {
+ // If `index` is out of bounds, Get returns undefined.
+ CheckJSTypedArrayIndex(index, typed_array, &detached);
+ } else {
+ GotoIf(IsDetachedBuffer(array_buffer), &detached);
+ }
{
TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
value = LoadFixedTypedArrayElementAsTagged(data_ptr, index,
@@ -1648,7 +1664,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
- TNode<Code> code = HeapConstant(callable.code());
+ TNode<CodeT> code = HeapConstant(callable.code());
// We are going to call here ArrayNoArgumentsConstructor or
// ArraySingleArgumentsConstructor which in addition to the register arguments
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 1f169632bf..c662ad39e5 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -105,7 +105,8 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer,
const CallResultProcessor& processor,
ForEachDirection direction,
- TNode<JSTypedArray> typed_array);
+ TNode<JSTypedArray> typed_array,
+ bool can_shrink);
TNode<Object> callbackfn_;
TNode<JSReceiver> o_;
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 1baba71926..d8669b85a8 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -17,6 +17,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/lookup.h"
#include "src/objects/objects-inl.h"
#include "src/objects/prototype.h"
@@ -536,7 +537,8 @@ V8_WARN_UNUSED_RESULT Object GenericArrayShift(Isolate* isolate,
// c. Let fromPresent be ? HasProperty(O, from).
bool from_present;
MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, from_present, JSReceiver::HasProperty(receiver, from));
+ isolate, from_present,
+ JSReceiver::HasProperty(isolate, receiver, from));
// d. If fromPresent is true, then.
if (from_present) {
@@ -1045,7 +1047,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t length, ArrayConcatVisitor* visitor) {
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
+ Maybe<bool> maybe = JSReceiver::HasElement(isolate, receiver, i);
if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
Handle<Object> element_value;
@@ -1122,7 +1124,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (!element_value->IsTheHole(isolate)) {
if (!visitor->visit(j, element_value)) return false;
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(isolate, array, j);
if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
// Call GetElement on array, not its prototype, or getters won't
@@ -1160,7 +1162,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
isolate->factory()->NewNumber(double_value);
if (!visitor->visit(j, element_value)) return false;
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(isolate, array, j);
if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
// Call GetElement on array, not its prototype, or getters won't
@@ -1542,5 +1544,309 @@ BUILTIN(ArrayConcat) {
return Slow_ArrayConcat(&args, species, isolate);
}
+namespace {
+
+// https://tc39.es/proposal-array-grouping/#sec-add-value-to-keyed-group
+// Each keyed group is an array list.
+inline Handle<OrderedHashMap> AddValueToKeyedGroup(
+ Isolate* isolate, Handle<OrderedHashMap> groups, Handle<Object> key,
+ Handle<Object> value) {
+ InternalIndex entry = groups->FindEntry(isolate, *key);
+ if (!entry.is_found()) {
+ Handle<ArrayList> array = ArrayList::New(isolate, 1);
+ array = ArrayList::Add(isolate, array, value);
+ return OrderedHashMap::Add(isolate, groups, key, array).ToHandleChecked();
+ }
+ Handle<ArrayList> array =
+ Handle<ArrayList>(ArrayList::cast(groups->ValueAt(entry)), isolate);
+ array = ArrayList::Add(isolate, array, value);
+ groups->SetEntry(entry, *key, *array);
+ return groups;
+}
+
+inline ElementsKind DeduceKeyedGroupElementsKind(ElementsKind kind) {
+ // The keyed groups are array lists with fast elements.
+ // Double elements are stored as HeapNumbers in the keyed group elements
+ // so that we don't need to cast all the keyed groups when switching from
+ // fast path to the generic path.
+ // TODO(v8:12499) add unboxed double elements support
+ switch (kind) {
+ case ElementsKind::PACKED_SMI_ELEMENTS: {
+ return ElementsKind::PACKED_SMI_ELEMENTS;
+ }
+ default: {
+ return ElementsKind::PACKED_ELEMENTS;
+ }
+ }
+}
+
+inline bool IsFastArray(Handle<JSReceiver> object) {
+ Isolate* isolate = object->GetIsolate();
+ if (isolate->force_slow_path()) return false;
+ if (!object->IsJSArray()) return false;
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ if (!array->HasFastElements(isolate)) return false;
+
+ Context context = isolate->context();
+ if (array->map().prototype() !=
+ context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ return false;
+ }
+
+ return Protectors::IsNoElementsIntact(isolate);
+}
+
+inline bool CheckArrayMapNotModified(Handle<JSArray> array,
+ Handle<Map> original_map) {
+ if (array->map() != *original_map) {
+ return false;
+ }
+ return Protectors::IsNoElementsIntact(array->GetIsolate());
+}
+
+enum class GroupByMode { kToObject, kToMap };
+
+template <GroupByMode mode>
+inline MaybeHandle<OrderedHashMap> GenericArrayGroupBy(
+ Isolate* isolate, Handle<JSReceiver> O, Handle<Object> callbackfn,
+ Handle<OrderedHashMap> groups, double initialK, double len) {
+ // 6. Repeat, while k < len
+ for (double k = initialK; k < len; ++k) {
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ Handle<Name> Pk;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, Pk, Object::ToName(isolate, isolate->factory()->NewNumber(k)),
+ OrderedHashMap);
+ // 6b. Let kValue be ? Get(O, Pk).
+ Handle<Object> kValue;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, kValue,
+ Object::GetPropertyOrElement(isolate, O, Pk),
+ OrderedHashMap);
+
+ // Common steps for ArrayPrototypeGroupBy and ArrayPrototypeGroupByToMap
+ // 6c. Let key be ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »).
+ Handle<Object> propertyKey;
+ Handle<Object> argv[] = {kValue, isolate->factory()->NewNumber(k), O};
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, propertyKey,
+ Execution::Call(isolate, callbackfn, O, 3, argv),
+ OrderedHashMap);
+
+ if (mode == GroupByMode::kToMap) {
+ // 6d. If key is -0𝔽, set key to +0𝔽.
+ if (propertyKey->IsMinusZero()) {
+ propertyKey = Handle<Smi>(Smi::FromInt(0), isolate);
+ }
+ } else {
+ // 6c. Let propertyKey be ? ToPropertyKey(? Call(callbackfn, thisArg, «
+ // kValue, 𝔽(k), O »)).
+ Handle<Name> propertyKeyName;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, propertyKeyName,
+ Object::ToName(isolate, propertyKey),
+ OrderedHashMap);
+ propertyKey = isolate->factory()->InternalizeName(propertyKeyName);
+ }
+
+ // 6e. Perform ! AddValueToKeyedGroup(groups, propertyKey, kValue).
+ groups = AddValueToKeyedGroup(isolate, groups, propertyKey, kValue);
+
+ // 6f. Set k to k + 1.
+ // done by the loop.
+ }
+
+ return groups;
+}
+
+template <GroupByMode mode>
+inline MaybeHandle<OrderedHashMap> FastArrayGroupBy(
+ Isolate* isolate, Handle<JSArray> array, Handle<Object> callbackfn,
+ Handle<OrderedHashMap> groups, double len) {
+ Handle<Map> original_map = Handle<Map>(array->map(), isolate);
+ uint32_t uint_len = static_cast<uint32_t>(len);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+
+ // 4. Let k be 0.
+ // 6. Repeat, while k < len
+ for (InternalIndex k : InternalIndex::Range(uint_len)) {
+ if (!CheckArrayMapNotModified(array, original_map)) {
+ return GenericArrayGroupBy<mode>(isolate, array, callbackfn, groups,
+ k.as_uint32(), len);
+ }
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // 6b. Let kValue be ? Get(O, Pk).
+ Handle<Object> kValue = accessor->Get(array, k);
+ if (kValue->IsTheHole()) {
+ kValue = isolate->factory()->undefined_value();
+ }
+
+ // Common steps for ArrayPrototypeGroupBy and ArrayPrototypeGroupByToMap
+ // 6c. Let key be ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »).
+ Handle<Object> propertyKey;
+ Handle<Object> argv[] = {
+ kValue, isolate->factory()->NewNumber(k.as_uint32()), array};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, propertyKey,
+ Execution::Call(isolate, callbackfn, array, 3, argv), OrderedHashMap);
+
+ if (mode == GroupByMode::kToMap) {
+ // 6d. If key is -0𝔽, set key to +0𝔽.
+ if (propertyKey->IsMinusZero()) {
+ propertyKey = Handle<Smi>(Smi::FromInt(0), isolate);
+ }
+ } else {
+ // 6c. Let propertyKey be ? ToPropertyKey(? Call(callbackfn, thisArg, «
+ // kValue, 𝔽(k), O »)).
+ Handle<Name> propertyKeyName;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, propertyKeyName,
+ Object::ToName(isolate, propertyKey),
+ OrderedHashMap);
+ propertyKey = isolate->factory()->InternalizeName(propertyKeyName);
+ }
+
+ // 6e. Perform ! AddValueToKeyedGroup(groups, propertyKey, kValue).
+ groups = AddValueToKeyedGroup(isolate, groups, propertyKey, kValue);
+
+ // 6f. Set k to k + 1.
+ // done by the loop.
+ }
+
+ return groups;
+}
+
+} // namespace
+
+// https://tc39.es/proposal-array-grouping/#sec-array.prototype.groupby
+BUILTIN(ArrayPrototypeGroupBy) {
+ const char* const kMethodName = "Array.prototype.groupBy";
+ HandleScope scope(isolate);
+
+ Handle<JSReceiver> O;
+ // 1. Let O be ? ToObject(this value).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, O, Object::ToObject(isolate, args.receiver(), kMethodName));
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ double len;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len,
+ GetLengthProperty(isolate, O));
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ Handle<Object> callbackfn = args.atOrUndefined(isolate, 1);
+ if (!callbackfn->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callbackfn));
+ }
+
+ // 5. Let groups be a new empty List.
+ Handle<OrderedHashMap> groups = isolate->factory()->NewOrderedHashMap();
+ // Elements kind of the array for grouped elements kind deduction.
+ ElementsKind elements_kind = ElementsKind::NO_ELEMENTS;
+ if (IsFastArray(O)) {
+ Handle<JSArray> array = Handle<JSArray>::cast(O);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, groups,
+ FastArrayGroupBy<GroupByMode::kToObject>(isolate, array, callbackfn,
+ groups, len));
+ // Get array's elements kind after called into javascript.
+ elements_kind = array->GetElementsKind();
+ } else {
+ // 4. Let k be 0.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, groups,
+ GenericArrayGroupBy<GroupByMode::kToObject>(isolate, O, callbackfn,
+ groups, 0, len));
+ }
+
+ // 7. Let obj be ! OrdinaryObjectCreate(null).
+ Handle<JSObject> obj = isolate->factory()->NewJSObjectWithNullProto();
+ ElementsKind result_elements_kind =
+ DeduceKeyedGroupElementsKind(elements_kind);
+ // 8. For each Record { [[Key]], [[Elements]] } g of groups, do
+ for (InternalIndex entry : groups->IterateEntries()) {
+ Handle<Name> key = Handle<Name>(Name::cast(groups->KeyAt(entry)), isolate);
+ // 8a. Let elements be ! CreateArrayFromList(g.[[Elements]]).
+ Handle<ArrayList> array_list =
+ Handle<ArrayList>(ArrayList::cast(groups->ValueAt(entry)), isolate);
+ Handle<FixedArray> elements = ArrayList::Elements(isolate, array_list);
+ Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(
+ elements, result_elements_kind, array_list->Length());
+
+ // 8b. Perform ! CreateDataPropertyOrThrow(obj, g.[[Key]], elements).
+ JSReceiver::CreateDataProperty(isolate, obj, key, array,
+ Just(kThrowOnError))
+ .Check();
+ }
+
+ // 9. Return obj.
+ return *obj;
+}
+
+// https://tc39.es/proposal-array-grouping/#sec-array.prototype.groupbymap
+BUILTIN(ArrayPrototypeGroupByToMap) {
+ const char* const kMethodName = "Array.prototype.groupByToMap";
+ HandleScope scope(isolate);
+
+ Handle<JSReceiver> O;
+ // 1. Let O be ? ToObject(this value).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, O, Object::ToObject(isolate, args.receiver(), kMethodName));
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ double len;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len,
+ GetLengthProperty(isolate, O));
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ Handle<Object> callbackfn = args.atOrUndefined(isolate, 1);
+ if (!callbackfn->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callbackfn));
+ }
+
+ // 5. Let groups be a new empty List.
+ Handle<OrderedHashMap> groups = isolate->factory()->NewOrderedHashMap();
+ // Elements kind of the array for grouped elements kind deduction.
+ ElementsKind elements_kind = ElementsKind::NO_ELEMENTS;
+ if (IsFastArray(O)) {
+ Handle<JSArray> array = Handle<JSArray>::cast(O);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, groups,
+ FastArrayGroupBy<GroupByMode::kToMap>(isolate, array, callbackfn,
+ groups, len));
+ // Get array's elements kind after called into javascript.
+ elements_kind = array->GetElementsKind();
+ } else {
+ // 4. Let k be 0.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, groups,
+ GenericArrayGroupBy<GroupByMode::kToMap>(isolate, O, callbackfn, groups,
+ 0, len));
+ }
+
+ // 7. Let map be ! Construct(%Map%).
+ Handle<JSMap> map = isolate->factory()->NewJSMap();
+ Handle<OrderedHashMap> map_table = isolate->factory()->NewOrderedHashMap();
+ ElementsKind result_elements_kind =
+ DeduceKeyedGroupElementsKind(elements_kind);
+ // 8. For each Record { [[Key]], [[Elements]] } g of groups, do
+ for (InternalIndex entry : groups->IterateEntries()) {
+ Handle<Object> key = Handle<Object>(groups->KeyAt(entry), isolate);
+ // 8a. Let elements be ! CreateArrayFromList(g.[[Elements]]).
+ Handle<ArrayList> array_list =
+ Handle<ArrayList>(ArrayList::cast(groups->ValueAt(entry)), isolate);
+ Handle<FixedArray> elements = ArrayList::Elements(isolate, array_list);
+ Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(
+ elements, result_elements_kind, array_list->Length());
+
+ // 8b. Let entry be the Record { [[Key]]: g.[[Key]], [[Value]]: elements }.
+ // 8c. Append entry as the last element of map.[[MapData]].
+ map_table =
+ OrderedHashMap::Add(isolate, map_table, key, array).ToHandleChecked();
+ }
+ map->set_table(*map_table);
+
+ // 9. Return map.
+ return *map;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 1373e66397..66b3e432f8 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -46,7 +46,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
{
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
- CallRuntime(Runtime::kDebugAsyncFunctionResumed, context, promise);
+ CallRuntime(Runtime::kDebugPushPromise, context, promise);
Goto(&if_instrumentation_done);
}
BIND(&if_instrumentation_done);
@@ -103,21 +103,10 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
RootIndex::kUndefinedValue);
// Allocate and initialize the promise.
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<JSFunction> promise_function =
- CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- TNode<Map> promise_map = LoadObjectField<Map>(
- promise_function, JSFunction::kPrototypeOrInitialMapOffset);
- TNode<JSPromise> promise = UncheckedCast<JSPromise>(
- AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields));
- StoreMapNoWriteBarrier(promise, promise_map);
- StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- PromiseInit(promise);
+ TNode<JSPromise> promise = NewJSPromise(context);
// Allocate and initialize the async function object.
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> async_function_object_map = CAST(LoadContextElement(
native_context, Context::ASYNC_FUNCTION_OBJECT_MAP_INDEX));
TNode<JSAsyncFunctionObject> async_function_object =
@@ -152,22 +141,15 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
async_function_object, JSAsyncFunctionObject::kPromiseOffset, promise);
- RunContextPromiseHookInit(context, promise, UndefinedConstant());
-
- // Fire promise hooks if enabled and push the Promise under construction
- // in an async function on the catch prediction stack to handle exceptions
- // thrown before the first await.
- Label if_instrumentation(this, Label::kDeferred),
- if_instrumentation_done(this);
- Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
- &if_instrumentation, &if_instrumentation_done);
- BIND(&if_instrumentation);
- {
- CallRuntime(Runtime::kDebugAsyncFunctionEntered, context, promise);
- Goto(&if_instrumentation_done);
- }
- BIND(&if_instrumentation_done);
+ // While we are executing an async function, we need to have the implicit
+ // promise on the stack to get the catch prediction right, even before we
+ // awaited for the first time.
+ Label if_debugging(this);
+ GotoIf(IsDebugActive(), &if_debugging);
+ Return(async_function_object);
+ BIND(&if_debugging);
+ CallRuntime(Runtime::kDebugPushPromise, context, promise);
Return(async_function_object);
}
@@ -175,7 +157,6 @@ TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
auto async_function_object =
Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
auto reason = Parameter<Object>(Descriptor::kReason);
- auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
@@ -186,35 +167,32 @@ TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
CallBuiltin(Builtin::kRejectPromise, context, promise, reason,
FalseConstant());
- Label if_debugging(this, Label::kDeferred);
- GotoIf(HasAsyncEventDelegate(), &if_debugging);
+ Label if_debugging(this);
GotoIf(IsDebugActive(), &if_debugging);
Return(promise);
BIND(&if_debugging);
- TailCallRuntime(Runtime::kDebugAsyncFunctionFinished, context, can_suspend,
- promise);
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Return(promise);
}
TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
auto async_function_object =
Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
auto value = Parameter<Object>(Descriptor::kValue);
- auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
CallBuiltin(Builtin::kResolvePromise, context, promise, value);
- Label if_debugging(this, Label::kDeferred);
- GotoIf(HasAsyncEventDelegate(), &if_debugging);
+ Label if_debugging(this);
GotoIf(IsDebugActive(), &if_debugging);
Return(promise);
BIND(&if_debugging);
- TailCallRuntime(Runtime::kDebugAsyncFunctionFinished, context, can_suspend,
- promise);
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Return(promise);
}
// AsyncFunctionReject and AsyncFunctionResolve are both required to return
@@ -260,29 +238,18 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
auto value = Parameter<Object>(Descriptor::kValue);
auto context = Parameter<Context>(Descriptor::kContext);
- TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
- async_function_object, JSAsyncFunctionObject::kPromiseOffset);
-
- Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
- GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
- GotoIf(IsDebugActive(), &call_debug_hook);
- Goto(&after_debug_hook);
- BIND(&after_debug_hook);
-
TNode<SharedFunctionInfo> on_resolve_sfi =
AsyncFunctionAwaitResolveSharedFunConstant();
TNode<SharedFunctionInfo> on_reject_sfi =
AsyncFunctionAwaitRejectSharedFunConstant();
+ TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset);
Await(context, async_function_object, value, outer_promise, on_resolve_sfi,
on_reject_sfi, is_predicted_as_caught);
// Return outer promise to avoid adding an load of the outer promise before
// suspending in BytecodeGenerator.
Return(outer_promise);
-
- BIND(&call_debug_hook);
- CallRuntime(Runtime::kDebugAsyncFunctionSuspended, context, outer_promise);
- Goto(&after_debug_hook);
}
// Called by the parser from the desugaring of 'await' when catch
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 08cea2e74e..03e4b32d06 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -22,7 +22,7 @@ class ValueUnwrapContext {
} // namespace
-TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
+TNode<Object> AsyncBuiltinsAssembler::Await(
TNode<Context> context, TNode<JSGeneratorObject> generator,
TNode<Object> value, TNode<JSPromise> outer_promise,
TNode<SharedFunctionInfo> on_resolve_sfi,
@@ -30,93 +30,58 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
- static const int kClosureContextSize =
- FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
- TNode<Context> closure_context =
- UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
- {
- // Initialize the await context, storing the {generator} as extension.
- TNode<Map> map = CAST(
- LoadContextElement(native_context, Context::AWAIT_CONTEXT_MAP_INDEX));
- StoreMapNoWriteBarrier(closure_context, map);
- StoreObjectFieldNoWriteBarrier(
- closure_context, Context::kLengthOffset,
- SmiConstant(Context::MIN_CONTEXT_EXTENDED_SLOTS));
- const TNode<Object> empty_scope_info =
- LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
- StoreContextElementNoWriteBarrier(
- closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
- StoreContextElementNoWriteBarrier(closure_context, Context::PREVIOUS_INDEX,
- native_context);
- StoreContextElementNoWriteBarrier(closure_context, Context::EXTENSION_INDEX,
- generator);
- }
-
- // Let promiseCapability be ! NewPromiseCapability(%Promise%).
- const TNode<JSFunction> promise_fun =
- CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- CSA_DCHECK(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- const TNode<Map> promise_map = CAST(
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset));
- // Assert that the JSPromise map has an instance size is
- // JSPromise::kSizeWithEmbedderFields.
- CSA_DCHECK(this,
- IntPtrEqual(LoadMapInstanceSizeInWords(promise_map),
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kTaggedSize)));
- TNode<JSPromise> promise;
+ // We do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily
+ // create wrapper promises. Now if {value} is already a promise with the
+ // intrinsics %Promise% constructor as its "constructor", we don't need
+ // to allocate the wrapper promise.
{
- // Allocate and initialize Promise
- TNode<HeapObject> wrapped_value =
- AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields);
- StoreMapNoWriteBarrier(wrapped_value, promise_map);
- StoreObjectFieldRoot(wrapped_value, JSPromise::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(wrapped_value, JSPromise::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- promise = CAST(wrapped_value);
- PromiseInit(promise);
+ TVARIABLE(Object, var_value, value);
+ Label if_slow_path(this, Label::kDeferred), if_done(this),
+ if_slow_constructor(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(value), &if_slow_path);
+ TNode<HeapObject> value_object = CAST(value);
+ const TNode<Map> value_map = LoadMap(value_object);
+ GotoIfNot(IsJSPromiseMap(value_map), &if_slow_path);
+ // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ const TNode<Object> promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(TaggedEqual(LoadMapPrototype(value_map), promise_prototype),
+ &if_slow_constructor);
+ Branch(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor,
+ &if_done);
+
+ // At this point, {value} doesn't have the initial promise prototype or
+ // the promise @@species protector was invalidated, but {value} could still
+ // have the %Promise% as its "constructor", so we need to check that as
+ // well.
+ BIND(&if_slow_constructor);
+ {
+ const TNode<Object> value_constructor = GetProperty(
+ context, value, isolate()->factory()->constructor_string());
+ const TNode<Object> promise_function =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Branch(TaggedEqual(value_constructor, promise_function), &if_done,
+ &if_slow_path);
+ }
+
+ BIND(&if_slow_path);
+ {
+ // We need to mark the {value} wrapper as having {outer_promise}
+ // as its parent, which is why we need to inline a good chunk of
+ // logic from the `PromiseResolve` builtin here.
+ var_value = NewJSPromise(native_context, outer_promise);
+ CallBuiltin(Builtin::kResolvePromise, native_context, var_value.value(),
+ value);
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ value = var_value.value();
}
- // Allocate and initialize resolve handler
- TNode<HeapObject> on_resolve =
- AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_sfi);
-
- // Allocate and initialize reject handler
- TNode<HeapObject> on_reject =
- AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_sfi);
-
- TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
-
- RunContextPromiseHookInit(context, promise, outer_promise);
-
- InitAwaitPromise(Runtime::kAwaitPromisesInitOld, context, value, promise,
- outer_promise, on_reject, is_predicted_as_caught,
- &var_throwaway);
-
- // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtin::kResolvePromise, context, promise, value);
-
- return CallBuiltin(Builtin::kPerformPromiseThen, context, promise, on_resolve,
- on_reject, var_throwaway.value());
-}
-
-TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
- TNode<Context> context, TNode<JSGeneratorObject> generator,
- TNode<JSPromise> promise, TNode<JSPromise> outer_promise,
- TNode<SharedFunctionInfo> on_resolve_sfi,
- TNode<SharedFunctionInfo> on_reject_sfi,
- TNode<Oddball> is_predicted_as_caught) {
- const TNode<NativeContext> native_context = LoadNativeContext(context);
-
- // 2. Let promise be ? PromiseResolve(« promise »).
- // We skip this step, because promise is already guaranteed to be a
- // JSPRomise at this point.
-
static const int kClosureContextSize =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
TNode<Context> closure_context =
@@ -151,105 +116,37 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_sfi);
- TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
-
- InitAwaitPromise(Runtime::kAwaitPromisesInit, context, promise, promise,
- outer_promise, on_reject, is_predicted_as_caught,
- &var_throwaway);
-
- return CallBuiltin(Builtin::kPerformPromiseThen, native_context, promise,
- on_resolve, on_reject, var_throwaway.value());
-}
-
-void AsyncBuiltinsAssembler::InitAwaitPromise(
- Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
- TNode<Object> promise, TNode<Object> outer_promise,
- TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
- TVariable<HeapObject>* var_throwaway) {
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
// case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred),
- if_promise_hook(this, Label::kDeferred),
- not_debugging(this),
- do_nothing(this);
+ TVARIABLE(Object, var_throwaway, UndefinedConstant());
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
- Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- promiseHookFlags), &if_debugging, &not_debugging);
- BIND(&if_debugging);
- *var_throwaway =
- CAST(CallRuntime(id, context, value, promise,
- outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_nothing);
- BIND(&not_debugging);
-
+ GotoIf(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags),
+ &if_instrumentation);
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
// This call to NewJSPromise is to keep behaviour parity with what happens
- // in Runtime::kAwaitPromisesInit above if native hooks are set. It will
- // create a throwaway promise that will trigger an init event and will get
+ // in Runtime::kDebugAsyncFunctionSuspended below if native hooks are set.
+ // It creates a throwaway promise that will trigger an init event and get
// passed into Builtin::kPerformPromiseThen below.
- Branch(IsContextPromiseHookEnabled(promiseHookFlags), &if_promise_hook,
- &do_nothing);
- BIND(&if_promise_hook);
- *var_throwaway = NewJSPromise(context, promise);
- Goto(&do_nothing);
- BIND(&do_nothing);
-}
-
-TNode<Object> AsyncBuiltinsAssembler::Await(
- TNode<Context> context, TNode<JSGeneratorObject> generator,
- TNode<Object> value, TNode<JSPromise> outer_promise,
- TNode<SharedFunctionInfo> on_resolve_sfi,
- TNode<SharedFunctionInfo> on_reject_sfi,
- TNode<Oddball> is_predicted_as_caught) {
- TVARIABLE(Object, result);
- Label if_old(this), if_new(this), done(this),
- if_slow_constructor(this, Label::kDeferred);
-
- // We do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily
- // create wrapper promises. Now if {value} is already a promise with the
- // intrinsics %Promise% constructor as its "constructor", we don't need
- // to allocate the wrapper promise and can just use the `AwaitOptimized`
- // logic.
- GotoIf(TaggedIsSmi(value), &if_old);
- TNode<HeapObject> value_object = CAST(value);
- const TNode<Map> value_map = LoadMap(value_object);
- GotoIfNot(IsJSPromiseMap(value_map), &if_old);
- // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
- // is the (initial) Promise.prototype and the @@species protector is
- // intact, as that guards the lookup path for "constructor" on
- // JSPromise instances which have the (initial) Promise.prototype.
- const TNode<NativeContext> native_context = LoadNativeContext(context);
- const TNode<Object> promise_prototype =
- LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
- GotoIfNot(TaggedEqual(LoadMapPrototype(value_map), promise_prototype),
- &if_slow_constructor);
- Branch(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor, &if_new);
-
- // At this point, {value} doesn't have the initial promise prototype or
- // the promise @@species protector was invalidated, but {value} could still
- // have the %Promise% as its "constructor", so we need to check that as well.
- BIND(&if_slow_constructor);
+ GotoIfNot(IsContextPromiseHookEnabled(promiseHookFlags),
+ &if_instrumentation_done);
+ var_throwaway = NewJSPromise(context, value);
+#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
+ Goto(&if_instrumentation_done);
+ BIND(&if_instrumentation);
{
- const TNode<Object> value_constructor =
- GetProperty(context, value, isolate()->factory()->constructor_string());
- const TNode<Object> promise_function =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Branch(TaggedEqual(value_constructor, promise_function), &if_new, &if_old);
+ var_throwaway = CallRuntime(Runtime::kDebugAsyncFunctionSuspended,
+ native_context, value, outer_promise, on_reject,
+ generator, is_predicted_as_caught);
+ Goto(&if_instrumentation_done);
}
+ BIND(&if_instrumentation_done);
- BIND(&if_old);
- result = AwaitOld(context, generator, value, outer_promise, on_resolve_sfi,
- on_reject_sfi, is_predicted_as_caught);
- Goto(&done);
-
- BIND(&if_new);
- result =
- AwaitOptimized(context, generator, CAST(value), outer_promise,
- on_resolve_sfi, on_reject_sfi, is_predicted_as_caught);
- Goto(&done);
-
- BIND(&done);
- return result.value();
+ return CallBuiltin(Builtin::kPerformPromiseThen, native_context, value,
+ on_resolve, on_reject, var_throwaway.value());
}
void AsyncBuiltinsAssembler::InitializeNativeClosure(
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 34b7a0ce1d..118e3951a1 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -48,26 +48,6 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
TNode<SharedFunctionInfo> shared_info);
TNode<Context> AllocateAsyncIteratorValueUnwrapContext(
TNode<NativeContext> native_context, TNode<Oddball> done);
-
- TNode<Object> AwaitOld(TNode<Context> context,
- TNode<JSGeneratorObject> generator,
- TNode<Object> value, TNode<JSPromise> outer_promise,
- TNode<SharedFunctionInfo> on_resolve_sfi,
- TNode<SharedFunctionInfo> on_reject_sfi,
- TNode<Oddball> is_predicted_as_caught);
- TNode<Object> AwaitOptimized(TNode<Context> context,
- TNode<JSGeneratorObject> generator,
- TNode<JSPromise> promise,
- TNode<JSPromise> outer_promise,
- TNode<SharedFunctionInfo> on_resolve_sfi,
- TNode<SharedFunctionInfo> on_reject_sfi,
- TNode<Oddball> is_predicted_as_caught);
-
- void InitAwaitPromise(
- Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
- TNode<Object> promise, TNode<Object> outer_promise,
- TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
- TVariable<HeapObject>* var_throwaway);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 384fba3375..ecb17c77f5 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -211,21 +211,40 @@ AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
TNode<Context> context, TNode<Object> value,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- const TNode<JSAsyncGeneratorObject> generator =
+ const TNode<JSAsyncGeneratorObject> async_generator_object =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
- SetGeneratorNotAwaiting(generator);
+ SetGeneratorNotAwaiting(async_generator_object);
- CSA_SLOW_DCHECK(this, IsGeneratorSuspended(generator));
+ CSA_SLOW_DCHECK(this, IsGeneratorSuspended(async_generator_object));
- // Remember the {resume_mode} for the {generator}.
- StoreObjectFieldNoWriteBarrier(generator,
+ // Remember the {resume_mode} for the {async_generator_object}.
+ StoreObjectFieldNoWriteBarrier(async_generator_object,
JSGeneratorObject::kResumeModeOffset,
SmiConstant(resume_mode));
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator);
+ // Push the promise for the {async_generator_object} back onto the catch
+ // prediction stack to handle exceptions thrown after resuming from the
+ // await properly.
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
+ Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done);
+ BIND(&if_instrumentation);
+ {
+ TNode<AsyncGeneratorRequest> request =
+ CAST(LoadFirstAsyncGeneratorRequestFromQueue(async_generator_object));
+ TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ request, AsyncGeneratorRequest::kPromiseOffset);
+ CallRuntime(Runtime::kDebugPushPromise, context, promise);
+ Goto(&if_instrumentation_done);
+ }
+ BIND(&if_instrumentation_done);
+
+ CallStub(CodeFactory::ResumeGenerator(isolate()), context, value,
+ async_generator_object);
- TailCallBuiltin(Builtin::kAsyncGeneratorResumeNext, context, generator);
+ TailCallBuiltin(Builtin::kAsyncGeneratorResumeNext, context,
+ async_generator_object);
}
template <typename Descriptor>
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 2cb74aa399..ed95a31e99 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -27,7 +27,7 @@ BUILTIN(BigIntConstructor) {
if (value->IsJSReceiver()) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, value,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(value),
ToPrimitiveHint::kNumber));
}
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 7a1c65e4a1..1703e43447 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins.h"
#include "src/heap/heap-inl.h" // For ToBoolean.
#include "src/logging/counters.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
namespace v8 {
namespace internal {
@@ -15,7 +15,7 @@ namespace internal {
#define CHECK_CALLSITE(frame, method) \
CHECK_RECEIVER(JSObject, receiver, method); \
LookupIterator it(isolate, receiver, \
- isolate->factory()->call_site_frame_info_symbol(), \
+ isolate->factory()->call_site_info_symbol(), \
LookupIterator::OWN_SKIP_INTERCEPTOR); \
if (it.state() != LookupIterator::DATA) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
@@ -23,7 +23,7 @@ namespace internal {
NewTypeError(MessageTemplate::kCallSiteMethod, \
isolate->factory()->NewStringFromAsciiChecked(method))); \
} \
- Handle<StackFrameInfo> frame = Handle<StackFrameInfo>::cast(it.GetDataValue())
+ Handle<CallSiteInfo> frame = Handle<CallSiteInfo>::cast(it.GetDataValue())
namespace {
Object PositiveNumberOrNull(int value, Isolate* isolate) {
@@ -36,27 +36,27 @@ Object PositiveNumberOrNull(int value, Isolate* isolate) {
BUILTIN(CallSitePrototypeGetColumnNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getColumnNumber");
- return PositiveNumberOrNull(StackFrameInfo::GetColumnNumber(frame), isolate);
+ return PositiveNumberOrNull(CallSiteInfo::GetColumnNumber(frame), isolate);
}
BUILTIN(CallSitePrototypeGetEnclosingColumnNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getEnclosingColumnNumber");
- return PositiveNumberOrNull(StackFrameInfo::GetEnclosingColumnNumber(frame),
+ return PositiveNumberOrNull(CallSiteInfo::GetEnclosingColumnNumber(frame),
isolate);
}
BUILTIN(CallSitePrototypeGetEnclosingLineNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getEnclosingLineNumber");
- return PositiveNumberOrNull(StackFrameInfo::GetEnclosingLineNumber(frame),
+ return PositiveNumberOrNull(CallSiteInfo::GetEnclosingLineNumber(frame),
isolate);
}
BUILTIN(CallSitePrototypeGetEvalOrigin) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getEvalOrigin");
- return *StackFrameInfo::GetEvalOrigin(frame);
+ return *CallSiteInfo::GetEvalOrigin(frame);
}
BUILTIN(CallSitePrototypeGetFileName) {
@@ -80,25 +80,25 @@ BUILTIN(CallSitePrototypeGetFunction) {
BUILTIN(CallSitePrototypeGetFunctionName) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getFunctionName");
- return *StackFrameInfo::GetFunctionName(frame);
+ return *CallSiteInfo::GetFunctionName(frame);
}
BUILTIN(CallSitePrototypeGetLineNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getLineNumber");
- return PositiveNumberOrNull(StackFrameInfo::GetLineNumber(frame), isolate);
+ return PositiveNumberOrNull(CallSiteInfo::GetLineNumber(frame), isolate);
}
BUILTIN(CallSitePrototypeGetMethodName) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getMethodName");
- return *StackFrameInfo::GetMethodName(frame);
+ return *CallSiteInfo::GetMethodName(frame);
}
BUILTIN(CallSitePrototypeGetPosition) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getPosition");
- return Smi::FromInt(StackFrameInfo::GetSourcePosition(frame));
+ return Smi::FromInt(CallSiteInfo::GetSourcePosition(frame));
}
BUILTIN(CallSitePrototypeGetPromiseIndex) {
@@ -107,7 +107,7 @@ BUILTIN(CallSitePrototypeGetPromiseIndex) {
if (!frame->IsPromiseAll() && !frame->IsPromiseAny()) {
return ReadOnlyRoots(isolate).null_value();
}
- return Smi::FromInt(StackFrameInfo::GetSourcePosition(frame));
+ return Smi::FromInt(CallSiteInfo::GetSourcePosition(frame));
}
BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
@@ -132,7 +132,7 @@ BUILTIN(CallSitePrototypeGetThis) {
BUILTIN(CallSitePrototypeGetTypeName) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getTypeName");
- return *StackFrameInfo::GetTypeName(frame);
+ return *CallSiteInfo::GetTypeName(frame);
}
BUILTIN(CallSitePrototypeIsAsync) {
@@ -174,7 +174,7 @@ BUILTIN(CallSitePrototypeIsToplevel) {
BUILTIN(CallSitePrototypeToString) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "toString");
- RETURN_RESULT_OR_FAILURE(isolate, SerializeStackFrameInfo(isolate, frame));
+ RETURN_RESULT_OR_FAILURE(isolate, SerializeCallSiteInfo(isolate, frame));
}
#undef CHECK_CALLSITE
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 818b8373de..00cad7b314 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -123,7 +123,8 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<IntPtrT> EstimatedInitialSize(TNode<Object> initial_entries,
TNode<BoolT> is_fast_jsarray);
- void GotoIfNotJSReceiver(const TNode<Object> obj, Label* if_not_receiver);
+ void GotoIfCannotBeWeakKey(const TNode<Object> obj,
+ Label* if_cannot_be_weak_key);
// Determines whether the collection's prototype has been modified.
TNode<BoolT> HasInitialCollectionPrototype(Variant variant,
@@ -522,10 +523,14 @@ TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
[=] { return IntPtrConstant(0); });
}
-void BaseCollectionsAssembler::GotoIfNotJSReceiver(const TNode<Object> obj,
- Label* if_not_receiver) {
- GotoIf(TaggedIsSmi(obj), if_not_receiver);
- GotoIfNot(IsJSReceiver(CAST(obj)), if_not_receiver);
+void BaseCollectionsAssembler::GotoIfCannotBeWeakKey(
+ const TNode<Object> obj, Label* if_cannot_be_weak_key) {
+ GotoIf(TaggedIsSmi(obj), if_cannot_be_weak_key);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(LoadMap(CAST(obj)));
+ GotoIfNot(IsJSReceiverInstanceType(instance_type), if_cannot_be_weak_key);
+ // TODO(v8:12547) Shared structs should only be able to point to shared values
+ // in weak collections. For now, disallow them as weak collection keys.
+ GotoIf(IsJSSharedStructInstanceType(instance_type), if_cannot_be_weak_key);
}
TNode<Map> BaseCollectionsAssembler::GetInitialCollectionPrototype(
@@ -2723,17 +2728,18 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
auto table = Parameter<EphemeronHashTable>(Descriptor::kTable);
auto key = Parameter<Object>(Descriptor::kKey);
- Label if_not_found(this);
+ Label if_cannot_be_weak_key(this);
- GotoIfNotJSReceiver(key, &if_not_found);
+ GotoIfCannotBeWeakKey(key, &if_cannot_be_weak_key);
- TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(CAST(key), &if_not_found);
+ TNode<IntPtrT> hash =
+ LoadJSReceiverIdentityHash(CAST(key), &if_cannot_be_weak_key);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
- TNode<IntPtrT> key_index =
- FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
+ TNode<IntPtrT> key_index = FindKeyIndexForKey(
+ table, key, hash, EntryMask(capacity), &if_cannot_be_weak_key);
Return(SmiTag(ValueIndexFromKeyIndex(key_index)));
- BIND(&if_not_found);
+ BIND(&if_cannot_be_weak_key);
Return(SmiConstant(-1));
}
@@ -2788,22 +2794,23 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
auto collection = Parameter<JSWeakCollection>(Descriptor::kCollection);
auto key = Parameter<Object>(Descriptor::kKey);
- Label call_runtime(this), if_not_found(this);
+ Label call_runtime(this), if_cannot_be_weak_key(this);
- GotoIfNotJSReceiver(key, &if_not_found);
+ GotoIfCannotBeWeakKey(key, &if_cannot_be_weak_key);
- TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(CAST(key), &if_not_found);
+ TNode<IntPtrT> hash =
+ LoadJSReceiverIdentityHash(CAST(key), &if_cannot_be_weak_key);
TNode<EphemeronHashTable> table = LoadTable(collection);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
- TNode<IntPtrT> key_index =
- FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
+ TNode<IntPtrT> key_index = FindKeyIndexForKey(
+ table, key, hash, EntryMask(capacity), &if_cannot_be_weak_key);
TNode<IntPtrT> number_of_elements = LoadNumberOfElements(table, -1);
GotoIf(ShouldShrink(capacity, number_of_elements), &call_runtime);
RemoveEntry(table, key_index, number_of_elements);
Return(TrueConstant());
- BIND(&if_not_found);
+ BIND(&if_cannot_be_weak_key);
Return(FalseConstant());
BIND(&call_runtime);
@@ -2884,7 +2891,7 @@ TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
"WeakMap.prototype.set");
Label throw_invalid_key(this);
- GotoIfNotJSReceiver(key, &throw_invalid_key);
+ GotoIfCannotBeWeakKey(key, &throw_invalid_key);
Return(
CallBuiltin(Builtin::kWeakCollectionSet, context, receiver, key, value));
@@ -2902,7 +2909,7 @@ TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
"WeakSet.prototype.add");
Label throw_invalid_value(this);
- GotoIfNotJSReceiver(value, &throw_invalid_value);
+ GotoIfCannotBeWeakKey(value, &throw_invalid_value);
Return(CallBuiltin(Builtin::kWeakCollectionSet, context, receiver, value,
TrueConstant()));
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index a1359cd422..42cfb5eac6 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stack>
+
#include "src/api/api-inl.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
@@ -16,28 +18,128 @@ namespace internal {
// -----------------------------------------------------------------------------
// Console
-#define CONSOLE_METHOD_LIST(V) \
- V(Debug, debug) \
- V(Error, error) \
- V(Info, info) \
- V(Log, log) \
- V(Warn, warn) \
- V(Dir, dir) \
- V(DirXml, dirXml) \
- V(Table, table) \
- V(Trace, trace) \
- V(Group, group) \
- V(GroupCollapsed, groupCollapsed) \
- V(GroupEnd, groupEnd) \
- V(Clear, clear) \
- V(Count, count) \
- V(CountReset, countReset) \
- V(Assert, assert) \
- V(Profile, profile) \
- V(ProfileEnd, profileEnd) \
+#define CONSOLE_METHOD_LIST(V) \
+ V(Dir, dir) \
+ V(DirXml, dirXml) \
+ V(Table, table) \
+ V(GroupEnd, groupEnd) \
+ V(Clear, clear) \
+ V(Count, count) \
+ V(CountReset, countReset) \
+ V(Profile, profile) \
+ V(ProfileEnd, profileEnd) \
V(TimeLog, timeLog)
+#define CONSOLE_METHOD_WITH_FORMATTER_LIST(V) \
+ V(Debug, debug, 1) \
+ V(Error, error, 1) \
+ V(Info, info, 1) \
+ V(Log, log, 1) \
+ V(Warn, warn, 1) \
+ V(Trace, trace, 1) \
+ V(Group, group, 1) \
+ V(GroupCollapsed, groupCollapsed, 1) \
+ V(Assert, assert, 2)
+
namespace {
+
+// 2.2 Formatter(args) [https://console.spec.whatwg.org/#formatter]
+//
+// This implements the formatter operation defined in the Console
+// specification to the degree that it makes sense for V8. That
+// means we primarily deal with %s, %i, %f, and %d, and any side
+// effects caused by the type conversions, and we preserve the %o,
+// %c, and %O specifiers and their parameters unchanged, and instead
+// leave it to the debugger front-end to make sense of those.
+//
+// Chrome also supports the non-standard bypass format specifier %_
+// which just skips over the parameter.
+//
+// This implementation updates the |args| in-place with the results
+// from the conversion.
+//
+// The |index| describes the position of the format string within,
+// |args| (starting with 1, since |args| also includes the receiver),
+// which is different for example in case of `console.log` where it
+// is 1 compared to `console.assert` where it is 2.
+bool Formatter(Isolate* isolate, BuiltinArguments& args, int index) {
+ if (args.length() < index + 2 || !args[index].IsString()) {
+ return true;
+ }
+ struct State {
+ Handle<String> str;
+ int off;
+ };
+ std::stack<State> states;
+ HandleScope scope(isolate);
+ auto percent = isolate->factory()->LookupSingleCharacterStringFromCode('%');
+ states.push({args.at<String>(index++), 0});
+ while (!states.empty() && index < args.length()) {
+ State& state = states.top();
+ state.off = String::IndexOf(isolate, state.str, percent, state.off);
+ if (state.off < 0 || state.off == state.str->length() - 1) {
+ states.pop();
+ continue;
+ }
+ Handle<Object> current = args.at(index);
+ uint16_t specifier = state.str->Get(state.off + 1, isolate);
+ if (specifier == 'd' || specifier == 'f' || specifier == 'i') {
+ if (current->IsSymbol()) {
+ current = isolate->factory()->nan_value();
+ } else {
+ Handle<Object> params[] = {current,
+ isolate->factory()->NewNumberFromInt(10)};
+ auto builtin = specifier == 'f' ? isolate->global_parse_float_fun()
+ : isolate->global_parse_int_fun();
+ if (!Execution::CallBuiltin(isolate, builtin,
+ isolate->factory()->undefined_value(),
+ arraysize(params), params)
+ .ToHandle(&current)) {
+ return false;
+ }
+ }
+ } else if (specifier == 's') {
+ Handle<Object> params[] = {current};
+ if (!Execution::CallBuiltin(isolate, isolate->string_function(),
+ isolate->factory()->undefined_value(),
+ arraysize(params), params)
+ .ToHandle(&current)) {
+ return false;
+ }
+
+ // Recurse into string results from type conversions, as they
+ // can themselves contain formatting specifiers.
+ states.push({Handle<String>::cast(current), 0});
+ } else if (specifier == 'c' || specifier == 'o' || specifier == 'O' ||
+ specifier == '_') {
+ // We leave the interpretation of %c (CSS), %o (optimally useful
+ // formatting), and %O (generic JavaScript object formatting) as
+ // well as the non-standard %_ (bypass formatter in Chrome) to
+ // the debugger front-end, and preserve these specifiers as well
+ // as their arguments verbatim.
+ index++;
+ state.off += 2;
+ continue;
+ } else if (specifier == '%') {
+ // Chrome also supports %% as a way to generate a single % in the
+ // output.
+ state.off += 2;
+ continue;
+ } else {
+ state.off++;
+ continue;
+ }
+
+ // Replace the |specifier| (including the '%' character) in |target|
+ // with the |current| value. We perform the replacement only morally
+ // by updating the argument to the conversion result, but leave it to
+ // the debugger front-end to perform the actual substitution.
+ args.set_at(index++, *current);
+ state.off += 2;
+ }
+ return true;
+}
+
void ConsoleCall(
Isolate* isolate, const internal::BuiltinArguments& args,
void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
@@ -48,11 +150,12 @@ void ConsoleCall(
HandleScope scope(isolate);
debug::ConsoleCallArguments wrapper(args);
Handle<Object> context_id_obj = JSObject::GetDataProperty(
- args.target(), isolate->factory()->console_context_id_symbol());
+ isolate, args.target(), isolate->factory()->console_context_id_symbol());
int context_id =
context_id_obj->IsSmi() ? Handle<Smi>::cast(context_id_obj)->value() : 0;
Handle<Object> context_name_obj = JSObject::GetDataProperty(
- args.target(), isolate->factory()->console_context_name_symbol());
+ isolate, args.target(),
+ isolate->factory()->console_context_name_symbol());
Handle<String> context_name = context_name_obj->IsString()
? Handle<String>::cast(context_name_obj)
: isolate->factory()->anonymous_string();
@@ -74,6 +177,7 @@ void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
}
LOG(isolate, TimerEvent(se, raw_name));
}
+
} // namespace
#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
@@ -85,6 +189,18 @@ void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
+#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name, index) \
+ BUILTIN(Console##call) { \
+ if (!Formatter(isolate, args, index)) { \
+ return ReadOnlyRoots(isolate).exception(); \
+ } \
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::call); \
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); \
+ return ReadOnlyRoots(isolate).undefined_value(); \
+ }
+CONSOLE_METHOD_WITH_FORMATTER_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
+#undef CONSOLE_BUILTIN_IMPLEMENTATION
+
BUILTIN(ConsoleTime) {
LogTimerEvent(isolate, args, v8::LogEventStatus::kStart);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time);
@@ -162,10 +278,11 @@ BUILTIN(ConsoleContext) {
int id = isolate->last_console_context_id() + 1;
isolate->set_last_console_context_id(id);
-#define CONSOLE_BUILTIN_SETUP(call, name) \
+#define CONSOLE_BUILTIN_SETUP(call, name, ...) \
InstallContextFunction(isolate, context, #name, Builtin::kConsole##call, id, \
args.at(1));
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_SETUP)
+ CONSOLE_METHOD_WITH_FORMATTER_LIST(CONSOLE_BUILTIN_SETUP)
#undef CONSOLE_BUILTIN_SETUP
InstallContextFunction(isolate, context, "time", Builtin::kConsoleTime, id,
args.at(1));
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 9fff2f4911..f452f83f1a 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -255,7 +255,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
TNode<CodeT> lazy_builtin =
- HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
+ HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index bab7ba4eeb..d84e9c4267 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -88,7 +88,8 @@ BUILTIN(DataViewConstructor) {
MessageTemplate::kInvalidDataViewLength));
if (view_byte_offset + byte_length->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidDataViewLength, byte_length));
}
view_byte_length = byte_length->Number();
}
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index c1264891f6..c4e81c71fd 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -145,7 +145,7 @@ BUILTIN(DateConstructor) {
time_val = Handle<JSDate>::cast(value)->value().Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToPrimitive(value));
+ Object::ToPrimitive(isolate, value));
if (value->IsString()) {
time_val = ParseDateTimeString(isolate, Handle<String>::cast(value));
} else {
@@ -910,7 +910,7 @@ BUILTIN(DatePrototypeToJson) {
Handle<Object> primitive;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, primitive,
- Object::ToPrimitive(receiver_obj, ToPrimitiveHint::kNumber));
+ Object::ToPrimitive(isolate, receiver_obj, ToPrimitiveHint::kNumber));
if (primitive->IsNumber() && !std::isfinite(primitive->Number())) {
return ReadOnlyRoots(isolate).null_value();
} else {
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 538a3970d4..53533f5ac7 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -95,6 +95,8 @@ namespace internal {
ASM(CallFunction_ReceiverIsAny, CallTrampoline) \
/* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
ASM(CallBoundFunction, CallTrampoline) \
+ /* #sec-wrapped-function-exotic-objects-call-thisargument-argumentslist */ \
+ TFC(CallWrappedFunction, CallTrampoline) \
/* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
@@ -273,7 +275,7 @@ namespace internal {
/* Handlers */ \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, Store) \
- TFH(KeyedDefineOwnIC_Megamorphic, Store) \
+ TFH(DefineKeyedOwnIC_Megamorphic, Store) \
TFH(LoadGlobalIC_NoFeedback, LoadGlobalNoFeedback) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
TFH(LoadIC_StringLength, LoadWithVector) \
@@ -281,7 +283,7 @@ namespace internal {
TFH(LoadIC_NoFeedback, LoadNoFeedback) \
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_NoFeedback, Store) \
- TFH(StoreOwnIC_NoFeedback, Store) \
+ TFH(DefineNamedOwnIC_NoFeedback, Store) \
TFH(KeyedLoadIC_SloppyArguments, LoadWithVector) \
TFH(LoadIndexedInterceptorIC, LoadWithVector) \
TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector) \
@@ -315,6 +317,10 @@ namespace internal {
/* ES #sec-copydataproperties */ \
TFS(CopyDataProperties, kTarget, kSource) \
TFS(SetDataProperties, kTarget, kSource) \
+ TFC(CopyDataPropertiesWithExcludedPropertiesOnStack, \
+ CopyDataPropertiesWithExcludedPropertiesOnStack) \
+ TFC(CopyDataPropertiesWithExcludedProperties, \
+ CopyDataPropertiesWithExcludedProperties) \
\
/* Abort */ \
TFC(Abort, Abort) \
@@ -385,6 +391,9 @@ namespace internal {
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
TFJ(ArrayPrototypePop, kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.prototype.groupby */ \
+ CPP(ArrayPrototypeGroupBy) \
+ CPP(ArrayPrototypeGroupByToMap) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
TFJ(ArrayPrototypePush, kDontAdaptArgumentsSentinel) \
@@ -423,8 +432,8 @@ namespace internal {
\
/* AsyncFunction */ \
TFS(AsyncFunctionEnter, kClosure, kReceiver) \
- TFS(AsyncFunctionReject, kAsyncFunctionObject, kReason, kCanSuspend) \
- TFS(AsyncFunctionResolve, kAsyncFunctionObject, kValue, kCanSuspend) \
+ TFS(AsyncFunctionReject, kAsyncFunctionObject, kReason) \
+ TFS(AsyncFunctionResolve, kAsyncFunctionObject, kValue) \
TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \
TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
@@ -621,26 +630,26 @@ namespace internal {
TFH(LoadICTrampoline_Megamorphic, Load) \
TFH(LoadSuperIC, LoadWithReceiverAndVector) \
TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \
- TFH(KeyedLoadIC, LoadWithVector) \
- TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
- TFH(KeyedLoadICTrampoline, Load) \
- TFH(KeyedLoadICBaseline, LoadBaseline) \
- TFH(KeyedLoadICTrampoline_Megamorphic, Load) \
+ TFH(KeyedLoadIC, KeyedLoadWithVector) \
+ TFH(KeyedLoadIC_Megamorphic, KeyedLoadWithVector) \
+ TFH(KeyedLoadICTrampoline, KeyedLoad) \
+ TFH(KeyedLoadICBaseline, KeyedLoadBaseline) \
+ TFH(KeyedLoadICTrampoline_Megamorphic, KeyedLoad) \
TFH(StoreGlobalIC, StoreGlobalWithVector) \
TFH(StoreGlobalICTrampoline, StoreGlobal) \
TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \
TFH(StoreIC, StoreWithVector) \
TFH(StoreICTrampoline, Store) \
TFH(StoreICBaseline, StoreBaseline) \
- TFH(StoreOwnIC, StoreWithVector) \
- TFH(StoreOwnICTrampoline, Store) \
- TFH(StoreOwnICBaseline, StoreBaseline) \
+ TFH(DefineNamedOwnIC, StoreWithVector) \
+ TFH(DefineNamedOwnICTrampoline, Store) \
+ TFH(DefineNamedOwnICBaseline, StoreBaseline) \
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
TFH(KeyedStoreICBaseline, StoreBaseline) \
- TFH(KeyedDefineOwnIC, StoreWithVector) \
- TFH(KeyedDefineOwnICTrampoline, Store) \
- TFH(KeyedDefineOwnICBaseline, StoreBaseline) \
+ TFH(DefineKeyedOwnIC, StoreWithVector) \
+ TFH(DefineKeyedOwnICTrampoline, Store) \
+ TFH(DefineKeyedOwnICBaseline, StoreBaseline) \
TFH(StoreInArrayLiteralIC, StoreWithVector) \
TFH(StoreInArrayLiteralICBaseline, StoreBaseline) \
TFH(LookupContextBaseline, LookupBaseline) \
@@ -656,9 +665,9 @@ namespace internal {
TFH(CloneObjectIC, CloneObjectWithVector) \
TFH(CloneObjectICBaseline, CloneObjectBaseline) \
TFH(CloneObjectIC_Slow, CloneObjectWithVector) \
- TFH(KeyedHasIC, LoadWithVector) \
- TFH(KeyedHasICBaseline, LoadBaseline) \
- TFH(KeyedHasIC_Megamorphic, LoadWithVector) \
+ TFH(KeyedHasIC, KeyedHasICWithVector) \
+ TFH(KeyedHasICBaseline, KeyedHasICBaseline) \
+ TFH(KeyedHasIC_Megamorphic, KeyedHasICWithVector) \
\
/* IterableToList */ \
/* ES #sec-iterabletolist */ \
@@ -704,29 +713,29 @@ namespace internal {
\
/* Binary ops with feedback collection */ \
TFC(Add_Baseline, BinaryOp_Baseline) \
- TFC(AddSmi_Baseline, BinaryOp_Baseline) \
+ TFC(AddSmi_Baseline, BinarySmiOp_Baseline) \
TFC(Subtract_Baseline, BinaryOp_Baseline) \
- TFC(SubtractSmi_Baseline, BinaryOp_Baseline) \
+ TFC(SubtractSmi_Baseline, BinarySmiOp_Baseline) \
TFC(Multiply_Baseline, BinaryOp_Baseline) \
- TFC(MultiplySmi_Baseline, BinaryOp_Baseline) \
+ TFC(MultiplySmi_Baseline, BinarySmiOp_Baseline) \
TFC(Divide_Baseline, BinaryOp_Baseline) \
- TFC(DivideSmi_Baseline, BinaryOp_Baseline) \
+ TFC(DivideSmi_Baseline, BinarySmiOp_Baseline) \
TFC(Modulus_Baseline, BinaryOp_Baseline) \
- TFC(ModulusSmi_Baseline, BinaryOp_Baseline) \
+ TFC(ModulusSmi_Baseline, BinarySmiOp_Baseline) \
TFC(Exponentiate_Baseline, BinaryOp_Baseline) \
- TFC(ExponentiateSmi_Baseline, BinaryOp_Baseline) \
+ TFC(ExponentiateSmi_Baseline, BinarySmiOp_Baseline) \
TFC(BitwiseAnd_Baseline, BinaryOp_Baseline) \
- TFC(BitwiseAndSmi_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseAndSmi_Baseline, BinarySmiOp_Baseline) \
TFC(BitwiseOr_Baseline, BinaryOp_Baseline) \
- TFC(BitwiseOrSmi_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseOrSmi_Baseline, BinarySmiOp_Baseline) \
TFC(BitwiseXor_Baseline, BinaryOp_Baseline) \
- TFC(BitwiseXorSmi_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseXorSmi_Baseline, BinarySmiOp_Baseline) \
TFC(ShiftLeft_Baseline, BinaryOp_Baseline) \
- TFC(ShiftLeftSmi_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftLeftSmi_Baseline, BinarySmiOp_Baseline) \
TFC(ShiftRight_Baseline, BinaryOp_Baseline) \
- TFC(ShiftRightSmi_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRightSmi_Baseline, BinarySmiOp_Baseline) \
TFC(ShiftRightLogical_Baseline, BinaryOp_Baseline) \
- TFC(ShiftRightLogicalSmi_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRightLogicalSmi_Baseline, BinarySmiOp_Baseline) \
\
TFC(Add_WithFeedback, BinaryOp_WithFeedback) \
TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \
@@ -867,15 +876,22 @@ namespace internal {
TFJ(SetIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(SetOrSetIteratorToList, kSource) \
\
+ /* ShadowRealm */ \
+ CPP(ShadowRealmConstructor) \
+ CPP(ShadowRealmPrototypeEvaluate) \
+ CPP(ShadowRealmPrototypeImportValue) \
+ TFS(ShadowRealmGetWrappedValue, kCreationContext, kValue) \
+ \
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
/* https://tc39.es/proposal-resizablearraybuffer/ */ \
CPP(SharedArrayBufferPrototypeGrow) \
\
- TFJ(AtomicsLoad, kJSArgcReceiverSlots + 2, kReceiver, kArray, kIndex) \
- TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
- kValue) \
+ TFJ(AtomicsLoad, kJSArgcReceiverSlots + 2, kReceiver, kArrayOrSharedStruct, \
+ kIndexOrFieldName) \
+ TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArrayOrSharedStruct, \
+ kIndexOrFieldName, kValue) \
TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
kValue) \
TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray, \
@@ -948,6 +964,8 @@ namespace internal {
/* Wasm */ \
IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \
IF_WASM(ASM, WasmReturnPromiseOnSuspend, Dummy) \
+ IF_WASM(ASM, WasmSuspend, WasmSuspend) \
+ IF_WASM(ASM, WasmResume, Dummy) \
IF_WASM(ASM, WasmCompileLazy, Dummy) \
IF_WASM(ASM, WasmDebugBreak, Dummy) \
IF_WASM(ASM, WasmOnStackReplace, Dummy) \
@@ -975,6 +993,10 @@ namespace internal {
TFS(WeakCollectionDelete, kCollection, kKey) \
TFS(WeakCollectionSet, kCollection, kKey, kValue) \
\
+ /* JS Structs */ \
+ CPP(SharedStructTypeConstructor) \
+ CPP(SharedStructConstructor) \
+ \
/* AsyncGenerator */ \
\
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
@@ -1637,7 +1659,11 @@ namespace internal {
/* Temporal #sec-temporal.calendar.prototype.tostring */ \
CPP(TemporalCalendarPrototypeToString) \
/* Temporal #sec-temporal.calendar.prototype.tojson */ \
- CPP(TemporalCalendarPrototypeToJSON)
+ CPP(TemporalCalendarPrototypeToJSON) \
+ \
+ /* "Private" (created but not exposed) Bulitins needed by Temporal */ \
+ TFJ(StringFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable) \
+ TFJ(TemporalInstantFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable)
#define BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \
BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \
@@ -1747,6 +1773,10 @@ namespace internal {
CPP(NumberFormatInternalFormatNumber) \
/* ecma402 #sec-intl.numberformat.prototype.format */ \
CPP(NumberFormatPrototypeFormatNumber) \
+ /* ecma402 #sec-intl.numberformat.prototype.formatrange */ \
+ CPP(NumberFormatPrototypeFormatRange) \
+ /* ecma402 #sec-intl.numberformat.prototype.formatrangetoparts */ \
+ CPP(NumberFormatPrototypeFormatRangeToParts) \
/* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
CPP(NumberFormatPrototypeFormatToParts) \
/* ecma402 #sec-intl.numberformat.prototype.resolvedoptions */ \
@@ -1759,6 +1789,8 @@ namespace internal {
CPP(PluralRulesPrototypeResolvedOptions) \
/* ecma402 #sec-intl.pluralrules.prototype.select */ \
CPP(PluralRulesPrototypeSelect) \
+ /* ecma402 #sec-intl.pluralrules.prototype.selectrange */ \
+ CPP(PluralRulesPrototypeSelectRange) \
/* ecma402 #sec-intl.pluralrules.supportedlocalesof */ \
CPP(PluralRulesSupportedLocalesOf) \
/* ecma402 #sec-intl.RelativeTimeFormat.constructor */ \
@@ -1895,13 +1927,10 @@ namespace internal {
V(PromiseConstructor) \
V(PromiseConstructorLazyDeoptContinuation) \
V(PromiseFulfillReactionJob) \
+ V(PromiseRejectReactionJob) \
V(PromiseRace) \
V(ResolvePromise)
-// The exception thrown in the following builtins are caught internally and will
-// not be propagated further or re-thrown
-#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseRejectReactionJob)
-
#define IGNORE_BUILTIN(...)
#define BUILTIN_LIST_C(V) \
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 44dce9224a..3abf98f742 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -44,10 +44,8 @@ BUILTIN(ErrorCaptureStackTrace) {
// Collect the stack trace.
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- isolate->CaptureAndSetDetailedStackTrace(object));
RETURN_FAILURE_ON_EXCEPTION(
- isolate, isolate->CaptureAndSetSimpleStackTrace(object, mode, caller));
+ isolate, isolate->CaptureAndSetErrorStack(object, mode, caller));
// Add the stack accessors.
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 23350c3860..7fb1d7d55f 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -207,16 +207,18 @@ Object DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
isolate, function,
isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
- LookupIterator length_lookup(isolate, target,
- isolate->factory()->length_string(), target,
- LookupIterator::OWN);
// Setup the "length" property based on the "length" of the {target}.
// If the targets length is the default JSFunction accessor, we can keep the
// accessor that's installed by default on the JSBoundFunction. It lazily
// computes the value from the underlying internal length.
+ Handle<AccessorInfo> function_length_accessor =
+ isolate->factory()->function_length_accessor();
+ LookupIterator length_lookup(isolate, target,
+ isolate->factory()->length_string(), target,
+ LookupIterator::OWN);
if (!target->IsJSFunction() ||
length_lookup.state() != LookupIterator::ACCESSOR ||
- !length_lookup.GetAccessors()->IsAccessorInfo()) {
+ !length_lookup.GetAccessors().is_identical_to(function_length_accessor)) {
Handle<Object> length(Smi::zero(), isolate);
Maybe<PropertyAttributes> attributes =
JSReceiver::GetPropertyAttributes(&length_lookup);
@@ -242,11 +244,13 @@ Object DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// If the target's name is the default JSFunction accessor, we can keep the
// accessor that's installed by default on the JSBoundFunction. It lazily
// computes the value from the underlying internal name.
+ Handle<AccessorInfo> function_name_accessor =
+ isolate->factory()->function_name_accessor();
LookupIterator name_lookup(isolate, target, isolate->factory()->name_string(),
target);
if (!target->IsJSFunction() ||
name_lookup.state() != LookupIterator::ACCESSOR ||
- !name_lookup.GetAccessors()->IsAccessorInfo() ||
+ !name_lookup.GetAccessors().is_identical_to(function_name_accessor) ||
(name_lookup.IsFound() && !name_lookup.HolderIsReceiver())) {
Handle<Object> target_name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_name,
@@ -257,8 +261,9 @@ Object DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
isolate, name,
Name::ToFunctionName(isolate, Handle<String>::cast(target_name)));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, name, isolate->factory()->NewConsString(
- isolate->factory()->bound__string(), name));
+ isolate, name,
+ isolate->factory()->NewConsString(isolate->factory()->bound__string(),
+ name));
} else {
name = isolate->factory()->bound__string();
}
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index cdae0cdd33..09e7f2e4ec 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -231,7 +231,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
// Copy over the function parameters
auto parameter_base_index = IntPtrConstant(
- interpreter::Register::FromParameterIndex(0, 1).ToOperand() + 1);
+ interpreter::Register::FromParameterIndex(0).ToOperand() + 1);
CSA_CHECK(this, UintPtrLessThan(formal_parameter_count,
parameters_and_registers_length));
auto parent_frame_pointer = LoadParentFramePointer();
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index ed039cc680..cc94990e84 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -58,9 +58,9 @@ void Builtins::Generate_KeyedStoreIC_Megamorphic(
KeyedStoreGenericGenerator::Generate(state);
}
-void Builtins::Generate_KeyedDefineOwnIC_Megamorphic(
+void Builtins::Generate_DefineKeyedOwnIC_Megamorphic(
compiler::CodeAssemblerState* state) {
- KeyedDefineOwnGenericGenerator::Generate(state);
+ DefineKeyedOwnGenericGenerator::Generate(state);
}
void Builtins::Generate_StoreIC_NoFeedback(
@@ -68,9 +68,9 @@ void Builtins::Generate_StoreIC_NoFeedback(
StoreICNoFeedbackGenerator::Generate(state);
}
-void Builtins::Generate_StoreOwnIC_NoFeedback(
+void Builtins::Generate_DefineNamedOwnIC_NoFeedback(
compiler::CodeAssemblerState* state) {
- StoreOwnICNoFeedbackGenerator::Generate(state);
+ DefineNamedOwnICNoFeedbackGenerator::Generate(state);
}
// All possible fast-to-fast transitions. Transitions to dictionary mode are not
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 19bf83cabb..744c057099 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -109,19 +109,19 @@ void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
assembler.GenerateStoreICBaseline();
}
-void Builtins::Generate_StoreOwnIC(compiler::CodeAssemblerState* state) {
+void Builtins::Generate_DefineNamedOwnIC(compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateStoreOwnIC();
+ assembler.GenerateDefineNamedOwnIC();
}
-void Builtins::Generate_StoreOwnICTrampoline(
+void Builtins::Generate_DefineNamedOwnICTrampoline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateStoreOwnICTrampoline();
+ assembler.GenerateDefineNamedOwnICTrampoline();
}
-void Builtins::Generate_StoreOwnICBaseline(
+void Builtins::Generate_DefineNamedOwnICBaseline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateStoreOwnICBaseline();
+ assembler.GenerateDefineNamedOwnICBaseline();
}
void Builtins::Generate_KeyedStoreIC(compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
@@ -137,19 +137,19 @@ void Builtins::Generate_KeyedStoreICBaseline(
AccessorAssembler assembler(state);
assembler.GenerateKeyedStoreICBaseline();
}
-void Builtins::Generate_KeyedDefineOwnIC(compiler::CodeAssemblerState* state) {
+void Builtins::Generate_DefineKeyedOwnIC(compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateKeyedDefineOwnIC();
+ assembler.GenerateDefineKeyedOwnIC();
}
-void Builtins::Generate_KeyedDefineOwnICTrampoline(
+void Builtins::Generate_DefineKeyedOwnICTrampoline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateKeyedDefineOwnICTrampoline();
+ assembler.GenerateDefineKeyedOwnICTrampoline();
}
-void Builtins::Generate_KeyedDefineOwnICBaseline(
+void Builtins::Generate_DefineKeyedOwnICBaseline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
- assembler.GenerateKeyedDefineOwnICBaseline();
+ assembler.GenerateDefineKeyedOwnICBaseline();
}
void Builtins::Generate_StoreInArrayLiteralIC(
compiler::CodeAssemblerState* state) {
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 4777983a4e..a8b22f243c 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -111,8 +111,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
BIND(&tailcall_to_shared);
// Tail call into code object on the SharedFunctionInfo.
TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
- // TODO(v8:11880): call CodeT directly.
- TailCallJSCode(FromCodeT(code), context, function, new_target, arg_count);
+ TailCallJSCode(code, context, function, new_target, arg_count);
}
class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
@@ -766,10 +765,21 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- TNode<Object> SetOrCopyDataProperties(TNode<Context> context,
- TNode<JSReceiver> target,
- TNode<Object> source, Label* if_runtime,
- bool use_set = true) {
+ TNode<JSObject> AllocateJsObjectTarget(TNode<Context> context) {
+ const TNode<NativeContext> native_context = LoadNativeContext(context);
+ const TNode<JSFunction> object_function = Cast(
+ LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
+ const TNode<Map> map =
+ Cast(LoadJSFunctionPrototypeOrInitialMap(object_function));
+ const TNode<JSObject> target = AllocateJSObjectFromMap(map);
+ return target;
+ }
+ TNode<Object> SetOrCopyDataProperties(
+ TNode<Context> context, TNode<JSReceiver> target, TNode<Object> source,
+ Label* if_runtime,
+ base::Optional<TNode<IntPtrT>> excluded_property_count = base::nullopt,
+ base::Optional<TNode<IntPtrT>> excluded_property_base = base::nullopt,
+ bool use_set = true) {
Label if_done(this), if_noelements(this),
if_sourcenotjsobject(this, Label::kDeferred);
@@ -792,12 +802,12 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
BIND(&if_noelements);
{
- // If the target is deprecated, the object will be updated on first store.
- // If the source for that store equals the target, this will invalidate
- // the cached representation of the source. Handle this case in runtime.
+ // If the target is deprecated, the object will be updated on first
+ // store. If the source for that store equals the target, this will
+ // invalidate the cached representation of the source. Handle this case
+ // in runtime.
TNode<Map> target_map = LoadMap(target);
GotoIf(IsDeprecatedMap(target_map), if_runtime);
-
if (use_set) {
TNode<BoolT> target_is_simple_receiver = IsSimpleObjectMap(target_map);
ForEachEnumerableOwnProperty(
@@ -812,8 +822,27 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
ForEachEnumerableOwnProperty(
context, source_map, CAST(source), kEnumerationOrder,
[=](TNode<Name> key, TNode<Object> value) {
+ Label skip(this);
+ if (excluded_property_count.has_value()) {
+ BuildFastLoop<IntPtrT>(
+ IntPtrConstant(0), excluded_property_count.value(),
+ [&](TNode<IntPtrT> index) {
+ auto offset = Signed(TimesSystemPointerSize(index));
+ TNode<IntPtrT> location = Signed(
+ IntPtrSub(excluded_property_base.value(), offset));
+ auto property = LoadFullTagged(location);
+
+ Label continue_label(this);
+ BranchIfSameValue(key, property, &skip, &continue_label);
+ Bind(&continue_label);
+ },
+ 1, IndexAdvanceMode::kPost);
+ }
+
CallBuiltin(Builtin::kSetPropertyInLiteral, context, target, key,
value);
+ Goto(&skip);
+ Bind(&skip);
},
if_runtime);
}
@@ -834,12 +863,62 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
}
BIND(&if_done);
- return UndefinedConstant();
+ return target;
}
};
} // namespace
+TF_BUILTIN(CopyDataPropertiesWithExcludedPropertiesOnStack,
+ SetOrCopyDataPropertiesAssembler) {
+ auto source = UncheckedParameter<Object>(Descriptor::kSource);
+ auto excluded_property_count =
+ UncheckedParameter<IntPtrT>(Descriptor::kExcludedPropertyCount);
+ auto excluded_properties =
+ UncheckedParameter<IntPtrT>(Descriptor::kExcludedPropertyBase);
+ auto context = Parameter<Context>(Descriptor::kContext);
+
+ // first check undefine or null
+ Label if_runtime(this, Label::kDeferred);
+ GotoIf(IsNullOrUndefined(source), &if_runtime);
+
+ TNode<JSReceiver> target = AllocateJsObjectTarget(context);
+ Return(SetOrCopyDataProperties(context, target, source, &if_runtime,
+ excluded_property_count, excluded_properties,
+ false));
+
+ BIND(&if_runtime);
+ // The excluded_property_base is passed as a raw stack pointer, but is
+ // bitcasted to a Smi . This is safe because the stack pointer is aligned, so
+ // it looks like a Smi to the GC.
+ CSA_DCHECK(this, IntPtrEqual(WordAnd(excluded_properties,
+ IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(kSmiTag)));
+ TailCallRuntime(Runtime::kCopyDataPropertiesWithExcludedPropertiesOnStack,
+ context, source, SmiTag(excluded_property_count),
+ BitcastWordToTaggedSigned(excluded_properties));
+}
+
+TF_BUILTIN(CopyDataPropertiesWithExcludedProperties,
+ SetOrCopyDataPropertiesAssembler) {
+ auto source = UncheckedParameter<Object>(Descriptor::kSource);
+
+ auto excluded_property_count_smi =
+ UncheckedParameter<Smi>(Descriptor::kExcludedPropertyCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+
+ auto excluded_property_count = SmiToIntPtr(excluded_property_count_smi);
+ CodeStubArguments arguments(this, excluded_property_count);
+
+ TNode<IntPtrT> excluded_properties =
+ ReinterpretCast<IntPtrT>(arguments.AtIndexPtr(
+ IntPtrSub(excluded_property_count, IntPtrConstant(2))));
+
+ arguments.PopAndReturn(CallBuiltin(
+ Builtin::kCopyDataPropertiesWithExcludedPropertiesOnStack, context,
+ source, excluded_property_count, excluded_properties));
+}
+
// ES #sec-copydataproperties
TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
auto target = Parameter<JSObject>(Descriptor::kTarget);
@@ -849,7 +928,9 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
CSA_DCHECK(this, TaggedNotEqual(target, source));
Label if_runtime(this, Label::kDeferred);
- Return(SetOrCopyDataProperties(context, target, source, &if_runtime, false));
+ SetOrCopyDataProperties(context, target, source, &if_runtime, base::nullopt,
+ base::nullopt, false);
+ Return(UndefinedConstant());
BIND(&if_runtime);
TailCallRuntime(Runtime::kCopyDataProperties, context, target, source);
@@ -862,7 +943,9 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
Label if_runtime(this, Label::kDeferred);
GotoIfForceSlowPath(&if_runtime);
- Return(SetOrCopyDataProperties(context, target, source, &if_runtime, true));
+ SetOrCopyDataProperties(context, target, source, &if_runtime, base::nullopt,
+ base::nullopt, true);
+ Return(UndefinedConstant());
BIND(&if_runtime);
TailCallRuntime(Runtime::kSetDataProperties, context, target, source);
@@ -989,7 +1072,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
- TNode<Code> code =
+ TNode<CodeT> code =
HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, builtin_exit_frame));
@@ -1328,8 +1411,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
// On failure, tail call back to regular JavaScript by re-calling the given
// function which has been reset to the compile lazy builtin.
- // TODO(v8:11880): call CodeT directly.
- TNode<Code> code = FromCodeT(LoadJSFunctionCode(function));
+ TNode<CodeT> code = LoadJSFunctionCode(function);
TailCallJSCode(code, context, function, new_target, arg_count);
}
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 1d72a3ae32..3089aa65bd 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -85,8 +85,15 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
Handle<Object> x;
if (args.length() >= 2) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
- Object::ToNumeric(isolate, args.at(1)));
+ Handle<Object> value = args.at(1);
+ if (FLAG_harmony_intl_number_format_v3) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x,
+ Intl::ToIntlMathematicalValueAsNumberBigIntOrString(isolate, value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumeric(isolate, value));
+ }
} else {
x = isolate->factory()->nan_value();
}
@@ -156,11 +163,10 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
}
// Common code for DateTimeFormatPrototypeFormtRange(|ToParts)
-template <class T>
+template <class T, MaybeHandle<T> (*F)(Isolate*, Handle<JSDateTimeFormat>,
+ double, double)>
V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
- BuiltinArguments args, Isolate* isolate, const char* const method_name,
- MaybeHandle<T> (*format)(Isolate*, Handle<JSDateTimeFormat>, double,
- double)) {
+ BuiltinArguments args, Isolate* isolate, const char* const method_name) {
// 1. Let dtf be this value.
// 2. If Type(dtf) is not Object, throw a TypeError exception.
CHECK_RECEIVER(JSObject, date_format_holder, method_name);
@@ -204,22 +210,22 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
// 8. Return ? FormatDateTimeRange(dtf, x, y)
// OR
// 8. Return ? FormatDateTimeRangeToParts(dtf, x, y).
- RETURN_RESULT_OR_FAILURE(isolate, format(isolate, dtf, x, y));
+ RETURN_RESULT_OR_FAILURE(isolate, F(isolate, dtf, x, y));
}
BUILTIN(DateTimeFormatPrototypeFormatRange) {
const char* const method_name = "Intl.DateTimeFormat.prototype.formatRange";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<String>(args, isolate, method_name,
- JSDateTimeFormat::FormatRange);
+ return DateTimeFormatRange<String, JSDateTimeFormat::FormatRange>(
+ args, isolate, method_name);
}
BUILTIN(DateTimeFormatPrototypeFormatRangeToParts) {
const char* const method_name =
"Intl.DateTimeFormat.prototype.formatRangeToParts";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<JSArray>(args, isolate, method_name,
- JSDateTimeFormat::FormatRangeToParts);
+ return DateTimeFormatRange<JSArray, JSDateTimeFormat::FormatRangeToParts>(
+ args, isolate, method_name);
}
namespace {
@@ -501,8 +507,14 @@ BUILTIN(NumberFormatInternalFormatNumber) {
// 4. Let x be ? ToNumeric(value).
Handle<Object> numeric_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
- Object::ToNumeric(isolate, value));
+ if (FLAG_harmony_intl_number_format_v3) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, numeric_obj,
+ Intl::ToIntlMathematicalValueAsNumberBigIntOrString(isolate, value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumeric(isolate, value));
+ }
icu::number::LocalizedNumberFormatter* icu_localized_number_formatter =
number_format->icu_number_formatter().raw();
@@ -514,6 +526,63 @@ BUILTIN(NumberFormatInternalFormatNumber) {
isolate, *icu_localized_number_formatter, numeric_obj));
}
+// Common code for NumberFormatPrototypeFormtRange(|ToParts)
+template <class T, MaybeHandle<T> (*F)(Isolate*, Handle<JSNumberFormat>,
+ Handle<Object>, Handle<Object>)>
+V8_WARN_UNUSED_RESULT Object NumberFormatRange(BuiltinArguments args,
+ Isolate* isolate,
+ const char* const method_name) {
+ // 1. Let nf be this value.
+ // 2. Perform ? RequireInternalSlot(nf, [[InitializedNumberFormat]]).
+ CHECK_RECEIVER(JSNumberFormat, nf, method_name);
+
+ Handle<Object> start = args.atOrUndefined(isolate, 1);
+ Handle<Object> end = args.atOrUndefined(isolate, 2);
+
+ Factory* factory = isolate->factory();
+ // 3. If start is undefined or end is undefined, throw a TypeError exception.
+ if (start->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("start"), start));
+ }
+ if (end->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("end"), end));
+ }
+
+ // 4. Let x be ? ToIntlMathematicalValue(start).
+ Handle<Object> x;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x,
+ Intl::ToIntlMathematicalValueAsNumberBigIntOrString(isolate, start));
+
+ // 5. Let y be ? ToIntlMathematicalValue(end).
+ Handle<Object> y;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, y,
+ Intl::ToIntlMathematicalValueAsNumberBigIntOrString(isolate, end));
+
+ RETURN_RESULT_OR_FAILURE(isolate, F(isolate, nf, x, y));
+}
+
+BUILTIN(NumberFormatPrototypeFormatRange) {
+ const char* const method_name = "Intl.NumberFormat.prototype.formatRange";
+ HandleScope handle_scope(isolate);
+ return NumberFormatRange<String, JSNumberFormat::FormatNumericRange>(
+ args, isolate, method_name);
+}
+
+BUILTIN(NumberFormatPrototypeFormatRangeToParts) {
+ const char* const method_name =
+ "Intl.NumberFormat.prototype.formatRangeToParts";
+ HandleScope handle_scope(isolate);
+ return NumberFormatRange<JSArray, JSNumberFormat::FormatNumericRangeToParts>(
+ args, isolate, method_name);
+}
+
BUILTIN(DateTimeFormatConstructor) {
HandleScope scope(isolate);
@@ -902,24 +971,80 @@ BUILTIN(PluralRulesPrototypeResolvedOptions) {
BUILTIN(PluralRulesPrototypeSelect) {
HandleScope scope(isolate);
- // 1. Let pr be the this value.
- // 2. If Type(pr) is not Object, throw a TypeError exception.
- // 3. If pr does not have an [[InitializedPluralRules]] internal slot, throw a
- // TypeError exception.
+ // 1. 1. Let pr be the this value.
+ // 2. Perform ? RequireInternalSlot(pr, [[InitializedPluralRules]]).
CHECK_RECEIVER(JSPluralRules, plural_rules,
"Intl.PluralRules.prototype.select");
- // 4. Let n be ? ToNumber(value).
+ // 3. Let n be ? ToNumber(value).
Handle<Object> number = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number,
Object::ToNumber(isolate, number));
double number_double = number->Number();
- // 5. Return ? ResolvePlural(pr, n).
+ // 4. Return ! ResolvePlural(pr, n).
RETURN_RESULT_OR_FAILURE(isolate, JSPluralRules::ResolvePlural(
isolate, plural_rules, number_double));
}
+BUILTIN(PluralRulesPrototypeSelectRange) {
+ HandleScope scope(isolate);
+
+ // 1. Let pr be the this value.
+ // 2. Perform ? RequireInternalSlot(pr, [[InitializedPluralRules]]).
+ CHECK_RECEIVER(JSPluralRules, plural_rules,
+ "Intl.PluralRules.prototype.selectRange");
+
+ // 3. If start is undefined or end is undefined, throw a TypeError exception.
+ Handle<Object> start = args.atOrUndefined(isolate, 1);
+ Handle<Object> end = args.atOrUndefined(isolate, 2);
+ if (start->IsUndefined()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalid,
+ isolate->factory()->startRange_string(), start));
+ }
+ if (end->IsUndefined()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalid,
+ isolate->factory()->endRange_string(), end));
+ }
+
+ // 4. Let x be ? ToNumber(start).
+ Handle<Object> x;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumber(isolate, start));
+
+ // 5. Let y be ? ToNumber(end).
+ Handle<Object> y;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, y,
+ Object::ToNumber(isolate, end));
+
+ // 6. Return ! ResolvePluralRange(pr, x, y).
+ // Inside ResolvePluralRange
+ // 5. If x is NaN or y is NaN, throw a RangeError exception.
+ if (x->IsNaN()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalid,
+ isolate->factory()->startRange_string(), x));
+ }
+ if (y->IsNaN()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalid,
+ isolate->factory()->endRange_string(), y));
+ }
+
+ // 6. If x > y, throw a RangeError exception.
+ double x_double = x->Number();
+ double y_double = y->Number();
+ if (x_double > y_double) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y));
+ }
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSPluralRules::ResolvePluralRange(isolate, plural_rules,
+ x_double, y_double));
+}
+
BUILTIN(PluralRulesSupportedLocalesOf) {
HandleScope scope(isolate);
Handle<Object> locales = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 6656a37a1c..60d30ce3e8 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -218,7 +218,7 @@ TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
+TNode<FixedArray> IteratorBuiltinsAssembler::StringListFromIterable(
TNode<Context> context, TNode<Object> iterable) {
Label done(this);
GrowableFixedArray list(state());
@@ -279,7 +279,7 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
BIND(&done);
// 6. Return list.
- return list.ToJSArray(context);
+ return list.ToFixedArray();
}
TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
@@ -289,6 +289,13 @@ TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
Return(StringListFromIterable(context, iterable));
}
+TF_BUILTIN(StringFixedArrayFromIterable, IteratorBuiltinsAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+
+ Return(StringListFromIterable(context, iterable));
+}
+
// This builtin always returns a new JSArray and is thus safe to use even in the
// presence of code that may call back into user-JS. This builtin will take the
// fast path if the iterable is a fast array and the Array prototype and the
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index c584708861..e9257f013d 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -67,8 +67,8 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// Currently at https://tc39.github.io/proposal-intl-list-format/
// #sec-createstringlistfromiterable
- TNode<JSArray> StringListFromIterable(TNode<Context> context,
- TNode<Object> iterable);
+ TNode<FixedArray> StringListFromIterable(TNode<Context> context,
+ TNode<Object> iterable);
void FastIterableToList(TNode<Context> context, TNode<Object> iterable,
TVariable<JSArray>* var_result, Label* slow);
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 5e888ba563..f9b5378cc9 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -19,8 +19,7 @@ void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
- // TODO(v8:11880): call CodeT directly.
- TailCallJSCode(FromCodeT(code), context, function, new_target, argc);
+ TailCallJSCode(code, context, function, new_target, argc);
}
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
@@ -54,23 +53,26 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
&fallthrough);
- GotoIfNot(IsSetWord32(
- optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
+ GotoIfNot(IsSetWord32(optimization_state,
+ FeedbackVector::kHasCompileOptimizedMarker),
&may_have_optimized_code);
// TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
// all these marker values there.
TNode<Uint32T> marker =
DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
- TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution, function);
- TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent,
- function);
TailCallRuntimeIfMarkerEquals(
- marker, OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent, function);
+ marker, OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent, function);
+ TailCallRuntimeIfMarkerEquals(marker,
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent, function);
+ TailCallRuntimeIfMarkerEquals(
+ marker, OptimizationMarker::kCompileMaglev_NotConcurrent,
+ Runtime::kCompileMaglev_NotConcurrent, function);
+ TailCallRuntimeIfMarkerEquals(marker,
+ OptimizationMarker::kCompileMaglev_Concurrent,
+ Runtime::kCompileMaglev_Concurrent, function);
Unreachable();
BIND(&may_have_optimized_code);
@@ -145,7 +147,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code);
- CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODET(
+ CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
@@ -187,7 +189,7 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
auto function = Parameter<JSFunction>(Descriptor::kTarget);
- TNode<CodeT> code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
+ TNode<CodeT> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
// Set the code slot inside the JSFunction to CompileLazy.
StoreObjectField(function, JSFunction::kCodeOffset, code);
GenerateTailCallToJSCode(code, function);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index e8a8805453..750a7db954 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -575,7 +575,7 @@ TF_BUILTIN(ObjectHasOwn, ObjectBuiltinsAssembler) {
BIND(&not_undefined_nor_null);
Return(CallBuiltin(Builtin::kObjectPrototypeHasOwnProperty, context, target,
- new_target, Int32Constant(2), object, key));
+ new_target, JSParameterCount(1), object, key));
}
// ES #sec-object.getOwnPropertyNames
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 9423cae18f..47d85f4b84 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -260,8 +260,9 @@ BUILTIN(ObjectPrototypeSetProto) {
// 4. Let status be ? O.[[SetPrototypeOf]](proto).
// 5. If status is false, throw a TypeError exception.
- MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(isolate, receiver, proto, true, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
// Return undefined.
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/builtins/builtins-shadow-realms.cc b/deps/v8/src/builtins/builtins-shadow-realms.cc
new file mode 100644
index 0000000000..b39f570ef8
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-shadow-realms.cc
@@ -0,0 +1,248 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/logging/counters.h"
+#include "src/objects/js-shadow-realms-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm-constructor
+BUILTIN(ShadowRealmConstructor) {
+ HandleScope scope(isolate);
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->ShadowRealm_string()));
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ // 3. Let realmRec be CreateRealm().
+ // 5. Let context be a new execution context.
+ // 6. Set the Function of context to null.
+ // 7. Set the Realm of context to realmRec.
+ // 8. Set the ScriptOrModule of context to null.
+ // 10. Perform ? SetRealmGlobalObject(realmRec, undefined, undefined).
+ // 11. Perform ? SetDefaultGlobalBindings(O.[[ShadowRealm]]).
+ // 12. Perform ? HostInitializeShadowRealm(O.[[ShadowRealm]]).
+ // These steps are combined in
+ // Isolate::RunHostCreateShadowRealmContextCallback and Context::New.
+ // The host operation is hoisted for not creating a half-initialized
+ // ShadowRealm object, which can fail the heap verification.
+ Handle<NativeContext> native_context;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, native_context,
+ isolate->RunHostCreateShadowRealmContextCallback());
+
+ // 2. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%ShadowRealm.prototype%", « [[ShadowRealm]], [[ExecutionContext]] »).
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<JSShadowRealm> O = Handle<JSShadowRealm>::cast(result);
+
+ // 4. Set O.[[ShadowRealm]] to realmRec.
+ // 9. Set O.[[ExecutionContext]] to context.
+ O->set_native_context(*native_context);
+
+ // 13. Return O.
+ return *O;
+}
+
+namespace {
+
+// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
+MaybeHandle<Object> GetWrappedValue(Isolate* isolate, Handle<Object> value,
+ Handle<NativeContext> creation_context,
+ Handle<NativeContext> target_context) {
+ // 1. If Type(value) is Object, then
+ if (!value->IsJSReceiver()) {
+ // 2. Return value.
+ return value;
+ }
+ // 1a. If IsCallable(value) is false, throw a TypeError exception.
+ if (!value->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewError(Handle<JSFunction>(creation_context->type_error_function(),
+ isolate),
+ MessageTemplate::kNotCallable),
+ {});
+ }
+ // 1b. Return ? WrappedFunctionCreate(callerRealm, value).
+
+ // WrappedFunctionCreate
+ // https://tc39.es/proposal-shadowrealm/#sec-wrappedfunctioncreate
+
+ // The intermediate wrapped functions are not user-visible. And calling a
+ // wrapped function won't cause a side effect in the creation realm.
+ // Unwrap here to avoid nested unwrapping at the call site.
+ if (value->IsJSWrappedFunction()) {
+ Handle<JSWrappedFunction> target_wrapped =
+ Handle<JSWrappedFunction>::cast(value);
+ value = Handle<Object>(target_wrapped->wrapped_target_function(), isolate);
+ }
+
+ // 1. Let internalSlotsList be the internal slots listed in Table 2, plus
+ // [[Prototype]] and [[Extensible]].
+ // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
+ // 3. Set wrapped.[[Prototype]] to
+ // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
+ // 4. Set wrapped.[[Call]] as described in 2.1.
+ // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
+ // 6. Set wrapped.[[Realm]] to callerRealm.
+ // 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped").
+ // 8. If result is an Abrupt Completion, throw a TypeError exception.
+ Handle<JSWrappedFunction> wrapped =
+ isolate->factory()->NewJSWrappedFunction(creation_context, value);
+
+ // 9. Return wrapped.
+ return wrapped;
+}
+
+} // namespace
+
+// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm.prototype.evaluate
+BUILTIN(ShadowRealmPrototypeEvaluate) {
+ HandleScope scope(isolate);
+
+ Handle<Object> source_text = args.atOrUndefined(isolate, 1);
+ // 1. Let O be this value.
+ Handle<Object> receiver = args.receiver();
+
+ Factory* factory = isolate->factory();
+
+ // 2. Perform ? ValidateShadowRealmObject(O).
+ if (!receiver->IsJSShadowRealm()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver));
+ }
+ Handle<JSShadowRealm> shadow_realm = Handle<JSShadowRealm>::cast(receiver);
+
+ // 3. If Type(sourceText) is not String, throw a TypeError exception.
+ if (!source_text->IsString()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidShadowRealmEvaluateSourceText));
+ }
+
+ // 4. Let callerRealm be the current Realm Record.
+ Handle<NativeContext> caller_context = isolate->native_context();
+
+ // 5. Let evalRealm be O.[[ShadowRealm]].
+ Handle<NativeContext> eval_context =
+ Handle<NativeContext>(shadow_realm->native_context(), isolate);
+ // 6. Return ? PerformShadowRealmEval(sourceText, callerRealm, evalRealm).
+
+ // PerformShadowRealmEval
+ // https://tc39.es/proposal-shadowrealm/#sec-performshadowrealmeval
+ // 1. Perform ? HostEnsureCanCompileStrings(callerRealm, evalRealm).
+ // Run embedder pre-checks before executing the source code.
+ MaybeHandle<String> validated_source;
+ bool unhandled_object;
+ std::tie(validated_source, unhandled_object) =
+ Compiler::ValidateDynamicCompilationSource(isolate, eval_context,
+ source_text);
+ if (unhandled_object) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidShadowRealmEvaluateSourceText));
+ }
+
+ Handle<JSObject> eval_global_proxy(eval_context->global_proxy(), isolate);
+ MaybeHandle<Object> result;
+ bool is_parse_failed = false;
+ {
+ // 8. If runningContext is not already suspended, suspend runningContext.
+ // 9. Let evalContext be a new ECMAScript code execution context.
+ // 10. Set evalContext's Function to null.
+ // 11. Set evalContext's Realm to evalRealm.
+ // 12. Set evalContext's ScriptOrModule to null.
+ // 13. Set evalContext's VariableEnvironment to varEnv.
+ // 14. Set evalContext's LexicalEnvironment to lexEnv.
+ // 15. Push evalContext onto the execution context stack; evalContext is now
+ // the running execution context.
+ SaveAndSwitchContext save(isolate, *eval_context);
+
+ // 2. Perform the following substeps in an implementation-defined order,
+ // possibly interleaving parsing and error detection:
+ // 2a. Let script be ParseText(! StringToCodePoints(sourceText), Script).
+ // 2b. If script is a List of errors, throw a SyntaxError exception.
+ // 2c. If script Contains ScriptBody is false, return undefined.
+ // 2d. Let body be the ScriptBody of script.
+ // 2e. If body Contains NewTarget is true, throw a SyntaxError
+ // exception.
+ // 2f. If body Contains SuperProperty is true, throw a SyntaxError
+ // exception.
+ // 2g. If body Contains SuperCall is true, throw a SyntaxError exception.
+ // 3. Let strictEval be IsStrict of script.
+ // 4. Let runningContext be the running execution context.
+ // 5. Let lexEnv be NewDeclarativeEnvironment(evalRealm.[[GlobalEnv]]).
+ // 6. Let varEnv be evalRealm.[[GlobalEnv]].
+ // 7. If strictEval is true, set varEnv to lexEnv.
+ Handle<JSFunction> function;
+ MaybeHandle<JSFunction> maybe_function =
+ Compiler::GetFunctionFromValidatedString(eval_context, validated_source,
+ NO_PARSE_RESTRICTION,
+ kNoSourcePosition);
+ if (maybe_function.is_null()) {
+ is_parse_failed = true;
+ } else {
+ function = maybe_function.ToHandleChecked();
+
+ // 16. Let result be EvalDeclarationInstantiation(body, varEnv,
+ // lexEnv, null, strictEval).
+ // 17. If result.[[Type]] is normal, then
+ // 20a. Set result to the result of evaluating body.
+ // 18. If result.[[Type]] is normal and result.[[Value]] is empty, then
+ // 21a. Set result to NormalCompletion(undefined).
+ result =
+ Execution::Call(isolate, function, eval_global_proxy, 0, nullptr);
+
+ // 19. Suspend evalContext and remove it from the execution context stack.
+ // 20. Resume the context that is now on the top of the execution context
+ // stack as the running execution context. Done by the scope.
+ }
+ }
+
+ if (result.is_null()) {
+ Handle<Object> pending_exception =
+ Handle<Object>(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ if (is_parse_failed) {
+ Handle<JSObject> error_object = Handle<JSObject>::cast(pending_exception);
+ Handle<String> message = Handle<String>::cast(JSReceiver::GetDataProperty(
+ isolate, error_object, factory->message_string()));
+
+ return isolate->ReThrow(
+ *factory->NewError(isolate->syntax_error_function(), message));
+ }
+ // 21. If result.[[Type]] is not normal, throw a TypeError exception.
+ // TODO(v8:11989): provide a non-observable inspection.
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown));
+ }
+ // 22. Return ? GetWrappedValue(callerRealm, result.[[Value]]).
+ Handle<Object> wrapped_result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, wrapped_result,
+ GetWrappedValue(isolate, result.ToHandleChecked(), caller_context,
+ eval_context));
+ return *wrapped_result;
+}
+
+// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm.prototype.importvalue
+BUILTIN(ShadowRealmPrototypeImportValue) {
+ HandleScope scope(isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-shadowrealm-gen.cc b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc
new file mode 100644
index 0000000000..03bc854c9c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc
@@ -0,0 +1,186 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/codegen/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ShadowRealmBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ShadowRealmBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ TNode<JSObject> AllocateJSWrappedFunction(TNode<Context> context);
+};
+
+TNode<JSObject> ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction(
+ TNode<Context> context) {
+ TNode<NativeContext> native_context = LoadNativeContext(context);
+ TNode<Map> map = CAST(
+ LoadContextElement(native_context, Context::WRAPPED_FUNCTION_MAP_INDEX));
+ return AllocateJSObjectFromMap(map);
+}
+
+// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
+TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto creation_context = Parameter<Context>(Descriptor::kCreationContext);
+ auto value = Parameter<Object>(Descriptor::kValue);
+
+ Label if_primitive(this), if_callable(this), unwrap(this), wrap(this),
+ bailout(this, Label::kDeferred);
+
+ // 2. Return value.
+ GotoIf(TaggedIsSmi(value), &if_primitive);
+ GotoIfNot(IsJSReceiver(CAST(value)), &if_primitive);
+
+ // 1. If Type(value) is Object, then
+ // 1a. If IsCallable(value) is false, throw a TypeError exception.
+ // 1b. Return ? WrappedFunctionCreate(callerRealm, value).
+ Branch(IsCallable(CAST(value)), &if_callable, &bailout);
+
+ BIND(&if_primitive);
+ Return(value);
+
+ BIND(&if_callable);
+ TVARIABLE(Object, target);
+ target = value;
+ // WrappedFunctionCreate
+ // https://tc39.es/proposal-shadowrealm/#sec-wrappedfunctioncreate
+ Branch(IsJSWrappedFunction(CAST(value)), &unwrap, &wrap);
+
+ BIND(&unwrap);
+ // The intermediate wrapped functions are not user-visible. And calling a
+ // wrapped function won't cause a side effect in the creation realm.
+ // Unwrap here to avoid nested unwrapping at the call site.
+ TNode<JSWrappedFunction> target_wrapped_function = CAST(value);
+ target = LoadObjectField(target_wrapped_function,
+ JSWrappedFunction::kWrappedTargetFunctionOffset);
+ Goto(&wrap);
+
+ BIND(&wrap);
+ // 1. Let internalSlotsList be the internal slots listed in Table 2, plus
+ // [[Prototype]] and [[Extensible]].
+ // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
+ // 3. Set wrapped.[[Prototype]] to
+ // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
+ // 4. Set wrapped.[[Call]] as described in 2.1.
+ TNode<JSObject> wrapped = AllocateJSWrappedFunction(creation_context);
+
+ // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
+ StoreObjectFieldNoWriteBarrier(
+ wrapped, JSWrappedFunction::kWrappedTargetFunctionOffset, target.value());
+ // 6. Set wrapped.[[Realm]] to callerRealm.
+ StoreObjectFieldNoWriteBarrier(wrapped, JSWrappedFunction::kContextOffset,
+ creation_context);
+
+ // 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped").
+ // 8. If result is an Abrupt Completion, throw a TypeError exception.
+ // TODO(v8:11989): https://github.com/tc39/proposal-shadowrealm/pull/348
+
+ // 9. Return wrapped.
+ Return(wrapped);
+
+ BIND(&bailout);
+ ThrowTypeError(context, MessageTemplate::kNotCallable, value);
+}
+
+// https://tc39.es/proposal-shadowrealm/#sec-wrapped-function-exotic-objects-call-thisargument-argumentslist
+TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
+ auto wrapped_function = Parameter<JSWrappedFunction>(Descriptor::kFunction);
+ auto context = Parameter<Context>(Descriptor::kContext);
+
+ PerformStackCheck(context);
+
+ Label call_exception(this, Label::kDeferred),
+ target_not_callable(this, Label::kDeferred);
+
+ // 1. Let target be F.[[WrappedTargetFunction]].
+ TNode<JSReceiver> target = CAST(LoadObjectField(
+ wrapped_function, JSWrappedFunction::kWrappedTargetFunctionOffset));
+ // 2. Assert: IsCallable(target) is true.
+ CSA_DCHECK(this, IsCallable(target));
+
+ // 4. Let callerRealm be ? GetFunctionRealm(F).
+ TNode<Context> caller_context = LoadObjectField<Context>(
+ wrapped_function, JSWrappedFunction::kContextOffset);
+ // 3. Let targetRealm be ? GetFunctionRealm(target).
+ TNode<Context> target_context =
+ GetFunctionRealm(caller_context, target, &target_not_callable);
+ // 5. NOTE: Any exception objects produced after this point are associated
+ // with callerRealm.
+
+ CodeStubArguments args(this, argc_ptr);
+ TNode<Object> receiver = args.GetReceiver();
+
+ // 6. Let wrappedArgs be a new empty List.
+ TNode<FixedArray> wrapped_args =
+ CAST(AllocateFixedArray(ElementsKind::PACKED_ELEMENTS, argc_ptr));
+ // Fill the fixed array so that heap verifier doesn't complain about it.
+ FillFixedArrayWithValue(ElementsKind::PACKED_ELEMENTS, wrapped_args,
+ IntPtrConstant(0), argc_ptr,
+ RootIndex::kUndefinedValue);
+
+ // 8. Let wrappedThisArgument to ? GetWrappedValue(targetRealm, thisArgument).
+ // Create wrapped value in the target realm.
+ TNode<Object> wrapped_receiver =
+ CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
+ target_context, receiver);
+ StoreFixedArrayElement(wrapped_args, 0, wrapped_receiver);
+ // 7. For each element arg of argumentsList, do
+ BuildFastLoop<IntPtrT>(
+ IntPtrConstant(0), args.GetLengthWithoutReceiver(),
+ [&](TNode<IntPtrT> index) {
+ // 7a. Let wrappedValue be ? GetWrappedValue(targetRealm, arg).
+ // Create wrapped value in the target realm.
+ TNode<Object> wrapped_value =
+ CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
+ target_context, args.AtIndex(index));
+ // 7b. Append wrappedValue to wrappedArgs.
+ StoreFixedArrayElement(
+ wrapped_args, IntPtrAdd(index, IntPtrConstant(1)), wrapped_value);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ TVARIABLE(Object, var_exception);
+ TNode<Object> result;
+ {
+ compiler::ScopedExceptionHandler handler(this, &call_exception,
+ &var_exception);
+ TNode<Int32T> args_count = Int32Constant(0); // args already on the stack
+ Callable callable = CodeFactory::CallVarargs(isolate());
+
+ // 9. Let result be the Completion Record of Call(target,
+ // wrappedThisArgument, wrappedArgs).
+ result = CallStub(callable, target_context, target, args_count, argc,
+ wrapped_args);
+ }
+
+ // 10. If result.[[Type]] is normal or result.[[Type]] is return, then
+ // 10a. Return ? GetWrappedValue(callerRealm, result.[[Value]]).
+ TNode<Object> wrapped_result =
+ CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
+ caller_context, result);
+ args.PopAndReturn(wrapped_result);
+
+ // 11. Else,
+ BIND(&call_exception);
+ // 11a. Throw a TypeError exception.
+ // TODO(v8:11989): provide a non-observable inspection.
+ ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown,
+ var_exception.value());
+
+ BIND(&target_not_callable);
+ // A wrapped value should not be non-callable.
+ Unreachable();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 154c6d39f8..4222cf9c79 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -168,20 +168,26 @@ TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
// https://tc39.es/ecma262/#sec-atomicload
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
- auto maybe_array = Parameter<Object>(Descriptor::kArray);
- auto index = Parameter<Object>(Descriptor::kIndex);
+ auto maybe_array_or_shared_struct =
+ Parameter<Object>(Descriptor::kArrayOrSharedStruct);
+ auto index_or_field_name = Parameter<Object>(Descriptor::kIndexOrFieldName);
auto context = Parameter<Context>(Descriptor::kContext);
+ Label shared_struct(this);
+ GotoIf(IsJSSharedStruct(maybe_array_or_shared_struct), &shared_struct);
+
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
TNode<Int32T> elements_kind;
TNode<RawPtrT> backing_store;
- TNode<JSArrayBuffer> array_buffer = ValidateIntegerTypedArray(
- maybe_array, context, &elements_kind, &backing_store, &detached);
- TNode<JSTypedArray> array = CAST(maybe_array);
+ TNode<JSArrayBuffer> array_buffer =
+ ValidateIntegerTypedArray(maybe_array_or_shared_struct, context,
+ &elements_kind, &backing_store, &detached);
+ TNode<JSTypedArray> array = CAST(maybe_array_or_shared_struct);
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
- TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
+ TNode<UintPtrT> index_word =
+ ValidateAtomicAccess(array, index_or_field_name, context);
// 3. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
// 4. NOTE: The above check is not redundant with the check in
@@ -254,25 +260,37 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kDetachedOperation,
"Atomics.load");
}
+
+ BIND(&shared_struct);
+ {
+ Return(CallRuntime(Runtime::kAtomicsLoadSharedStructField, context,
+ maybe_array_or_shared_struct, index_or_field_name));
+ }
}
// https://tc39.es/ecma262/#sec-atomics.store
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
- auto maybe_array = Parameter<Object>(Descriptor::kArray);
- auto index = Parameter<Object>(Descriptor::kIndex);
+ auto maybe_array_or_shared_struct =
+ Parameter<Object>(Descriptor::kArrayOrSharedStruct);
+ auto index_or_field_name = Parameter<Object>(Descriptor::kIndexOrFieldName);
auto value = Parameter<Object>(Descriptor::kValue);
auto context = Parameter<Context>(Descriptor::kContext);
+ Label shared_struct(this);
+ GotoIf(IsJSSharedStruct(maybe_array_or_shared_struct), &shared_struct);
+
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
TNode<Int32T> elements_kind;
TNode<RawPtrT> backing_store;
- TNode<JSArrayBuffer> array_buffer = ValidateIntegerTypedArray(
- maybe_array, context, &elements_kind, &backing_store, &detached);
- TNode<JSTypedArray> array = CAST(maybe_array);
+ TNode<JSArrayBuffer> array_buffer =
+ ValidateIntegerTypedArray(maybe_array_or_shared_struct, context,
+ &elements_kind, &backing_store, &detached);
+ TNode<JSTypedArray> array = CAST(maybe_array_or_shared_struct);
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
- TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
+ TNode<UintPtrT> index_word =
+ ValidateAtomicAccess(array, index_or_field_name, context);
Label u8(this), u16(this), u32(this), u64(this), other(this);
@@ -356,6 +374,13 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kDetachedOperation,
"Atomics.store");
}
+
+ BIND(&shared_struct);
+ {
+ Return(CallRuntime(Runtime::kAtomicsStoreSharedStructField, context,
+ maybe_array_or_shared_struct, index_or_field_name,
+ value));
+ }
}
// https://tc39.es/ecma262/#sec-atomics.exchange
diff --git a/deps/v8/src/builtins/builtins-struct.cc b/deps/v8/src/builtins/builtins-struct.cc
new file mode 100644
index 0000000000..851af187a1
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-struct.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/objects/js-struct-inl.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr int kMaxJSStructFields = 999;
+
+#ifdef V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-limits.h"
+static_assert(wasm::kV8MaxWasmStructFields == kMaxJSStructFields,
+ "Max number of fields should be the same for both JS and "
+ "WebAssembly structs");
+#endif // V8_ENABLE_WEBASSEMBLY
+
+BUILTIN(SharedStructTypeConstructor) {
+ DCHECK(FLAG_shared_string_table);
+
+ HandleScope scope(isolate);
+ static const char method_name[] = "SharedStructType";
+ auto* factory = isolate->factory();
+
+ Handle<JSReceiver> field_names_arg;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, field_names_arg,
+ Object::ToObject(isolate, args.atOrUndefined(isolate, 1), method_name));
+
+ // Treat field_names_arg as arraylike.
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_length_number,
+ Object::GetLengthFromArrayLike(isolate, field_names_arg));
+ double num_properties_double = raw_length_number->Number();
+ if (num_properties_double < 0 || num_properties_double > kMaxJSStructFields) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kStructFieldCountOutOfRange));
+ }
+ int num_properties = static_cast<int>(num_properties_double);
+
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(
+ num_properties, 0, AllocationType::kSharedOld);
+
+ // Build up the descriptor array.
+ for (int i = 0; i < num_properties; ++i) {
+ Handle<Object> raw_field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_field_name,
+ JSReceiver::GetElement(isolate, field_names_arg, i));
+ Handle<Name> field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
+ Object::ToName(isolate, raw_field_name));
+ field_name = factory->InternalizeName(field_name);
+
+ // Shared structs' fields need to be aligned, so make it all tagged.
+ PropertyDetails details(
+ PropertyKind::kData, SEALED, PropertyLocation::kField,
+ PropertyConstness::kMutable, Representation::Tagged(), i);
+ descriptors->Set(InternalIndex(i), *field_name,
+ MaybeObject::FromObject(FieldType::Any()), details);
+ }
+ descriptors->Sort();
+
+ Handle<SharedFunctionInfo> info =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate->factory()->empty_string(), Builtin::kSharedStructConstructor,
+ FunctionKind::kNormalFunction);
+ info->set_internal_formal_parameter_count(JSParameterCount(0));
+ info->set_length(0);
+
+ Handle<JSFunction> constructor =
+ Factory::JSFunctionBuilder{isolate, info, isolate->native_context()}
+ .set_map(isolate->strict_function_map())
+ .Build();
+
+ int instance_size;
+ int in_object_properties;
+ JSFunction::CalculateInstanceSizeHelper(JS_SHARED_STRUCT_TYPE, false, 0,
+ num_properties, &instance_size,
+ &in_object_properties);
+ Handle<Map> instance_map = factory->NewMap(
+ JS_SHARED_STRUCT_TYPE, instance_size, TERMINAL_FAST_ELEMENTS_KIND,
+ in_object_properties, AllocationType::kSharedMap);
+
+ instance_map->InitializeDescriptors(isolate, *descriptors);
+ // Structs have fixed layout ahead of time, so there's no slack.
+ instance_map->SetInObjectUnusedPropertyFields(0);
+ instance_map->set_is_extensible(false);
+ JSFunction::SetInitialMap(isolate, constructor, instance_map,
+ factory->null_value());
+
+ // The constructor is not a shared object, so the shared map should not point
+ // to it.
+ instance_map->set_constructor_or_back_pointer(*factory->null_value());
+
+ return *constructor;
+}
+
+BUILTIN(SharedStructConstructor) {
+ HandleScope scope(isolate);
+ auto* factory = isolate->factory();
+
+ Handle<JSObject> instance =
+ factory->NewJSObject(args.target(), AllocationType::kSharedOld);
+
+ Handle<Map> instance_map(instance->map(), isolate);
+ if (instance_map->HasOutOfObjectProperties()) {
+ int num_oob_fields =
+ instance_map->NumberOfFields(ConcurrencyMode::kNotConcurrent) -
+ instance_map->GetInObjectProperties();
+ Handle<PropertyArray> property_array =
+ factory->NewPropertyArray(num_oob_fields, AllocationType::kSharedOld);
+ instance->SetProperties(*property_array);
+ }
+
+ return *instance;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-temporal-gen.cc b/deps/v8/src/builtins/builtins-temporal-gen.cc
new file mode 100644
index 0000000000..b8f1fb27ac
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-temporal-gen.cc
@@ -0,0 +1,98 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-iterator-gen.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/growable-fixed-array-gen.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/js-temporal-objects-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class TemporalBuiltinsAssembler : public IteratorBuiltinsAssembler {
+ public:
+ explicit TemporalBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : IteratorBuiltinsAssembler(state) {}
+
+ // For the use inside Temporal GetPossibleInstantFor
+ TNode<FixedArray> TemporalInstantFixedArrayFromIterable(
+ TNode<Context> context, TNode<Object> iterable);
+};
+
+// #sec-iterabletolistoftype
+TNode<FixedArray>
+TemporalBuiltinsAssembler::TemporalInstantFixedArrayFromIterable(
+ TNode<Context> context, TNode<Object> iterable) {
+ GrowableFixedArray list(state());
+ Label done(this);
+ // 1. If iterable is undefined, then
+ // a. Return a new empty List.
+ GotoIf(IsUndefined(iterable), &done);
+
+ // 2. Let iteratorRecord be ? GetIterator(items).
+ IteratorRecord iterator_record = GetIterator(context, iterable);
+
+ // 3. Let list be a new empty List.
+
+ Label loop_start(this,
+ {list.var_array(), list.var_length(), list.var_capacity()});
+ Goto(&loop_start);
+ // 4. Let next be true.
+ // 5. Repeat, while next is not false
+ Label if_isnottemporalinstant(this, Label::kDeferred),
+ if_exception(this, Label::kDeferred);
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<JSReceiver> next = IteratorStep(context, iterator_record, &done);
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value = IteratorValue(context, next);
+ // ii. If Type(nextValue) is not Object or nextValue does not have an
+ // [[InitializedTemporalInstant]] internal slot
+ GotoIf(TaggedIsSmi(next_value), &if_isnottemporalinstant);
+ TNode<Uint16T> next_value_type = LoadInstanceType(CAST(next_value));
+ GotoIfNot(IsTemporalInstantInstanceType(next_value_type),
+ &if_isnottemporalinstant);
+ // iii. Append nextValue to the end of the List list.
+ list.Push(next_value);
+ Goto(&loop_start);
+ // 5.b.ii
+ BIND(&if_isnottemporalinstant);
+ {
+ // 1. Let error be ThrowCompletion(a newly created TypeError object).
+ TVARIABLE(Object, var_exception);
+ {
+ compiler::ScopedExceptionHandler handler(this, &if_exception,
+ &var_exception);
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIterableYieldedNonString),
+ next_value);
+ }
+ Unreachable();
+
+ // 2. Return ? IteratorClose(iteratorRecord, error).
+ BIND(&if_exception);
+ IteratorCloseOnException(context, iterator_record);
+ CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ Unreachable();
+ }
+ }
+
+ BIND(&done);
+ return list.ToFixedArray();
+}
+
+TF_BUILTIN(TemporalInstantFixedArrayFromIterable, TemporalBuiltinsAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+
+ Return(TemporalInstantFixedArrayFromIterable(context, iterable));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-temporal.cc b/deps/v8/src/builtins/builtins-temporal.cc
index d25c769ea4..91ff221ddd 100644
--- a/deps/v8/src/builtins/builtins-temporal.cc
+++ b/deps/v8/src/builtins/builtins-temporal.cc
@@ -16,74 +16,40 @@ namespace internal {
UNIMPLEMENTED(); \
}
-/* Temporal #sec-temporal.now.timezone */
-TO_BE_IMPLEMENTED(TemporalNowTimeZone)
-/* Temporal #sec-temporal.now.instant */
-TO_BE_IMPLEMENTED(TemporalNowInstant)
-/* Temporal #sec-temporal.now.plaindatetime */
-TO_BE_IMPLEMENTED(TemporalNowPlainDateTime)
-/* Temporal #sec-temporal.now.plaindatetimeiso */
-TO_BE_IMPLEMENTED(TemporalNowPlainDateTimeISO)
-/* Temporal #sec-temporal.now.zoneddatetime */
-TO_BE_IMPLEMENTED(TemporalNowZonedDateTime)
-/* Temporal #sec-temporal.now.zoneddatetimeiso */
-TO_BE_IMPLEMENTED(TemporalNowZonedDateTimeISO)
-/* Temporal #sec-temporal.now.plaindate */
-TO_BE_IMPLEMENTED(TemporalNowPlainDate)
-/* Temporal #sec-temporal.now.plaindateiso */
-TO_BE_IMPLEMENTED(TemporalNowPlainDateISO)
-/* There are no Temporal.now.plainTime */
-/* See https://github.com/tc39/proposal-temporal/issues/1540 */
-/* Temporal #sec-temporal.now.plaintimeiso */
-TO_BE_IMPLEMENTED(TemporalNowPlainTimeISO)
-
-/* Temporal.PlaneDate */
-/* Temporal #sec-temporal.plaindate */
-TO_BE_IMPLEMENTED(TemporalPlainDateConstructor)
-/* Temporal #sec-temporal.plaindate.from */
-TO_BE_IMPLEMENTED(TemporalPlainDateFrom)
+#define TEMPORAL_NOW0(T) \
+ BUILTIN(TemporalNow##T) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE(isolate, JSTemporal##T::Now(isolate)); \
+ }
+
+#define TEMPORAL_NOW2(T) \
+ BUILTIN(TemporalNow##T) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, JSTemporal##T::Now(isolate, args.atOrUndefined(isolate, 1), \
+ args.atOrUndefined(isolate, 2))); \
+ }
+
+#define TEMPORAL_NOW_ISO1(T) \
+ BUILTIN(TemporalNow##T##ISO) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T::NowISO(isolate, args.atOrUndefined(isolate, 1))); \
+ }
+
/* Temporal #sec-temporal.plaindate.compare */
TO_BE_IMPLEMENTED(TemporalPlainDateCompare)
-/* Temporal #sec-get-temporal.plaindate.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeCalendar)
-/* Temporal #sec-get-temporal.plaindate.prototype.year */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeYear)
-/* Temporal #sec-get-temporal.plaindate.prototype.month */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonth)
-/* Temporal #sec-get-temporal.plaindate.prototype.monthcode */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonthCode)
-/* Temporal #sec-get-temporal.plaindate.prototype.day */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDay)
-/* Temporal #sec-get-temporal.plaindate.prototype.dayofweek */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDayOfWeek)
-/* Temporal #sec-get-temporal.plaindate.prototype.dayofyear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDayOfYear)
-/* Temporal #sec-get-temporal.plaindate.prototype.weekofyear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWeekOfYear)
-/* Temporal #sec-get-temporal.plaindate.prototype.daysinweek */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInWeek)
-/* Temporal #sec-get-temporal.plaindate.prototype.daysinmonth */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInMonth)
-/* Temporal #sec-get-temporal.plaindate.prototype.daysinyear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInYear)
-/* Temporal #sec-get-temporal.plaindate.prototype.monthsinyear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonthsInYear)
-/* Temporal #sec-get-temporal.plaindate.prototype.inleapyear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeInLeapYear)
/* Temporal #sec-temporal.plaindate.prototype.toplainyearmonth */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToPlainYearMonth)
/* Temporal #sec-temporal.plaindate.prototype.toplainmonthday */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToPlainMonthDay)
-/* Temporal #sec-temporal.plaindate.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeGetISOFields)
/* Temporal #sec-temporal.plaindate.prototype.add */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeAdd)
/* Temporal #sec-temporal.plaindate.prototype.substract */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeSubtract)
/* Temporal #sec-temporal.plaindate.prototype.with */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWith)
-/* Temporal #sec-temporal.plaindate.prototype.withcalendar */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWithCalendar)
/* Temporal #sec-temporal.plaindate.prototype.until */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeUntil)
/* Temporal #sec-temporal.plaindate.prototype.since */
@@ -98,30 +64,12 @@ TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToZonedDateTime)
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToString)
/* Temporal #sec-temporal.plaindate.prototype.tojson */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToJSON)
-/* Temporal #sec-temporal.plaindate.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeValueOf)
/* Temporal.PlaneTime */
-/* Temporal #sec-temporal.plaintime */
-TO_BE_IMPLEMENTED(TemporalPlainTimeConstructor)
/* Temporal #sec-temporal.plaintime.from */
TO_BE_IMPLEMENTED(TemporalPlainTimeFrom)
/* Temporal #sec-temporal.plaintime.compare */
TO_BE_IMPLEMENTED(TemporalPlainTimeCompare)
-/* Temporal #sec-get-temporal.plaintime.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeCalendar)
-/* Temporal #sec-get-temporal.plaintime.prototype.hour */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeHour)
-/* Temporal #sec-get-temporal.plaintime.prototype.minute */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMinute)
-/* Temporal #sec-get-temporal.plaintime.prototype.second */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeSecond)
-/* Temporal #sec-get-temporal.plaintime.prototype.millisecond */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMillisecond)
-/* Temporal #sec-get-temporal.plaintime.prototype.microsecond */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMicrosecond)
-/* Temporal #sec-get-temporal.plaintime.prototype.nanoseond */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeNanosecond)
/* Temporal #sec-temporal.plaintime.prototype.add */
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeAdd)
/* Temporal #sec-temporal.plaintime.prototype.subtract */
@@ -140,68 +88,22 @@ TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeEquals)
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToPlainDateTime)
/* Temporal #sec-temporal.plaintime.prototype.tozoneddatetime */
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToZonedDateTime)
-/* Temporal #sec-temporal.plaintime.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeGetISOFields)
/* Temporal #sec-temporal.plaintime.prototype.tostring */
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToString)
/* Temporal #sec-temporal.plaindtimeprototype.tojson */
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToJSON)
-/* Temporal #sec-temporal.plaintime.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeValueOf)
/* Temporal.PlaneDateTime */
-/* Temporal #sec-temporal.plaindatetime */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimeConstructor)
/* Temporal #sec-temporal.plaindatetime.from */
TO_BE_IMPLEMENTED(TemporalPlainDateTimeFrom)
/* Temporal #sec-temporal.plaindatetime.compare */
TO_BE_IMPLEMENTED(TemporalPlainDateTimeCompare)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeCalendar)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.year */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeYear)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.month */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonth)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.monthcode */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonthCode)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.day */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDay)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.hour */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeHour)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.minute */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMinute)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.second */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeSecond)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.millisecond */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMillisecond)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.microsecond */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMicrosecond)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.nanosecond */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeNanosecond)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.dayofweek */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDayOfWeek)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.dayofyear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDayOfYear)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.weekofyear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWeekOfYear)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinweek */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInWeek)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinmonth */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInMonth)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinyear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInYear)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.monthsinyear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonthsInYear)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.inleapyear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeInLeapYear)
/* Temporal #sec-temporal.plaindatetime.prototype.with */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWith)
/* Temporal #sec-temporal.plaindatetime.prototype.withplainTime */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithPlainTime)
/* Temporal #sec-temporal.plaindatetime.prototype.withplainDate */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithPlainDate)
-/* Temporal #sec-temporal.plaindatetime.prototype.withcalendar */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithCalendar)
/* Temporal #sec-temporal.plaindatetime.prototype.add */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeAdd)
/* Temporal #sec-temporal.plaindatetime.prototype.subtract */
@@ -218,8 +120,6 @@ TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEquals)
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToString)
/* Temporal #sec-temporal.plainddatetimeprototype.tojson */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToJSON)
-/* Temporal #sec-temporal.plaindatetime.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeValueOf)
/* Temporal #sec-temporal.plaindatetime.prototype.tozoneddatetime */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToZonedDateTime)
/* Temporal #sec-temporal.plaindatetime.prototype.toplaindate */
@@ -230,20 +130,12 @@ TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainYearMonth)
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainMonthDay)
/* Temporal #sec-temporal.plaindatetime.prototype.toplaintime */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainTime)
-/* Temporal #sec-temporal.plaindatetime.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeGetISOFields)
/* Temporal.ZonedDateTime */
-/* Temporal #sec-temporal.zoneddatetime */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimeConstructor)
/* Temporal #sec-temporal.zoneddatetime.from */
TO_BE_IMPLEMENTED(TemporalZonedDateTimeFrom)
/* Temporal #sec-temporal.zoneddatetime.compare */
TO_BE_IMPLEMENTED(TemporalZonedDateTimeCompare)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeCalendar)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.timezone */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeTimeZone)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.year */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeYear)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.month */
@@ -252,26 +144,6 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMonth)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMonthCode)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.day */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDay)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.hour */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeHour)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.minute */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMinute)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.second */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeSecond)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.millisecond */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMillisecond)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.microsecond */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMicrosecond)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.nanosecond */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeNanosecond)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochsecond */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochSeconds)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmilliseconds */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochMilliseconds)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmicroseconds */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochMicroseconds)
-/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochnanoseconds */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochNanoseconds)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofweek */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDayOfWeek)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofyear */
@@ -300,10 +172,6 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWith)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithPlainTime)
/* Temporal #sec-temporal.zoneddatetime.prototype.withplaindate */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithPlainDate)
-/* Temporal #sec-temporal.zoneddatetime.prototype.withtimezone */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithTimeZone)
-/* Temporal #sec-temporal.zoneddatetime.prototype.withcalendar */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithCalendar)
/* Temporal #sec-temporal.zoneddatetime.prototype.add */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeAdd)
/* Temporal #sec-temporal.zoneddatetime.prototype.subtract */
@@ -320,8 +188,6 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEquals)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToString)
/* Temporal #sec-temporal.zonedddatetimeprototype.tojson */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToJSON)
-/* Temporal #sec-temporal.zoneddatetime.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeValueOf)
/* Temporal #sec-temporal.zoneddatetime.prototype.startofday */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeStartOfDay)
/* Temporal #sec-temporal.zoneddatetime.prototype.toinstant */
@@ -336,40 +202,12 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainDateTime)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainYearMonth)
/* Temporal #sec-temporal.zoneddatetime.prototype.toplainmonthday */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainMonthDay)
-/* Temporal #sec-temporal.zoneddatetime.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeGetISOFields)
/* Temporal.Duration */
-/* Temporal #sec-temporal.duration */
-TO_BE_IMPLEMENTED(TemporalDurationConstructor)
/* Temporal #sec-temporal.duration.from */
TO_BE_IMPLEMENTED(TemporalDurationFrom)
/* Temporal #sec-temporal.duration.compare */
TO_BE_IMPLEMENTED(TemporalDurationCompare)
-/* Temporal #sec-get-temporal.duration.prototype.years */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeYears)
-/* Temporal #sec-get-temporal.duration.prototype.months */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeMonths)
-/* Temporal #sec-get-temporal.duration.prototype.weeks */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeWeeks)
-/* Temporal #sec-get-temporal.duration.prototype.days */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeDays)
-/* Temporal #sec-get-temporal.duration.prototype.hours */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeHours)
-/* Temporal #sec-get-temporal.duration.prototype.minutes */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeMinutes)
-/* Temporal #sec-get-temporal.duration.prototype.seconds */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeSeconds)
-/* Temporal #sec-get-temporal.duration.prototype.milliseconds */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeMilliseconds)
-/* Temporal #sec-get-temporal.duration.prototype.microseconds */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeMicroseconds)
-/* Temporal #sec-get-temporal.duration.prototype.nanoseconds */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeNanoseconds)
-/* Temporal #sec-get-temporal.duration.prototype.sign */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeSign)
-/* Temporal #sec-get-temporal.duration.prototype.blank */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeBlank)
/* Temporal #sec-temporal.duration.prototype.with */
TO_BE_IMPLEMENTED(TemporalDurationPrototypeWith)
/* Temporal #sec-temporal.duration.prototype.negated */
@@ -388,12 +226,8 @@ TO_BE_IMPLEMENTED(TemporalDurationPrototypeTotal)
TO_BE_IMPLEMENTED(TemporalDurationPrototypeToString)
/* Temporal #sec-temporal.duration.tojson */
TO_BE_IMPLEMENTED(TemporalDurationPrototypeToJSON)
-/* Temporal #sec-temporal.duration.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeValueOf)
/* Temporal.Instant */
-/* Temporal #sec-temporal.instant */
-TO_BE_IMPLEMENTED(TemporalInstantConstructor)
/* Temporal #sec-temporal.instant.from */
TO_BE_IMPLEMENTED(TemporalInstantFrom)
/* Temporal #sec-temporal.instant.fromepochseconds */
@@ -406,14 +240,6 @@ TO_BE_IMPLEMENTED(TemporalInstantFromEpochMicroseconds)
TO_BE_IMPLEMENTED(TemporalInstantFromEpochNanoseconds)
/* Temporal #sec-temporal.instant.compare */
TO_BE_IMPLEMENTED(TemporalInstantCompare)
-/* Temporal #sec-get-temporal.instant.prototype.epochseconds */
-TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochSeconds)
-/* Temporal #sec-get-temporal.instant.prototype.epochmilliseconds */
-TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochMilliseconds)
-/* Temporal #sec-get-temporal.instant.prototype.epochmicroseconds */
-TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochMicroseconds)
-/* Temporal #sec-get-temporal.instant.prototype.epochnanoseconds */
-TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochNanoseconds)
/* Temporal #sec-temporal.instant.prototype.add */
TO_BE_IMPLEMENTED(TemporalInstantPrototypeAdd)
/* Temporal #sec-temporal.instant.prototype.subtract */
@@ -430,36 +256,16 @@ TO_BE_IMPLEMENTED(TemporalInstantPrototypeEquals)
TO_BE_IMPLEMENTED(TemporalInstantPrototypeToString)
/* Temporal #sec-temporal.instant.tojson */
TO_BE_IMPLEMENTED(TemporalInstantPrototypeToJSON)
-/* Temporal #sec-temporal.instant.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalInstantPrototypeValueOf)
/* Temporal #sec-temporal.instant.prototype.tozoneddatetime */
TO_BE_IMPLEMENTED(TemporalInstantPrototypeToZonedDateTime)
/* Temporal #sec-temporal.instant.prototype.tozoneddatetimeiso */
TO_BE_IMPLEMENTED(TemporalInstantPrototypeToZonedDateTimeISO)
/* Temporal.PlainYearMonth */
-/* Temporal #sec-temporal.plainyearmonth */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthConstructor)
/* Temporal #sec-temporal.plainyearmonth.from */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthFrom)
/* Temporal #sec-temporal.plainyearmonth.compare */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthCompare)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeCalendar)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.year */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeYear)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.month */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonth)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.monthcode */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonthCode)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinyear */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeDaysInYear)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinmonth */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeDaysInMonth)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.monthsinyear */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonthsInYear)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.inleapyear */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeInLeapYear)
/* Temporal #sec-temporal.plainyearmonth.prototype.with */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeWith)
/* Temporal #sec-temporal.plainyearmonth.prototype.add */
@@ -476,26 +282,16 @@ TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEquals)
TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToString)
/* Temporal #sec-temporal.plainyearmonth.tojson */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToJSON)
-/* Temporal #sec-temporal.plainyearmonth.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeValueOf)
/* Temporal #sec-temporal.plainyearmonth.prototype.toplaindate */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToPlainDate)
-/* Temporal #sec-temporal.plainyearmonth.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeGetISOFields)
/* Temporal.PlainMonthDay */
-/* Temporal #sec-temporal.plainmonthday */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayConstructor)
/* Temporal #sec-temporal.plainmonthday.from */
TO_BE_IMPLEMENTED(TemporalPlainMonthDayFrom)
-/* There are no compare for PlainMonthDay */
-/* See https://github.com/tc39/proposal-temporal/issues/1547 */
-/* Temporal #sec-get-temporal.plainmonthday.prototype.calendar */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeCalendar)
-/* Temporal #sec-get-temporal.plainmonthday.prototype.monthcode */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeMonthCode)
-/* Temporal #sec-get-temporal.plainmonthday.prototype.day */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeDay)
+
+/* There is no compare for PlainMonthDay. See
+ * https://github.com/tc39/proposal-temporal/issues/1547 */
+
/* Temporal #sec-temporal.plainmonthday.prototype.with */
TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeWith)
/* Temporal #sec-temporal.plainmonthday.prototype.equals */
@@ -504,20 +300,10 @@ TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeEquals)
TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToString)
/* Temporal #sec-temporal.plainmonthday.tojson */
TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToJSON)
-/* Temporal #sec-temporal.plainmonthday.prototype.valueof */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeValueOf)
/* Temporal #sec-temporal.plainmonthday.prototype.toplaindate */
TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToPlainDate)
-/* Temporal #sec-temporal.plainmonthday.prototype.getisofields */
-TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeGetISOFields)
/* Temporal.TimeZone */
-/* Temporal #sec-temporal.timezone */
-TO_BE_IMPLEMENTED(TemporalTimeZoneConstructor)
-/* Temporal #sec-temporal.timezone.from */
-TO_BE_IMPLEMENTED(TemporalTimeZoneFrom)
-/* Temporal #sec-get-temporal.timezone.prototype.id */
-TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeId)
/* Temporal #sec-temporal.timezone.prototype.getoffsetnanosecondsfor */
TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetOffsetNanosecondsFor)
/* Temporal #sec-temporal.timezone.prototype.getoffsetstringfor */
@@ -532,18 +318,10 @@ TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPossibleInstantsFor)
TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetNextTransition)
/* Temporal #sec-temporal.timezone.prototype.getprevioustransition */
TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPreviousTransition)
-/* Temporal #sec-temporal.timezone.prototype.tostring */
-TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeToString)
/* Temporal #sec-temporal.timezone.prototype.tojson */
TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeToJSON)
/* Temporal.Calendar */
-/* Temporal #sec-temporal.calendar */
-TO_BE_IMPLEMENTED(TemporalCalendarConstructor)
-/* Temporal #sec-temporal.calendar.from */
-TO_BE_IMPLEMENTED(TemporalCalendarFrom)
-/* Temporal #sec-get-temporal.calendar.prototype.id */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeId)
/* Temporal #sec-temporal.calendar.prototype.datefromfields */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateFromFields)
/* Temporal #sec-temporal.calendar.prototype.yearmonthfromfields */
@@ -578,15 +356,15 @@ TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInYear)
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthsInYear)
/* Temporal #sec-temporal.calendar.prototype.inleapyear */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeInLeapYear)
-/* Temporal #sec-temporal.calendar.prototype.fields */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeFields)
/* Temporal #sec-temporal.calendar.prototype.mergefields */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMergeFields)
-/* Temporal #sec-temporal.calendar.prototype.tostring */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeToString)
/* Temporal #sec-temporal.calendar.prototype.tojson */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeToJSON)
+// to be switch to TFJ later
+/* Temporal #sec-temporal.calendar.prototype.fields */
+TO_BE_IMPLEMENTED(TemporalCalendarPrototypeFields)
+
#ifdef V8_INTL_SUPPORT
/* Temporal */
/* Temporal #sec-temporal.calendar.prototype.era */
@@ -597,26 +375,14 @@ TO_BE_IMPLEMENTED(TemporalCalendarPrototypeEraYear)
TO_BE_IMPLEMENTED(TemporalDurationPrototypeToLocaleString)
/* Temporal #sec-temporal.instant.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalInstantPrototypeToLocaleString)
-/* Temporal #sec-get-temporal.plaindate.prototype.era */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeEra)
-/* Temporal #sec-get-temporal.plaindate.prototype.erayear */
-TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeEraYear)
/* Temporal #sec-temporal.plaindate.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToLocaleString)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.era */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEra)
-/* Temporal #sec-get-temporal.plaindatetime.prototype.erayear */
-TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEraYear)
/* Temporal #sec-temporal.plaindatetime.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToLocaleString)
/* Temporal #sec-temporal.plainmonthday.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToLocaleString)
/* Temporal #sec-temporal.plaintime.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToLocaleString)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.era */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEra)
-/* Temporal #sec-get-temporal.plainyearmonth.prototype.erayear */
-TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEraYear)
/* Temporal #sec-temporal.plainyearmonth.prototype.tolocalestring */
TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToLocaleString)
/* Temporal #sec-get-temporal.zoneddatetime.prototype.era */
@@ -627,5 +393,446 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEraYear)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToLocaleString)
#endif // V8_INTL_SUPPORT
+#define TEMPORAL_CONSTRUCTOR1(T) \
+ BUILTIN(Temporal##T##Constructor) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T::Constructor(isolate, args.target(), args.new_target(), \
+ args.atOrUndefined(isolate, 1))); \
+ }
+
+#define TEMPORAL_ID_BY_TO_STRING(T) \
+ BUILTIN(Temporal##T##PrototypeId) { \
+ HandleScope scope(isolate); \
+ Handle<String> id; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, id, Object::ToString(isolate, args.receiver())); \
+ return *id; \
+ }
+
+#define TEMPORAL_TO_STRING(T) \
+ BUILTIN(Temporal##T##PrototypeToString) { \
+ HandleScope scope(isolate); \
+ const char* method = "Temporal." #T ".prototype.toString"; \
+ CHECK_RECEIVER(JSTemporal##T, t, method); \
+ Handle<Object> ret; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, ret, JSTemporal##T::ToString(isolate, t, method)); \
+ return *ret; \
+ }
+
+#define TEMPORAL_PROTOTYPE_METHOD0(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ RETURN_RESULT_OR_FAILURE(isolate, JSTemporal##T ::METHOD(isolate, obj)); \
+ }
+
+#define TEMPORAL_PROTOTYPE_METHOD1(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T ::METHOD(isolate, obj, args.atOrUndefined(isolate, 1))); \
+ }
+
+#define TEMPORAL_PROTOTYPE_METHOD3(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T ::METHOD(isolate, obj, args.atOrUndefined(isolate, 1), \
+ args.atOrUndefined(isolate, 2), \
+ args.atOrUndefined(isolate, 3))); \
+ }
+
+#define TEMPORAL_METHOD2(T, METHOD) \
+ BUILTIN(Temporal##T##METHOD) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T ::METHOD(isolate, args.atOrUndefined(isolate, 1), \
+ args.atOrUndefined(isolate, 2))); \
+ }
+
+#define TEMPORAL_VALUE_OF(T) \
+ BUILTIN(Temporal##T##PrototypeValueOf) { \
+ HandleScope scope(isolate); \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kDoNotUse, \
+ isolate->factory()->NewStringFromAsciiChecked( \
+ "Temporal." #T ".prototype.valueOf"), \
+ isolate->factory()->NewStringFromAsciiChecked( \
+ "use Temporal." #T \
+ ".prototype.compare for comparison."))); \
+ }
+
+#define TEMPORAL_GET_SMI(T, METHOD, field) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #field; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ return Smi::FromInt(obj->field()); \
+ }
+
+#define TEMPORAL_GET(T, METHOD, field) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #field; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ return obj->field(); \
+ }
+
+#define TEMPORAL_GET_NUMBER_AFTER_DIVID(T, M, field, scale, name) \
+ BUILTIN(Temporal##T##Prototype##M) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, handle, method); \
+ Handle<BigInt> value; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, value, \
+ BigInt::Divide(isolate, Handle<BigInt>(handle->field(), isolate), \
+ BigInt::FromUint64(isolate, scale))); \
+ Handle<Object> number = BigInt::ToNumber(isolate, value); \
+ DCHECK(std::isfinite(number->Number())); \
+ return *number; \
+ }
+
+#define TEMPORAL_GET_BIGINT_AFTER_DIVID(T, M, field, scale, name) \
+ BUILTIN(Temporal##T##Prototype##M) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, handle, method); \
+ Handle<BigInt> value; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, value, \
+ BigInt::Divide(isolate, Handle<BigInt>(handle->field(), isolate), \
+ BigInt::FromUint64(isolate, scale))); \
+ return *value; \
+ }
+
+#define TEMPORAL_GET_BY_FORWARD_CALENDAR(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, temporal_date, method); \
+ Handle<JSReceiver> calendar = handle(temporal_date->calendar(), isolate); \
+ RETURN_RESULT_OR_FAILURE(isolate, temporal::Calendar##METHOD( \
+ isolate, calendar, temporal_date)); \
+ }
+
+#define TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal." #T ".prototype." #name; \
+ /* 2. Perform ? RequireInternalSlot(temporalDate, [[InitializedTemporal \
+ * #T]]). */ \
+ CHECK_RECEIVER(JSTemporal##T, date_like, method); \
+ /* 3. Let calendar be temporalDate.[[Calendar]]. */ \
+ Handle<JSReceiver> calendar = handle(date_like->calendar(), isolate); \
+ /* 2. Return ? Invoke(calendar, "name", « dateLike »). */ \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, temporal::InvokeCalendarMethod( \
+ isolate, calendar, isolate->factory()->name##_string(), \
+ date_like)); \
+ }
+
+// Now
+TEMPORAL_NOW0(TimeZone)
+TEMPORAL_NOW0(Instant)
+TEMPORAL_NOW2(PlainDateTime)
+TEMPORAL_NOW_ISO1(PlainDateTime)
+TEMPORAL_NOW2(PlainDate)
+TEMPORAL_NOW_ISO1(PlainDate)
+
+// There is NO Temporal.now.plainTime
+// See https://github.com/tc39/proposal-temporal/issues/1540
+TEMPORAL_NOW_ISO1(PlainTime)
+TEMPORAL_NOW2(ZonedDateTime)
+TEMPORAL_NOW_ISO1(ZonedDateTime)
+
+// PlainDate
+BUILTIN(TemporalPlainDateConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalPlainDate::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // iso_year
+ args.atOrUndefined(isolate, 2), // iso_month
+ args.atOrUndefined(isolate, 3), // iso_day
+ args.atOrUndefined(isolate, 4))); // calendar_like
+}
+TEMPORAL_METHOD2(PlainDate, From)
+TEMPORAL_GET(PlainDate, Calendar, calendar)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, Year, year)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, Month, month)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, MonthCode, monthCode)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, Day, day)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, DayOfWeek, dayOfWeek)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, DayOfYear, dayOfYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, WeekOfYear, weekOfYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, DaysInWeek, daysInWeek)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, DaysInMonth, daysInMonth)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, DaysInYear, daysInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, MonthsInYear, monthsInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDate, InLeapYear, inLeapYear)
+TEMPORAL_PROTOTYPE_METHOD1(PlainDate, WithCalendar, withCalendar)
+TEMPORAL_PROTOTYPE_METHOD0(PlainDate, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(PlainDate)
+
+// PlainTime
+BUILTIN(TemporalPlainTimeConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSTemporalPlainTime::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // hour
+ args.atOrUndefined(isolate, 2), // minute
+ args.atOrUndefined(isolate, 3), // second
+ args.atOrUndefined(isolate, 4), // millisecond
+ args.atOrUndefined(isolate, 5), // microsecond
+ args.atOrUndefined(isolate, 6))); // nanosecond
+}
+TEMPORAL_GET(PlainTime, Calendar, calendar)
+TEMPORAL_GET_SMI(PlainTime, Hour, iso_hour)
+TEMPORAL_GET_SMI(PlainTime, Minute, iso_minute)
+TEMPORAL_GET_SMI(PlainTime, Second, iso_second)
+TEMPORAL_GET_SMI(PlainTime, Millisecond, iso_millisecond)
+TEMPORAL_GET_SMI(PlainTime, Microsecond, iso_microsecond)
+TEMPORAL_GET_SMI(PlainTime, Nanosecond, iso_nanosecond)
+TEMPORAL_PROTOTYPE_METHOD0(PlainTime, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(PlainTime)
+
+// PlainDateTime
+BUILTIN(TemporalPlainDateTimeConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalPlainDateTime::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // iso_year
+ args.atOrUndefined(isolate, 2), // iso_month
+ args.atOrUndefined(isolate, 3), // iso_day
+ args.atOrUndefined(isolate, 4), // hour
+ args.atOrUndefined(isolate, 5), // minute
+ args.atOrUndefined(isolate, 6), // second
+ args.atOrUndefined(isolate, 7), // millisecond
+ args.atOrUndefined(isolate, 8), // microsecond
+ args.atOrUndefined(isolate, 9), // nanosecond
+ args.atOrUndefined(isolate, 10))); // calendar_like
+}
+TEMPORAL_GET(PlainDateTime, Calendar, calendar)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, Year, year)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, Month, month)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, MonthCode, monthCode)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, Day, day)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, DayOfWeek, dayOfWeek)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, DayOfYear, dayOfYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, WeekOfYear, weekOfYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, DaysInWeek, daysInWeek)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, DaysInMonth, daysInMonth)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, DaysInYear, daysInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, MonthsInYear,
+ monthsInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainDateTime, InLeapYear, inLeapYear)
+TEMPORAL_PROTOTYPE_METHOD1(PlainDateTime, WithCalendar, withCalendar)
+TEMPORAL_GET_SMI(PlainDateTime, Hour, iso_hour)
+TEMPORAL_GET_SMI(PlainDateTime, Minute, iso_minute)
+TEMPORAL_GET_SMI(PlainDateTime, Second, iso_second)
+TEMPORAL_GET_SMI(PlainDateTime, Millisecond, iso_millisecond)
+TEMPORAL_GET_SMI(PlainDateTime, Microsecond, iso_microsecond)
+TEMPORAL_GET_SMI(PlainDateTime, Nanosecond, iso_nanosecond)
+TEMPORAL_PROTOTYPE_METHOD0(PlainDateTime, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(PlainDateTime)
+
+// PlainYearMonth
+BUILTIN(TemporalPlainYearMonthConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalPlainYearMonth::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // iso_year
+ args.atOrUndefined(isolate, 2), // iso_month
+ args.atOrUndefined(isolate, 3), // calendar_like
+ args.atOrUndefined(isolate, 4))); // reference_iso_day
+}
+TEMPORAL_GET(PlainYearMonth, Calendar, calendar)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainYearMonth, Year, year)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainYearMonth, Month, month)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainYearMonth, MonthCode, monthCode)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainYearMonth, DaysInYear, daysInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainYearMonth, DaysInMonth, daysInMonth)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainYearMonth, MonthsInYear,
+ monthsInYear)
+TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD(PlainYearMonth, InLeapYear, inLeapYear)
+TEMPORAL_PROTOTYPE_METHOD0(PlainYearMonth, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(PlainYearMonth)
+
+// PlainMonthDay
+BUILTIN(TemporalPlainMonthDayConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalPlainMonthDay::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // iso_month
+ args.atOrUndefined(isolate, 2), // iso_day
+ args.atOrUndefined(isolate, 3), // calendar_like
+ args.atOrUndefined(isolate, 4))); // reference_iso_year
+}
+TEMPORAL_GET(PlainMonthDay, Calendar, calendar)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainMonthDay, MonthCode, monthCode)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainMonthDay, Day, day)
+TEMPORAL_PROTOTYPE_METHOD0(PlainMonthDay, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(PlainMonthDay)
+
+// ZonedDateTime
+
+#define TEMPORAL_ZONED_DATE_TIME_GET_PREPARE(M) \
+ HandleScope scope(isolate); \
+ const char* method = "get Temporal.ZonedDateTime.prototype." #M; \
+ /* 1. Let zonedDateTime be the this value. */ \
+ /* 2. Perform ? RequireInternalSlot(zonedDateTime, */ \
+ /* [[InitializedTemporalZonedDateTime]]). */ \
+ CHECK_RECEIVER(JSTemporalZonedDateTime, zoned_date_time, method); \
+ /* 3. Let timeZone be zonedDateTime.[[TimeZone]]. */ \
+ Handle<JSReceiver> time_zone = \
+ handle(zoned_date_time->time_zone(), isolate); \
+ /* 4. Let instant be ? */ \
+ /* CreateTemporalInstant(zonedDateTime.[[Nanoseconds]]). */ \
+ Handle<JSTemporalInstant> instant; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, instant, \
+ temporal::CreateTemporalInstant( \
+ isolate, Handle<BigInt>(zoned_date_time->nanoseconds(), isolate))); \
+ /* 5. Let calendar be zonedDateTime.[[Calendar]]. */ \
+ Handle<JSReceiver> calendar = handle(zoned_date_time->calendar(), isolate); \
+ /* 6. Let temporalDateTime be ? */ \
+ /* BuiltinTimeZoneGetPlainDateTimeFor(timeZone, */ \
+ /* instant, calendar). */ \
+ Handle<JSTemporalPlainDateTime> temporal_date_time; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, temporal_date_time, \
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor( \
+ isolate, time_zone, instant, calendar, method));
+
+#define TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(M, field) \
+ BUILTIN(TemporalZonedDateTimePrototype##M) { \
+ TEMPORAL_ZONED_DATE_TIME_GET_PREPARE(M) \
+ /* 7. Return 𝔽(temporalDateTime.[[ #field ]]). */ \
+ return Smi::FromInt(temporal_date_time->field()); \
+ }
+
+BUILTIN(TemporalZonedDateTimeConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalZonedDateTime::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // epoch_nanoseconds
+ args.atOrUndefined(isolate, 2), // time_zone_like
+ args.atOrUndefined(isolate, 3))); // calendar_like
+}
+TEMPORAL_GET(ZonedDateTime, Calendar, calendar)
+TEMPORAL_GET(ZonedDateTime, TimeZone, time_zone)
+TEMPORAL_GET(ZonedDateTime, EpochNanoseconds, nanoseconds)
+TEMPORAL_GET_NUMBER_AFTER_DIVID(ZonedDateTime, EpochSeconds, nanoseconds,
+ 1000000000, epochSeconds)
+TEMPORAL_GET_NUMBER_AFTER_DIVID(ZonedDateTime, EpochMilliseconds, nanoseconds,
+ 1000000, epochMilliseconds)
+TEMPORAL_GET_BIGINT_AFTER_DIVID(ZonedDateTime, EpochMicroseconds, nanoseconds,
+ 1000, epochMicroseconds)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Hour, iso_hour)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Minute, iso_minute)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Second, iso_second)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Millisecond,
+ iso_millisecond)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Microsecond,
+ iso_microsecond)
+TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Nanosecond,
+ iso_nanosecond)
+TEMPORAL_PROTOTYPE_METHOD1(ZonedDateTime, WithCalendar, withCalendar)
+TEMPORAL_PROTOTYPE_METHOD1(ZonedDateTime, WithTimeZone, withTimeZone)
+TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, GetISOFields, getISOFields)
+TEMPORAL_VALUE_OF(ZonedDateTime)
+
+// Duration
+BUILTIN(TemporalDurationConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSTemporalDuration::Constructor(
+ isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1), // years
+ args.atOrUndefined(isolate, 2), // months
+ args.atOrUndefined(isolate, 3), // weeks
+ args.atOrUndefined(isolate, 4), // days
+ args.atOrUndefined(isolate, 5), // hours
+ args.atOrUndefined(isolate, 6), // minutes
+ args.atOrUndefined(isolate, 7), // seconds
+ args.atOrUndefined(isolate, 8), // milliseconds
+ args.atOrUndefined(isolate, 9), // microseconds
+ args.atOrUndefined(isolate, 10))); // nanoseconds
+}
+TEMPORAL_GET(Duration, Years, years)
+TEMPORAL_GET(Duration, Months, months)
+TEMPORAL_GET(Duration, Weeks, weeks)
+TEMPORAL_GET(Duration, Days, days)
+TEMPORAL_GET(Duration, Hours, hours)
+TEMPORAL_GET(Duration, Minutes, minutes)
+TEMPORAL_GET(Duration, Seconds, seconds)
+TEMPORAL_GET(Duration, Milliseconds, milliseconds)
+TEMPORAL_GET(Duration, Microseconds, microseconds)
+TEMPORAL_GET(Duration, Nanoseconds, nanoseconds)
+TEMPORAL_PROTOTYPE_METHOD0(Duration, Sign, sign)
+TEMPORAL_PROTOTYPE_METHOD0(Duration, Blank, blank)
+TEMPORAL_VALUE_OF(Duration)
+
+// Instant
+TEMPORAL_CONSTRUCTOR1(Instant)
+TEMPORAL_VALUE_OF(Instant)
+TEMPORAL_GET(Instant, EpochNanoseconds, nanoseconds)
+TEMPORAL_GET_NUMBER_AFTER_DIVID(Instant, EpochSeconds, nanoseconds, 1000000000,
+ epochSeconds)
+TEMPORAL_GET_NUMBER_AFTER_DIVID(Instant, EpochMilliseconds, nanoseconds,
+ 1000000, epochMilliseconds)
+TEMPORAL_GET_BIGINT_AFTER_DIVID(Instant, EpochMicroseconds, nanoseconds, 1000,
+ epochMicroseconds)
+
+// Calendar
+TEMPORAL_CONSTRUCTOR1(Calendar)
+TEMPORAL_ID_BY_TO_STRING(Calendar)
+TEMPORAL_TO_STRING(Calendar)
+// #sec-temporal.calendar.from
+BUILTIN(TemporalCalendarFrom) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(isolate, temporal::ToTemporalCalendar(
+ isolate, args.atOrUndefined(isolate, 1),
+ "Temporal.Calendar.from"));
+}
+
+// TimeZone
+TEMPORAL_CONSTRUCTOR1(TimeZone)
+TEMPORAL_ID_BY_TO_STRING(TimeZone)
+TEMPORAL_TO_STRING(TimeZone)
+// #sec-temporal.timezone.from
+BUILTIN(TemporalTimeZoneFrom) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(isolate, temporal::ToTemporalTimeZone(
+ isolate, args.atOrUndefined(isolate, 1),
+ "Temporal.TimeZone.from"));
+}
+
+#ifdef V8_INTL_SUPPORT
+// get Temporal.*.prototype.era/eraYear
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, Era, era)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDate, EraYear, eraYear)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, Era, era)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainDateTime, EraYear, eraYear)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainYearMonth, Era, era)
+TEMPORAL_GET_BY_FORWARD_CALENDAR(PlainYearMonth, EraYear, eraYear)
+#endif // V8_INTL_SUPPORT
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 00b040f03f..7d1633fe3c 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -65,8 +65,8 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
UintPtrConstant(0));
- StoreCagedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset,
- EmptyBackingStoreBufferConstant());
+ StoreSandboxedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset,
+ EmptyBackingStoreBufferConstant());
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
@@ -186,8 +186,12 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
TNode<Int32T> kind) {
- return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)),
- Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
+ return Word32Or(
+ Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS))),
+ Word32Or(
+ Word32Equal(kind, Int32Constant(RAB_GSAB_UINT8_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(RAB_GSAB_UINT8_CLAMPED_ELEMENTS))));
}
TNode<BoolT> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 2df46e499b..997ace2c43 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -28,10 +28,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
TNode<UintPtrT> byte_offset);
- // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
+ // Returns true if kind is either UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS,
+ // RAB_GSAB_UINT8_ELEMENTS, or RAB_GSAB_UINT8_CLAMPED_ELEMENTS.
TNode<BoolT> IsUint8ElementsKind(TNode<Int32T> kind);
- // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
+ // Returns true if kind is either BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS,
+ // RAB_GSAB_BIGINT64_ELEMENTS, or RAB_GSAB_BIGUINT64_ELEMENTS.
TNode<BoolT> IsBigInt64ElementsKind(TNode<Int32T> kind);
// Returns the byte size of an element for a TypedArray elements kind.
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index f64d2aeab6..55666ff761 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -183,9 +183,7 @@ BUILTIN(TypedArrayPrototypeFill) {
}
if (V8_UNLIKELY(array->IsVariableLength())) {
- bool out_of_bounds = false;
- array->GetLengthOrOutOfBounds(out_of_bounds);
- if (out_of_bounds) {
+ if (array->IsOutOfBounds()) {
const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
isolate->factory()->NewStringFromAsciiChecked(method_name);
@@ -246,7 +244,7 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
isolate, array,
JSTypedArray::Validate(isolate, args.receiver(), method_name));
- int64_t len = array->length();
+ int64_t len = array->GetLength();
if (len == 0) return Smi::FromInt(-1);
int64_t index = 0;
@@ -259,6 +257,10 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
+ if (V8_UNLIKELY(array->IsVariableLength() && array->IsOutOfBounds())) {
+ return Smi::FromInt(-1);
+ }
+
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<int64_t> result =
@@ -276,7 +278,7 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
isolate, array,
JSTypedArray::Validate(isolate, args.receiver(), method_name));
- int64_t len = array->length();
+ int64_t len = array->GetLength();
if (len == 0) return Smi::FromInt(-1);
int64_t index = len - 1;
@@ -291,8 +293,10 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
if (index < 0) return Smi::FromInt(-1);
- // TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
+ if (V8_UNLIKELY(array->IsVariableLength() && array->IsOutOfBounds())) {
+ return Smi::FromInt(-1);
+ }
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 561bca4307..c0ab3bc564 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -114,12 +114,14 @@ const char* Builtins::Lookup(Address pc) {
if (!initialized_) return nullptr;
for (Builtin builtin_ix = Builtins::kFirst; builtin_ix <= Builtins::kLast;
++builtin_ix) {
- if (code(builtin_ix).contains(isolate_, pc)) return name(builtin_ix);
+ if (FromCodeT(code(builtin_ix)).contains(isolate_, pc)) {
+ return name(builtin_ix);
+ }
}
return nullptr;
}
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
+Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCallFunction_ReceiverIsNullOrUndefined);
@@ -131,7 +133,7 @@ Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
UNREACHABLE();
}
-Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
+Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCall_ReceiverIsNullOrUndefined);
@@ -143,7 +145,7 @@ Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
UNREACHABLE();
}
-Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
+Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
return code_handle(Builtin::kNonPrimitiveToPrimitive_Default);
@@ -155,7 +157,7 @@ Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
UNREACHABLE();
}
-Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
+Handle<CodeT> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
switch (hint) {
case OrdinaryToPrimitiveHint::kNumber:
return code_handle(Builtin::kOrdinaryToPrimitive_Number);
@@ -177,53 +179,23 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {
return FullObjectSlot(location);
}
-void Builtins::set_code(Builtin builtin, Code code) {
+void Builtins::set_code(Builtin builtin, CodeT code) {
DCHECK_EQ(builtin, code.builtin_id());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ DCHECK_EQ(builtin, FromCodeT(code).builtin_id());
+ }
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
// The given builtin may be uninitialized thus we cannot check its type here.
isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
}
-Code Builtins::code(Builtin builtin) {
+CodeT Builtins::code(Builtin builtin) {
Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
- return Code::cast(Object(ptr));
-}
-
-Handle<Code> Builtins::code_handle(Builtin builtin) {
- Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
- return Handle<Code>(location);
-}
-
-FullObjectSlot Builtins::builtin_code_data_container_slot(Builtin builtin) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Address* location =
- &isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)];
- return FullObjectSlot(location);
-}
-
-void Builtins::set_codet(Builtin builtin, CodeT code) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): add DCHECK_EQ(builtin, code.builtin_id()); once CodeT
- // has respective field.
- DCHECK(Internals::HasHeapObjectTag(code.ptr()));
- // The given builtin may be uninitialized thus we cannot check its type here.
- isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)] =
- code.ptr();
-}
-
-CodeT Builtins::codet(Builtin builtin) {
- Address* table = V8_EXTERNAL_CODE_SPACE_BOOL
- ? isolate_->builtin_code_data_container_table()
- : isolate_->builtin_table();
- Address ptr = table[Builtins::ToInt(builtin)];
return CodeT::cast(Object(ptr));
}
-Handle<CodeT> Builtins::codet_handle(Builtin builtin) {
- Address* table = V8_EXTERNAL_CODE_SPACE_BOOL
- ? isolate_->builtin_code_data_container_table()
- : isolate_->builtin_table();
- Address* location = &table[Builtins::ToInt(builtin)];
+Handle<CodeT> Builtins::code_handle(Builtin builtin) {
+ Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
return Handle<CodeT>(location);
}
@@ -260,7 +232,7 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {
// static
Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {
- Handle<Code> code = isolate->builtins()->code_handle(builtin);
+ Handle<CodeT> code = isolate->builtins()->code_handle(builtin);
return Callable{code, CallInterfaceDescriptorFor(builtin)};
}
@@ -283,12 +255,12 @@ void Builtins::PrintBuiltinCode() {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
const char* builtin_name = name(builtin);
- Handle<Code> code = code_handle(builtin);
if (PassesFilter(base::CStrVector(builtin_name),
base::CStrVector(FLAG_print_builtin_code_filter))) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
OFStream os(trace_scope.file());
- code->Disassemble(builtin_name, os, isolate_);
+ Code builtin_code = FromCodeT(code(builtin));
+ builtin_code.Disassemble(builtin_name, os, isolate_);
os << "\n";
}
}
@@ -301,7 +273,7 @@ void Builtins::PrintBuiltinSize() {
++builtin) {
const char* builtin_name = name(builtin);
const char* kind = KindNameOf(builtin);
- Code code = Builtins::code(builtin);
+ Code code = FromCodeT(Builtins::code(builtin));
PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
code.InstructionSize());
}
@@ -329,17 +301,6 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
return true;
}
-bool Builtins::IsBuiltinCodeDataContainerHandle(Handle<HeapObject> maybe_code,
- Builtin* builtin) const {
- Address* handle_location = maybe_code.location();
- Address* builtins_table = isolate_->builtin_code_data_container_table();
- if (handle_location < builtins_table) return false;
- Address* builtins_table_end = &builtins_table[Builtins::kBuiltinCount];
- if (handle_location >= builtins_table_end) return false;
- *builtin = FromInt(static_cast<int>(handle_location - builtins_table));
- return true;
-}
-
// static
bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
const Builtin builtin = code.builtin_id();
@@ -380,14 +341,16 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
int i = 0;
HandleScope scope(isolate);
for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
- Handle<AbstractCode> code(AbstractCode::cast(Object(builtins[i])), isolate);
+ Code builtin_code = FromCodeT(CodeT::cast(Object(builtins[i])));
+ Handle<AbstractCode> code(AbstractCode::cast(builtin_code), isolate);
PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG, code,
Builtins::name(FromInt(i))));
}
STATIC_ASSERT(kLastBytecodeHandlerPlusOne == kBuiltinCount);
for (; i < kBuiltinCount; i++) {
- Handle<AbstractCode> code(AbstractCode::cast(Object(builtins[i])), isolate);
+ Code builtin_code = FromCodeT(CodeT::cast(Object(builtins[i])));
+ Handle<AbstractCode> code(AbstractCode::cast(builtin_code), isolate);
interpreter::Bytecode bytecode =
builtin_metadata[i].data.bytecode_and_scale.bytecode;
interpreter::OperandScale scale =
@@ -513,7 +476,7 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
if (FLAG_allow_unsafe_function_constructor) return true;
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
Handle<Context> responsible_context = impl->LastEnteredOrMicrotaskContext();
- // TODO(jochen): Remove this.
+ // TODO(verwaest): Remove this.
if (responsible_context.is_null()) {
return true;
}
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index ddb50d3230..57e09018cf 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -36,13 +36,6 @@ static constexpr T FirstFromVarArgs(T x, ...) noexcept {
#define BUILTIN_CODE(isolate, name) \
(isolate)->builtins()->code_handle(i::Builtin::k##name)
-#ifdef V8_EXTERNAL_CODE_SPACE
-#define BUILTIN_CODET(isolate, name) \
- (isolate)->builtins()->codet_handle(i::Builtin::k##name)
-#else
-#define BUILTIN_CODET(isolate, name) BUILTIN_CODE(isolate, name)
-#endif // V8_EXTERNAL_CODE_SPACE
-
enum class Builtin : int32_t {
kNoBuiltinId = -1,
#define DEF_ENUM(Name, ...) k##Name,
@@ -156,22 +149,18 @@ class Builtins {
}
// Convenience wrappers.
- Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
- Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
- Handle<Code> NonPrimitiveToPrimitive(
+ Handle<CodeT> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<CodeT> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<CodeT> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
- Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
- Handle<Code> JSConstructStubGeneric();
+ Handle<CodeT> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+ Handle<CodeT> JSConstructStubGeneric();
// Used by CreateOffHeapTrampolines in isolate.cc.
- void set_code(Builtin builtin, Code code);
- void set_codet(Builtin builtin, CodeT code);
-
- V8_EXPORT_PRIVATE Code code(Builtin builtin);
- V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
+ void set_code(Builtin builtin, CodeT code);
- V8_EXPORT_PRIVATE CodeT codet(Builtin builtin);
- V8_EXPORT_PRIVATE Handle<CodeT> codet_handle(Builtin builtin);
+ V8_EXPORT_PRIVATE CodeT code(Builtin builtin);
+ V8_EXPORT_PRIVATE Handle<CodeT> code_handle(Builtin builtin);
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
@@ -203,11 +192,6 @@ class Builtins {
// by handle location. Similar to Heap::IsRootHandle.
bool IsBuiltinHandle(Handle<HeapObject> maybe_code, Builtin* index) const;
- // Similar to IsBuiltinHandle but for respective CodeDataContainer handle.
- // Can be used only when external code space is enabled.
- bool IsBuiltinCodeDataContainerHandle(Handle<HeapObject> maybe_code,
- Builtin* index) const;
-
// True, iff the given code object is a builtin with off-heap embedded code.
static bool IsIsolateIndependentBuiltin(const Code code);
@@ -296,8 +280,6 @@ class Builtins {
FullObjectSlot builtin_slot(Builtin builtin);
// Returns given builtin's slot in the tier0 builtin table.
FullObjectSlot builtin_tier0_slot(Builtin builtin);
- // Returns given builtin's slot in the builtin code data container table.
- FullObjectSlot builtin_code_data_container_slot(Builtin builtin);
private:
static void Generate_CallFunction(MacroAssembler* masm,
@@ -309,10 +291,10 @@ class Builtins {
enum class CallOrConstructMode { kCall, kConstruct };
static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<Code> code);
+ Handle<CodeT> code);
static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<Code> code);
+ Handle<CodeT> code);
static void Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
@@ -323,7 +305,7 @@ class Builtins {
template <class Descriptor>
static void Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm,
- Handle<Code> builtin_target);
+ Handle<CodeT> builtin_target);
#define DECLARE_ASM(Name, ...) \
static void Generate_##Name(MacroAssembler* masm);
@@ -347,6 +329,24 @@ class Builtins {
friend class SetupIsolateDelegate;
};
+V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
+ // Check for kNoBuiltinId first to abort early when the current Code object
+ // is not a builtin.
+ return builtin_id != Builtin::kNoBuiltinId &&
+ (builtin_id == Builtin::kInterpreterEntryTrampoline ||
+ builtin_id == Builtin::kInterpreterEnterAtBytecode ||
+ builtin_id == Builtin::kInterpreterEnterAtNextBytecode);
+}
+
+V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
+ // Check for kNoBuiltinId first to abort early when the current Code object
+ // is not a builtin.
+ return builtin_id != Builtin::kNoBuiltinId &&
+ (builtin_id == Builtin::kBaselineOutOfLinePrologue ||
+ builtin_id == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
+ builtin_id == Builtin::kBaselineOrInterpreterEnterAtNextBytecode);
+}
+
Builtin ExampleBuiltinForTorqueFunctionPointerType(
size_t function_pointer_type_id);
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 64c81ca572..23995e50e3 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -4,6 +4,59 @@
intrinsic %FromConstexpr<To: type, From: type>(b: From): To;
macro FromConstexpr<To: type, From: type>(o: From): To;
+// Conversions for IntegerLiteral
+FromConstexpr<intptr, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ intptr {
+ return ConstexprIntegerLiteralToIntptr(i);
+}
+FromConstexpr<uintptr, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ uintptr {
+ return ConstexprIntegerLiteralToUintptr(i);
+}
+FromConstexpr<int32, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ int32 {
+ return ConstexprIntegerLiteralToInt32(i);
+}
+FromConstexpr<uint32, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ uint32 {
+ return ConstexprIntegerLiteralToUint32(i);
+}
+FromConstexpr<int31, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ int31 {
+ return ConstexprIntegerLiteralToInt31(i);
+}
+FromConstexpr<int8, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ int8 {
+ return ConstexprIntegerLiteralToInt8(i);
+}
+FromConstexpr<uint8, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ uint8 {
+ return ConstexprIntegerLiteralToUint8(i);
+}
+FromConstexpr<uint64, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ uint64 {
+ return ConstexprIntegerLiteralToUint64(i);
+}
+FromConstexpr<constexpr int31, constexpr IntegerLiteral>(
+ i: constexpr IntegerLiteral): constexpr int31 {
+ return ConstexprIntegerLiteralToInt31(i);
+}
+FromConstexpr<constexpr int32, constexpr IntegerLiteral>(
+ i: constexpr IntegerLiteral): constexpr int32 {
+ return ConstexprIntegerLiteralToInt32(i);
+}
+FromConstexpr<Number, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ Number {
+ return NumberConstant(ConstexprIntegerLiteralToFloat64(i));
+}
+FromConstexpr<Smi, constexpr IntegerLiteral>(i: constexpr IntegerLiteral): Smi {
+ return Convert<Smi>(ConstexprIntegerLiteralToInt31(i));
+}
+FromConstexpr<char8, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ char8 {
+ return %RawDownCast<char8>(FromConstexpr<uint8>(i));
+}
+
FromConstexpr<int31, constexpr int31>(i: constexpr int31): int31 {
return %FromConstexpr<int31>(i);
}
@@ -325,6 +378,10 @@ Convert<intptr, Number>(n: Number): intptr {
Convert<bint, int32>(v: int32): bint {
return IntPtrToBInt(Convert<intptr>(v));
}
+FromConstexpr<float64, constexpr IntegerLiteral>(v: constexpr IntegerLiteral):
+ float64 {
+ return ConstexprIntegerLiteralToFloat64(v);
+}
extern macro IntPtrToBInt(intptr): bint;
Convert<bint, intptr>(v: intptr): bint {
return IntPtrToBInt(v);
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 9bc4bd5f2e..3e1554dfa8 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -84,11 +84,6 @@ javascript builtin DataViewPrototypeGetBuffer(
return dataView.buffer;
}
-extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView):
- never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
-extern macro LoadVariableLengthJSArrayBufferViewByteLength(
- JSArrayBufferView, JSArrayBuffer): uintptr labels DetachedOrOutOfBounds;
-
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
@@ -118,7 +113,7 @@ javascript builtin DataViewPrototypeGetByteOffset(
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
try {
- IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ typed_array::IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
} label DetachedOrOutOfBounds {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteOffset);
@@ -399,7 +394,7 @@ transitioning macro DataViewGet(
// 7. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a
// TypeError exception.
try {
- IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ typed_array::IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
} label DetachedOrOutOfBounds {
ThrowTypeError(
@@ -718,7 +713,7 @@ transitioning macro DataViewSet(
// 10. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a
// TypeError exception.
try {
- IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ typed_array::IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
} label DetachedOrOutOfBounds {
ThrowTypeError(
diff --git a/deps/v8/src/builtins/frame-arguments.tq b/deps/v8/src/builtins/frame-arguments.tq
index a877209b3e..cd9d3d84d6 100644
--- a/deps/v8/src/builtins/frame-arguments.tq
+++ b/deps/v8/src/builtins/frame-arguments.tq
@@ -8,8 +8,8 @@ struct Arguments {
const base: RawPtr;
// length is the number of arguments without the receiver.
const length: intptr;
- // actual_count is the actual number of arguments on the stack (depending on
- // kJSArgcIncludesReceiver may or may not include the receiver).
+ // actual_count is the actual number of arguments on the stack (including the
+ // receiver).
const actual_count: intptr;
}
diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq
index 4bd134e25f..d9eb1740aa 100644
--- a/deps/v8/src/builtins/function.tq
+++ b/deps/v8/src/builtins/function.tq
@@ -17,9 +17,11 @@ FunctionPrototypeBind(implicit context: Context)(
JSFunction, JSAny, int32): JSAny;
const kLengthDescriptorIndex: constexpr int32
- generates 'JSFunctionOrBoundFunction::kLengthDescriptorIndex';
+ generates 'JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex'
+ ;
const kNameDescriptorIndex: constexpr int32
- generates 'JSFunctionOrBoundFunction::kNameDescriptorIndex';
+ generates 'JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex'
+ ;
const kMinDescriptorsForFastBind:
constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind';
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 3beff0d53f..c217c6c7c3 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
@@ -89,11 +89,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
DCHECK(!AreAliased(array, argc, scratch1, scratch2));
Register counter = scratch1;
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
- } else {
- __ mov(counter, argc);
- }
+ __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
__ jmp(&entry);
__ bind(&loop);
Operand value(array, counter, times_system_pointer_size, 0);
@@ -163,9 +159,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -322,9 +316,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -695,9 +687,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(ecx, FieldOperand(
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ dec(ecx);
- }
+ __ dec(ecx); // Exclude receiver.
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -803,18 +793,17 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
ASM_CODE_COMMENT(masm);
Register params_size = scratch1;
- // Get the size of the formal parameters + receiver (in bytes).
+ // Get the size of the formal parameters (in bytes).
__ mov(params_size,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(params_size,
FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
- // Compute the size of the actual parameters + receiver (in bytes).
+ // Compute the size of the actual parameters (in bytes).
__ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ lea(actual_params_size,
- Operand(actual_params_size, times_system_pointer_size,
- kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
+ Operand(actual_params_size, times_system_pointer_size, 0));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -905,22 +894,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(edx, edi, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ int3();
}
@@ -1041,9 +1024,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(masm);
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ test(
- optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ test(optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
@@ -1380,14 +1362,9 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ movd(xmm0, eax); // Spill number of arguments.
// Compute the expected number of arguments.
- int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ __ mov(scratch, eax);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- argc_modification -= 1;
- }
- if (argc_modification != 0) {
- __ lea(scratch, Operand(eax, argc_modification));
- } else {
- __ mov(scratch, eax);
+ __ dec(scratch); // Exclude receiver.
}
// Pop return address to allow tail-call after pushing arguments.
@@ -1462,10 +1439,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
- constexpr int receiver_offset =
- kJSArgcIncludesReceiver ? 0 : kSystemPointerSize;
- __ lea(scratch1,
- Operand(num_args, times_system_pointer_size, receiver_offset));
+ __ lea(scratch1, Operand(num_args, times_system_pointer_size, 0));
__ AllocateStackSpace(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
@@ -1474,7 +1448,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
- i * kSystemPointerSize + receiver_offset));
+ i * kSystemPointerSize));
__ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
@@ -1496,11 +1470,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_check);
__ inc(scratch1);
__ cmp(scratch1, eax);
- if (kJSArgcIncludesReceiver) {
- __ j(less, &loop_header, Label::kNear);
- } else {
- __ j(less_equal, &loop_header, Label::kNear);
- }
+ __ j(less, &loop_header, Label::kNear);
}
} // anonymous namespace
@@ -1899,7 +1869,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// from LAZY is always the last argument.
__ movd(Operand(esp, eax, times_system_pointer_size,
BuiltinContinuationFrameConstants::kFixedFrameSize -
- (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
+ kJSArgcReceiverSlots * kSystemPointerSize),
xmm0);
}
__ mov(
@@ -1965,13 +1935,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
- if (kJSArgcIncludesReceiver) {
- __ cmp(eax, Immediate(JSParameterCount(0)));
- __ j(equal, &no_this_arg, Label::kNear);
- } else {
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
- }
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
{
__ mov(edi, args[1]);
__ cmp(eax, Immediate(JSParameterCount(1)));
@@ -1980,10 +1945,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(
- eax, edi, ecx, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -2042,13 +2006,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- if (kJSArgcIncludesReceiver) {
- __ cmp(eax, Immediate(JSParameterCount(0)));
- __ j(greater, &done, Label::kNear);
- } else {
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- }
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
__ PushRoot(RootIndex::kUndefinedValue);
__ inc(eax);
__ bind(&done);
@@ -2095,10 +2054,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ DropArgumentsAndPushNewReceiver(
- eax, ecx, edx, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2157,8 +2115,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -2205,9 +2162,6 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ lea(new_space, Operand(count, times_system_pointer_size, 0));
__ AllocateStackSpace(new_space);
- if (!kJSArgcIncludesReceiver) {
- __ inc(argc_in_out);
- }
Register current = scratch1;
Register value = scratch2;
@@ -2228,12 +2182,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
pointer_to_new_space_out,
Operand(esp, argc_in_out, times_system_pointer_size, kSystemPointerSize));
// Update the total number of arguments.
- if (kJSArgcIncludesReceiver) {
- __ add(argc_in_out, count);
- } else {
- // Also subtract the receiver again.
- __ lea(argc_in_out, Operand(argc_in_out, count, times_1, -1));
- }
+ __ add(argc_in_out, count);
}
} // namespace
@@ -2372,9 +2321,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ dec(edx);
- }
+ __ dec(edx); // Exclude receiver.
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
@@ -2447,13 +2394,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(eax);
- __ AssertFunction(edi, edx);
+ __ AssertCallableFunction(edi, edx);
- Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2534,14 +2477,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzx_w(
ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2665,7 +2600,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
StackArgumentsAccessor args(argc);
Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
- non_proxy, class_constructor;
+ non_proxy, non_wrapped_function, class_constructor;
__ JumpIfSmi(target, &non_callable);
__ bind(&non_smi);
__ LoadMap(map, target);
@@ -2694,9 +2629,17 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ bind(&non_proxy);
+ __ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
+ __ j(not_equal, &non_wrapped_function);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
- __ bind(&non_proxy);
+ __ bind(&non_wrapped_function);
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
__ j(equal, &class_constructor);
@@ -2980,19 +2923,19 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
- for (int reg_code : base::bits::IterateBitsBackwards(
- WasmDebugBreakFrameConstants::kPushedGpRegs)) {
- __ Push(Register::from_code(reg_code));
+ for (Register reg :
+ base::Reversed(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(reg);
}
constexpr int kFpStackSize =
kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
__ AllocateStackSpace(kFpStackSize);
int offset = kFpStackSize;
- for (int reg_code : base::bits::IterateBitsBackwards(
- WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ for (DoubleRegister reg :
+ base::Reversed(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
offset -= kSimd128Size;
- __ movdqu(Operand(esp, offset), DoubleRegister::from_code(reg_code));
+ __ movdqu(Operand(esp, offset), reg);
}
// Initialize the JavaScript context with 0. CEntry will use it to
@@ -3001,15 +2944,13 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ CallRuntime(Runtime::kWasmDebugBreak, 0);
// Restore registers.
- for (int reg_code :
- base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
- __ movdqu(DoubleRegister::from_code(reg_code), Operand(esp, offset));
+ for (DoubleRegister reg : WasmDebugBreakFrameConstants::kPushedFpRegs) {
+ __ movdqu(reg, Operand(esp, offset));
offset += kSimd128Size;
}
__ add(esp, Immediate(kFpStackSize));
- for (int reg_code :
- base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
- __ Pop(Register::from_code(reg_code));
+ for (Register reg : WasmDebugBreakFrameConstants::kPushedGpRegs) {
+ __ Pop(reg);
}
}
@@ -3026,6 +2967,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq
index adf513edf4..945e7fd2d4 100644
--- a/deps/v8/src/builtins/internal.tq
+++ b/deps/v8/src/builtins/internal.tq
@@ -7,8 +7,6 @@ namespace internal {
namespace runtime {
extern runtime GetTemplateObject(implicit context: Context)(
TemplateObjectDescription, SharedFunctionInfo, Smi): JSAny;
-extern runtime BytecodeBudgetInterruptFromCode(implicit context: Context)(
- FeedbackCell): JSAny;
}
builtin GetTemplateObject(
@@ -40,13 +38,6 @@ builtin GetTemplateObject(
}
}
-builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
- feedbackCell: FeedbackCell): Object {
- // The runtime call is wrapped by a builtin since the calling sequence in
- // generated code is shorter for builtins than for runtime calls.
- tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
-}
-
extern transitioning builtin ForInFilter(implicit context: Context)(
JSAny, HeapObject): JSAny;
extern enum ForInFeedback extends uint31 { kAny, ...}
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
index 2e533f6afd..8033944139 100644
--- a/deps/v8/src/builtins/loong64/builtins-loong64.cc
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -81,11 +81,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mov(scratch, argc);
- }
+ __ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots));
__ Branch(&entry);
__ bind(&loop);
__ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
@@ -144,10 +140,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(t3, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver,
- t3);
+ TurboAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@@ -304,10 +297,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(a1, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver,
- a4);
+ TurboAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@@ -429,9 +419,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld_hu(
a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ Sub_d(a3, a3, Operand(kJSArgcReceiverSlots));
- }
+ __ Sub_d(a3, a3, Operand(kJSArgcReceiverSlots));
__ Ld_d(t1, FieldMemOperand(
a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -565,7 +553,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// a1: microtask_queue
// Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
+ __ MultiPush(kCalleeSaved | ra);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
@@ -719,7 +707,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
+ __ MultiPop(kCalleeSaved | ra);
// Return.
__ Jump(ra);
}
@@ -764,11 +752,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(a2);
// Check if we have enough stack space to push all arguments.
- if (kJSArgcIncludesReceiver) {
- __ mov(a6, a4);
- } else {
- __ addi_d(a6, a4, 1);
- }
+ __ mov(a6, a4);
Generate_CheckStackOverflow(masm, a6, a0, s2);
// Copy arguments to the stack.
@@ -849,10 +833,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ld_d(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
- if (!kJSArgcIncludesReceiver) {
- __ Add_d(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -932,22 +912,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1058,9 +1032,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ And(scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
@@ -1080,8 +1053,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(s1.bit() | s2.bit());
- temps.Exclude(t7.bit());
+ temps.Include({s1, s2});
+ temps.Exclude({t7});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1224,7 +1197,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
- temps.Exclude(s1.bit() | s2.bit());
+ temps.Exclude({s1, s2});
}
// Generate code for entering a JS function with the interpreter.
@@ -1510,12 +1483,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Sub_d(a0, a0, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Sub_d(a3, a0, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ Add_d(a3, a0, Operand(1));
} else {
__ mov(a3, a0);
}
@@ -1571,11 +1540,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Sub_d(a0, a0, Operand(1));
}
- Register argc_without_receiver = a0;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = a6;
- __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = a6;
+ __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
// Push the arguments, This function modifies t0, a4 and a5.
GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
@@ -1913,10 +1879,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Sub_d(scratch, scratch, Operand(1));
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld_d(receiver, MemOperand(sp, 0));
- __ DropArgumentsAndPushNewReceiver(
- argc, this_arg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_arg,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2009,10 +1974,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Sub_d(scratch, scratch, Operand(1));
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
- __ DropArgumentsAndPushNewReceiver(
- argc, this_argument, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_argument,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2070,10 +2034,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Sub_d(scratch, scratch, Operand(1));
__ Movz(new_target, target, scratch); // if argc == 2
- __ DropArgumentsAndPushNewReceiver(
- argc, undefined_value, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2120,21 +2083,13 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ mov(dest, sp);
__ Alsl_d(end, argc_in_out, old_sp, kSystemPointerSizeLog2);
Label loop, done;
- if (kJSArgcIncludesReceiver) {
- __ Branch(&done, ge, old_sp, Operand(end));
- } else {
- __ Branch(&done, gt, old_sp, Operand(end));
- }
+ __ Branch(&done, ge, old_sp, Operand(end));
__ bind(&loop);
__ Ld_d(value, MemOperand(old_sp, 0));
__ St_d(value, MemOperand(dest, 0));
__ Add_d(old_sp, old_sp, Operand(kSystemPointerSize));
__ Add_d(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- __ Branch(&loop, lt, old_sp, Operand(end));
- } else {
- __ Branch(&loop, le, old_sp, Operand(end));
- }
+ __ Branch(&loop, lt, old_sp, Operand(end));
__ bind(&done);
// Update total number of arguments.
@@ -2244,9 +2199,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ Sub_d(a7, a7, Operand(kJSArgcReceiverSlots));
- }
+ __ Sub_d(a7, a7, Operand(kJSArgcReceiverSlots));
__ Sub_d(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
@@ -2303,12 +2256,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(a1);
- Label class_constructor;
__ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(kScratchReg, a3,
- Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2384,14 +2332,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ld_hu(
a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
// static
@@ -2504,6 +2444,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
instance_type, Operand(JS_PROXY_TYPE));
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq, instance_type,
+ Operand(JS_WRAPPED_FUNCTION_TYPE));
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ Branch(&class_constructor, eq, instance_type,
@@ -2718,22 +2664,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
@@ -2741,7 +2687,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// kFixedFrameSizeFromFp is hard coded to include space for Simd
// registers, so we still need to allocate extra (unused) space on the stack
// as if they were saved.
- __ Sub_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ __ Sub_d(sp, sp, fp_regs.Count() * kDoubleSize);
// Pass instance and function index as an explicit arguments to the runtime
// function.
@@ -2752,7 +2698,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ CallRuntime(Runtime::kWasmCompileLazy, 2);
__ mov(t8, a0);
- __ Add_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ __ Add_d(sp, sp, fp_regs.Count() * kDoubleSize);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
@@ -2792,6 +2738,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3419,7 +3375,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+ RegList saved_regs = restored_regs | sp | ra;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
@@ -3437,7 +3393,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Leave gaps for other registers.
__ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
@@ -3488,7 +3444,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ Ld_d(a2, MemOperand(sp, i * kPointerSize));
__ St_d(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
@@ -3583,12 +3539,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
- DCHECK(!(t7.bit() & restored_regs));
+ DCHECK(!(restored_regs.has(t7)));
// Restore the registers from the last output frame.
__ mov(t7, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ Ld_d(ToRegister(i), MemOperand(t7, offset));
}
}
@@ -3729,6 +3685,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
__ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, 0, a4);
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add_d(code_obj, code_obj, kReturnRegister0);
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index c1b1b4711d..64ecb55f23 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -82,11 +82,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mov(scratch, argc);
- }
+ __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots));
__ Branch(&entry);
__ bind(&loop);
__ Lsa(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -144,9 +140,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(t3, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Ret();
}
@@ -303,9 +297,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(a1, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -398,7 +390,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// a0: root_register_value
// Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
+ __ MultiPush(kCalleeSaved | ra);
pushed_stack_space +=
kNumCalleeSaved * kPointerSize + kPointerSize /* ra */;
@@ -532,7 +524,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
+ __ MultiPop(kCalleeSaved | ra);
// Return.
__ Jump(ra);
}
@@ -585,11 +577,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers a2 and t0.
- if (kJSArgcIncludesReceiver) {
- __ mov(t1, a0);
- } else {
- __ addiu(t1, a0, 1);
- }
+ __ mov(t1, a0);
Generate_CheckStackOverflow(masm, t1, t0, t2);
// Copy arguments to the stack.
@@ -734,9 +722,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ Subu(a3, a3, Operand(kJSArgcReceiverSlots));
- }
+ __ Subu(a3, a3, Operand(kJSArgcReceiverSlots));
__ lw(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -842,10 +828,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Lw(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ sll(actual_params_size, actual_params_size, kPointerSizeLog2);
- if (!kJSArgcIncludesReceiver) {
- __ Addu(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -930,20 +912,15 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
+ // Marker should be one of CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here.
if (FLAG_debug_code) {
@@ -1054,9 +1031,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ And(scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
@@ -1076,7 +1052,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(s1.bit() | s2.bit());
+ temps.Include({s1, s2});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1217,7 +1193,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
- temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Exclude({kScratchReg, kScratchReg2});
}
// Generate code for entering a JS function with the interpreter.
@@ -1499,12 +1475,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Subu(a0, a0, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Subu(t0, a0, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ Addu(t0, a0, Operand(1));
} else {
__ mov(t0, a0);
}
@@ -1560,11 +1532,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Subu(a0, a0, Operand(1));
}
- Register argc_without_receiver = a0;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = t2;
- __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = t2;
+ __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
GenerateInterpreterPushArgs(masm, argc_without_receiver, t4, t1, t0);
@@ -1895,10 +1864,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
__ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&no_arg);
- __ DropArgumentsAndPushNewReceiver(
- a0, a3, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1975,10 +1942,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
__ lw(a2, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList
__ bind(&no_arg);
- __ DropArgumentsAndPushNewReceiver(
- a0, a3, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2022,10 +1987,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
__ lw(a3, MemOperand(sp, 3 * kSystemPointerSize)); // new.target
__ bind(&no_arg);
- __ DropArgumentsAndPushNewReceiver(
- a0, t0, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(a0, t0, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2072,21 +2035,13 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ mov(dest, sp);
__ Lsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
Label loop, done;
- if (kJSArgcIncludesReceiver) {
- __ Branch(&done, ge, old_sp, Operand(end));
- } else {
- __ Branch(&done, gt, old_sp, Operand(end));
- }
+ __ Branch(&done, ge, old_sp, Operand(end));
__ bind(&loop);
__ lw(value, MemOperand(old_sp, 0));
__ sw(value, MemOperand(dest, 0));
__ Addu(old_sp, old_sp, Operand(kSystemPointerSize));
__ Addu(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- __ Branch(&loop, lt, old_sp, Operand(end));
- } else {
- __ Branch(&loop, le, old_sp, Operand(end));
- }
+ __ Branch(&loop, lt, old_sp, Operand(end));
__ bind(&done);
// Update total number of arguments.
@@ -2188,9 +2143,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ Subu(t2, t2, Operand(kJSArgcReceiverSlots));
- }
+ __ Subu(t2, t2, Operand(kJSArgcReceiverSlots));
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
@@ -2246,12 +2199,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(a1);
- Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(kScratchReg, a3,
- Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2327,14 +2275,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
// static
@@ -2448,6 +2388,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
instance_type, Operand(JS_PROXY_TYPE));
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq, instance_type,
+ Operand(JS_WRAPPED_FUNCTION_TYPE));
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ Branch(&class_constructor, eq, instance_type,
@@ -2662,22 +2608,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
@@ -2729,6 +2675,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3865,7 +3821,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
static constexpr int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+ RegList saved_regs = restored_regs | sp | ra;
static constexpr int kDoubleRegsSize =
kDoubleSize * DoubleRegister::kNumRegisters;
@@ -3884,7 +3840,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Leave gaps for other registers.
__ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
@@ -3935,7 +3891,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ lw(a2, MemOperand(sp, i * kPointerSize));
__ sw(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
@@ -4031,12 +3987,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
+ DCHECK(!(restored_regs.has(at)));
// Restore the registers from the last output frame.
__ mov(at, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ lw(ToRegister(i), MemOperand(at, offset));
}
}
@@ -4177,6 +4133,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
__ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, 0, t0);
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Addu(code_obj, code_obj, kReturnRegister0);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 2ad2fae5db..85872b3d5c 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -81,11 +81,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ Dsubu(scratch, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mov(scratch, argc);
- }
+ __ Dsubu(scratch, argc, Operand(kJSArgcReceiverSlots));
__ Branch(&entry);
__ bind(&loop);
__ Dlsa(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -144,10 +140,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(t3, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver,
- t3);
+ TurboAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@@ -304,10 +297,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(a1, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver,
- a4);
+ TurboAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@@ -428,9 +418,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ Dsubu(a3, a3, Operand(kJSArgcReceiverSlots));
- }
+ __ Dsubu(a3, a3, Operand(kJSArgcReceiverSlots));
__ Ld(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -568,7 +556,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// 0 arg slots on mips64 (4 args slots on mips)
// Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
+ __ MultiPush(kCalleeSaved | ra);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
@@ -721,7 +709,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
+ __ MultiPop(kCalleeSaved | ra);
// Return.
__ Jump(ra);
}
@@ -766,11 +754,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(a2);
// Check if we have enough stack space to push all arguments.
- if (kJSArgcIncludesReceiver) {
- __ mov(a6, a4);
- } else {
- __ daddiu(a6, a4, 1);
- }
+ __ mov(a6, a4);
Generate_CheckStackOverflow(masm, a6, a0, s2);
// Copy arguments to the stack.
@@ -854,10 +838,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ld(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ dsll(actual_params_size, actual_params_size, kPointerSizeLog2);
- if (!kJSArgcIncludesReceiver) {
- __ Daddu(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -942,22 +922,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1065,9 +1039,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ And(scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
@@ -1086,7 +1059,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(s1.bit() | s2.bit());
+ temps.Include({s1, s2});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1227,7 +1200,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
- temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Exclude({kScratchReg, kScratchReg2});
}
// Generate code for entering a JS function with the interpreter.
@@ -1508,12 +1481,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Dsubu(a0, a0, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Dsubu(a3, a0, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ Daddu(a3, a0, Operand(1));
} else {
__ mov(a3, a0);
}
@@ -1569,11 +1538,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Dsubu(a0, a0, Operand(1));
}
- Register argc_without_receiver = a0;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = a6;
- __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = a6;
+ __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
// Push the arguments, This function modifies t0, a4 and a5.
GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
@@ -1907,10 +1873,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld(receiver, MemOperand(sp));
- __ DropArgumentsAndPushNewReceiver(
- argc, this_arg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_arg,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2005,10 +1970,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
- __ DropArgumentsAndPushNewReceiver(
- argc, this_argument, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_argument,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2066,10 +2030,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(new_target, target, scratch); // if argc == 2
- __ DropArgumentsAndPushNewReceiver(
- argc, undefined_value, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2116,21 +2079,13 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ mov(dest, sp);
__ Dlsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
Label loop, done;
- if (kJSArgcIncludesReceiver) {
- __ Branch(&done, ge, old_sp, Operand(end));
- } else {
- __ Branch(&done, gt, old_sp, Operand(end));
- }
+ __ Branch(&done, ge, old_sp, Operand(end));
__ bind(&loop);
__ Ld(value, MemOperand(old_sp, 0));
__ Sd(value, MemOperand(dest, 0));
__ Daddu(old_sp, old_sp, Operand(kSystemPointerSize));
__ Daddu(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- __ Branch(&loop, lt, old_sp, Operand(end));
- } else {
- __ Branch(&loop, le, old_sp, Operand(end));
- }
+ __ Branch(&loop, lt, old_sp, Operand(end));
__ bind(&done);
// Update total number of arguments.
@@ -2240,9 +2195,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ Dsubu(a7, a7, Operand(kJSArgcReceiverSlots));
- }
+ __ Dsubu(a7, a7, Operand(kJSArgcReceiverSlots));
__ Dsubu(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
@@ -2299,12 +2252,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(a1);
- Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(kScratchReg, a3,
- Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2380,14 +2328,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
// static
@@ -2499,6 +2439,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
instance_type, Operand(JS_PROXY_TYPE));
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq, instance_type,
+ Operand(JS_WRAPPED_FUNCTION_TYPE));
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ Branch(&class_constructor, eq, instance_type,
@@ -2712,22 +2658,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ MultiPush(gp_regs);
// Check if machine has simd enabled, if so push vector registers. If not
@@ -2749,7 +2695,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// kFixedFrameSizeFromFp is hard coded to include space for Simd
// registers, so we still need to allocate extra (unused) space on the stack
// as if they were saved.
- __ Dsubu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ __ Dsubu(sp, sp, fp_regs.Count() * kDoubleSize);
__ bind(&simd_pushed);
// Pass instance and function index as an explicit arguments to the runtime
// function.
@@ -2773,7 +2719,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
}
__ Branch(&simd_popped);
__ bind(&pop_doubles);
- __ Daddu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ __ Daddu(sp, sp, fp_regs.Count() * kDoubleSize);
__ MultiPopFPU(fp_regs);
__ bind(&simd_popped);
__ MultiPop(gp_regs);
@@ -2813,6 +2759,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3444,7 +3400,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+ RegList saved_regs = restored_regs | sp | ra;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
@@ -3462,7 +3418,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Leave gaps for other registers.
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
@@ -3514,7 +3470,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ Ld(a2, MemOperand(sp, i * kPointerSize));
__ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
@@ -3609,12 +3565,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
+ DCHECK(!(restored_regs.has(at)));
// Restore the registers from the last output frame.
__ mov(at, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ Ld(ToRegister(i), MemOperand(at, offset));
}
}
@@ -3754,6 +3710,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
__ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, 0, a4);
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Daddu(code_obj, code_obj, kReturnRegister0);
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index 4136b9a693..5ccc5d2e9d 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -90,7 +90,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
// Calculate length and pre-allocate the result string.
let temp: int32 = n;
- let length: int32 = isNegative ? 1 : 0;
+ let length: int32 = isNegative ? Convert<int32>(1) : Convert<int32>(0);
while (temp > 0) {
temp = temp / radix;
length = length + 1;
@@ -241,7 +241,7 @@ transitioning javascript builtin NumberParseFloat(
} label String(s: String) {
// Check if the string is a cached array index.
const hash: NameHash = s.raw_hash_field;
- if (!hash.is_not_integer_index_mask &&
+ if (IsIntegerIndex(hash) &&
hash.array_index_length < kMaxCachedArrayIndexLength) {
const arrayIndex: uint32 = hash.array_index_value;
return SmiFromUint32(arrayIndex);
@@ -277,7 +277,7 @@ transitioning builtin ParseInt(implicit context: Context)(
// the runtime for the range [0,1[ because the result could be -0.
const kMaxAbsValue: float64 = 2147483648.0;
const absInput: float64 = math::Float64Abs(asFloat64);
- if (absInput < kMaxAbsValue && absInput >= 1) goto Int32(asInt32);
+ if (absInput < kMaxAbsValue && absInput >= 1.0) goto Int32(asInt32);
goto CallRuntime;
}
case (s: String): {
@@ -292,7 +292,7 @@ transitioning builtin ParseInt(implicit context: Context)(
} label String(s: String) {
// Check if the string is a cached array index.
const hash: NameHash = s.raw_hash_field;
- if (!hash.is_not_integer_index_mask &&
+ if (IsIntegerIndex(hash) &&
hash.array_index_length < kMaxCachedArrayIndexLength) {
const arrayIndex: uint32 = hash.array_index_value;
return SmiFromUint32(arrayIndex);
@@ -690,7 +690,7 @@ builtin Negate(implicit context: Context)(value: JSAny): Numeric {
} label Smi(s: Smi) {
return SmiMul(s, -1);
} label HeapNumber(h: HeapNumber) {
- return AllocateHeapNumberWithValue(Convert<float64>(h) * -1);
+ return AllocateHeapNumberWithValue(Convert<float64>(h) * -1.0);
} label BigInt(b: BigInt) {
tail runtime::BigIntUnaryOp(
context, b, SmiTag<Operation>(Operation::kNegate));
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
index 53065ded5f..0eb8206115 100644
--- a/deps/v8/src/builtins/object.tq
+++ b/deps/v8/src/builtins/object.tq
@@ -99,10 +99,11 @@ transitioning builtin CreateObjectWithoutProperties(implicit context: Context)(
case (Null): {
map = *NativeContextSlot(
ContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP);
- if (kDictModePrototypes) {
+ @if(V8_ENABLE_SWISS_NAME_DICTIONARY) {
properties =
AllocateSwissNameDictionary(kSwissNameDictionaryInitialCapacity);
- } else {
+ }
+ @ifnot(V8_ENABLE_SWISS_NAME_DICTIONARY) {
properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
}
}
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 1c4f571e83..96322fcc4b 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -79,11 +79,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, done;
- if (kJSArgcIncludesReceiver) {
- __ subi(scratch, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mr(scratch, argc);
- }
+ __ subi(scratch, argc, Operand(kJSArgcReceiverSlots));
__ cmpi(scratch, Operand::Zero());
__ beq(&done);
__ mtctr(scratch);
@@ -160,9 +156,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -324,9 +318,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r4, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -434,9 +426,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
- }
+ __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
@@ -524,6 +514,7 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r4);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+ __ Trap(); // Unreachable.
}
namespace {
@@ -746,11 +737,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- if (kJSArgcIncludesReceiver) {
- __ mr(r3, r7);
- } else {
- __ addi(r3, r7, Operand(1));
- }
+ __ mr(r3, r7);
__ StackOverflowCheck(r3, r9, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -852,10 +839,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
- if (!kJSArgcIncludesReceiver) {
- __ addi(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -941,22 +924,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1048,8 +1025,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
r0);
__ beq(&maybe_has_optimized_code, cr0);
@@ -1336,12 +1312,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ subi(r3, r3, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ subi(r6, r3, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ addi(r6, r3, Operand(1));
} else {
__ mr(r6, r3);
}
@@ -1397,11 +1369,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ subi(r3, r3, Operand(1));
}
- Register argc_without_receiver = r3;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = ip;
- __ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = ip;
+ __ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
// Push the arguments.
GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8);
@@ -1741,10 +1710,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r3, r8, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1827,10 +1794,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r3, r8, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1878,10 +1843,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r3, r7, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1924,13 +1887,9 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Register dest = pointer_to_new_space_out;
__ addi(dest, sp, Operand(-kSystemPointerSize));
Label loop, skip;
- if (!kJSArgcIncludesReceiver) {
- __ addi(r0, argc_in_out, Operand(1));
- } else {
- __ mr(r0, argc_in_out);
- __ cmpi(r0, Operand::Zero());
- __ ble(&skip);
- }
+ __ mr(r0, argc_in_out);
+ __ cmpi(r0, Operand::Zero());
+ __ ble(&skip);
__ mtctr(r0);
__ bind(&loop);
__ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize));
@@ -2045,15 +2004,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ EnterFrame(StackFrame::INTERNAL);
__ Push(r6);
__ CallRuntime(Runtime::kThrowNotConstructor);
+ __ Trap(); // Unreachable.
}
__ bind(&new_target_constructor);
}
Label stack_done, stack_overflow;
__ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ subi(r8, r8, Operand(kJSArgcReceiverSlots));
- }
+ __ subi(r8, r8, Operand(kJSArgcReceiverSlots));
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
@@ -2117,14 +2075,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(r4);
+ __ AssertCallableFunction(r4);
- Label class_constructor;
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
- __ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2202,14 +2156,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ push(r4);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2346,6 +2292,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ cmpi(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
@@ -2367,6 +2319,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
}
// 4. The function is a "classConstructor", need to raise an exception.
@@ -2517,29 +2470,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
// List must match register numbers under kFpParamRegisters.
- constexpr RegList simd_regs =
- Simd128Register::ListOf(v1, v2, v3, v4, v5, v6, v7, v8);
+ constexpr Simd128RegList simd_regs = {v1, v2, v3, v4, v5, v6, v7, v8};
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
- CHECK_EQ(NumRegs(simd_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(simd_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(simd_regs));
+ simd_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushF64AndV128(fp_regs, simd_regs);
@@ -2571,7 +2523,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
// them after the runtime call.
__ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
__ MultiPushF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
- WasmDebugBreakFrameConstants::kPushedFpRegs);
+ WasmDebugBreakFrameConstants::kPushedSimd128Regs);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
@@ -2580,7 +2532,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
// Restore registers.
__ MultiPopF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
- WasmDebugBreakFrameConstants::kPushedFpRegs);
+ WasmDebugBreakFrameConstants::kPushedSimd128Regs);
__ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
}
__ Ret();
@@ -2596,6 +2548,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3283,7 +3245,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit();
+ RegList saved_regs = restored_regs | sp;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
@@ -3301,7 +3263,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Leave gaps for other registers.
__ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ StoreU64(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
@@ -3466,12 +3428,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- DCHECK(!(scratch.bit() & restored_regs));
+ DCHECK(!(restored_regs.has(scratch)));
__ mr(scratch, r5);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ LoadU64(ToRegister(i), MemOperand(scratch, offset));
}
}
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index cd55ec9f3b..29c468ed3e 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -319,12 +319,12 @@ Reject(JSAny) {
transitioning macro GeneratePromiseAll<F1: type, F2: type>(
implicit context: Context)(
receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1,
- createRejectElementFunctor: F2): JSAny {
+ createRejectElementFunctor: F2, message: constexpr string): JSAny {
const nativeContext = LoadNativeContext(context);
// Let C be the this value.
// If Type(C) is not Object, throw a TypeError exception.
const receiver = Cast<JSReceiver>(receiver)
- otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.all');
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, message);
// Let promiseCapability be ? NewPromiseCapability(C).
// Don't fire debugEvent so that forwarding the rejection through all does
@@ -368,7 +368,7 @@ transitioning javascript builtin PromiseAll(
js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
return GeneratePromiseAll(
receiver, iterable, PromiseAllResolveElementFunctor{},
- PromiseAllRejectElementFunctor{});
+ PromiseAllRejectElementFunctor{}, 'Promise.all');
}
// ES#sec-promise.allsettled
@@ -377,7 +377,7 @@ transitioning javascript builtin PromiseAllSettled(
js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
return GeneratePromiseAll(
receiver, iterable, PromiseAllSettledResolveElementFunctor{},
- PromiseAllSettledRejectElementFunctor{});
+ PromiseAllSettledRejectElementFunctor{}, 'Promise.allSettled');
}
extern macro PromiseAllResolveElementSharedFunConstant(): SharedFunctionInfo;
diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq
index d50b8b5574..ffb285a06a 100644
--- a/deps/v8/src/builtins/promise-any.tq
+++ b/deps/v8/src/builtins/promise-any.tq
@@ -69,7 +69,7 @@ macro CreatePromiseAnyRejectElementFunction(implicit context: Context)(
return reject;
}
-// https://tc39.es/proposal-promise-any/#sec-promise.any-reject-element-functions
+// https://tc39.es/ecma262/#sec-promise.any-reject-element-functions
transitioning javascript builtin
PromiseAnyRejectElementClosure(
js-implicit context: Context, receiver: JSAny,
@@ -314,7 +314,7 @@ Reject(JSAny) {
return resultCapability.promise;
}
-// https://tc39.es/proposal-promise-any/#sec-promise.any
+// https://tc39.es/ecma262/#sec-promise.any
transitioning javascript builtin
PromiseAny(
js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq
index ff979f9732..a34583354c 100644
--- a/deps/v8/src/builtins/promise-finally.tq
+++ b/deps/v8/src/builtins/promise-finally.tq
@@ -160,6 +160,7 @@ macro CreatePromiseFinallyFunctions(implicit context: Context)(
};
}
+// https://tc39.es/ecma262/#sec-promise.prototype.finally
transitioning javascript builtin
PromisePrototypeFinally(
js-implicit context: Context, receiver: JSAny)(onFinally: JSAny): JSAny {
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 99c4006da2..df3010669e 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -24,6 +24,9 @@ extern macro
PromiseBuiltinsAssembler::IsContextPromiseHookEnabled(uint32): bool;
extern macro
+PromiseBuiltinsAssembler::IsIsolatePromiseHookEnabled(uint32): bool;
+
+extern macro
PromiseBuiltinsAssembler::PromiseHookFlags(): uint32;
namespace promise {
@@ -120,9 +123,13 @@ transitioning macro RunContextPromiseHookInit(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
promise: JSPromise): void {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
- PromiseHookFlags());
+ // Use potentially unused variables.
+ const _unusedPromise = promise;
+ @if(V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
+ PromiseHookFlags());
+ }
}
@export
@@ -135,9 +142,13 @@ transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
- PromiseHookFlags());
+ // Use potentially unused variables.
+ const _unusedPromiseOrCapability = promiseOrCapability;
+ @if(V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+ }
}
@export
@@ -152,9 +163,13 @@ transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
- PromiseHookFlags());
+ // Use potentially unused variables.
+ const _unusedPromiseOrCapability = promiseOrCapability;
+ @if(V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+ }
}
@export
@@ -170,27 +185,33 @@ transitioning macro RunContextPromiseHook(implicit context: Context)(
slot: Slot<NativeContext, Undefined|Callable>,
promiseOrCapability: JSPromise|PromiseCapability|Undefined,
flags: uint32): void {
- if (!IsContextPromiseHookEnabled(flags)) return;
- const maybeHook = *NativeContextSlot(slot);
- const hook = Cast<Callable>(maybeHook) otherwise return;
-
- let promise: JSPromise;
- typeswitch (promiseOrCapability) {
- case (jspromise: JSPromise): {
- promise = jspromise;
- }
- case (capability: PromiseCapability): {
- promise = Cast<JSPromise>(capability.promise) otherwise return;
- }
- case (Undefined): {
- return;
+ // Use potentially unused variables.
+ const _unusedSlot = slot;
+ const _unusedPromiseOrCapability = promiseOrCapability;
+ const _unusedFlags = flags;
+ @if(V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS) {
+ if (!IsContextPromiseHookEnabled(flags)) return;
+ const maybeHook = *NativeContextSlot(slot);
+ const hook = Cast<Callable>(maybeHook) otherwise return;
+
+ let promise: JSPromise;
+ typeswitch (promiseOrCapability) {
+ case (jspromise: JSPromise): {
+ promise = jspromise;
+ }
+ case (capability: PromiseCapability): {
+ promise = Cast<JSPromise>(capability.promise) otherwise return;
+ }
+ case (Undefined): {
+ return;
+ }
}
- }
- try {
- Call(context, hook, Undefined, promise);
- } catch (e, _message) {
- runtime::ReportMessageFromMicrotask(e);
+ try {
+ Call(context, hook, Undefined, promise);
+ } catch (e, _message) {
+ runtime::ReportMessageFromMicrotask(e);
+ }
}
}
@@ -199,10 +220,12 @@ transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
const promiseHookFlags = PromiseHookFlags();
// Fast return if no hooks are set.
if (promiseHookFlags == 0) return;
- if (IsContextPromiseHookEnabled(promiseHookFlags)) {
- RunContextPromiseHookInit(promise, parent);
+ @if(V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS) {
+ if (IsContextPromiseHookEnabled(promiseHookFlags)) {
+ RunContextPromiseHookInit(promise, parent);
+ }
}
- if (IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(promiseHookFlags)) {
+ if (IsIsolatePromiseHookEnabled(promiseHookFlags)) {
runtime::PromiseHookInit(promise, parent);
}
}
diff --git a/deps/v8/src/builtins/promise-then.tq b/deps/v8/src/builtins/promise-then.tq
index 00f9b0c80f..efa1285575 100644
--- a/deps/v8/src/builtins/promise-then.tq
+++ b/deps/v8/src/builtins/promise-then.tq
@@ -4,8 +4,16 @@
#include 'src/builtins/builtins-promise-gen.h'
+namespace runtime {
+extern transitioning runtime
+DebugPromiseThen(implicit context: Context)(JSAny): JSAny;
+}
+
namespace promise {
+extern macro
+CodeStubAssembler::HasAsyncEventDelegate(): bool;
+
macro
IsPromiseSpeciesLookupChainIntact(
nativeContext: NativeContext, promiseMap: Map): bool {
@@ -68,6 +76,14 @@ PromisePrototypeThen(js-implicit context: NativeContext, receiver: JSAny)(
// resultCapability).
PerformPromiseThenImpl(
promise, onFulfilled, onRejected, resultPromiseOrCapability);
+
+ // Async instrumentation for Promise#then(), Promise#catch() and
+ // Promise#finally(), where the latter two both call eventually
+ // call into Promise#then().
+ if (HasAsyncEventDelegate()) {
+ return runtime::DebugPromiseThen(resultPromise);
+ }
+
return resultPromise;
}
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index f5c3600850..11a8f5156c 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -80,11 +80,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ Sub64(scratch, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mv(scratch, argc);
- }
+ __ Sub64(scratch, argc, Operand(kJSArgcReceiverSlots));
__ Branch(&entry);
__ bind(&loop);
__ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -147,10 +143,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(kScratchReg, MacroAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver,
- kScratchReg);
+ MacroAssembler::kCountIncludesReceiver, kScratchReg);
__ Ret();
}
@@ -325,10 +318,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(a1, MacroAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver,
- a4);
+ MacroAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@@ -454,9 +444,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ Sub64(a3, a3, Operand(kJSArgcReceiverSlots));
- }
+ __ Sub64(a3, a3, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -599,7 +587,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// a1: microtask_queue
// Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
+ __ MultiPush(kCalleeSaved | ra);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
@@ -755,7 +743,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
+ __ MultiPop(kCalleeSaved | ra);
// Return.
__ Jump(ra);
}
@@ -800,11 +788,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(a2);
// Check if we have enough stack space to push all arguments.
- if (kJSArgcIncludesReceiver) {
- __ mv(a6, a4);
- } else {
- __ Add64(a6, a4, 1);
- }
+ __ mv(a6, a4);
Generate_CheckStackOverflow(masm, a6, a0, s2);
// Copy arguments to the stack.
@@ -898,10 +882,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ld(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Sll64(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
- if (!kJSArgcIncludesReceiver) {
- __ Add64(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
__ Branch(&L1, le, actual_params_size, Operand(params_size),
@@ -993,19 +973,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1122,9 +1099,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ And(scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
Label::Distance::kNear);
}
@@ -1145,7 +1121,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Include({kScratchReg, kScratchReg2});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1279,7 +1255,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
- temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Exclude({kScratchReg, kScratchReg2});
}
// Generate code for entering a JS function with the interpreter.
@@ -1499,27 +1475,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
- // Check if optimized code marker is available
- __ And(scratch, optimization_state,
- FeedbackVector::OptimizationTierBits::kMask);
- __ Branch(&maybe_has_optimized_code, ne, scratch, Operand(zero_reg),
- Label::Distance::kNear);
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ Branch(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ LoadAnyTaggedField(
- optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
-
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
@@ -1596,12 +1553,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Sub64(a0, a0, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Sub64(a3, a0, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ Add64(a3, a0, Operand(1));
} else {
__ Move(a3, a0);
}
@@ -1654,11 +1607,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// The spread argument should not be pushed.
__ Sub64(a0, a0, Operand(1));
}
- Register argc_without_receiver = a0;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = a6;
- __ Sub64(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = a6;
+ __ Sub64(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
// Push the arguments, This function modifies a4 and a5.
GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5);
@@ -2009,10 +1959,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&done1); // else (i.e., argc > 1)
__ Ld(receiver, MemOperand(sp));
- __ DropArgumentsAndPushNewReceiver(
- argc, this_arg, MacroAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_arg,
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2116,10 +2065,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Move(arguments_list, undefined_value); // if argc == 2
__ bind(&done2); // argc > 2
- __ DropArgumentsAndPushNewReceiver(
- argc, this_argument, MacroAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, this_argument,
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2184,10 +2132,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Move(new_target, target); // if argc == 2
__ bind(&done2);
- __ DropArgumentsAndPushNewReceiver(
- argc, undefined_value, MacroAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
- : MacroAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2237,21 +2184,13 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ mv(dest, sp);
__ CalcScaledAddress(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
Label loop, done;
- if (kJSArgcIncludesReceiver) {
- __ Branch(&done, ge, old_sp, Operand(end));
- } else {
- __ Branch(&done, gt, old_sp, Operand(end));
- }
+ __ Branch(&done, ge, old_sp, Operand(end));
__ bind(&loop);
__ Ld(value, MemOperand(old_sp, 0));
__ Sd(value, MemOperand(dest, 0));
__ Add64(old_sp, old_sp, Operand(kSystemPointerSize));
__ Add64(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- __ Branch(&loop, lt, old_sp, Operand(end));
- } else {
- __ Branch(&loop, le, old_sp, Operand(end));
- }
+ __ Branch(&loop, lt, old_sp, Operand(end));
__ bind(&done);
// Update total number of arguments.
@@ -2376,9 +2315,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
Label stack_done, stack_overflow;
- if (kJSArgcIncludesReceiver) {
- __ Sub64(a7, a7, Operand(kJSArgcReceiverSlots));
- }
+ __ Sub64(a7, a7, Operand(kJSArgcReceiverSlots));
__ Sub64(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
@@ -2434,7 +2371,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- a0 : the number of arguments
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(a1);
+ __ AssertCallableFunction(a1);
Label class_constructor;
__ LoadTaggedPointerField(
@@ -2662,6 +2599,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
type, Operand(JS_PROXY_TYPE));
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq, type, Operand(JS_WRAPPED_FUNCTION_TYPE));
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ Branch(&class_constructor, eq, type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
@@ -2824,25 +2766,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see kGpParamRegisters in wasm-linkage.cc).
// They might be overwritten in the runtime call below. We don't have any
// callee-saved registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
// Also push a1, because we must push multiples of 16 bytes (see
// {TurboAssembler::PushCPURegList}.
- CHECK_EQ(0, NumRegs(gp_regs) % 2);
+ CHECK_EQ(1, gp_regs.Count() % 2);
+ gp_regs.set(a1);
+ // Ensure that A1 will not be repeated.
+ CHECK_EQ(0, gp_regs.Count() % 2);
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
@@ -3151,6 +3096,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3531,7 +3486,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+ RegList saved_regs = restored_regs | sp | ra;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
@@ -3549,7 +3504,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Leave gaps for other registers.
__ Sub64(sp, sp, kNumberOfRegisters * kSystemPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ Sd(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
@@ -3600,7 +3555,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
+ if ((saved_regs.bits() & (1 << i)) != 0) {
__ Ld(a2, MemOperand(sp, i * kSystemPointerSize));
__ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
@@ -3696,13 +3651,13 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Technically restoring 't3' should work unless zero_reg is also restored
// but it's safer to check for this.
- DCHECK(!(t3.bit() & restored_regs));
+ DCHECK(!(restored_regs.has(t3)));
// Restore the registers from the last output frame.
__ Move(t3, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ Ld(ToRegister(i), MemOperand(t3, offset));
}
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 3fe9ebc683..9b328cf3fc 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -39,7 +39,7 @@ static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
Register scratch) {
DCHECK(!AreAliased(code, scratch));
// Verify that the code kind is baseline code via the CodeKind.
- __ LoadU64(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(scratch);
__ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
__ Assert(eq, AbortReason::kExpectedBaselineData);
@@ -329,11 +329,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
Register counter = scratch;
Register value = ip;
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ SubS64(counter, argc, Operand(kJSArgcReceiverSlots));
- } else {
- __ mov(counter, argc);
- }
+ __ SubS64(counter, argc, Operand(kJSArgcReceiverSlots));
__ b(&entry);
__ bind(&loop);
__ ShiftLeftU64(value, counter, Operand(kSystemPointerSizeLog2));
@@ -403,9 +399,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@@ -563,9 +557,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r3, TurboAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -661,9 +653,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
- }
+ __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -752,6 +742,7 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+ __ Trap(); // Unreachable.
}
namespace {
@@ -1030,11 +1021,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- if (kJSArgcIncludesReceiver) {
- __ mov(r7, r2);
- } else {
- __ AddS64(r7, r2, Operand(1));
- }
+ __ mov(r7, r2);
__ StackOverflowCheck(r7, r1, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -1141,10 +1128,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
- if (!kJSArgcIncludesReceiver) {
- __ AddS64(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
- }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1228,22 +1211,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ stop();
}
@@ -1335,8 +1312,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
r0);
__ beq(&maybe_has_optimized_code);
@@ -1815,12 +1791,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ SubS64(r2, r2, Operand(1));
}
- const bool skip_receiver =
- receiver_mode == ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && skip_receiver) {
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ SubS64(r5, r2, Operand(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
- __ AddS64(r5, r2, Operand(1));
} else {
__ mov(r5, r2);
}
@@ -1876,11 +1848,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ SubS64(r2, r2, Operand(1));
}
- Register argc_without_receiver = r2;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = ip;
- __ SubS64(argc_without_receiver, r2, Operand(kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = ip;
+ __ SubS64(argc_without_receiver, r2, Operand(kJSArgcReceiverSlots));
// Push the arguments. r4 and r5 will be modified.
GenerateInterpreterPushArgs(masm, argc_without_receiver, r6, r7);
@@ -2166,10 +2135,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r2, r7, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2253,10 +2220,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r2, r7, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2305,10 +2270,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- r2, r6, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2357,11 +2320,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Label loop, done;
__ bind(&loop);
__ CmpS64(old_sp, end);
- if (kJSArgcIncludesReceiver) {
- __ bge(&done);
- } else {
- __ bgt(&done);
- }
+ __ bge(&done);
__ LoadU64(value, MemOperand(old_sp));
__ lay(old_sp, MemOperand(old_sp, kSystemPointerSize));
__ StoreU64(value, MemOperand(dest));
@@ -2476,15 +2435,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ EnterFrame(StackFrame::INTERNAL);
__ Push(r5);
__ CallRuntime(Runtime::kThrowNotConstructor);
+ __ Trap(); // Unreachable.
}
__ bind(&new_target_constructor);
}
Label stack_done, stack_overflow;
__ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ SubS64(r7, r7, Operand(kJSArgcReceiverSlots));
- }
+ __ SubS64(r7, r7, Operand(kJSArgcReceiverSlots));
__ SubS64(r7, r7, r4);
__ ble(&stack_done);
{
@@ -2549,14 +2507,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(r3);
+ __ AssertCallableFunction(r3);
- Label class_constructor;
__ LoadTaggedPointerField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
- __ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2634,14 +2588,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ push(r3);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2776,6 +2722,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ CmpS64(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
@@ -2947,22 +2899,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- RegList gp_regs = 0;
+ RegList gp_regs;
for (Register gp_param_reg : wasm::kGpParamRegisters) {
- gp_regs |= gp_param_reg.bit();
+ gp_regs.set(gp_param_reg);
}
- RegList fp_regs = 0;
+ DoubleRegList fp_regs;
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- fp_regs |= fp_param_reg.bit();
+ fp_regs.set(fp_param_reg);
}
- CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
- CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- NumRegs(gp_regs));
+ gp_regs.Count());
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- NumRegs(fp_regs));
+ fp_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushF64OrV128(fp_regs);
@@ -3017,6 +2969,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ // TODO(v8:12191): Implement for this platform.
+ __ Trap();
+}
+
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
@@ -3858,7 +3820,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
+ if ((restored_regs.bits() & (1 << i)) != 0) {
__ LoadU64(ToRegister(i), MemOperand(r1, offset));
}
}
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 3153799793..499e0cb1fe 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -156,11 +156,8 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
- const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel)
- ? 0
- : argc + (kJSArgcIncludesReceiver ? 0 : 1);
- compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
- CodeKind::BUILTIN, name, builtin);
+ compiler::CodeAssemblerState state(isolate, &zone, argc, CodeKind::BUILTIN,
+ name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
@@ -198,10 +195,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
Code code) {
DCHECK_EQ(builtin, code.builtin_id());
- builtins->set_code(builtin, code);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- builtins->set_codet(builtin, ToCodeT(code));
- }
+ builtins->set_code(builtin, ToCodeT(code));
}
// static
@@ -232,7 +226,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
PtrComprCageBase cage_base(isolate);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
isolate->heap()->UnprotectAndRegisterMemoryChunk(
code, UnprotectMemoryOrigin::kMainThread);
bool flush_icache = false;
@@ -243,16 +237,16 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
Builtins::IsIsolateIndependent(target.builtin_id()));
if (!target.is_builtin()) continue;
- Code new_target = builtins->code(target.builtin_id());
+ CodeT new_target = builtins->code(target.builtin_id());
rinfo->set_target_address(new_target.raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
Object object = rinfo->target_object(cage_base);
- if (!object.IsCode(cage_base)) continue;
- Code target = Code::cast(object);
+ if (!object.IsCodeT(cage_base)) continue;
+ CodeT target = CodeT::cast(object);
if (!target.is_builtin()) continue;
- Code new_target = builtins->code(target.builtin_id());
+ CodeT new_target = builtins->code(target.builtin_id());
rinfo->set_target_object(isolate->heap(), new_target,
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
@@ -353,18 +347,13 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
ReplacePlaceholders(isolate);
+// TODO(v8:11880): avoid roundtrips between cdc and code.
#define SET_PROMISE_REJECTION_PREDICTION(Name) \
- builtins->code(Builtin::k##Name).set_is_promise_rejection(true);
+ FromCodeT(builtins->code(Builtin::k##Name)).set_is_promise_rejection(true);
BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(SET_PROMISE_REJECTION_PREDICTION)
#undef SET_PROMISE_REJECTION_PREDICTION
-#define SET_EXCEPTION_CAUGHT_PREDICTION(Name) \
- builtins->code(Builtin::k##Name).set_is_exception_caught(true);
-
- BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
-#undef SET_EXCEPTION_CAUGHT_PREDICTION
-
builtins->MarkInitialized();
}
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index b5ced876b7..0c2d68861e 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -55,7 +55,7 @@ transitioning javascript builtin StringPrototypeRepeat(
// 4. If n < 0, throw a RangeError exception.
// 5. If n is +∞, throw a RangeError exception.
- if (n == V8_INFINITY || n < 0.0) goto InvalidCount;
+ if (n == V8_INFINITY || n < 0) goto InvalidCount;
// 6. If n is 0, return the empty String.
if (s.length_uint32 == 0) goto EmptyString;
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 8765a7b8ac..ad21e24ace 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -105,6 +105,10 @@ struct Slice<T: type, Reference: type> {
return this.TryAtIndex(Convert<intptr>(index)) otherwise unreachable;
}
+ macro AtIndex(index: constexpr IntegerLiteral): Reference {
+ return this.AtIndex(FromConstexpr<uintptr>(index));
+ }
+
macro AtIndex(index: constexpr int31): Reference {
const i: intptr = Convert<intptr>(index);
return this.TryAtIndex(i) otherwise unreachable;
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 9004b32ef7..2d43be0a0f 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -155,15 +155,17 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
try {
const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
-
- if (IsDetachedBuffer(src.buffer)) {
+ let byteLength: uintptr;
+ try {
+ byteLength = LoadJSArrayBufferViewByteLength(src, src.buffer)
+ otherwise DetachedOrOutOfBounds;
+ } label DetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
-
- } else if (src.elements_kind != elementsInfo.kind) {
+ }
+ if (src.elements_kind != elementsInfo.kind) {
goto IfElementsKindMismatch(src.elements_kind);
} else if (length > 0) {
- const byteLength = typedArray.byte_length;
dcheck(byteLength <= kArrayBufferMaxByteLength);
if (IsSharedArrayBuffer(src.buffer)) {
typed_array::CallCRelaxedMemcpy(
@@ -211,16 +213,27 @@ transitioning macro ConstructByTypedArray(implicit context: Context)(
labels IfConstructByArrayLike(JSTypedArray, uintptr, JSReceiver) {
let bufferConstructor: JSReceiver = GetArrayBufferFunction();
const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
- // TODO(petermarshall): Throw on detached typedArray.
- let length: uintptr = IsDetachedBuffer(srcBuffer) ? 0 : srcTypedArray.length;
+ let length: uintptr;
+ try {
+ // TODO(petermarshall): Throw on detached typedArray.
+ length = LoadJSTypedArrayLengthAndCheckDetached(srcTypedArray)
+ otherwise DetachedOrOutOfBounds;
+ } label DetachedOrOutOfBounds {
+ length = 0;
+ }
// The spec requires that constructing a typed array using a SAB-backed
// typed array use the ArrayBuffer constructor, not the species constructor.
// See https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
if (!IsSharedArrayBuffer(srcBuffer)) {
bufferConstructor = SpeciesConstructor(srcBuffer, bufferConstructor);
- // TODO(petermarshall): Throw on detached typedArray.
- if (IsDetachedBuffer(srcBuffer)) length = 0;
+ try {
+ // TODO(petermarshall): Throw on detached typedArray.
+ length = LoadJSTypedArrayLengthAndCheckDetached(srcTypedArray)
+ otherwise DetachedOrOutOfBounds;
+ } label DetachedOrOutOfBounds {
+ length = 0;
+ }
}
goto IfConstructByArrayLike(srcTypedArray, length, bufferConstructor);
}
@@ -292,7 +305,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
// in the step 12 branch.
newByteLength = bufferByteLength - offset;
newLength = elementsInfo.CalculateLength(newByteLength)
- otherwise IfInvalidOffset;
+ otherwise IfInvalidLength;
// 12. Else,
} else {
@@ -397,7 +410,6 @@ transitioning builtin CreateTypedArray(
return ConstructByArrayBuffer(target, newTarget, buffer, arg2, arg3);
}
case (typedArray: JSTypedArray): {
- // TODO(v8:11111): Support RAB / GSAB.
ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
}
case (obj: JSReceiver): {
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index e40ff9f737..aa9966bade 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -70,63 +70,62 @@ TypedArrayPrototypeSet(
// 7. Let targetBuffer be target.[[ViewedArrayBuffer]].
// 8. If IsDetachedBuffer(targetBuffer) is true, throw a TypeError
// exception.
- const utarget = typed_array::EnsureAttached(target) otherwise IsDetached;
+ const attachedTargetAndLength = EnsureAttachedAndReadLength(target)
+ otherwise IsDetachedOrOutOfBounds;
const overloadedArg = arguments[0];
try {
- // 1. Choose 22.2.3.23.2 or 22.2.3.23.1 depending on whether the
- // overloadedArg has a [[TypedArrayName]] internal slot.
- // If it does, the definition in 22.2.3.23.2 applies.
- // If it does not, the definition in 22.2.3.23.1 applies.
+ // 1. Choose SetTypedArrayFromTypedArray or SetTypedArrayFromArrayLike
+ // depending on whether the overloadedArg has a [[TypedArrayName]]
+ // internal slot.
const typedArray =
Cast<JSTypedArray>(overloadedArg) otherwise NotTypedArray;
- // Step 9 is not observable, do it later.
+ // Step 3 is not observable, do it later.
- // 10. Let srcBuffer be typedArray.[[ViewedArrayBuffer]].
- // 11. If IsDetachedBuffer(srcBuffer) is true, throw a TypeError
+ // 4. Let srcBuffer be typedArray.[[ViewedArrayBuffer]].
+ // 5. If IsDetachedBuffer(srcBuffer) is true, throw a TypeError
// exception.
- const utypedArray =
- typed_array::EnsureAttached(typedArray) otherwise IsDetached;
-
+ const attachedSourceAndLength = EnsureAttachedAndReadLength(typedArray)
+ otherwise IsDetachedOrOutOfBounds;
TypedArrayPrototypeSetTypedArray(
- utarget, utypedArray, targetOffset, targetOffsetOverflowed)
+ attachedTargetAndLength, attachedSourceAndLength, targetOffset,
+ targetOffsetOverflowed)
otherwise OffsetOutOfBounds;
return Undefined;
} label NotTypedArray deferred {
TypedArrayPrototypeSetArray(
- utarget, overloadedArg, targetOffset, targetOffsetOverflowed)
- otherwise OffsetOutOfBounds, IsDetached;
+ target, attachedTargetAndLength.length, overloadedArg, targetOffset,
+ targetOffsetOverflowed)
+ otherwise OffsetOutOfBounds;
return Undefined;
}
} label OffsetOutOfBounds deferred {
ThrowRangeError(MessageTemplate::kTypedArraySetOffsetOutOfBounds);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSet);
}
}
-// %TypedArray%.prototype.set ( array [ , offset ] )
-// https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-array-offset
+// SetTypedArrayFromArrayLike
+// https://tc39.es/ecma262/#sec-settypedarrayfromarraylike
transitioning macro
TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
- target: JSTypedArray, arrayArg: JSAny, targetOffset: uintptr,
- targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds,
- IfDetached {
- // Steps 9-13 are not observable, do them later.
+ target: JSTypedArray, targetLength: uintptr, arrayArg: JSAny,
+ targetOffset: uintptr,
+ targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds {
+ // Steps 3-7 are not observable, do them later.
- // 14. Let src be ? ToObject(array).
+ // 8. Let src be ? ToObject(source).
const src: JSReceiver = ToObject_Inline(context, arrayArg);
- // 15. Let srcLength be ? LengthOfArrayLike(src).
+ // 9. Let srcLength be ? LengthOfArrayLike(src).
const srcLengthNum: Number = GetLengthProperty(src);
+ // 10. If targetOffset is +∞, throw a RangeError exception.
if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
- // 9. Let targetLength be target.[[ArrayLength]].
- const targetLength = target.length;
-
- // 16. If srcLength + targetOffset > targetLength, throw a RangeError
+ // 11. If srcLength + targetOffset > targetLength, throw a RangeError
// exception.
const srcLength = ChangeSafeIntegerNumberToUintPtr(srcLengthNum)
otherwise IfOffsetOutOfBounds;
@@ -137,10 +136,10 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
// to do with the empty source array.
if (srcLength == 0) return;
- // 10. Let targetName be the String value of target.[[TypedArrayName]].
- // 11. Let targetElementSize be the Element Size value specified in
- // Table 62 for targetName.
- // 12. Let targetType be the Element Type value in Table 62 for
+ // 4. Let targetName be the String value of target.[[TypedArrayName]].
+ // 5. Let targetElementSize be the Element Size value specified in
+ // Table 69 for targetName.
+ // 6. Let targetType be the Element Type value in Table 69 for
// targetName.
try {
@@ -161,7 +160,10 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
IsElementsKindInRange(
srcKind, ElementsKind::PACKED_DOUBLE_ELEMENTS,
ElementsKind::HOLEY_DOUBLE_ELEMENTS)) {
- const utarget = typed_array::EnsureAttached(target) otherwise IfDetached;
+ // If the source is a JSArray (no custom length getter or elements
+ // getter), there's nothing that could detach or resize the target, so
+ // it's always non-detached here. Also we don't need to reload the length.
+ const utarget = typed_array::EnsureAttached(target) otherwise unreachable;
CallCCopyFastNumberJSArrayElementsToTypedArray(
context, fastSrc, utarget, srcLength, targetOffset);
@@ -174,56 +176,56 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
}
}
-// %TypedArray%.prototype.set ( typedArray [ , offset ] )
-// https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-typedarray-offset
+// SetTypedArrayFromTypedArray
+// https://tc39.es/ecma262/#sec-settypedarrayfromtypedarray
transitioning macro
TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
- target: AttachedJSTypedArray, typedArray: AttachedJSTypedArray,
+ attachedTargetAndLength: AttachedJSTypedArrayAndLength,
+ attachedSourceAndLength: AttachedJSTypedArrayAndLength,
targetOffset: uintptr,
targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds {
- // Steps 12-20 are not observable, so we can handle offset overflow
- // at step 21 here.
+ // Steps 6-14 are not observable, so we can handle offset overflow
+ // at step 15 here.
if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
- // 9. Let targetLength be target.[[ArrayLength]].
- const targetLength = target.length;
-
- // 19. Let srcLength be typedArray.[[ArrayLength]].
- const srcLength: uintptr = typedArray.length;
+ // 3. Let targetLength be IntegerIndexedObjectLength(target).
+ const target = attachedTargetAndLength.array;
+ const targetLength = attachedTargetAndLength.length;
- // Steps 12-20 are not observable, so we can do step 21 here.
+ // 13. Let srcLength be IntegerIndexedObjectLength(source).
+ const source = attachedSourceAndLength.array;
+ const srcLength = attachedSourceAndLength.length;
- // 21. If srcLength + targetOffset > targetLength, throw a RangeError
+ // 16. If srcLength + targetOffset > targetLength, throw a RangeError
// exception.
CheckIntegerIndexAdditionOverflow(srcLength, targetOffset, targetLength)
otherwise IfOffsetOutOfBounds;
- // 12. Let targetName be the String value of target.[[TypedArrayName]].
- // 13. Let targetType be the Element Type value in Table 62 for
- // targetName.
- // 14. Let targetElementSize be the Element Size value specified in
+ // 6. Let targetName be the String value of target.[[TypedArrayName]].
+ // 7. Let targetType be the Element Type value in Table 62 for
+ // targetName.
+ // 8. Let targetElementSize be the Element Size value specified in
// Table 62 for targetName.
const targetElementsInfo = GetTypedArrayElementsInfo(target);
- // 16. Let srcName be the String value of typedArray.[[TypedArrayName]].
- // 17. Let srcType be the Element Type value in Table 62 for srcName.
- // 18. Let srcElementSize be the Element Size value specified in
+ // 10. Let srcName be the String value of source.[[TypedArrayName]].
+ // 11. Let srcType be the Element Type value in Table 62 for srcName.
+ // 12. Let srcElementSize be the Element Size value specified in
// Table 62 for srcName.
- const srcKind: ElementsKind = typedArray.elements_kind;
- // const srcElementsInfo = GetTypedArrayElementsInfo(typedArray);
+ const srcKind: ElementsKind = source.elements_kind;
- // We skip steps 23-25 because both memmove and
+ // We skip steps 18-20 because both memmove and
// CopyTypedArrayElementsToTypedArray() properly handle overlapping
// regions.
- // 23. If both IsSharedArrayBuffer(srcBuffer) and
+ // 18. If both IsSharedArrayBuffer(srcBuffer) and
// IsSharedArrayBuffer(targetBuffer) are true, then
- // 23a. If srcBuffer.[[ArrayBufferData]] and
+ // a. If srcBuffer.[[ArrayBufferData]] and
// targetBuffer.[[ArrayBufferData]] are the same Shared Data Block
// values, let same be true; else let same be false.
- // 24. Else, let same be SameValue(srcBuffer, targetBuffer).
- // 25. If same is true, then
- // a. Let srcByteLength be typedArray.[[ByteLength]].
+ // 19. Else, let same be SameValue(srcBuffer, targetBuffer).
+ // 20. If same is true, then
+ // a. Let srcByteLength be source.[[ByteLength]].
// b. Set srcBuffer to ? CloneArrayBuffer(srcBuffer, srcByteOffset,
// srcByteLength, %ArrayBuffer%).
// c. NOTE: %ArrayBuffer% is used to clone srcBuffer because is it known
@@ -232,6 +234,8 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
try {
// Use memmove if possible.
+ // TODO(v8:11111): Enable fast copying between a RAB/GSAB element kind and
+ // the corresponding non-RAB/GSAB element kind.
if (srcKind != targetElementsInfo.kind) {
// Uint8/Uint8Clamped elements could still be copied with memmove.
if (!IsUint8ElementsKind(srcKind) ||
@@ -255,10 +259,19 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
otherwise unreachable;
const dstPtr: RawPtr = target.data_ptr + Convert<intptr>(startOffset);
- dcheck(countBytes <= target.byte_length - startOffset);
- dcheck(countBytes <= typedArray.byte_length);
+ // We've already checked for detachedness, and there's nothing that could've
+ // detached the buffers until here.
+ @if(DEBUG) {
+ const targetByteLength = LoadJSArrayBufferViewByteLength(
+ target, target.buffer) otherwise unreachable;
+ const sourceByteLength = LoadJSArrayBufferViewByteLength(
+ source, source.buffer) otherwise unreachable;
+
+ dcheck(countBytes <= targetByteLength - startOffset);
+ dcheck(countBytes <= sourceByteLength);
+ }
- // 29. If srcType is the same as targetType, then
+ // 24. If srcType is the same as targetType, then
// a. NOTE: If srcType and targetType are the same, the transfer must
// be performed in a manner that preserves the bit-level encoding of
// the source data.
@@ -271,13 +284,13 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
// iv. Set targetByteIndex to targetByteIndex + 1.
if (IsSharedArrayBuffer(target.buffer)) {
// SABs need a relaxed memmove to preserve atomicity.
- CallCRelaxedMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ CallCRelaxedMemmove(dstPtr, source.data_ptr, countBytes);
} else {
- CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ CallCMemmove(dstPtr, source.data_ptr, countBytes);
}
} label IfSlow deferred {
- // 22. If target.[[ContentType]] is not equal to
- // typedArray.[[ContentType]], throw a TypeError exception.
+ // 17. If target.[[ContentType]] is not equal to
+ // source.[[ContentType]], throw a TypeError exception.
if (IsBigInt64ElementsKind(srcKind) !=
IsBigInt64ElementsKind(targetElementsInfo.kind))
deferred {
@@ -288,7 +301,7 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
// to do with the empty source array.
if (srcLength == 0) return;
- // 30. Else,
+ // 25. Else,
// a. Repeat, while targetByteIndex < limit
// i. Let value be GetValueFromBuffer(srcBuffer, srcByteIndex,
// srcType, true, Unordered).
@@ -297,7 +310,7 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
// iii. Set srcByteIndex to srcByteIndex + srcElementSize.
// iv. Set targetByteIndex to targetByteIndex + targetElementSize.
CallCCopyTypedArrayElementsToTypedArray(
- typedArray, target, srcLength, targetOffset);
+ source, target, srcLength, targetOffset);
}
}
}
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index 1487d1396f..868af426b5 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -16,14 +16,24 @@ transitioning macro CallCompare(
const v: Number = ToNumber_Inline(Call(context, comparefn, Undefined, a, b));
// b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(array.buffer)) {
+ // c. Let getBufferByteLength be
+ // MakeIdempotentArrayBufferByteLengthGetter(SeqCst).
+ // d. If IsIntegerIndexedObjectOutOfBounds(obj, getBufferByteLength) is true,
+ // throw a TypeError exception.
+ // TODO(v8:11111): Update this, depending on how
+ // https://github.com/tc39/ecma262/pull/2646#issuecomment-1067456576 gets
+ // resolved.
+ try {
+ LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise DetachedOrOutOfBounds;
+ } label DetachedOrOutOfBounds {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSort);
}
- // c. If v is NaN, return +0.
+ // e. If v is NaN, return +0.
if (NumberIsNaN(v)) return 0;
- // d. return v.
+ // f. return v.
return v;
}
@@ -99,12 +109,10 @@ transitioning javascript builtin TypedArrayPrototypeSort(
const obj: JSAny = receiver;
// 3. Let buffer be ? ValidateTypedArray(obj).
- // ValidateTypedArray currently returns the array, not the ViewBuffer.
- const array: JSTypedArray =
- ValidateTypedArray(context, obj, kBuiltinNameSort);
-
- // 4. Let len be obj.[[ArrayLength]].
- const len: uintptr = array.length;
+ // 4. Let len be IntegerIndexedObjectLength(obj).
+ let len: uintptr =
+ ValidateTypedArrayAndGetLength(context, obj, kBuiltinNameSort);
+ const array: JSTypedArray = UnsafeCast<JSTypedArray>(obj);
// Arrays of length 1 or less are considered sorted.
if (len < 2) return array;
@@ -141,6 +149,19 @@ transitioning javascript builtin TypedArrayPrototypeSort(
TypedArrayMergeSort(work2, 0, len, work1, array, comparefn);
+ // Reload the length; it's possible the backing ArrayBuffer has been resized.
+ // It cannot be OOB here though, since we've checked it as part of the
+ // comparison function.
+
+ // TODO(v8:11111): Update this, depending on how
+ // https://github.com/tc39/ecma262/pull/2646#issuecomment-1067456576 gets
+ // resolved.
+ const newLen =
+ LoadJSTypedArrayLengthAndCheckDetached(array) otherwise unreachable;
+ if (newLen < len) {
+ len = newLen;
+ }
+
// work1 contains the sorted numbers. Write them back.
for (let i: uintptr = 0; i < len; ++i) {
accessor.StoreNumeric(
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
index 46ce383b50..85ff74dc9c 100644
--- a/deps/v8/src/builtins/typed-array-subarray.tq
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -20,42 +20,49 @@ transitioning javascript builtin TypedArrayPrototypeSubArray(
const buffer = typed_array::GetTypedArrayBuffer(source);
// 6. Let srcLength be O.[[ArrayLength]].
- const srcLength: uintptr = source.length;
+ let srcLength: uintptr;
+ try {
+ srcLength = LoadJSTypedArrayLengthAndCheckDetached(source)
+ otherwise DetachedOrOutOfBounds;
+ } label DetachedOrOutOfBounds {
+ // 7. If srcLength is out-of-bounds, set srcLength to 0.
+ srcLength = 0;
+ }
- // 7. Let relativeBegin be ? ToInteger(begin).
- // 8. If relativeBegin < 0, let beginIndex be max((srcLength +
+ // 8. Let relativeBegin be ? ToInteger(begin).
+ // 9. If relativeBegin < 0, let beginIndex be max((srcLength +
// relativeBegin), 0); else let beginIndex be min(relativeBegin,
// srcLength).
const arg0 = arguments[0];
const begin: uintptr =
arg0 != Undefined ? ConvertToRelativeIndex(arg0, srcLength) : 0;
- // 9. If end is undefined, let relativeEnd be srcLength;
+ // 10. If end is undefined, let relativeEnd be srcLength;
// else, let relativeEnd be ? ToInteger(end).
- // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd),
+ // 11. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd),
// 0); else let endIndex be min(relativeEnd, srcLength).
const arg1 = arguments[1];
const end: uintptr =
arg1 != Undefined ? ConvertToRelativeIndex(arg1, srcLength) : srcLength;
- // 11. Let newLength be max(endIndex - beginIndex, 0).
+ // 12. Let newLength be max(endIndex - beginIndex, 0).
const newLength: uintptr = Unsigned(IntPtrMax(Signed(end - begin), 0));
- // 12. Let constructorName be the String value of O.[[TypedArrayName]].
- // 13. Let elementSize be the Number value of the Element Size value
+ // 13. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 14. Let elementSize be the Number value of the Element Size value
// specified in Table 52 for constructorName.
const elementsInfo = typed_array::GetTypedArrayElementsInfo(source);
- // 14. Let srcByteOffset be O.[[ByteOffset]].
+ // 15. Let srcByteOffset be O.[[ByteOffset]].
const srcByteOffset: uintptr = source.byte_offset;
- // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
+ // 16. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
const beginByteOffset =
srcByteOffset + elementsInfo.CalculateByteLength(begin)
otherwise ThrowRangeError(MessageTemplate::kInvalidArrayBufferLength);
- // 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
- // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ // 17. Let argumentsList be « buffer, beginByteOffset, newLength ».
+ // 18. Return ? TypedArraySpeciesCreate(O, argumentsList).
return TypedArraySpeciesCreateByBuffer(
methodName, source, buffer, beginByteOffset, newLength);
}
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index c242851de2..8708238cbc 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -182,6 +182,8 @@ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
JSTypedArray, RawPtr, uintptr): void;
extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView):
never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+extern macro IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean(JSArrayBufferView):
+ bool;
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index cfe17018ba..0c0bfea7e3 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -34,9 +34,10 @@ extern runtime WasmI32AtomicWait(
Context, WasmInstanceObject, Number, Number, BigInt): Smi;
extern runtime WasmI64AtomicWait(
Context, WasmInstanceObject, Number, BigInt, BigInt): Smi;
-extern runtime WasmAllocateRtt(Context, Smi, Map, Smi): Map;
extern runtime WasmArrayCopy(
Context, WasmArray, Smi, WasmArray, Smi, Smi): JSAny;
+extern runtime WasmArrayInitFromData(
+ Context, WasmInstanceObject, Smi, Smi, Smi, Map): Object;
}
namespace unsafe {
@@ -47,14 +48,10 @@ extern macro AllocateWasmArray(
}
namespace wasm {
-const kExternTableType: constexpr int31
- generates 'wasm::kWasmExternRef.raw_bit_field()';
-const kExternNonNullTableType: constexpr int31
- generates 'wasm::kWasmExternNonNullableRef.raw_bit_field()';
-
-const kRttSubCanonicalize: constexpr int31
- generates 'WasmRttSubMode::kCanonicalize';
-const kRttSubFresh: constexpr int31 generates 'WasmRttSubMode::kFresh';
+const kAnyTableType: constexpr int31
+ generates 'wasm::kWasmAnyRef.raw_bit_field()';
+const kAnyNonNullTableType: constexpr int31
+ generates 'wasm::kWasmAnyNonNullableRef.raw_bit_field()';
extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject;
@@ -77,6 +74,18 @@ builtin WasmInt32ToHeapNumber(val: int32): HeapNumber {
return AllocateHeapNumberWithValue(Convert<float64>(val));
}
+builtin WasmFuncRefToJS(val: WasmInternalFunction|Null): JSFunction|Null|
+ Undefined {
+ typeswitch (val) {
+ case (Null): {
+ return Null;
+ }
+ case (func: WasmInternalFunction): {
+ return func.external;
+ }
+ }
+}
+
builtin WasmTaggedNonSmiToInt32(implicit context: Context)(val: JSAnyNotSmi):
int32 {
return ChangeTaggedNonSmiToInt32(val);
@@ -204,8 +213,8 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
// function dispatch tables.
// TODO(7748): Update this if further table types are supported.
const tableType: Smi = table.raw_type;
- if (tableType != SmiConstant(kExternTableType) &&
- tableType != SmiConstant(kExternNonNullTableType)) {
+ if (tableType != SmiConstant(kAnyTableType) &&
+ tableType != SmiConstant(kAnyNonNullTableType)) {
goto CallRuntime;
}
@@ -292,18 +301,6 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
}
-builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
- tail runtime::WasmAllocateRtt(
- LoadContextFromFrame(), SmiTag(typeIndex), parent,
- SmiConstant(kRttSubCanonicalize));
-}
-
-builtin WasmAllocateFreshRtt(typeIndex: intptr, parent: Map): Map {
- tail runtime::WasmAllocateRtt(
- LoadContextFromFrame(), SmiTag(typeIndex), parent,
- SmiConstant(kRttSubFresh));
-}
-
builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
const typeInfo: WasmTypeInfo = %RawDownCast<WasmTypeInfo>(
rtt.constructor_or_back_pointer_or_native_context);
@@ -352,6 +349,14 @@ builtin WasmAllocateArray_InitNull(
rtt, length, elementSize, InitializationMode::kInitializeToNull);
}
+builtin WasmArrayInitFromData(
+ dataSegment: uint32, offset: uint32, length: uint32, rtt: Map): Object {
+ const instance = LoadInstanceFromFrame();
+ tail runtime::WasmArrayInitFromData(
+ LoadContextFromInstance(instance), instance, SmiFromUint32(dataSegment),
+ SmiFromUint32(offset), SmiFromUint32(length), rtt);
+}
+
// We put all uint32 parameters at the beginning so that they are assigned to
// registers.
builtin WasmArrayCopyWithChecks(
@@ -626,8 +631,8 @@ builtin ThrowWasmTrapFuncSigMismatch(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncSigMismatch));
}
-builtin ThrowWasmTrapDataSegmentDropped(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDataSegmentDropped));
+builtin ThrowWasmTrapDataSegmentOutOfBounds(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDataSegmentOutOfBounds));
}
builtin ThrowWasmTrapElemSegmentDropped(): JSAny {
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 125614fa3d..9ffd1ea2be 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -94,11 +94,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
Register counter = scratch;
Label loop, entry;
- if (kJSArgcIncludesReceiver) {
- __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
- } else {
- __ movq(counter, argc);
- }
+ __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
__ jmp(&entry);
__ bind(&loop);
Operand value(array, counter, times_system_pointer_size, 0);
@@ -161,9 +157,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ ret(0);
@@ -317,9 +311,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -484,7 +476,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
- Handle<Code> trampoline_code =
+ Handle<CodeT> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -657,9 +649,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(r9);
// Invoke the builtin code.
- Handle<Code> builtin = is_construct
- ? BUILTIN_CODE(masm->isolate(), Construct)
- : masm->isolate()->builtins()->Call();
+ Handle<CodeT> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the internal frame. Notice that this also removes the empty
@@ -684,19 +676,19 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
- Register code, Register scratch) {
+static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
// Verify that the code kind is baseline code via the CodeKind.
- __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(scratch);
+ __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
+ __ DecodeField<CodeT::KindField>(scratch);
__ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
-static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
- Register scratch) {
+static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
DCHECK(!AreAliased(code, scratch));
- return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+ return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
}
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -711,12 +703,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
if (FLAG_debug_code) {
Label not_baseline;
__ j(not_equal, &not_baseline);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
- AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
- } else {
- AssertCodeIsBaseline(masm, sfi_data, scratch1);
- }
+ AssertCodeTIsBaseline(masm, sfi_data, scratch1);
__ j(equal, is_baseline);
__ bind(&not_baseline);
} else {
@@ -797,9 +784,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
- if (kJSArgcIncludesReceiver) {
- __ decq(rcx);
- }
+ __ decq(rcx); // Exclude receiver.
__ LoadTaggedPointerField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -913,19 +898,18 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
ASM_CODE_COMMENT(masm);
Register params_size = scratch1;
- // Get the size of the formal parameters + receiver (in bytes).
+ // Get the size of the formal parameters (in bytes).
__ movq(params_size,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movl(params_size,
FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
- // Compute the size of the actual parameters + receiver (in bytes).
+ // Compute the size of the actual parameters (in bytes).
__ movq(actual_params_size,
Operand(rbp, StandardFrameConstants::kArgCOffset));
__ leaq(actual_params_size,
- Operand(actual_params_size, times_system_pointer_size,
- kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
+ Operand(actual_params_size, times_system_pointer_size, 0));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -968,22 +952,23 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileMaglev_NotConcurrent,
+ Runtime::kCompileMaglev_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
+ OptimizationMarker::kCompileMaglev_Concurrent,
+ Runtime::kCompileMaglev_Concurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimization_marker,
+ OptimizationMarker::kCompileTurbofan_NotConcurrent,
+ Runtime::kCompileTurbofan_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ OptimizationMarker::kCompileTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
- // Marker should be one of LogFirstExecution / CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
+ // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+ // InOptimizationQueue and None shouldn't reach here.
if (FLAG_debug_code) {
__ int3();
}
@@ -1146,9 +1131,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code;
- __ testl(
- optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ testl(optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
@@ -1448,14 +1432,9 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ decl(rax);
}
- int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ __ movl(rcx, rax);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- argc_modification -= 1;
- }
- if (argc_modification != 0) {
- __ leal(rcx, Operand(rax, argc_modification));
- } else {
- __ movl(rcx, rax);
+ __ decl(rcx); // Exclude receiver.
}
// Add a stack check before pushing arguments.
@@ -1526,11 +1505,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
// rcx and r8 will be modified.
- Register argc_without_receiver = rax;
- if (kJSArgcIncludesReceiver) {
- argc_without_receiver = r11;
- __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
- }
+ Register argc_without_receiver = r11;
+ __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
// Push slot for the receiver to be constructed.
@@ -1551,8 +1527,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// context at this point).
__ AssertFunction(rdi);
// Jump to the constructor function (rax, rbx, rdx passed on).
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor (rax, rdx, rdi passed on).
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1879,7 +1855,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// from LAZY is always the last argument.
__ movq(Operand(rsp, rax, times_system_pointer_size,
BuiltinContinuationFrameConstants::kFixedFrameSize -
- (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
+ kJSArgcReceiverSlots * kSystemPointerSize),
kScratchRegister);
}
__ movq(
@@ -1963,10 +1939,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(
- rax, rdx, rcx, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2023,13 +1998,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- if (kJSArgcIncludesReceiver) {
- __ cmpq(rax, Immediate(JSParameterCount(0)));
- __ j(greater, &done, Label::kNear);
- } else {
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
- }
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
__ PushRoot(RootIndex::kUndefinedValue);
__ incq(rax);
__ bind(&done);
@@ -2074,10 +2044,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(
- rax, rdx, rcx, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2129,8 +2098,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2175,13 +2143,6 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
__ AllocateStackSpace(new_space);
Register copy_count = argc_in_out;
- if (!kJSArgcIncludesReceiver) {
- // We have a spare register, so use it instead of clobbering argc.
- // lea + add (to add the count to argc in the end) uses 1 less byte than
- // inc + lea (with base, index and disp), at the cost of 1 extra register.
- copy_count = scratch1;
- __ leaq(copy_count, Operand(argc_in_out, 1)); // Include the receiver.
- }
Register current = scratch2;
Register value = kScratchRegister;
@@ -2212,7 +2173,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+ Handle<CodeT> code) {
// ----------- S t a t e -------------
// -- rdi : target
// -- rax : number of parameters on the stack
@@ -2283,7 +2244,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<Code> code) {
+ Handle<CodeT> code) {
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the new target (for [[Construct]] calls)
@@ -2311,9 +2272,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
- if (kJSArgcIncludesReceiver) {
- __ decq(r8);
- }
+ __ decq(r8); // Exclude receiver.
__ subl(r8, rcx);
__ j(less_equal, &stack_done);
{
@@ -2377,14 +2336,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
StackArgumentsAccessor args(rax);
- __ AssertFunction(rdi);
+ __ AssertCallableFunction(rdi);
- Label class_constructor;
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the shared function info.
@@ -2469,14 +2424,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2618,6 +2565,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET,
equal);
+ // Check if target is a wrapped function and call CallWrappedFunction external
+ // builtin
+ __ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
+ RelocInfo::CODE_TARGET, equal);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
@@ -2799,6 +2752,10 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
__ leave();
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
+ }
+
// Load deoptimization data from the code object.
__ LoadTaggedPointerField(
rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2888,19 +2845,19 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
- for (int reg_code : base::bits::IterateBitsBackwards(
- WasmDebugBreakFrameConstants::kPushedGpRegs)) {
- __ Push(Register::from_code(reg_code));
+ for (Register reg :
+ base::Reversed(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(reg);
}
constexpr int kFpStackSize =
kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
__ AllocateStackSpace(kFpStackSize);
int offset = kFpStackSize;
- for (int reg_code : base::bits::IterateBitsBackwards(
- WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ for (DoubleRegister reg :
+ base::Reversed(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
offset -= kSimd128Size;
- __ movdqu(Operand(rsp, offset), DoubleRegister::from_code(reg_code));
+ __ movdqu(Operand(rsp, offset), reg);
}
// Initialize the JavaScript context with 0. CEntry will use it to
@@ -2909,15 +2866,13 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ CallRuntime(Runtime::kWasmDebugBreak, 0);
// Restore registers.
- for (int reg_code :
- base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
- __ movdqu(DoubleRegister::from_code(reg_code), Operand(rsp, offset));
+ for (DoubleRegister reg : WasmDebugBreakFrameConstants::kPushedFpRegs) {
+ __ movdqu(reg, Operand(rsp, offset));
offset += kSimd128Size;
}
__ addq(rsp, Immediate(kFpStackSize));
- for (int reg_code :
- base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
- __ Pop(Register::from_code(reg_code));
+ for (Register reg : WasmDebugBreakFrameConstants::kPushedGpRegs) {
+ __ Pop(reg);
}
}
@@ -2967,43 +2922,154 @@ void RestoreAfterBuiltinCall(MacroAssembler* masm, Register function_data,
__ popq(param_limit);
__ popq(current_param);
}
-} // namespace
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // Set up the stackframe.
- __ EnterFrame(StackFrame::JS_TO_WASM);
+void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc) {
+ __ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp);
+ __ movq(MemOperand(jmpbuf, wasm::kJmpBufFpOffset), rbp);
+ __ movq(kScratchRegister,
+ __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
+ __ movq(MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset), kScratchRegister);
+ __ leaq(kScratchRegister, MemOperand(pc, 0));
+ __ movq(MemOperand(jmpbuf, wasm::kJmpBufPcOffset), kScratchRegister);
+}
- // -------------------------------------------
- // Compute offsets and prepare for GC.
- // -------------------------------------------
- // The number of parameters passed to this function.
- constexpr int kInParamCountOffset =
- BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - kSystemPointerSize;
- // The number of parameters according to the signature.
- constexpr int kParamCountOffset = kInParamCountOffset - kSystemPointerSize;
- constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
- constexpr int kValueTypesArrayStartOffset =
- kReturnCountOffset - kSystemPointerSize;
- // We set and use this slot only when moving parameters into the parameter
- // registers (so no GC scan is needed).
- constexpr int kFunctionDataOffset =
- kValueTypesArrayStartOffset - kSystemPointerSize;
- constexpr int kLastSpillOffset = kFunctionDataOffset;
- constexpr int kNumSpillSlots = 6;
- __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
- // Put the in_parameter count on the stack, we only need it at the very end
- // when we pop the parameters off the stack.
- Register in_param_count = rax;
- if (kJSArgcIncludesReceiver) {
- __ decq(in_param_count);
+void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc) {
+ __ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+ __ movq(rbp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
+ if (load_pc) {
+ __ jmp(MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
}
- __ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
- in_param_count = no_reg;
+ // The stack limit is set separately under the ExecutionAccess lock.
+}
- // -------------------------------------------
- // Load the Wasm exported function data and the Wasm instance.
- // -------------------------------------------
- Register closure = rdi;
+void SaveState(MacroAssembler* masm, Register active_continuation, Register tmp,
+ Label* suspend) {
+ Register foreign_jmpbuf = tmp;
+ __ LoadAnyTaggedField(
+ foreign_jmpbuf,
+ FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
+ Register jmpbuf = foreign_jmpbuf;
+ __ LoadExternalPointerField(
+ jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, kScratchRegister);
+ FillJumpBuffer(masm, jmpbuf, suspend);
+}
+
+// Returns the new continuation in rax.
+void AllocateContinuation(MacroAssembler* masm, Register function_data,
+ Register wasm_instance) {
+ Register suspender = kScratchRegister;
+ __ LoadAnyTaggedField(
+ suspender,
+ FieldOperand(function_data, WasmExportedFunctionData::kSuspenderOffset));
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
+ __ Move(GCScanSlotPlace, 3);
+ __ Push(wasm_instance);
+ __ Push(function_data);
+ __ Push(suspender); // Argument.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmAllocateContinuation);
+ __ Pop(function_data);
+ __ Pop(wasm_instance);
+ STATIC_ASSERT(kReturnRegister0 == rax);
+ suspender = no_reg;
+}
+
+void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation) {
+ Register foreign_jmpbuf = target_continuation;
+ __ LoadAnyTaggedField(
+ foreign_jmpbuf,
+ FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset));
+ Register target_jmpbuf = foreign_jmpbuf;
+ __ LoadExternalPointerField(
+ target_jmpbuf,
+ FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, kScratchRegister);
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
+ __ Move(GCScanSlotPlace, 0);
+ // Switch stack!
+ LoadJumpBuffer(masm, target_jmpbuf, false);
+}
+
+void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
+ Register return_reg, Register tmp1,
+ Register tmp2) {
+ Register active_continuation = tmp1;
+ __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
+
+ // Set a null pointer in the jump buffer's SP slot to indicate to the stack
+ // frame iterator that this stack is empty.
+ Register foreign_jmpbuf = kScratchRegister;
+ __ LoadAnyTaggedField(
+ foreign_jmpbuf,
+ FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
+ Register jmpbuf = foreign_jmpbuf;
+ __ LoadExternalPointerField(
+ jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, tmp2);
+ __ movq(Operand(jmpbuf, wasm::kJmpBufSpOffset), Immediate(kNullAddress));
+
+ Register parent = tmp2;
+ __ LoadAnyTaggedField(
+ parent,
+ FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));
+
+ // Update active continuation root.
+ __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), parent);
+ foreign_jmpbuf = tmp1;
+ __ LoadAnyTaggedField(
+ foreign_jmpbuf,
+ FieldOperand(parent, WasmContinuationObject::kJmpbufOffset));
+ jmpbuf = foreign_jmpbuf;
+ __ LoadExternalPointerField(
+ jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, tmp2);
+
+ // Switch stack!
+ LoadJumpBuffer(masm, jmpbuf, false);
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
+ __ Move(GCScanSlotPlace, 1);
+ __ Push(return_reg);
+ __ Push(wasm_instance); // Spill.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmSyncStackLimit);
+ __ Pop(wasm_instance);
+ __ Pop(return_reg);
+}
+
+void RestoreParentSuspender(MacroAssembler* masm) {
+ Register suspender = kScratchRegister;
+ __ LoadRoot(suspender, RootIndex::kActiveSuspender);
+ __ LoadAnyTaggedField(
+ suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
+ __ CompareRoot(suspender, RootIndex::kUndefinedValue);
+ Label undefined;
+ __ j(equal, &undefined, Label::kNear);
+#ifdef DEBUG
+ // Check that the parent suspender is inactive.
+ Label parent_inactive;
+ Register state = rbx;
+ __ LoadTaggedSignedField(
+ state, FieldOperand(suspender, WasmSuspenderObject::kStateOffset));
+ __ SmiCompare(state, Smi::FromInt(WasmSuspenderObject::Inactive));
+ __ j(equal, &parent_inactive, Label::kNear);
+ __ Trap();
+ __ bind(&parent_inactive);
+#endif
+ __ StoreTaggedSignedField(
+ FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
+ Smi::FromInt(WasmSuspenderObject::State::Active));
+ __ bind(&undefined);
+ __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
+}
+
+void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
+ Register function_data,
+ Register wasm_instance) {
+ Register closure = function_data;
Register shared_function_info = closure;
__ LoadAnyTaggedField(
shared_function_info,
@@ -3011,40 +3077,22 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
closure = no_reg;
- Register function_data = shared_function_info;
__ LoadAnyTaggedField(
function_data,
MemOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
shared_function_info = no_reg;
- Register wasm_instance = rsi;
__ LoadAnyTaggedField(
wasm_instance,
MemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
+}
- // -------------------------------------------
- // Decrement the budget of the generic wrapper in function data.
- // -------------------------------------------
- __ SmiAddConstant(
- MemOperand(function_data, WasmExportedFunctionData::kWrapperBudgetOffset -
- kHeapObjectTag),
- Smi::FromInt(-1));
-
- // -------------------------------------------
- // Check if the budget of the generic wrapper reached 0 (zero).
- // -------------------------------------------
- // Instead of a specific comparison, we can directly use the flags set
- // from the previous addition.
- Label compile_wrapper, compile_wrapper_done;
- __ j(less_equal, &compile_wrapper);
- __ bind(&compile_wrapper_done);
-
- // -------------------------------------------
- // Load values from the signature.
- // -------------------------------------------
- Register foreign_signature = r11;
+void LoadValueTypesArray(MacroAssembler* masm, Register function_data,
+ Register valuetypes_array_ptr, Register return_count,
+ Register param_count) {
+ Register foreign_signature = valuetypes_array_ptr;
__ LoadAnyTaggedField(
foreign_signature,
MemOperand(function_data,
@@ -3055,16 +3103,145 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
kForeignForeignAddressTag, kScratchRegister);
foreign_signature = no_reg;
- Register return_count = r8;
__ movq(return_count,
MemOperand(signature, wasm::FunctionSig::kReturnCountOffset));
- Register param_count = rcx;
__ movq(param_count,
MemOperand(signature, wasm::FunctionSig::kParameterCountOffset));
- Register valuetypes_array_ptr = signature;
+ valuetypes_array_ptr = signature;
__ movq(valuetypes_array_ptr,
MemOperand(signature, wasm::FunctionSig::kRepsOffset));
- signature = no_reg;
+}
+
+void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
+ // Set up the stackframe.
+ __ EnterFrame(stack_switch ? StackFrame::STACK_SWITCH
+ : StackFrame::JS_TO_WASM);
+
+ // -------------------------------------------
+ // Compute offsets and prepare for GC.
+ // -------------------------------------------
+ constexpr int kGCScanSlotCountOffset =
+ BuiltinWasmWrapperConstants::kGCScanSlotCountOffset;
+ // The number of parameters passed to this function.
+ constexpr int kInParamCountOffset =
+ BuiltinWasmWrapperConstants::kInParamCountOffset;
+ // The number of parameters according to the signature.
+ constexpr int kParamCountOffset =
+ BuiltinWasmWrapperConstants::kParamCountOffset;
+ constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
+ constexpr int kValueTypesArrayStartOffset =
+ kReturnCountOffset - kSystemPointerSize;
+ // A boolean flag to check if one of the parameters is a reference. If so, we
+ // iterate over the parameters two times, first for all value types, and then
+ // for all references.
+ constexpr int kHasRefTypesOffset =
+ kValueTypesArrayStartOffset - kSystemPointerSize;
+ // We set and use this slot only when moving parameters into the parameter
+ // registers (so no GC scan is needed).
+ constexpr int kFunctionDataOffset = kHasRefTypesOffset - kSystemPointerSize;
+ constexpr int kLastSpillOffset = kFunctionDataOffset;
+ constexpr int kNumSpillSlots = 7;
+ __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
+ // Put the in_parameter count on the stack, we only need it at the very end
+ // when we pop the parameters off the stack.
+ Register in_param_count = rax;
+ __ decq(in_param_count); // Exclude receiver.
+ __ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
+ in_param_count = no_reg;
+
+ Register function_data = rdi;
+ Register wasm_instance = rsi;
+ LoadFunctionDataAndWasmInstance(masm, function_data, wasm_instance);
+
+ Label compile_wrapper, compile_wrapper_done;
+ if (!stack_switch) {
+ // -------------------------------------------
+ // Decrement the budget of the generic wrapper in function data.
+ // -------------------------------------------
+ __ SmiAddConstant(
+ MemOperand(
+ function_data,
+ WasmExportedFunctionData::kWrapperBudgetOffset - kHeapObjectTag),
+ Smi::FromInt(-1));
+
+ // -------------------------------------------
+ // Check if the budget of the generic wrapper reached 0 (zero).
+ // -------------------------------------------
+ // Instead of a specific comparison, we can directly use the flags set
+ // from the previous addition.
+ __ j(less_equal, &compile_wrapper);
+ __ bind(&compile_wrapper_done);
+ }
+
+ Label suspend;
+ if (stack_switch) {
+ Register active_continuation = rbx;
+ __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
+ SaveState(masm, active_continuation, rcx, &suspend);
+ AllocateContinuation(masm, function_data, wasm_instance);
+ Register target_continuation = rax; /* fixed */
+ // Save the old stack's rbp in r9, and use it to access the parameters in
+ // the parent frame.
+ // We also distribute the spill slots across the two stacks as needed by
+ // creating a "shadow frame":
+ //
+ // old stack: new stack:
+ // +-----------------+
+ // | <parent frame> |
+ // r9-> +-----------------+ +-----------------+
+ // | <fixed> | | 0 (jmpbuf rbp) |
+ // +-----------------+ rbp-> +-----------------+
+ // |kGCScanSlotCount | |kGCScanSlotCount |
+ // +-----------------+ +-----------------+
+ // | kParamCount | | / |
+ // +-----------------+ +-----------------+
+ // | kInParamCount | | / |
+ // +-----------------+ +-----------------+
+ // | / | | kReturnCount |
+ // +-----------------+ +-----------------+
+ // | / | |kValueTypesArray |
+ // +-----------------+ +-----------------+
+ // | / | | kHasRefTypes |
+ // +-----------------+ +-----------------+
+ // | / | | kFunctionData |
+ // +-----------------+ rsp-> +-----------------+
+ // seal stack |
+ // V
+ //
+ // - When we first enter the prompt, we have access to both frames, so it
+ // does not matter where the values are spilled.
+ // - When we suspend for the first time, we longjmp to the original frame
+ // (left). So the frame needs to contain the necessary information to
+ // properly deconstruct itself (actual param count and signature param
+ // count).
+ // - When we suspend for the second time, we longjmp to the frame that was
+ // set up by the WasmResume builtin, which has the same layout as the
+ // original frame (left).
+ // - When the closure finally resolves, we use the value types pointer
+ // stored in the shadow frame to get the return type and convert the return
+ // value accordingly.
+ __ movq(r9, rbp);
+ LoadTargetJumpBuffer(masm, target_continuation);
+ // Push the loaded rbp. We know it is null, because there is no frame yet,
+ // so we could also push 0 directly. In any case we need to push it, because
+ // this marks the base of the stack segment for the stack frame iterator.
+ __ pushq(rbp);
+ __ movq(rbp, rsp);
+ __ addq(rsp, Immediate(kLastSpillOffset));
+ }
+ Register original_fp = stack_switch ? r9 : rbp;
+
+ // -------------------------------------------
+ // Load values from the signature.
+ // -------------------------------------------
+ Register valuetypes_array_ptr = r11;
+ Register return_count = r8;
+ Register param_count = rcx;
+ LoadValueTypesArray(masm, function_data, valuetypes_array_ptr, return_count,
+ param_count);
+
+ // Initialize the {HasRefTypes} slot.
+ __ movq(MemOperand(rbp, kHasRefTypesOffset), Immediate(0));
// -------------------------------------------
// Store signature-related values to the stack.
@@ -3073,7 +3250,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// We cannot push values onto the stack right before the wasm call. The wasm
// function expects the parameters, that didn't fit into the registers, on the
// top of the stack.
- __ movq(MemOperand(rbp, kParamCountOffset), param_count);
+ __ movq(MemOperand(original_fp, kParamCountOffset), param_count);
__ movq(MemOperand(rbp, kReturnCountOffset), return_count);
__ movq(MemOperand(rbp, kValueTypesArrayStartOffset), valuetypes_array_ptr);
@@ -3143,7 +3320,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ subq(rsp, params_size);
params_size = no_reg;
param_count = rcx;
- __ movq(param_count, MemOperand(rbp, kParamCountOffset));
+ __ movq(param_count, MemOperand(original_fp, kParamCountOffset));
// -------------------------------------------
// Set up for the param evaluation loop.
@@ -3198,7 +3375,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Label loop_through_params;
__ bind(&loop_through_params);
- __ movq(param, MemOperand(rbp, current_param, times_1, 0));
+ __ movq(param, MemOperand(original_fp, current_param, times_1, 0));
__ movl(valuetype,
Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
@@ -3232,6 +3409,64 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ j(not_equal, &loop_through_params);
// -------------------------------------------
+ // Second loop to handle references.
+ // -------------------------------------------
+ // In this loop we iterate over all parameters for a second time and copy all
+ // reference parameters at the end of the integer parameters section.
+ Label ref_params_done;
+ // We check if we have seen a reference in the first parameter loop.
+ __ cmpq(MemOperand(rbp, kHasRefTypesOffset), Immediate(0));
+ __ j(equal, &ref_params_done);
+ // We re-calculate the beginning of the value-types array and the beginning of
+ // the parameters ({valuetypes_array_ptr} and {current_param}).
+ __ movq(valuetypes_array_ptr, MemOperand(rbp, kValueTypesArrayStartOffset));
+ return_count = current_param;
+ current_param = no_reg;
+ __ movq(return_count, MemOperand(rbp, kReturnCountOffset));
+ returns_size = return_count;
+ return_count = no_reg;
+ __ shlq(returns_size, Immediate(kValueTypeSizeLog2));
+ __ addq(valuetypes_array_ptr, returns_size);
+
+ current_param = returns_size;
+ returns_size = no_reg;
+ __ Move(current_param,
+ kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize);
+
+ Label ref_loop_through_params;
+ Label ref_loop_end;
+ // Start of the loop.
+ __ bind(&ref_loop_through_params);
+
+ // Load the current parameter with type.
+ __ movq(param, MemOperand(original_fp, current_param, times_1, 0));
+ __ movl(valuetype,
+ Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
+ // Extract the ValueKind of the type, to check for kRef and kOptRef.
+ __ andl(valuetype, Immediate(wasm::kWasmValueKindBitsMask));
+ Label move_ref_to_slot;
+ __ cmpq(valuetype, Immediate(wasm::ValueKind::kOptRef));
+ __ j(equal, &move_ref_to_slot);
+ __ cmpq(valuetype, Immediate(wasm::ValueKind::kRef));
+ __ j(equal, &move_ref_to_slot);
+ __ jmp(&ref_loop_end);
+
+ // Place the param into the proper slot in Integer section.
+ __ bind(&move_ref_to_slot);
+ __ movq(MemOperand(current_int_param_slot, 0), param);
+ __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
+
+ // Move to the next parameter.
+ __ bind(&ref_loop_end);
+ __ addq(current_param, Immediate(increment));
+ __ addq(valuetypes_array_ptr, Immediate(kValueTypeSize));
+
+ // Check if we finished all parameters.
+ __ cmpq(current_param, param_limit);
+ __ j(not_equal, &ref_loop_through_params);
+
+ __ bind(&ref_params_done);
+ // -------------------------------------------
// Move the parameters into the proper param registers.
// -------------------------------------------
// The Wasm function expects that the params can be popped from the top of the
@@ -3252,7 +3487,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// -----------------------------------
Register temp_params_size = rax;
- __ movq(temp_params_size, MemOperand(rbp, kParamCountOffset));
+ __ movq(temp_params_size, MemOperand(original_fp, kParamCountOffset));
__ shlq(temp_params_size, Immediate(kSystemPointerSizeLog2));
// We want to use the register of the function_data = rdi.
__ movq(MemOperand(rbp, kFunctionDataOffset), function_data);
@@ -3342,7 +3577,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
__ j(equal, &place_float_param);
- __ int3();
+ // All other types are reference types. We can just fall through to place them
+ // in the integer section.
__ bind(&place_integer_param);
__ cmpq(start_int_section, current_int_param_slot);
@@ -3397,13 +3633,15 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// We set the indicating value for the GC to the proper one for Wasm call.
constexpr int kWasmCallGCScanSlotCount = 0;
- __ Move(MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset),
- kWasmCallGCScanSlotCount);
+ __ Move(MemOperand(rbp, kGCScanSlotCountOffset), kWasmCallGCScanSlotCount);
// -------------------------------------------
// Call the Wasm function.
// -------------------------------------------
__ call(function_entry);
+ // Note: we might be returning to a different frame if the stack was suspended
+ // and resumed during the call. The new frame is set up by WasmResume and has
+ // a compatible layout.
function_entry = no_reg;
// -------------------------------------------
@@ -3437,6 +3675,14 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Label return_done;
__ bind(&return_done);
+ if (stack_switch) {
+ ReloadParentContinuation(masm, wasm_instance, return_reg, rbx, rcx);
+ RestoreParentSuspender(masm);
+ }
+ __ bind(&suspend);
+ // No need to process the return value if the stack is suspended, there is a
+ // single 'externref' value (the promise) which doesn't require conversion.
+
__ movq(param_count, MemOperand(rbp, kParamCountOffset));
// Calculate the number of parameters we have to pop off the stack. This
@@ -3449,7 +3695,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// -------------------------------------------
// Deconstrunct the stack frame.
// -------------------------------------------
- __ LeaveFrame(StackFrame::JS_TO_WASM);
+ __ LeaveFrame(stack_switch ? StackFrame::STACK_SWITCH
+ : StackFrame::JS_TO_WASM);
// We have to remove the caller frame slots:
// - JS arguments
@@ -3481,12 +3728,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// The builtin expects the parameter to be in register param = rax.
constexpr int kBuiltinCallGCScanSlotCount = 2;
- PrepareForBuiltinCall(
- masm,
- MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset),
- kBuiltinCallGCScanSlotCount, current_param, param_limit,
- current_int_param_slot, current_float_param_slot, valuetypes_array_ptr,
- wasm_instance, function_data);
+ PrepareForBuiltinCall(masm, MemOperand(rbp, kGCScanSlotCountOffset),
+ kBuiltinCallGCScanSlotCount, current_param, param_limit,
+ current_int_param_slot, current_float_param_slot,
+ valuetypes_array_ptr, wasm_instance, function_data);
Label param_kWasmI32_not_smi;
Label param_kWasmI64;
@@ -3505,6 +3750,21 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
__ j(equal, &param_kWasmF64);
+ // The parameter is a reference. We do not convert the parameter immediately.
+ // Instead we will later loop over all parameters again to handle reference
+ // parameters. The reason is that later value type parameters may trigger a
+ // GC, and we cannot keep reference parameters alive then. Instead we leave
+ // reference parameters at their initial place on the stack and only copy them
+ // once no GC can happen anymore.
+ // As an optimization we set a flag here that indicates that we have seen a
+ // reference so far. If there was no reference parameter, we would not iterate
+ // over the parameters for a second time.
+ __ movq(MemOperand(rbp, kHasRefTypesOffset), Immediate(1));
+ RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
+ valuetypes_array_ptr, current_float_param_slot,
+ current_int_param_slot, param_limit, current_param);
+ __ jmp(&param_conversion_done);
+
__ int3();
__ bind(&param_kWasmI32_not_smi);
@@ -3570,6 +3830,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Label return_kWasmI64;
Label return_kWasmF32;
Label return_kWasmF64;
+ Label return_kWasmFuncRef;
__ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
__ j(equal, &return_kWasmI32);
@@ -3583,6 +3844,14 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
__ j(equal, &return_kWasmF64);
+ __ cmpq(valuetype, Immediate(wasm::kWasmFuncRef.raw_bit_field()));
+ __ j(equal, &return_kWasmFuncRef);
+
+ // All types that are not SIMD are reference types.
+ __ cmpq(valuetype, Immediate(wasm::kWasmS128.raw_bit_field()));
+ // References can be passed to JavaScript as is.
+ __ j(not_equal, &return_done);
+
__ int3();
__ bind(&return_kWasmI32);
@@ -3628,221 +3897,265 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
__ jmp(&return_done);
- // -------------------------------------------
- // Kick off compilation.
- // -------------------------------------------
- __ bind(&compile_wrapper);
- // Enable GC.
- MemOperand GCScanSlotPlace =
- MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
- __ Move(GCScanSlotPlace, 4);
- // Save registers to the stack.
- __ pushq(wasm_instance);
- __ pushq(function_data);
- // Push the arguments for the runtime call.
- __ Push(wasm_instance); // first argument
- __ Push(function_data); // second argument
- // Set up context.
- __ Move(kContextRegister, Smi::zero());
- // Call the runtime function that kicks off compilation.
- __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
- // Pop the result.
- __ movq(r9, kReturnRegister0);
- // Restore registers from the stack.
- __ popq(function_data);
- __ popq(wasm_instance);
- __ jmp(&compile_wrapper_done);
-}
+ __ bind(&return_kWasmFuncRef);
+ __ Call(BUILTIN_CODE(masm->isolate(), WasmFuncRefToJS),
+ RelocInfo::CODE_TARGET);
+ __ jmp(&return_done);
-namespace {
-// Helper function for WasmReturnPromiseOnSuspend.
-void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) {
- __ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
- __ movq(rbp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
- // The stack limit is set separately under the ExecutionAccess lock.
- // TODO(thibaudm): Reload live registers.
+ if (!stack_switch) {
+ // -------------------------------------------
+ // Kick off compilation.
+ // -------------------------------------------
+ __ bind(&compile_wrapper);
+ // Enable GC.
+ MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
+ __ Move(GCScanSlotPlace, 4);
+ // Save registers to the stack.
+ __ pushq(wasm_instance);
+ __ pushq(function_data);
+ // Push the arguments for the runtime call.
+ __ Push(wasm_instance); // first argument
+ __ Push(function_data); // second argument
+ // Set up context.
+ __ Move(kContextRegister, Smi::zero());
+ // Call the runtime function that kicks off compilation.
+ __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
+ // Pop the result.
+ __ movq(r9, kReturnRegister0);
+ // Restore registers from the stack.
+ __ popq(function_data);
+ __ popq(wasm_instance);
+ __ jmp(&compile_wrapper_done);
+ }
}
} // namespace
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ GenericJSToWasmWrapperHelper(masm, false);
+}
+
void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
+ GenericJSToWasmWrapperHelper(masm, true);
+}
+
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Set up the stackframe.
- __ EnterFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND);
+ __ EnterFrame(StackFrame::STACK_SWITCH);
- // Parameters.
- Register closure = kJSFunctionRegister; // rdi
- Register param_count = kJavaScriptCallArgCountRegister; // rax
- if (kJSArgcIncludesReceiver) {
- __ decq(param_count);
- }
+ Register promise = rax;
+ Register suspender = rbx;
- __ subq(rsp, Immediate(ReturnPromiseOnSuspendFrameConstants::kSpillAreaSize));
+ __ subq(rsp, Immediate(-(BuiltinWasmWrapperConstants::kGCScanSlotCountOffset -
+ TypedFrameConstants::kFixedFrameSizeFromFp)));
- __ movq(
- MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset),
- param_count);
+ // TODO(thibaudm): Throw if any of the following holds:
+ // - caller is null
+ // - ActiveSuspender is undefined
+ // - 'suspender' is not the active suspender
// -------------------------------------------
- // Get the instance and wasm call target.
+ // Save current state in active jump buffer.
// -------------------------------------------
- Register sfi = closure;
- __ LoadAnyTaggedField(
- sfi,
- MemOperand(
- closure,
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
- Register function_data = sfi;
+ Label resume;
+ Register continuation = rcx;
+ __ LoadRoot(continuation, RootIndex::kActiveContinuation);
+ Register jmpbuf = rdx;
__ LoadAnyTaggedField(
- function_data,
- FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
- Register wasm_instance = kWasmInstanceRegister; // rsi
+ jmpbuf,
+ FieldOperand(continuation, WasmContinuationObject::kJmpbufOffset));
+ __ LoadExternalPointerField(
+ jmpbuf, FieldOperand(jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, r8);
+ FillJumpBuffer(masm, jmpbuf, &resume);
+ __ StoreTaggedSignedField(
+ FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
+ Smi::FromInt(WasmSuspenderObject::Suspended));
+ jmpbuf = no_reg;
+ // live: [rax, rbx, rcx]
+
+#ifdef DEBUG
+ // -------------------------------------------
+ // Check that the suspender's continuation is the active continuation.
+ // -------------------------------------------
+ // TODO(thibaudm): Once we add core stack-switching instructions, this check
+ // will not hold anymore: it's possible that the active continuation changed
+ // (due to an internal switch), so we have to update the suspender.
+ Register suspender_continuation = rdx;
__ LoadAnyTaggedField(
- wasm_instance,
- FieldOperand(function_data, WasmExportedFunctionData::kInstanceOffset));
- sfi = no_reg;
- closure = no_reg;
- // live: [rsi, rdi]
+ suspender_continuation,
+ FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
+ __ cmpq(suspender_continuation, continuation);
+ Label ok;
+ __ j(equal, &ok);
+ __ Trap();
+ __ bind(&ok);
+#endif
// -------------------------------------------
- // Save current state in active jmpbuf.
+ // Update roots.
// -------------------------------------------
- Register active_continuation = rax;
- Register foreign_jmpbuf = rbx;
- __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
+ Register caller = rcx;
__ LoadAnyTaggedField(
- foreign_jmpbuf,
- FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
- Register jmpbuf = rbx;
- __ LoadExternalPointerField(
- jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag, r8);
- __ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp);
- __ movq(MemOperand(jmpbuf, wasm::kJmpBufFpOffset), rbp);
- Register stack_limit_address = rcx;
- __ movq(stack_limit_address,
- FieldOperand(wasm_instance,
- WasmInstanceObject::kRealStackLimitAddressOffset));
- Register stack_limit = rdx;
- __ movq(stack_limit, MemOperand(stack_limit_address, 0));
- __ movq(MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset), stack_limit);
- // TODO(thibaudm): Save live registers.
- foreign_jmpbuf = no_reg;
- stack_limit = no_reg;
- stack_limit_address = no_reg;
- // live: [rsi, rdi, rax]
+ caller,
+ FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
+ __ LoadAnyTaggedField(
+ caller, FieldOperand(caller, WasmContinuationObject::kParentOffset));
+ __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
+ Register parent = rdx;
+ __ LoadAnyTaggedField(
+ parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
+ __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
+ parent = no_reg;
+ // live: [rax, rcx]
// -------------------------------------------
- // Allocate a new continuation.
+ // Load jump buffer.
// -------------------------------------------
MemOperand GCScanSlotPlace =
MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
__ Move(GCScanSlotPlace, 2);
- __ Push(wasm_instance);
- __ Push(function_data);
+ __ Push(promise);
+ __ Push(caller);
__ Move(kContextRegister, Smi::zero());
- __ CallRuntime(Runtime::kWasmAllocateContinuation);
- __ Pop(function_data);
- __ Pop(wasm_instance);
- STATIC_ASSERT(kReturnRegister0 == rax);
- Register target_continuation = rax;
- // live: [rsi, rdi, rax]
-
- // -------------------------------------------
- // Load target continuation jmpbuf.
- // -------------------------------------------
- foreign_jmpbuf = rbx;
+ __ CallRuntime(Runtime::kWasmSyncStackLimit);
+ __ Pop(caller);
+ __ Pop(promise);
+ jmpbuf = caller;
__ LoadAnyTaggedField(
- foreign_jmpbuf,
- FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset));
- Register target_jmpbuf = rbx;
+ jmpbuf, FieldOperand(caller, WasmContinuationObject::kJmpbufOffset));
+ caller = no_reg;
__ LoadExternalPointerField(
- target_jmpbuf,
- FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
+ jmpbuf, FieldOperand(jmpbuf, Foreign::kForeignAddressOffset),
kForeignForeignAddressTag, r8);
+ __ movq(kReturnRegister0, promise);
__ Move(GCScanSlotPlace, 0);
- // Switch stack!
- LoadJumpBuffer(masm, target_jmpbuf);
- foreign_jmpbuf = no_reg;
- target_jmpbuf = no_reg;
- // live: [rsi, rdi]
+ LoadJumpBuffer(masm, jmpbuf, true);
+ __ Trap();
+ __ bind(&resume);
+ __ LeaveFrame(StackFrame::STACK_SWITCH);
+ __ ret(0);
+}
+
+// Resume the suspender stored in the closure.
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+ __ EnterFrame(StackFrame::STACK_SWITCH);
+
+ Register param_count = rax;
+ Register closure = kJSFunctionRegister; // rdi
+
+ // These slots are not used in this builtin. But when we return from the
+ // resumed continuation, we return to the GenericJSToWasmWrapper code, which
+ // expects these slots to be set.
+ constexpr int kInParamCountOffset =
+ BuiltinWasmWrapperConstants::kInParamCountOffset;
+ constexpr int kParamCountOffset =
+ BuiltinWasmWrapperConstants::kParamCountOffset;
+ __ subq(rsp, Immediate(3 * kSystemPointerSize));
+ __ movq(MemOperand(rbp, kParamCountOffset), param_count);
+ __ movq(MemOperand(rbp, kInParamCountOffset), param_count);
+
+ param_count = no_reg;
// -------------------------------------------
- // Load and call target wasm function.
+ // Load suspender from closure.
// -------------------------------------------
- // TODO(thibaudm): Handle arguments.
- // TODO(thibaudm): Handle GC.
- // Set thread_in_wasm_flag.
- Register thread_in_wasm_flag_addr = rax;
- __ movq(
- thread_in_wasm_flag_addr,
- MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
- __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
- Register function_entry = function_data;
+ Register sfi = closure;
__ LoadAnyTaggedField(
- function_entry,
- FieldOperand(function_entry, WasmExportedFunctionData::kInternalOffset));
- __ LoadExternalPointerField(
- function_entry,
- FieldOperand(function_data, WasmInternalFunction::kForeignAddressOffset),
- kForeignForeignAddressTag, r8);
- __ Push(wasm_instance);
- __ call(function_entry);
- __ Pop(wasm_instance);
- // Unset thread_in_wasm_flag.
- __ movq(
- thread_in_wasm_flag_addr,
- MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
- __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0));
- thread_in_wasm_flag_addr = no_reg;
- function_entry = no_reg;
- function_data = no_reg;
- // live: [rsi]
+ sfi,
+ MemOperand(
+ closure,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
+ Register function_data = sfi;
+ __ LoadAnyTaggedField(
+ function_data,
+ FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
+ Register suspender = rax;
+ __ LoadAnyTaggedField(
+ suspender,
+ FieldOperand(function_data, WasmOnFulfilledData::kSuspenderOffset));
+ // Check the suspender state.
+ Label suspender_is_suspended;
+ Register state = rdx;
+ __ LoadTaggedSignedField(
+ state, FieldOperand(suspender, WasmSuspenderObject::kStateOffset));
+ __ SmiCompare(state, Smi::FromInt(WasmSuspenderObject::Suspended));
+ __ j(equal, &suspender_is_suspended);
+ __ Trap(); // TODO(thibaudm): Throw a wasm trap.
+ closure = no_reg;
+ sfi = no_reg;
+ __ bind(&suspender_is_suspended);
// -------------------------------------------
- // Reload parent continuation.
+ // Save current state.
// -------------------------------------------
- active_continuation = rbx;
+ Label suspend;
+ Register active_continuation = r9;
__ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
- Register parent = rdx;
+ Register current_jmpbuf = rdi;
__ LoadAnyTaggedField(
- parent,
- FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));
- active_continuation = no_reg;
- // live: [rsi]
+ current_jmpbuf,
+ FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
+ __ LoadExternalPointerField(
+ current_jmpbuf,
+ FieldOperand(current_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, rdx);
+ FillJumpBuffer(masm, current_jmpbuf, &suspend);
+ current_jmpbuf = no_reg;
// -------------------------------------------
- // Update instance active continuation.
+ // Set suspender's parent to active continuation.
// -------------------------------------------
- __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), parent);
- foreign_jmpbuf = rax;
+ __ StoreTaggedSignedField(
+ FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
+ Smi::FromInt(WasmSuspenderObject::Active));
+ Register target_continuation = rdi;
__ LoadAnyTaggedField(
- foreign_jmpbuf,
- FieldOperand(parent, WasmContinuationObject::kJmpbufOffset));
- jmpbuf = foreign_jmpbuf;
- __ LoadExternalPointerField(
- jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag, r8);
- // Switch stack!
- LoadJumpBuffer(masm, jmpbuf);
+ target_continuation,
+ FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ __ StoreTaggedField(
+ FieldOperand(target_continuation, WasmContinuationObject::kParentOffset),
+ active_continuation);
+ __ RecordWriteField(
+ target_continuation, WasmContinuationObject::kParentOffset,
+ active_continuation, slot_address, SaveFPRegsMode::kIgnore);
+ active_continuation = no_reg;
+
+ // -------------------------------------------
+ // Update roots.
+ // -------------------------------------------
+ __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation),
+ target_continuation);
+ __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
+ suspender = no_reg;
+
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
__ Move(GCScanSlotPlace, 1);
- __ Push(wasm_instance); // Spill.
+ __ Push(target_continuation);
__ Move(kContextRegister, Smi::zero());
__ CallRuntime(Runtime::kWasmSyncStackLimit);
- __ Pop(wasm_instance);
- parent = no_reg;
- active_continuation = no_reg;
- foreign_jmpbuf = no_reg;
- wasm_instance = no_reg;
+ __ Pop(target_continuation);
// -------------------------------------------
- // Epilogue.
+ // Load state from target jmpbuf (longjmp).
// -------------------------------------------
- __ movq(
- param_count,
- MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset));
- __ LeaveFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND);
- __ DropArguments(param_count, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
- __ ret(0);
+ Register target_jmpbuf = target_continuation;
+ __ LoadAnyTaggedField(
+ target_jmpbuf,
+ FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset));
+ __ LoadExternalPointerField(
+ target_jmpbuf,
+ FieldOperand(target_jmpbuf, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag, rax);
+ // Move resolved value to return register.
+ __ movq(kReturnRegister0, Operand(rbp, 3 * kSystemPointerSize));
+ __ Move(GCScanSlotPlace, 0);
+ LoadJumpBuffer(masm, target_jmpbuf, true);
+ __ Trap();
+ __ bind(&suspend);
+ __ LeaveFrame(StackFrame::STACK_SWITCH);
+ __ ret(3);
}
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
@@ -3995,6 +4308,18 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ PrepareCallCFunction(3);
__ CallCFunction(find_handler, 3);
}
+
+#ifdef V8_ENABLE_CET_SHADOW_STACK
+ // Drop frames from the shadow stack.
+ ExternalReference num_frames_above_pending_handler_address =
+ ExternalReference::Create(
+ IsolateAddressId::kNumFramesAbovePendingHandlerAddress,
+ masm->isolate());
+ __ movq(rcx, masm->ExternalReferenceAsOperand(
+ num_frames_above_pending_handler_address));
+ __ IncsspqIfSupported(rcx, kScratchRegister);
+#endif // V8_ENABLE_CET_SHADOW_STACK
+
// Retrieve the handler context, SP and FP.
__ movq(rsi,
masm->ExternalReferenceAsOperand(pending_handler_context_address));
@@ -4223,8 +4548,12 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
} else {
DCHECK_EQ(stack_space, 0);
__ PopReturnAddressTo(rcx);
+ // {stack_space_operand} was loaded into {rbx} above.
__ addq(rsp, rbx);
- __ jmp(rcx);
+ // Push and ret (instead of jmp) to keep the RSB and the CET shadow stack
+ // balanced.
+ __ PushReturnAddressFrom(rcx);
+ __ ret(0);
}
// Re-throw by promoting a scheduled exception.
@@ -4713,13 +5042,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
+ if (FLAG_debug_code) {
+ AssertCodeTIsBaseline(masm, code_obj, r11);
+ }
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
- if (FLAG_debug_code) {
- AssertCodeIsBaseline(masm, code_obj, r11);
- }
// Load the feedback vector.
Register feedback_vector = r11;
@@ -4856,7 +5184,7 @@ void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
+ MacroAssembler* masm, Handle<CodeT> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -4898,14 +5226,14 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
+ Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
+ Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index bf654f6789..4a9e9ec701 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,14 +1,11 @@
-bbudge@chromium.org
+cbruni@chromium.org
clemensb@chromium.org
-delphick@chromium.org
gdeepti@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
mslekova@chromium.org
-mvstanton@chromium.org
nicohartmann@chromium.org
-zhin@chromium.org
per-file compiler.*=marja@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index e434cac32d..ff612406f6 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -533,7 +533,7 @@ Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
pending_32_bit_constants_(),
- scratch_register_list_(ip.bit()) {
+ scratch_register_list_({ip}) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constant_pool_deadline_ = kMaxInt;
const_pool_blocked_nesting_ = 0;
@@ -594,7 +594,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -1412,9 +1412,9 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
- DCHECK_NE(rl, 0);
+ DCHECK(!rl.is_empty());
DCHECK(rn != pc);
- emit(instr | rn.code() * B16 | rl);
+ emit(instr | rn.code() * B16 | rl.bits());
}
void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
@@ -2251,12 +2251,12 @@ void Assembler::pld(const MemOperand& address) {
void Assembler::ldm(BlockAddrMode am, Register base, RegList dst,
Condition cond) {
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
- DCHECK(base == sp || (dst & sp.bit()) == 0);
+ DCHECK(base == sp || !dst.has(sp));
AddrMode4(cond | B27 | am | L, base, dst);
// Emit the constant pool after a function return implemented by ldm ..{..pc}.
- if (cond == al && (dst & pc.bit()) != 0) {
+ if (cond == al && dst.has(pc)) {
// There is a slight chance that the ldm instruction was actually a call,
// in which case it would be wrong to return into the constant pool; we
// recognize this case by checking if the emission of the pool was blocked
@@ -5499,11 +5499,7 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
- DCHECK_NE(*available, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
- Register reg = Register::from_code(index);
- *available &= ~reg.bit();
- return reg;
+ return available->PopFirst();
}
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 4cce50f795..9408dd0793 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -43,7 +43,6 @@
#include <stdio.h>
#include <memory>
-#include <vector>
#include "src/base/numbers/double.h"
#include "src/base/small-vector.h"
@@ -1400,21 +1399,25 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
}
// Check if we have registers available to acquire.
- bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ bool CanAcquire() const {
+ return !assembler_->GetScratchRegisterList()->is_empty();
+ }
bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
- DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()), 0);
- *available |= reg1.bit() | reg2.bit();
+ DCHECK(!available->has(reg1));
+ DCHECK(!available->has(reg2));
+ available->set(reg1);
+ available->set(reg2);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
- DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()),
- reg1.bit() | reg2.bit());
- *available &= ~(reg1.bit() | reg2.bit());
+ DCHECK(available->has(reg1));
+ DCHECK_IMPLIES(reg2.is_valid(), available->has(reg2));
+ available->clear(RegList{reg1, reg2});
}
private:
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
index 14960a3193..70e8e9f361 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -24,21 +24,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | r0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | r1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | r2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | r3.bit());
- if (argc >= 5) DCHECK(allocatable_regs | r4.bit());
- if (argc >= 6) DCHECK(allocatable_regs | r5.bit());
- if (argc >= 7) DCHECK(allocatable_regs | r6.bit());
- if (argc >= 8) DCHECK(allocatable_regs | r7.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(r0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(r1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(r2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(r3));
+ if (argc >= 5) DCHECK(allocatable_regs.has(r4));
+ if (argc >= 6) DCHECK(allocatable_regs.has(r5));
+ if (argc >= 7) DCHECK(allocatable_regs.has(r6));
+ if (argc >= 8) DCHECK(allocatable_regs.has(r7));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(r1, r5, r4, r2, r0);
+ return RegisterArray(r1, r5, r4, r2, r0, r3, kContextRegister);
}
// static
@@ -64,6 +64,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return r0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return r1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return r2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return r3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return r1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return r2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return r3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return r4;
@@ -106,7 +136,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -116,6 +146,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // r0 : the source
+ // r1 : the excluded property count
+ return RegisterArray(r1, r0);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // r0 : the source
+ // r1 : the excluded property count
+ // r2 : the excluded property base
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// r0 : number of arguments (on the stack)
// r1 : the target to call
@@ -225,6 +271,14 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // r0: left operand
+ // r1: right operand
+ // r2: feedback slot
+ return RegisterArray(r0, r1, r2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(r1, // kApiFunctionAddress
r2, // kArgc
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 95eb8795e9..d0d854dc50 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -45,20 +45,10 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = (kCallerSaved | lr) - exclusions;
- RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
-
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
@@ -71,21 +61,11 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = (kCallerSaved | lr) - exclusions;
stm(db_w, sp, list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
SaveFPRegs(sp, lr);
@@ -104,21 +84,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = (kCallerSaved | lr) - exclusions;
ldm(ia_w, sp, list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
return bytes;
}
@@ -696,27 +666,15 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
+ if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- stm(db_w, sp, regs);
+ stm(db_w, sp, registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
+ if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- ldm(ia_w, sp, regs);
+ ldm(ia_w, sp, registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
@@ -876,15 +834,15 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
ASM_CODE_COMMENT(this);
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
- stm(db_w, sp, fp.bit() | lr.bit());
+ stm(db_w, sp, {fp, lr});
mov(fp, Operand(sp));
Push(marker_reg);
} else {
- stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ stm(db_w, sp, {marker_reg, fp, lr});
add(fp, sp, Operand(kPointerSize));
}
} else {
- stm(db_w, sp, fp.bit() | lr.bit());
+ stm(db_w, sp, {fp, lr});
mov(fp, sp);
}
}
@@ -892,9 +850,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
void TurboAssembler::PushStandardFrame(Register function_reg) {
ASM_CODE_COMMENT(this);
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
- stm(db_w, sp,
- (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
- lr.bit());
+ stm(db_w, sp, {function_reg, cp, fp, lr});
int offset = -StandardFrameConstants::kContextOffset;
offset += function_reg.is_valid() ? kPointerSize : 0;
add(fp, sp, Operand(offset));
@@ -1426,7 +1382,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
// the caller frame pointer and return address.
mov(sp, fp);
int frame_ends = pc_offset();
- ldm(ia_w, sp, fp.bit() | lr.bit());
+ ldm(ia_w, sp, {fp, lr});
return frame_ends;
}
@@ -1575,7 +1531,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Tear down the exit frame, pop the arguments, and return.
mov(sp, Operand(fp));
- ldm(ia_w, sp, fp.bit() | lr.bit());
+ ldm(ia_w, sp, {fp, lr});
if (argument_count.is_valid()) {
if (argument_count_is_length) {
add(sp, sp, argument_count);
@@ -1674,11 +1630,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
sub(num, num, Operand(1), SetCC);
bind(&check);
- if (kJSArgcIncludesReceiver) {
- b(gt, &copy);
- } else {
- b(ge, &copy);
- }
+ b(gt, &copy);
}
// Fill remaining expected arguments with undefined values.
@@ -2663,19 +2615,13 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index e43aec485f..a17e1a964f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
if (src1.code() > src2.code()) {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ stm(db_w, sp, {src1, src2}, cond);
} else {
str(src1, MemOperand(sp, 4, NegPreIndex), cond);
str(src2, MemOperand(sp, 4, NegPreIndex), cond);
@@ -108,9 +108,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ stm(db_w, sp, {src1, src2, src3}, cond);
} else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ stm(db_w, sp, {src1, src2}, cond);
str(src3, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
@@ -125,14 +125,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
+ stm(db_w, sp, {src1, src2, src3, src4}, cond);
} else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ stm(db_w, sp, {src1, src2, src3}, cond);
str(src4, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ stm(db_w, sp, {src1, src2}, cond);
Push(src3, src4, cond);
}
} else {
@@ -148,20 +147,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
if (src4.code() > src5.code()) {
- stm(db_w, sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
- cond);
+ stm(db_w, sp, {src1, src2, src3, src4, src5}, cond);
} else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
+ stm(db_w, sp, {src1, src2, src3, src4}, cond);
str(src5, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ stm(db_w, sp, {src1, src2, src3}, cond);
Push(src4, src5, cond);
}
} else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ stm(db_w, sp, {src1, src2}, cond);
Push(src3, src4, src5, cond);
}
} else {
@@ -182,7 +178,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(src1 != src2);
if (src1.code() > src2.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ ldm(ia_w, sp, {src1, src2}, cond);
} else {
ldr(src2, MemOperand(sp, 4, PostIndex), cond);
ldr(src1, MemOperand(sp, 4, PostIndex), cond);
@@ -194,10 +190,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ ldm(ia_w, sp, {src1, src2, src3}, cond);
} else {
ldr(src3, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ ldm(ia_w, sp, {src1, src2}, cond);
}
} else {
Pop(src2, src3, cond);
@@ -212,15 +208,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
+ ldm(ia_w, sp, {src1, src2, src3, src4}, cond);
} else {
ldr(src4, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ ldm(ia_w, sp, {src1, src2, src3}, cond);
}
} else {
Pop(src3, src4, cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ ldm(ia_w, sp, {src1, src2}, cond);
}
} else {
Pop(src2, src3, src4, cond);
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 8cc838945d..810abcbdb0 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_ARM_REGISTER_ARM_H_
#define V8_CODEGEN_ARM_REGISTER_ARM_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -61,40 +60,6 @@ namespace internal {
// leave it alone. Adjust the value of kR9Available accordingly:
const int kR9Available = 1; // 1 if available to us, 0 if reserved
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 0 | // r0 a1
- 1 << 1 | // r1 a2
- 1 << 2 | // r2 a3
- 1 << 3; // r3 a4
-
-const int kNumJSCallerSaved = 4;
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved = 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4 (cp in JavaScript code)
- 1 << 8 | // r8 v5 (pp in JavaScript code)
- kR9Available << 9 | // r9 v6
- 1 << 10 | // r10 v7
- 1 << 11; // r11 v8 (fp in JavaScript code)
-
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-const RegList kCallerSaved = 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 9; // r9
-
-const int kNumCalleeSaved = 7 + kR9Available;
-
-// Double registers d8 to d15 are callee-saved.
-const int kNumDoubleCalleeSaved = 8;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -109,7 +74,7 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
+static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");
// r7: context register
@@ -125,7 +90,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = false;
+constexpr AliasingKind kFPAliasing = AliasingKind::kCombine;
constexpr bool kSimdMaskRegisters = false;
enum SwVfpRegisterCode {
@@ -169,7 +134,7 @@ class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
-static_assert(sizeof(SwVfpRegister) == sizeof(int),
+static_assert(sizeof(SwVfpRegister) <= sizeof(int),
"SwVfpRegister can efficiently be passed by value");
using FloatRegister = SwVfpRegister;
@@ -210,7 +175,7 @@ class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
-static_assert(sizeof(DwVfpRegister) == sizeof(int),
+static_assert(sizeof(DwVfpRegister) <= sizeof(int),
"DwVfpRegister can efficiently be passed by value");
using DoubleRegister = DwVfpRegister;
@@ -356,6 +321,7 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r4;
// Give alias names to registers
constexpr Register cp = r7; // JavaScript context pointer.
+constexpr Register r11 = fp;
constexpr Register kRootRegister = r10; // Roots array pointer.
constexpr DoubleRegister kFPReturnRegister0 = d0;
diff --git a/deps/v8/src/codegen/arm/reglist-arm.h b/deps/v8/src/codegen/arm/reglist-arm.h
new file mode 100644
index 0000000000..c6834a5db5
--- /dev/null
+++ b/deps/v8/src/codegen/arm/reglist-arm.h
@@ -0,0 +1,56 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM_REGLIST_ARM_H_
+#define V8_CODEGEN_ARM_REGLIST_ARM_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = {r0, // r0 a1
+ r1, // r1 a2
+ r2, // r2 a3
+ r3}; // r3 a4
+
+const int kNumJSCallerSaved = 4;
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = {r4, // r4 v1
+ r5, // r5 v2
+ r6, // r6 v3
+ r7, // r7 v4 (cp in JavaScript code)
+ r8, // r8 v5 (pp in JavaScript code)
+ kR9Available ? r9 : Register::no_reg(), // r9 v6
+ r10, // r10 v7
+ r11}; // r11 v8 (fp in JavaScript code)
+
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+const RegList kCallerSaved = {r0, // r0
+ r1, // r1
+ r2, // r2
+ r3, // r3
+ r9}; // r9
+
+const int kNumCalleeSaved = 7 + kR9Available;
+
+// Double registers d8 to d15 are callee-saved.
+const int kNumDoubleCalleeSaved = 8;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ARM_REGLIST_ARM_H_
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 40b9a94dd8..4b22127696 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -54,12 +54,12 @@ inline bool CPURegister::IsSP() const {
inline void CPURegList::Combine(const CPURegList& other) {
DCHECK(other.type() == type_);
DCHECK(other.RegisterSizeInBits() == size_);
- list_ |= other.list();
+ list_ |= other.list_;
}
inline void CPURegList::Remove(const CPURegList& other) {
if (other.type() == type_) {
- list_ &= ~other.list();
+ list_ &= ~other.list_;
}
}
@@ -487,15 +487,15 @@ Tagged_t Assembler::target_compressed_address_at(Address pc,
return Memory<Tagged_t>(target_pointer_address_at(pc));
}
-Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
+Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- return Handle<Code>(reinterpret_cast<Address*>(
+ return Handle<CodeT>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
- return Handle<Code>::cast(
+ return Handle<CodeT>::cast(
GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
}
}
@@ -536,7 +536,7 @@ Address Assembler::runtime_entry_at(Address pc) {
return Assembler::target_address_at(pc, 0 /* unused */);
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return instr->ImmPCOffset() + options().code_range_start;
+ return instr->ImmPCOffset() + options().code_range_base;
}
}
@@ -666,7 +666,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Object obj(DecompressTaggedPointer(cage_base, compressed));
// Embedding of compressed Code objects must not happen when external code
// space is enabled, because CodeDataContainers must be used instead.
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode(cage_base));
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
+ !IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);
} else {
return HeapObject::cast(
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index fd5cd326ec..1edc2bd6cb 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -73,7 +73,7 @@ constexpr unsigned CpuFeaturesFromCompiler() {
constexpr unsigned CpuFeaturesFromTargetOS() {
unsigned features = 0;
-#if defined(V8_TARGET_OS_MACOSX)
+#if defined(V8_TARGET_OS_MACOS)
features |= 1u << JSCVT;
#endif
return features;
@@ -228,18 +228,18 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
int number_of_valid_regs = 0;
int number_of_valid_fpregs = 0;
- RegList unique_regs = 0;
- RegList unique_fpregs = 0;
+ uint64_t unique_regs = 0;
+ uint64_t unique_fpregs = 0;
const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
for (unsigned i = 0; i < arraysize(regs); i++) {
if (regs[i].IsRegister()) {
number_of_valid_regs++;
- unique_regs |= regs[i].bit();
+ unique_regs |= (uint64_t{1} << regs[i].code());
} else if (regs[i].IsVRegister()) {
number_of_valid_fpregs++;
- unique_fpregs |= regs[i].bit();
+ unique_fpregs |= (uint64_t{1} << regs[i].code());
} else {
DCHECK(!regs[i].is_valid());
}
@@ -420,7 +420,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -3696,9 +3696,12 @@ void Assembler::EmitStringData(const char* string) {
void Assembler::debug(const char* message, uint32_t code, Instr params) {
if (options().enable_simulator_code) {
+ size_t size_of_debug_sequence =
+ 4 * kInstrSize + RoundUp<kInstrSize>(strlen(message) + 1);
+
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit pools.
- BlockPoolsScope scope(this);
+ BlockPoolsScope scope(this, size_of_debug_sequence);
Label start;
bind(&start);
@@ -3713,6 +3716,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugMessageOffset);
EmitStringData(message);
hlt(kImmExceptionIsUnreachable);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(&start), size_of_debug_sequence);
return;
}
@@ -4390,7 +4394,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
void Assembler::near_call(HeapObjectRequest request) {
BlockPoolsScope no_pool_before_bl_instr(this);
RequestHeapObject(request);
- EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
+ EmbeddedObjectIndex index = AddEmbeddedObject(Handle<CodeT>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
DCHECK(is_int32(index));
bl(static_cast<int>(index));
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index df8fadf1f1..f9e991a57b 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -6,10 +6,8 @@
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
#include <deque>
-#include <list>
#include <map>
#include <memory>
-#include <vector>
#include "src/base/optional.h"
#include "src/codegen/arm64/constants-arm64.h"
@@ -264,7 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
- inline Handle<Code> code_target_object_handle_at(Address pc);
+ inline Handle<CodeT> code_target_object_handle_at(Address pc);
inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
inline void set_embedded_object_index_referenced_from(
Address p, EmbeddedObjectIndex index);
@@ -274,8 +272,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Returns the target address for a runtime function for the call encoded
// at 'pc'.
// Runtime entries can be temporarily encoded as the offset between the
- // runtime function entrypoint and the code range start (stored in the
- // code_range_start field), in order to be encodable as we generate the code,
+ // runtime function entrypoint and the code range base (stored in the
+ // code_range_base field), in order to be encodable as we generate the code,
// before it is moved into the code space.
inline Address runtime_entry_at(Address pc);
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index 4c61e1fd82..96bb73f0d4 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -9,7 +9,7 @@
#include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/cpu-features.h"
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <libkern/OSCacheControl.h>
#endif
@@ -49,7 +49,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
#if defined(V8_HOST_ARCH_ARM64)
#if defined(V8_OS_WIN)
::FlushInstructionCache(GetCurrentProcess(), address, length);
-#elif defined(V8_OS_MACOSX)
+#elif defined(V8_OS_DARWIN)
sys_icache_invalidate(address, length);
#else
// The code below assumes user space cache operations are allowed. The goal
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
index f1fa16673c..709a01264d 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -25,20 +25,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | x0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | x1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | x2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | x3.bit());
- if (argc >= 5) DCHECK(allocatable_regs | x4.bit());
- if (argc >= 6) DCHECK(allocatable_regs | x5.bit());
- if (argc >= 7) DCHECK(allocatable_regs | x6.bit());
- if (argc >= 8) DCHECK(allocatable_regs | x7.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(x0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(x1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(x2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(x3));
+ if (argc >= 5) DCHECK(allocatable_regs.has(x4));
+ if (argc >= 6) DCHECK(allocatable_regs.has(x5));
+ if (argc >= 7) DCHECK(allocatable_regs.has(x6));
+ if (argc >= 8) DCHECK(allocatable_regs.has(x7));
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(x1, x5, x4, x2, x0, x3);
+ // TODO(leszeks): Remove x7 which is just there for padding.
+ return RegisterArray(x1, x5, x4, x2, x0, x3, kContextRegister, x7);
}
// static
@@ -64,6 +65,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return x0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return x1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return x2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return x3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return x1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return x2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return x3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return x4;
@@ -106,7 +137,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(x3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(x0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -115,6 +146,20 @@ constexpr auto CallTrampolineDescriptor::registers() {
return RegisterArray(x1, x0);
}
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // r1 : the source
+ // r0 : the excluded property count
+ return RegisterArray(x1, x0);
+}
+
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // r1 : the source
+ // r0 : the excluded property count
+ // x2 : the excluded property base
+ return RegisterArray(x1, x0, x2);
+}
+
// static
constexpr auto CallVarargsDescriptor::registers() {
// x0 : number of arguments (on the stack)
@@ -233,6 +278,14 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // x0: left operand
+ // x1: right operand
+ // x2: feedback slot
+ return RegisterArray(x0, x1, x2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(x1, // kApiFunctionAddress
x2, // kArgc
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 58920c343a..6ea0322afe 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1641,7 +1641,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
Mov(x0, num_arguments);
Mov(x1, ExternalReference::Create(f));
- Handle<Code> code =
+ Handle<CodeT> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -1650,8 +1650,9 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
Mov(x1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<CodeT> code =
+ CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1843,8 +1844,8 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target,
// Assembler::runtime_entry_at()).
// Note, that builtin-to-builitin calls use different OFF_HEAP_TARGET mode
// and therefore are encoded differently.
- DCHECK_NE(options().code_range_start, 0);
- offset -= static_cast<int64_t>(options().code_range_start);
+ DCHECK_NE(options().code_range_base, 0);
+ offset -= static_cast<int64_t>(options().code_range_base);
} else {
offset -= reinterpret_cast<int64_t>(pc);
}
@@ -1859,11 +1860,11 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
JumpHelper(offset, rmode, cond);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
- Builtins::IsIsolateIndependentBuiltin(*code));
+ Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code)));
if (options().inline_offheap_trampolines) {
Builtin builtin = Builtin::kNoBuiltinId;
@@ -1907,9 +1908,9 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
+void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
- Builtins::IsIsolateIndependentBuiltin(*code));
+ Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code)));
BlockPoolsScope scope(this);
if (options().inline_offheap_trampolines) {
@@ -1921,7 +1922,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
}
}
- DCHECK(code->IsExecutable());
+ DCHECK(FromCodeT(*code).IsExecutable());
if (CanUseNearCallOrJump(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
@@ -2023,6 +2024,11 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ LoadCodeDataContainerEntry(destination, code_object);
+ return;
+ }
+
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -2292,11 +2298,7 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register slots_to_copy = x4;
Register slots_to_claim = x5;
- if (kJSArgcIncludesReceiver) {
- Mov(slots_to_copy, actual_argument_count);
- } else {
- Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
- }
+ Mov(slots_to_copy, actual_argument_count);
Mov(slots_to_claim, extra_argument_count);
Tbz(extra_argument_count, 0, &even_extra_count);
@@ -2310,9 +2312,6 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register scratch = x11;
Add(slots_to_claim, extra_argument_count, 1);
And(scratch, actual_argument_count, 1);
- if (!kJSArgcIncludesReceiver) {
- Eor(scratch, scratch, 1);
- }
Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2333,13 +2332,7 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
}
Bind(&skip_move);
- Register actual_argument_with_receiver = actual_argument_count;
Register pointer_next_value = x5;
- if (!kJSArgcIncludesReceiver) {
- actual_argument_with_receiver = x4;
- Add(actual_argument_with_receiver, actual_argument_count,
- 1); // {slots_to_copy} was scratched.
- }
// Copy extra arguments as undefined values.
{
@@ -2347,7 +2340,7 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register undefined_value = x6;
Register count = x7;
LoadRoot(undefined_value, RootIndex::kUndefinedValue);
- SlotAddress(pointer_next_value, actual_argument_with_receiver);
+ SlotAddress(pointer_next_value, actual_argument_count);
Mov(count, extra_argument_count);
Bind(&loop);
Str(undefined_value,
@@ -2360,7 +2353,7 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
{
Label skip;
Register total_args_slots = x4;
- Add(total_args_slots, actual_argument_with_receiver, extra_argument_count);
+ Add(total_args_slots, actual_argument_count, extra_argument_count);
Tbz(total_args_slots, 0, &skip);
Str(padreg, MemOperand(pointer_next_value));
Bind(&skip);
@@ -3072,40 +3065,40 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Bind(&done);
}
-void TurboAssembler::EncodeCagedPointer(const Register& value) {
+void TurboAssembler::EncodeSandboxedPointer(const Register& value) {
ASM_CODE_COMMENT(this);
-#ifdef V8_CAGED_POINTERS
+#ifdef V8_SANDBOXED_POINTERS
Sub(value, value, kPtrComprCageBaseRegister);
- Mov(value, Operand(value, LSL, kCagedPointerShift));
+ Mov(value, Operand(value, LSL, kSandboxedPointerShift));
#else
UNREACHABLE();
#endif
}
-void TurboAssembler::DecodeCagedPointer(const Register& value) {
+void TurboAssembler::DecodeSandboxedPointer(const Register& value) {
ASM_CODE_COMMENT(this);
-#ifdef V8_CAGED_POINTERS
+#ifdef V8_SANDBOXED_POINTERS
Add(value, kPtrComprCageBaseRegister,
- Operand(value, LSR, kCagedPointerShift));
+ Operand(value, LSR, kSandboxedPointerShift));
#else
UNREACHABLE();
#endif
}
-void TurboAssembler::LoadCagedPointerField(const Register& destination,
- const MemOperand& field_operand) {
+void TurboAssembler::LoadSandboxedPointerField(
+ const Register& destination, const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Ldr(destination, field_operand);
- DecodeCagedPointer(destination);
+ DecodeSandboxedPointer(destination);
}
-void TurboAssembler::StoreCagedPointerField(
+void TurboAssembler::StoreSandboxedPointerField(
const Register& value, const MemOperand& dst_field_operand) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
Mov(scratch, value);
- EncodeCagedPointer(scratch);
+ EncodeSandboxedPointer(scratch);
Str(scratch, dst_field_operand);
}
@@ -3115,7 +3108,8 @@ void TurboAssembler::LoadExternalPointerField(Register destination,
Register isolate_root) {
DCHECK(!AreAliased(destination, isolate_root));
ASM_CODE_COMMENT(this);
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ DCHECK_NE(kExternalPointerNullTag, tag);
UseScratchRegisterScope temps(this);
Register external_table = temps.AcquireX();
if (isolate_root == no_reg) {
@@ -3126,21 +3120,23 @@ void TurboAssembler::LoadExternalPointerField(Register destination,
MemOperand(isolate_root,
IsolateData::external_pointer_table_offset() +
Internals::kExternalPointerTableBufferOffset));
- Ldr(destination, field_operand);
- Ldr(destination,
- MemOperand(external_table, destination, LSL, kSystemPointerSizeLog2));
- if (tag != 0) {
- And(destination, destination, Immediate(~tag));
- }
+ Ldr(destination.W(), field_operand);
+ // MemOperand doesn't support LSR currently (only LSL), so here we do the
+ // offset computation separately first.
+ STATIC_ASSERT(kExternalPointerIndexShift > kSystemPointerSizeLog2);
+ int shift_amount = kExternalPointerIndexShift - kSystemPointerSizeLog2;
+ Mov(destination, Operand(destination, LSR, shift_amount));
+ Ldr(destination, MemOperand(external_table, destination));
+ And(destination, destination, Immediate(~tag));
#else
Ldr(destination, field_operand);
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
+ if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
- CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
+ CPURegList regs(kXRegSizeInBits, registers);
// If we were saving LR, we might need to sign it.
DCHECK(!regs.IncludesAliasOf(lr));
regs.Align();
@@ -3148,9 +3144,9 @@ void TurboAssembler::MaybeSaveRegisters(RegList registers) {
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
+ if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
- CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
+ CPURegList regs(kXRegSizeInBits, registers);
// If we were saving LR, we might need to sign it.
DCHECK(!regs.IncludesAliasOf(lr));
regs.Align();
@@ -3212,7 +3208,7 @@ void TurboAssembler::CallRecordWriteStub(
if (options().inline_offheap_trampolines) {
CallBuiltin(builtin);
} else {
- Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
}
@@ -3350,7 +3346,7 @@ void TurboAssembler::Abort(AbortReason reason) {
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
- RegList old_tmp_list = TmpList()->list();
+ uint64_t old_tmp_list = TmpList()->bits();
TmpList()->Combine(MacroAssembler::DefaultTmpList());
if (should_abort_hard()) {
@@ -3375,7 +3371,7 @@ void TurboAssembler::Abort(AbortReason reason) {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
- TmpList()->set_list(old_tmp_list);
+ TmpList()->set_bits(old_tmp_list);
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
@@ -3425,8 +3421,8 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
// Override the TurboAssembler's scratch register list. The lists will be
// reset automatically at the end of the UseScratchRegisterScope.
UseScratchRegisterScope temps(this);
- TmpList()->set_list(tmp_list.list());
- FPTmpList()->set_list(fp_tmp_list.list());
+ TmpList()->set_bits(tmp_list.bits());
+ FPTmpList()->set_bits(fp_tmp_list.bits());
// Copies of the printf vararg registers that we can pop from.
CPURegList pcs_varargs = kPCSVarargs;
@@ -3574,10 +3570,10 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
ASM_CODE_COMMENT(this);
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
- RegList old_tmp_list = TmpList()->list();
- RegList old_fp_tmp_list = FPTmpList()->list();
- TmpList()->set_list(0);
- FPTmpList()->set_list(0);
+ uint64_t old_tmp_list = TmpList()->bits();
+ uint64_t old_fp_tmp_list = FPTmpList()->bits();
+ TmpList()->set_bits(0);
+ FPTmpList()->set_bits(0);
CPURegList saved_registers = kCallerSaved;
saved_registers.Align();
@@ -3593,8 +3589,8 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
CPURegList fp_tmp_list = kCallerSavedV;
tmp_list.Remove(arg0, arg1, arg2, arg3);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
- TmpList()->set_list(tmp_list.list());
- FPTmpList()->set_list(fp_tmp_list.list());
+ TmpList()->set_bits(tmp_list.bits());
+ FPTmpList()->set_bits(fp_tmp_list.bits());
{
UseScratchRegisterScope temps(this);
@@ -3640,13 +3636,13 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
PopCPURegList(kCallerSavedV);
PopCPURegList<kDontLoadLR>(saved_registers);
- TmpList()->set_list(old_tmp_list);
- FPTmpList()->set_list(old_fp_tmp_list);
+ TmpList()->set_bits(old_tmp_list);
+ FPTmpList()->set_bits(old_fp_tmp_list);
}
UseScratchRegisterScope::~UseScratchRegisterScope() {
- available_->set_list(old_available_);
- availablefp_->set_list(old_availablefp_);
+ available_->set_bits(old_available_);
+ availablefp_->set_bits(old_availablefp_);
}
Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 7c972bd307..022d84cb60 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -9,8 +9,6 @@
#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
-#include <vector>
-
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
@@ -952,12 +950,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(const ExternalReference& reference);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode);
- void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
+ void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(ExternalReference target);
// Generate an indirect call (for when a direct call's range is not adequate).
@@ -992,6 +990,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Helper functions that dispatch either to Call/JumpCodeObject or to
// Call/JumpCodeDataContainerObject.
+ // TODO(v8:11880): remove since CodeT targets are now default.
void LoadCodeTEntry(Register destination, Register code);
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
@@ -1439,23 +1438,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I64x2AllTrue(Register dst, VRegister src);
// ---------------------------------------------------------------------------
- // V8 Heap sandbox support
+ // V8 Sandbox support
- // Transform a CagedPointer from/to its encoded form, which is used when the
- // pointer is stored on the heap and ensures that the pointer will always
- // point into the virtual memory cage.
- void EncodeCagedPointer(const Register& value);
- void DecodeCagedPointer(const Register& value);
+ // Transform a SandboxedPointer from/to its encoded form, which is used when
+ // the pointer is stored on the heap and ensures that the pointer will always
+ // point into the sandbox.
+ void EncodeSandboxedPointer(const Register& value);
+ void DecodeSandboxedPointer(const Register& value);
- // Load and decode a CagedPointer from the heap.
- void LoadCagedPointerField(const Register& destination,
- const MemOperand& field_operand);
- // Encode and store a CagedPointer to the heap.
- void StoreCagedPointerField(const Register& value,
- const MemOperand& dst_field_operand);
+ // Load and decode a SandboxedPointer from the heap.
+ void LoadSandboxedPointerField(const Register& destination,
+ const MemOperand& field_operand);
+ // Encode and store a SandboxedPointer to the heap.
+ void StoreSandboxedPointerField(const Register& value,
+ const MemOperand& dst_field_operand);
// Loads a field containing off-heap pointer and does necessary decoding
- // if V8 heap sandbox is enabled.
+ // if sandboxed external pointers are enabled.
void LoadExternalPointerField(Register destination, MemOperand field_operand,
ExternalPointerTag tag,
Register isolate_root = Register::no_reg());
@@ -1756,16 +1755,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// For the 'lr_mode' template argument of the following methods, see
// PushCPURegList/PopCPURegList.
template <StoreLRMode lr_mode = kDontStoreLR>
- inline void PushSizeRegList(
- RegList registers, unsigned reg_size,
- CPURegister::RegisterType type = CPURegister::kRegister) {
- PushCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
+ inline void PushSizeRegList(RegList registers, unsigned reg_size) {
+ PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ }
+ template <StoreLRMode lr_mode = kDontStoreLR>
+ inline void PushSizeRegList(DoubleRegList registers, unsigned reg_size) {
+ PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ }
+ template <LoadLRMode lr_mode = kDontLoadLR>
+ inline void PopSizeRegList(RegList registers, unsigned reg_size) {
+ PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
}
template <LoadLRMode lr_mode = kDontLoadLR>
- inline void PopSizeRegList(
- RegList registers, unsigned reg_size,
- CPURegister::RegisterType type = CPURegister::kRegister) {
- PopCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
+ inline void PopSizeRegList(DoubleRegList registers, unsigned reg_size) {
+ PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
}
template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushXRegList(RegList regs) {
@@ -1781,23 +1784,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void PopWRegList(RegList regs) {
PopSizeRegList(regs, kWRegSizeInBits);
}
- inline void PushQRegList(RegList regs) {
- PushSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
+ inline void PushQRegList(DoubleRegList regs) {
+ PushSizeRegList(regs, kQRegSizeInBits);
}
- inline void PopQRegList(RegList regs) {
- PopSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
+ inline void PopQRegList(DoubleRegList regs) {
+ PopSizeRegList(regs, kQRegSizeInBits);
}
- inline void PushDRegList(RegList regs) {
- PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
+ inline void PushDRegList(DoubleRegList regs) {
+ PushSizeRegList(regs, kDRegSizeInBits);
}
- inline void PopDRegList(RegList regs) {
- PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
+ inline void PopDRegList(DoubleRegList regs) {
+ PopSizeRegList(regs, kDRegSizeInBits);
}
- inline void PushSRegList(RegList regs) {
- PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
+ inline void PushSRegList(DoubleRegList regs) {
+ PushSizeRegList(regs, kSRegSizeInBits);
}
- inline void PopSRegList(RegList regs) {
- PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
+ inline void PopSRegList(DoubleRegList regs) {
+ PopSizeRegList(regs, kSRegSizeInBits);
}
// Push the specified register 'count' times.
@@ -2155,8 +2158,8 @@ class V8_NODISCARD UseScratchRegisterScope {
explicit UseScratchRegisterScope(TurboAssembler* tasm)
: available_(tasm->TmpList()),
availablefp_(tasm->FPTmpList()),
- old_available_(available_->list()),
- old_availablefp_(availablefp_->list()) {
+ old_available_(available_->bits()),
+ old_availablefp_(availablefp_->bits()) {
DCHECK_EQ(available_->type(), CPURegister::kRegister);
DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
}
@@ -2206,8 +2209,8 @@ class V8_NODISCARD UseScratchRegisterScope {
CPURegList* availablefp_; // kVRegister
// The state of the available lists at the start of this scope.
- RegList old_available_; // kRegister
- RegList old_availablefp_; // kVRegister
+ uint64_t old_available_; // kRegister
+ uint64_t old_availablefp_; // kVRegister
};
} // namespace internal
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index ae6c4c9200..83fb23098a 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -6,8 +6,7 @@
#define V8_CODEGEN_ARM64_REGISTER_ARM64_H_
#include "src/codegen/arm64/utils-arm64.h"
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
#include "src/common/globals.h"
namespace v8 {
@@ -79,8 +78,6 @@ namespace internal {
R(d25) R(d26) R(d27) R(d28)
// clang-format on
-constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-
// Some CPURegister methods can return Register and VRegister types, so we
// need to declare them in advance.
class Register;
@@ -95,7 +92,7 @@ enum RegisterCode {
class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
public:
- enum RegisterType { kRegister, kVRegister, kNoRegister };
+ enum RegisterType : int8_t { kRegister, kVRegister, kNoRegister };
static constexpr CPURegister no_reg() {
return CPURegister{kCode_no_reg, 0, kNoRegister};
@@ -188,7 +185,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
bool IsSameSizeAndType(const CPURegister& other) const;
protected:
- int reg_size_;
+ uint8_t reg_size_;
RegisterType reg_type_;
#if defined(V8_OS_WIN) && !defined(__clang__)
@@ -224,6 +221,8 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(CPURegister);
+static_assert(sizeof(CPURegister) <= sizeof(int),
+ "CPURegister can efficiently be passed by value");
class Register : public CPURegister {
public:
@@ -250,6 +249,8 @@ class Register : public CPURegister {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) <= sizeof(int),
+ "Register can efficiently be passed by value");
// Stack frame alignment and padding.
constexpr int ArgumentPaddingSlots(int argument_count) {
@@ -259,7 +260,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return argument_count & alignment_mask;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
@@ -419,7 +420,7 @@ class VRegister : public CPURegister {
}
private:
- int lane_count_;
+ int8_t lane_count_;
constexpr explicit VRegister(const CPURegister& r, int lane_count)
: CPURegister(r), lane_count_(lane_count) {}
@@ -430,6 +431,8 @@ class VRegister : public CPURegister {
};
ASSERT_TRIVIALLY_COPYABLE(VRegister);
+static_assert(sizeof(VRegister) <= sizeof(int),
+ "VRegister can efficiently be passed by value");
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal. The Register and VRegister variants are provided for
@@ -543,152 +546,6 @@ using FloatRegister = VRegister;
using DoubleRegister = VRegister;
using Simd128Register = VRegister;
-// -----------------------------------------------------------------------------
-// Lists of registers.
-class V8_EXPORT_PRIVATE CPURegList {
- public:
- CPURegList() = default;
-
- template <typename... CPURegisters>
- explicit CPURegList(CPURegister reg0, CPURegisters... regs)
- : list_(CPURegister::ListOf(reg0, regs...)),
- size_(reg0.SizeInBits()),
- type_(reg0.type()) {
- DCHECK(AreSameSizeAndType(reg0, regs...));
- DCHECK(is_valid());
- }
-
- CPURegList(CPURegister::RegisterType type, int size, RegList list)
- : list_(list), size_(size), type_(type) {
- DCHECK(is_valid());
- }
-
- CPURegList(CPURegister::RegisterType type, int size, int first_reg,
- int last_reg)
- : size_(size), type_(type) {
- DCHECK(
- ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
- ((type == CPURegister::kVRegister) &&
- (last_reg < kNumberOfVRegisters)));
- DCHECK(last_reg >= first_reg);
- list_ = (1ULL << (last_reg + 1)) - 1;
- list_ &= ~((1ULL << first_reg) - 1);
- DCHECK(is_valid());
- }
-
- CPURegister::RegisterType type() const {
- return type_;
- }
-
- RegList list() const {
- return list_;
- }
-
- inline void set_list(RegList new_list) {
- list_ = new_list;
- DCHECK(is_valid());
- }
-
- // Combine another CPURegList into this one. Registers that already exist in
- // this list are left unchanged. The type and size of the registers in the
- // 'other' list must match those in this list.
- void Combine(const CPURegList& other);
-
- // Remove every register in the other CPURegList from this one. Registers that
- // do not exist in this list are ignored. The type of the registers in the
- // 'other' list must match those in this list.
- void Remove(const CPURegList& other);
-
- // Variants of Combine and Remove which take CPURegisters.
- void Combine(const CPURegister& other);
- void Remove(const CPURegister& other1, const CPURegister& other2 = NoCPUReg,
- const CPURegister& other3 = NoCPUReg,
- const CPURegister& other4 = NoCPUReg);
-
- // Variants of Combine and Remove which take a single register by its code;
- // the type and size of the register is inferred from this list.
- void Combine(int code);
- void Remove(int code);
-
- // Align the list to 16 bytes.
- void Align();
-
- CPURegister PopLowestIndex();
- CPURegister PopHighestIndex();
-
- // AAPCS64 callee-saved registers.
- static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
- static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits);
-
- // AAPCS64 caller-saved registers. Note that this includes lr.
- // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
- // 64-bits being caller-saved.
- static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
- static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
-
- bool IsEmpty() const {
- return list_ == 0;
- }
-
- bool IncludesAliasOf(const CPURegister& other1,
- const CPURegister& other2 = NoCPUReg,
- const CPURegister& other3 = NoCPUReg,
- const CPURegister& other4 = NoCPUReg) const {
- RegList list = 0;
- if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
- if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
- if (!other3.IsNone() && (other3.type() == type_)) list |= other3.bit();
- if (!other4.IsNone() && (other4.type() == type_)) list |= other4.bit();
- return (list_ & list) != 0;
- }
-
- int Count() const {
- return CountSetBits(list_, kRegListSizeInBits);
- }
-
- int RegisterSizeInBits() const {
- return size_;
- }
-
- int RegisterSizeInBytes() const {
- int size_in_bits = RegisterSizeInBits();
- DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
- return size_in_bits / kBitsPerByte;
- }
-
- int TotalSizeInBytes() const {
- return RegisterSizeInBytes() * Count();
- }
-
- private:
- RegList list_;
- int size_;
- CPURegister::RegisterType type_;
-
- bool is_valid() const {
- constexpr RegList kValidRegisters{0x8000000ffffffff};
- constexpr RegList kValidVRegisters{0x0000000ffffffff};
- switch (type_) {
- case CPURegister::kRegister:
- return (list_ & kValidRegisters) == list_;
- case CPURegister::kVRegister:
- return (list_ & kValidVRegisters) == list_;
- case CPURegister::kNoRegister:
- return list_ == 0;
- default:
- UNREACHABLE();
- }
- }
-};
-
-// AAPCS64 callee-saved registers.
-#define kCalleeSaved CPURegList::GetCalleeSaved()
-#define kCalleeSavedV CPURegList::GetCalleeSavedV()
-
-// AAPCS64 caller-saved registers. Note that this includes lr.
-#define kCallerSaved CPURegList::GetCallerSaved()
-#define kCallerSavedV CPURegList::GetCallerSavedV()
-
// Define a {RegisterName} method for {Register} and {VRegister}.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS)
diff --git a/deps/v8/src/codegen/arm64/reglist-arm64.h b/deps/v8/src/codegen/arm64/reglist-arm64.h
new file mode 100644
index 0000000000..9f29589098
--- /dev/null
+++ b/deps/v8/src/codegen/arm64/reglist-arm64.h
@@ -0,0 +1,176 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM64_REGLIST_ARM64_H_
+#define V8_CODEGEN_ARM64_REGLIST_ARM64_H_
+
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class V8_EXPORT_PRIVATE CPURegList {
+ public:
+ template <typename... CPURegisters>
+ explicit CPURegList(CPURegister reg0, CPURegisters... regs)
+ : list_(base::fold(
+ [](uint64_t acc, CPURegister v) {
+ if (!v.is_valid()) return acc;
+ return acc | (uint64_t{1} << v.code());
+ },
+ 0, reg0, regs...)),
+ size_(reg0.SizeInBits()),
+ type_(reg0.type()) {
+ DCHECK(AreSameSizeAndType(reg0, regs...));
+ DCHECK(is_valid());
+ }
+
+ CPURegList(int size, RegList list)
+ : list_(list.bits()), size_(size), type_(CPURegister::kRegister) {
+ DCHECK(is_valid());
+ }
+
+ CPURegList(int size, DoubleRegList list)
+ : list_(list.bits()), size_(size), type_(CPURegister::kVRegister) {
+ DCHECK(is_valid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, int size, int first_reg,
+ int last_reg)
+ : size_(size), type_(type) {
+ DCHECK(
+ ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
+ DCHECK(last_reg >= first_reg);
+ list_ = (1ULL << (last_reg + 1)) - 1;
+ list_ &= ~((1ULL << first_reg) - 1);
+ DCHECK(is_valid());
+ }
+
+ CPURegister::RegisterType type() const { return type_; }
+
+ uint64_t bits() const { return list_; }
+
+ inline void set_bits(uint64_t new_bits) {
+ list_ = new_bits;
+ DCHECK(is_valid());
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type of the registers in the
+ // 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take CPURegisters.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other1, const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Align the list to 16 bytes.
+ void Align();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+ // 64-bits being caller-saved.
+ static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
+
+ bool IsEmpty() const { return list_ == 0; }
+
+ bool IncludesAliasOf(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg) const {
+ uint64_t list = 0;
+ if (!other1.IsNone() && (other1.type() == type_)) {
+ list |= (uint64_t{1} << other1.code());
+ }
+ if (!other2.IsNone() && (other2.type() == type_)) {
+ list |= (uint64_t{1} << other2.code());
+ }
+ if (!other3.IsNone() && (other3.type() == type_)) {
+ list |= (uint64_t{1} << other3.code());
+ }
+ if (!other4.IsNone() && (other4.type() == type_)) {
+ list |= (uint64_t{1} << other4.code());
+ }
+ return (list_ & list) != 0;
+ }
+
+ int Count() const { return CountSetBits(list_, kRegListSizeInBits); }
+
+ int RegisterSizeInBits() const { return size_; }
+
+ int RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ int TotalSizeInBytes() const { return RegisterSizeInBytes() * Count(); }
+
+ private:
+ uint64_t list_;
+ int size_;
+ CPURegister::RegisterType type_;
+
+ bool is_valid() const {
+ constexpr uint64_t kValidRegisters{0x8000000ffffffff};
+ constexpr uint64_t kValidVRegisters{0x0000000ffffffff};
+ switch (type_) {
+ case CPURegister::kRegister:
+ return (list_ & kValidRegisters) == list_;
+ case CPURegister::kVRegister:
+ return (list_ & kValidVRegisters) == list_;
+ case CPURegister::kNoRegister:
+ return list_ == 0;
+ default:
+ UNREACHABLE();
+ }
+ }
+};
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedV CPURegList::GetCalleeSavedV()
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedV CPURegList::GetCallerSavedV()
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ARM64_REGLIST_ARM64_H_
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index dd5c8b2d9a..4ec93befb4 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -72,14 +72,12 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
#endif
options.inline_offheap_trampolines &= !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- const base::AddressRegion& code_range = isolate->heap()->code_region();
- DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
- options.code_range_start = code_range.begin();
+ options.code_range_base = isolate->heap()->code_range_base();
#endif
options.short_builtin_calls =
isolate->is_short_builtin_calls_enabled() &&
!generating_embedded_builtin &&
- (options.code_range_start != kNullAddress) &&
+ (options.code_range_base != kNullAddress) &&
// Serialization of RUNTIME_ENTRY reloc infos is not supported yet.
!serializer;
return options;
@@ -224,6 +222,7 @@ CpuFeatureScope::~CpuFeatureScope() {
bool CpuFeatures::initialized_ = false;
bool CpuFeatures::supports_wasm_simd_128_ = false;
+bool CpuFeatures::supports_cetss_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
@@ -270,7 +269,7 @@ void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
heap_object_requests_.push_front(request);
}
-int AssemblerBase::AddCodeTarget(Handle<Code> target) {
+int AssemblerBase::AddCodeTarget(Handle<CodeT> target) {
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@@ -282,7 +281,7 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
}
-Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+Handle<CodeT> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
return code_targets_[code_target_index];
}
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 6519520278..3fb302be7d 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -175,9 +175,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// instructions. For example, when the bultins code is re-embedded into the
// code range.
bool short_builtin_calls = false;
- // On some platforms, all code is within a given range in the process,
- // and the start of this range is configured here.
- Address code_range_start = 0;
+ // On some platforms, all code is created within a certain address range in
+ // the process, and the base of this code range is configured here.
+ Address code_range_base = 0;
// Enable pc-relative calls/jumps on platforms that support it. When setting
// this flag, the code range must be small enough to fit all offsets into
// the instruction immediates.
@@ -353,8 +353,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
- int AddCodeTarget(Handle<Code> target);
- Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
+ int AddCodeTarget(Handle<CodeT> target);
+ Handle<CodeT> GetCodeTarget(intptr_t code_target_index) const;
// Add 'object' to the {embedded_objects_} vector and return the index at
// which it is stored.
@@ -409,7 +409,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// guaranteed to fit in the instruction's offset field. We keep track of the
// code handles we encounter in calls in this vector, and encode the index of
// the code handle in the vector instead.
- std::vector<Handle<Code>> code_targets_;
+ std::vector<Handle<CodeT>> code_targets_;
// If an assembler needs a small number to refer to a heap object handle
// (for example, because there are only 32bit available on a 64bit arch), the
diff --git a/deps/v8/src/codegen/callable.h b/deps/v8/src/codegen/callable.h
index 49ee70717e..79a70514af 100644
--- a/deps/v8/src/codegen/callable.h
+++ b/deps/v8/src/codegen/callable.h
@@ -16,14 +16,14 @@ class Code;
// Associates a body of code with an interface descriptor.
class Callable final {
public:
- Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
+ Callable(Handle<CodeT> code, CallInterfaceDescriptor descriptor)
: code_(code), descriptor_(descriptor) {}
- Handle<Code> code() const { return code_; }
+ Handle<CodeT> code() const { return code_; }
CallInterfaceDescriptor descriptor() const { return descriptor_; }
private:
- const Handle<Code> code_;
+ const Handle<CodeT> code_;
const CallInterfaceDescriptor descriptor_;
};
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 494f23de76..c611445512 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
// static
-Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
+Handle<CodeT> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
return CodeFactory::CEntry(isolate, result_size);
}
@@ -22,9 +22,9 @@ Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
BUILTIN_CODE(isolate, CEntry_##RS##_##SD##_##AM##_##BE)
// static
-Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
- SaveFPRegsMode save_doubles,
- ArgvMode argv_mode, bool builtin_exit_frame) {
+Handle<CodeT> CodeFactory::CEntry(Isolate* isolate, int result_size,
+ SaveFPRegsMode save_doubles,
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Aliases for readability below.
const int rs = result_size;
const SaveFPRegsMode sd = save_doubles;
@@ -95,12 +95,12 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
Builtin::kLoadGlobalICInsideTypeof);
}
-Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtin::kStoreOwnICTrampoline);
+Callable CodeFactory::DefineNamedOwnIC(Isolate* isolate) {
+ return Builtins::CallableFor(isolate, Builtin::kDefineNamedOwnICTrampoline);
}
-Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtin::kStoreOwnIC);
+Callable CodeFactory::DefineNamedOwnICInOptimizedCode(Isolate* isolate) {
+ return Builtins::CallableFor(isolate, Builtin::kDefineNamedOwnIC);
}
// static
@@ -281,7 +281,7 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to
// save fpregs too.
- Handle<Code> code = CodeFactory::CEntry(
+ Handle<CodeT> code = CodeFactory::CEntry(
isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister);
if (result_size == 1) {
return Callable(code, InterpreterCEntry1Descriptor{});
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 05b27bef0e..937ad2e5b4 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -26,9 +26,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// stack and the arguments count is passed via register) which currently
// can't be expressed in CallInterfaceDescriptor. Therefore only the code
// is exported here.
- static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
+ static Handle<CodeT> RuntimeCEntry(Isolate* isolate, int result_size = 1);
- static Handle<Code> CEntry(
+ static Handle<CodeT> CEntry(
Isolate* isolate, int result_size = 1,
SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
@@ -37,8 +37,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
- static Callable StoreOwnIC(Isolate* isolate);
- static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
+ static Callable DefineNamedOwnIC(Isolate* isolate);
+ static Callable DefineNamedOwnICInOptimizedCode(Isolate* isolate);
static Callable ResumeGenerator(Isolate* isolate);
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index db50f7d3e4..c90b644a3c 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -17,6 +17,7 @@
#include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop.
#include "src/heap/memory-chunk.h"
#include "src/logging/counters.h"
+#include "src/numbers/integer-literal-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
@@ -1246,15 +1247,9 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
- if (FLAG_young_generation_large_objects) {
- result =
- CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
- SmiTag(size_in_bytes), runtime_flags);
- } else {
- result =
- CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
- SmiTag(size_in_bytes), runtime_flags);
- }
+ result =
+ CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
Goto(&out);
BIND(&next);
@@ -1347,10 +1342,10 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
top_address, limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
+// TODO(ishell, v8:8875): Consider using aligned allocations once the
+// allocation alignment inconsistency is fixed. For now we keep using
+// unaligned access since both x64 and arm64 architectures (where pointer
+// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
return AllocateRaw(size_in_bytes, flags & ~AllocationFlag::kDoubleAlignment,
@@ -1375,10 +1370,6 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
bool const new_space = !(flags & AllocationFlag::kPretenured);
bool const allow_large_objects =
flags & AllocationFlag::kAllowLargeObjectAllocation;
- // For optimized allocations, we don't allow the allocation to happen in a
- // different generation than requested.
- bool const always_allocated_in_requested_space =
- !new_space || !allow_large_objects || FLAG_young_generation_large_objects;
if (!allow_large_objects) {
intptr_t size_constant;
if (TryToIntPtrConstant(size_in_bytes, &size_constant)) {
@@ -1387,8 +1378,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
- if (!(flags & AllocationFlag::kDoubleAlignment) &&
- always_allocated_in_requested_space) {
+ if (!(flags & AllocationFlag::kDoubleAlignment)) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
@@ -1540,106 +1530,97 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
}
}
-TNode<RawPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
+TNode<RawPtrT> CodeStubAssembler::LoadSandboxedPointerFromObject(
TNode<HeapObject> object, TNode<IntPtrT> field_offset) {
-#ifdef V8_CAGED_POINTERS
+#ifdef V8_SANDBOXED_POINTERS
return ReinterpretCast<RawPtrT>(
- LoadObjectField<CagedPtrT>(object, field_offset));
+ LoadObjectField<SandboxedPtrT>(object, field_offset));
#else
return LoadObjectField<RawPtrT>(object, field_offset);
-#endif // V8_CAGED_POINTERS
-}
-
-void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset,
- TNode<RawPtrT> pointer) {
-#ifdef V8_CAGED_POINTERS
- TNode<CagedPtrT> caged_pointer = ReinterpretCast<CagedPtrT>(pointer);
-#ifdef DEBUG
- // Verify pointer points into the cage.
- TNode<ExternalReference> cage_base_address =
- ExternalConstant(ExternalReference::virtual_memory_cage_base_address());
- TNode<ExternalReference> cage_end_address =
- ExternalConstant(ExternalReference::virtual_memory_cage_end_address());
- TNode<UintPtrT> cage_base = Load<UintPtrT>(cage_base_address);
- TNode<UintPtrT> cage_end = Load<UintPtrT>(cage_end_address);
- CSA_DCHECK(this, UintPtrGreaterThanOrEqual(caged_pointer, cage_base));
- CSA_DCHECK(this, UintPtrLessThan(caged_pointer, cage_end));
-#endif // DEBUG
- StoreObjectFieldNoWriteBarrier<CagedPtrT>(object, offset, caged_pointer);
+#endif // V8_SANDBOXED_POINTERS
+}
+
+void CodeStubAssembler::StoreSandboxedPointerToObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer) {
+#ifdef V8_SANDBOXED_POINTERS
+ TNode<SandboxedPtrT> sbx_ptr = ReinterpretCast<SandboxedPtrT>(pointer);
+
+ // Ensure pointer points into the sandbox.
+ TNode<ExternalReference> sandbox_base_address =
+ ExternalConstant(ExternalReference::sandbox_base_address());
+ TNode<ExternalReference> sandbox_end_address =
+ ExternalConstant(ExternalReference::sandbox_end_address());
+ TNode<UintPtrT> sandbox_base = Load<UintPtrT>(sandbox_base_address);
+ TNode<UintPtrT> sandbox_end = Load<UintPtrT>(sandbox_end_address);
+ CSA_CHECK(this, UintPtrGreaterThanOrEqual(sbx_ptr, sandbox_base));
+ CSA_CHECK(this, UintPtrLessThan(sbx_ptr, sandbox_end));
+
+ StoreObjectFieldNoWriteBarrier<SandboxedPtrT>(object, offset, sbx_ptr);
#else
StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
-#endif // V8_CAGED_POINTERS
+#endif // V8_SANDBOXED_POINTERS
}
TNode<RawPtrT> CodeStubAssembler::EmptyBackingStoreBufferConstant() {
-#ifdef V8_CAGED_POINTERS
- // TODO(chromium:1218005) consider creating a LoadCagedPointerConstant() if
- // more of these constants are required later on.
+#ifdef V8_SANDBOXED_POINTERS
+ // TODO(chromium:1218005) consider creating a LoadSandboxedPointerConstant()
+ // if more of these constants are required later on.
TNode<ExternalReference> empty_backing_store_buffer =
ExternalConstant(ExternalReference::empty_backing_store_buffer());
return Load<RawPtrT>(empty_backing_store_buffer);
#else
return ReinterpretCast<RawPtrT>(IntPtrConstant(0));
-#endif // V8_CAGED_POINTERS
+#endif // V8_SANDBOXED_POINTERS
}
-TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
- TNode<Uint32T> value) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- return ReinterpretCast<ExternalPointerT>(ChangeUint32ToWord(value));
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+TNode<ExternalPointerT> CodeStubAssembler::ChangeIndexToExternalPointer(
+ TNode<Uint32T> index) {
+ DCHECK_EQ(kExternalPointerSize, kUInt32Size);
+ TNode<Uint32T> shifted_index =
+ Word32Shl(index, Uint32Constant(kExternalPointerIndexShift));
+ return ReinterpretCast<ExternalPointerT>(shifted_index);
}
-TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToUint32(
- TNode<ExternalPointerT> value) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- return Unsigned(TruncateWordToInt32(ReinterpretCast<UintPtrT>(value)));
+TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToIndex(
+ TNode<ExternalPointerT> external_pointer) {
+ DCHECK_EQ(kExternalPointerSize, kUInt32Size);
+ TNode<Uint32T> shifted_index = ReinterpretCast<Uint32T>(external_pointer);
+ return Word32Shr(shifted_index, Uint32Constant(kExternalPointerIndexShift));
}
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
TNode<IntPtrT> offset) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
ExternalReference::external_pointer_table_address(isolate()));
- TNode<Uint32T> table_length = UncheckedCast<Uint32T>(
- Load(MachineType::Uint32(), external_pointer_table_address,
- UintPtrConstant(Internals::kExternalPointerTableLengthOffset)));
- TNode<Uint32T> table_capacity = UncheckedCast<Uint32T>(
- Load(MachineType::Uint32(), external_pointer_table_address,
- UintPtrConstant(Internals::kExternalPointerTableCapacityOffset)));
-
- Label grow_table(this, Label::kDeferred), finish(this);
- TNode<BoolT> compare = Uint32LessThan(table_length, table_capacity);
- Branch(compare, &finish, &grow_table);
-
- BIND(&grow_table);
- {
- TNode<ExternalReference> table_grow_function = ExternalConstant(
- ExternalReference::external_pointer_table_grow_table_function());
- CallCFunction(
- table_grow_function, MachineType::Pointer(),
- std::make_pair(MachineType::Pointer(), external_pointer_table_address));
- Goto(&finish);
- }
- BIND(&finish);
-
- TNode<Uint32T> new_table_length = Uint32Add(table_length, Uint32Constant(1));
- StoreNoWriteBarrier(
- MachineRepresentation::kWord32, external_pointer_table_address,
- UintPtrConstant(Internals::kExternalPointerTableLengthOffset),
- new_table_length);
-
- TNode<Uint32T> index = table_length;
- TNode<ExternalPointerT> encoded = ChangeUint32ToExternalPointer(index);
- StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, encoded);
+ // We could implement the fast path for allocating from the freelist here,
+ // however, this logic needs to be atomic and so requires CSA to expose
+ // atomic operations.
+ TNode<ExternalReference> table_allocate_function = ExternalConstant(
+ ExternalReference::external_pointer_table_allocate_entry());
+ TNode<Uint32T> index = UncheckedCast<Uint32T>(CallCFunction(
+ table_allocate_function, MachineType::Uint32(),
+ std::make_pair(MachineType::Pointer(), external_pointer_table_address)));
+
+ // Currently, we assume that the caller will immediately initialize the entry
+ // through StoreExternalPointerToObject after allocating it. That way, we
+ // avoid initializing the entry twice (once with nullptr, then again with the
+ // real value). TODO(saelo) initialize the entry with zero here and switch
+ // callers to a version that initializes the entry with a given pointer.
+
+ TNode<ExternalPointerT> pointer = ChangeIndexToExternalPointer(index);
+ StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, pointer);
#endif
}
TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
TNode<HeapObject> object, TNode<IntPtrT> offset,
ExternalPointerTag external_pointer_tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
ExternalReference::external_pointer_table_address(isolate()));
TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
@@ -1648,8 +1629,9 @@ TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
TNode<ExternalPointerT> encoded =
LoadObjectField<ExternalPointerT>(object, offset);
- TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
- // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<Uint32T> index = ChangeExternalPointerToIndex(encoded);
+ // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
+ // that does one shift right instead of two shifts (right and then left).
TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
@@ -1661,13 +1643,13 @@ TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
#else
return LoadObjectField<RawPtrT>(object, offset);
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
void CodeStubAssembler::StoreExternalPointerToObject(
TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<RawPtrT> pointer,
ExternalPointerTag external_pointer_tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
ExternalReference::external_pointer_table_address(isolate()));
TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
@@ -1676,8 +1658,9 @@ void CodeStubAssembler::StoreExternalPointerToObject(
TNode<ExternalPointerT> encoded =
LoadObjectField<ExternalPointerT>(object, offset);
- TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
- // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<Uint32T> index = ChangeExternalPointerToIndex(encoded);
+ // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
+ // that does one shift right instead of two shifts (right and then left).
TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
@@ -1690,7 +1673,7 @@ void CodeStubAssembler::StoreExternalPointerToObject(
value);
#else
StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
@@ -2076,7 +2059,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
TNode<Uint32T> CodeStubAssembler::LoadNameHashAssumeComputed(TNode<Name> name) {
TNode<Uint32T> hash_field = LoadNameRawHashField(name);
CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask));
- return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
+ return DecodeWord32<Name::HashBits>(hash_field);
}
TNode<Uint32T> CodeStubAssembler::LoadNameHash(TNode<Name> name,
@@ -2086,7 +2069,7 @@ TNode<Uint32T> CodeStubAssembler::LoadNameHash(TNode<Name> name,
GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask),
if_hash_not_computed);
}
- return Unsigned(Word32Shr(raw_hash_field, Int32Constant(Name::kHashShift)));
+ return DecodeWord32<Name::HashBits>(raw_hash_field);
}
TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(TNode<String> string) {
@@ -2985,13 +2968,16 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
&check_for_interpreter_data);
{
- TNode<Code> code = FromCodeT(CAST(var_result.value()));
+ TNode<CodeT> code = CAST(var_result.value());
+#ifdef DEBUG
+ TNode<Int32T> code_flags =
+ LoadObjectField<Int32T>(code, CodeT::kFlagsOffset);
CSA_DCHECK(
- this, Word32Equal(DecodeWord32<Code::KindField>(LoadObjectField<Int32T>(
- code, Code::kFlagsOffset)),
+ this, Word32Equal(DecodeWord32<CodeT::KindField>(code_flags),
Int32Constant(static_cast<int>(CodeKind::BASELINE))));
+#endif // DEBUG
TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
- code, Code::kDeoptimizationDataOrInterpreterDataOffset);
+ FromCodeT(code), Code::kDeoptimizationDataOrInterpreterDataOffset);
var_result = baseline_data;
}
Goto(&check_for_interpreter_data);
@@ -3056,6 +3042,25 @@ void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
object, offset, value);
}
+void CodeStubAssembler::StoreJSSharedStructInObjectField(
+ TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<Object> value) {
+ CSA_DCHECK(this, IsJSSharedStruct(object));
+ // JSSharedStructs are allocated in the shared old space, which is currently
+ // collected by stopping the world, so the incremental write barrier is not
+ // needed. They can only store Smis and other HeapObjects in the shared old
+ // space, so the generational write barrier is also not needed.
+ // TODO(v8:12547): Add a safer, shared variant of NoWriteBarrier instead of
+ // using Unsafe.
+ int const_offset;
+ if (TryToInt32Constant(offset, &const_offset)) {
+ UnsafeStoreObjectFieldNoWriteBarrier(object, const_offset, value);
+ } else {
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object,
+ IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
+ value);
+ }
+}
+
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
DcheckHasValidMap(object);
@@ -4370,7 +4375,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
TVARIABLE(Map, var_target_map, source_map);
Label done(this, {&var_result}), is_cow(this),
- new_space_check(this, {&var_target_map});
+ new_space_handler(this, {&var_target_map});
// If source_map is either FixedDoubleArrayMap, or FixedCOWArrayMap but
// we can't just use COW, use FixedArrayMap as the target map. Otherwise, use
@@ -4378,11 +4383,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
if (IsDoubleElementsKind(from_kind)) {
CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
var_target_map = FixedArrayMapConstant();
- Goto(&new_space_check);
+ Goto(&new_space_handler);
} else {
CSA_DCHECK(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
Branch(TaggedEqual(var_target_map.value(), FixedCOWArrayMapConstant()),
- &is_cow, &new_space_check);
+ &is_cow, &new_space_handler);
BIND(&is_cow);
{
@@ -4392,34 +4397,19 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// 2) we're asked to extract only part of the |source| (|first| != 0).
if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), first),
- &new_space_check, [&] {
+ &new_space_handler, [&] {
var_result = source;
Goto(&done);
});
} else {
var_target_map = FixedArrayMapConstant();
- Goto(&new_space_check);
+ Goto(&new_space_handler);
}
}
}
- BIND(&new_space_check);
+ BIND(&new_space_handler);
{
- bool handle_old_space = !FLAG_young_generation_large_objects;
- if (handle_old_space) {
- int constant_count;
- handle_old_space =
- !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
- (constant_count >
- FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
- }
-
- Label old_space(this, Label::kDeferred);
- if (handle_old_space) {
- GotoIfFixedArraySizeDoesntFitInNewSpace(capacity, &old_space,
- FixedArray::kHeaderSize);
- }
-
Comment("Copy FixedArray in young generation");
// We use PACKED_ELEMENTS to tell AllocateFixedArray and
// CopyFixedArrayElements that we want a FixedArray.
@@ -4459,50 +4449,6 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
var_holes_converted);
}
Goto(&done);
-
- if (handle_old_space) {
- BIND(&old_space);
- {
- Comment("Copy FixedArray in old generation");
- Label copy_one_by_one(this);
-
- // Try to use memcpy if we don't need to convert holes to undefined.
- if (convert_holes == HoleConversionMode::kDontConvert &&
- source_elements_kind) {
- // Only try memcpy if we're not copying object pointers.
- GotoIfNot(IsFastSmiElementsKind(*source_elements_kind),
- &copy_one_by_one);
-
- const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS;
- to_elements = AllocateFixedArray(
- to_smi_kind, capacity, allocation_flags, var_target_map.value());
- var_result = to_elements;
-
- FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity,
- RootIndex::kTheHoleValue);
- // CopyElements will try to use memcpy if it's not conflicting with
- // GC. Otherwise it will copy elements by elements, but skip write
- // barriers (since we're copying smis to smis).
- CopyElements(to_smi_kind, to_elements, IntPtrConstant(0), source,
- ParameterToIntPtr(first), ParameterToIntPtr(count),
- SKIP_WRITE_BARRIER);
- Goto(&done);
- } else {
- Goto(&copy_one_by_one);
- }
-
- BIND(&copy_one_by_one);
- {
- to_elements = AllocateFixedArray(to_kind, capacity, allocation_flags,
- var_target_map.value());
- var_result = to_elements;
- CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
- count, capacity, UPDATE_WRITE_BARRIER,
- convert_holes, var_holes_converted);
- Goto(&done);
- }
- }
- }
}
BIND(&done);
@@ -5530,8 +5476,8 @@ TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(TNode<Context> context,
TNode<Object> value) {
TVARIABLE(Word32T, var_result);
Label done(this);
- TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
- &done, &var_result);
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(
+ context, value, &done, &var_result, IsKnownTaggedPointer::kNo);
BIND(&done);
return var_result.value();
}
@@ -5543,7 +5489,8 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(
TVariable<Word32T>* var_word32, Label* if_bigint,
TVariable<BigInt>* var_maybe_bigint) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_maybe_bigint);
+ context, value, if_number, var_word32, IsKnownTaggedPointer::kNo,
+ if_bigint, var_maybe_bigint);
}
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
@@ -5554,14 +5501,27 @@ void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
TVariable<Word32T>* var_word32, Label* if_bigint,
TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_maybe_bigint,
- var_feedback);
+ context, value, if_number, var_word32, IsKnownTaggedPointer::kNo,
+ if_bigint, var_maybe_bigint, var_feedback);
+}
+
+// Truncate {pointer} to word32 and jump to {if_number} if it is a Number,
+// or find that it is a BigInt and jump to {if_bigint}. In either case,
+// store the type feedback in {var_feedback}.
+void CodeStubAssembler::TaggedPointerToWord32OrBigIntWithFeedback(
+ TNode<Context> context, TNode<HeapObject> pointer, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
+ context, pointer, if_number, var_word32, IsKnownTaggedPointer::kYes,
+ if_bigint, var_maybe_bigint, var_feedback);
}
template <Object::Conversion conversion>
void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
TNode<Context> context, TNode<Object> value, Label* if_number,
- TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Word32T>* var_word32,
+ IsKnownTaggedPointer is_known_tagged_pointer, Label* if_bigint,
TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
// We might need to loop after conversion.
TVARIABLE(Object, var_value, value);
@@ -5569,20 +5529,22 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
VariableList loop_vars({&var_value}, zone());
if (var_feedback != nullptr) loop_vars.push_back(var_feedback);
Label loop(this, loop_vars);
- Goto(&loop);
- BIND(&loop);
- {
- value = var_value.value();
- Label not_smi(this), is_heap_number(this), is_oddball(this),
- is_bigint(this);
- GotoIf(TaggedIsNotSmi(value), &not_smi);
+ if (is_known_tagged_pointer == IsKnownTaggedPointer::kNo) {
+ GotoIf(TaggedIsNotSmi(value), &loop);
// {value} is a Smi.
*var_word32 = SmiToInt32(CAST(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
+ } else {
+ Goto(&loop);
+ }
+ BIND(&loop);
+ {
+ value = var_value.value();
+ Label not_smi(this), is_heap_number(this), is_oddball(this),
+ is_bigint(this), check_if_smi(this);
- BIND(&not_smi);
TNode<HeapObject> value_heap_object = CAST(value);
TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &is_heap_number);
@@ -5607,13 +5569,13 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
: Builtin::kNonNumberToNumber;
var_value = CallBuiltin(builtin, context, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
- Goto(&loop);
+ Goto(&check_if_smi);
BIND(&is_oddball);
var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
OverwriteFeedback(var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
- Goto(&loop);
+ Goto(&check_if_smi);
}
BIND(&is_heap_number);
@@ -5627,6 +5589,15 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
+
+ BIND(&check_if_smi);
+ value = var_value.value();
+ GotoIf(TaggedIsNotSmi(value), &loop);
+
+ // {value} is a Smi.
+ *var_word32 = SmiToInt32(CAST(value));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
+ Goto(if_number);
}
}
@@ -5789,6 +5760,18 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(TNode<Int32T> value) {
return var_result.value();
}
+TNode<Number> CodeStubAssembler::ChangeInt32ToTaggedNoOverflow(
+ TNode<Int32T> value) {
+ if (SmiValuesAre32Bits()) {
+ return SmiTag(ChangeInt32ToIntPtr(value));
+ }
+ DCHECK(SmiValuesAre31Bits());
+ TNode<Int32T> result_int32 = Int32Add(value, value);
+ TNode<IntPtrT> almost_tagged_value = ChangeInt32ToIntPtr(result_int32);
+ TNode<Smi> result = BitcastWordToTaggedSigned(almost_tagged_value);
+ return result;
+}
+
TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(TNode<Uint32T> value) {
Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
if_join(this);
@@ -6381,6 +6364,11 @@ TNode<BoolT> CodeStubAssembler::IsStringInstanceType(
return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
}
+TNode<BoolT> CodeStubAssembler::IsTemporalInstantInstanceType(
+ TNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, JS_TEMPORAL_INSTANT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
TNode<Int32T> instance_type) {
CSA_DCHECK(this, IsStringInstanceType(instance_type));
@@ -6554,6 +6542,10 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(TNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::IsJSWrappedFunction(TNode<HeapObject> object) {
+ return HasInstanceType(object, JS_WRAPPED_FUNCTION_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
@@ -6571,6 +6563,28 @@ TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_ITERATOR_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSSharedStructInstanceType(
+ TNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, JS_SHARED_STRUCT_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSSharedStructMap(TNode<Map> map) {
+ return IsJSSharedStructInstanceType(LoadMapInstanceType(map));
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSSharedStruct(TNode<HeapObject> object) {
+ return IsJSSharedStructMap(LoadMap(object));
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSSharedStruct(TNode<Object> object) {
+ return Select<BoolT>(
+ TaggedIsSmi(object), [=] { return Int32FalseConstant(); },
+ [=] {
+ TNode<HeapObject> heap_object = CAST(object);
+ return IsJSSharedStruct(heap_object);
+ });
+}
+
TNode<BoolT> CodeStubAssembler::IsJSAsyncGeneratorObject(
TNode<HeapObject> object) {
return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -6699,6 +6713,19 @@ TNode<BoolT> CodeStubAssembler::IsInternalizedStringInstanceType(
Int32Constant(kStringTag | kInternalizedTag));
}
+TNode<BoolT> CodeStubAssembler::IsSharedStringInstanceType(
+ TNode<Int32T> instance_type) {
+ TNode<BoolT> is_shared = Word32Equal(
+ Word32And(instance_type,
+ Int32Constant(kIsNotStringMask | kSharedStringMask)),
+ Int32Constant(kStringTag | kSharedStringTag));
+ // TODO(v8:12007): Internalized strings do not have kSharedStringTag until
+ // the shared string table ships.
+ return Word32Or(is_shared,
+ Word32And(HasSharedStringTableFlag(),
+ IsInternalizedStringInstanceType(instance_type)));
+}
+
TNode<BoolT> CodeStubAssembler::IsUniqueName(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return Select<BoolT>(
@@ -6716,8 +6743,9 @@ TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
return Select<BoolT>(
IsInternalizedStringInstanceType(instance_type),
[=] {
- return IsSetWord32(LoadNameRawHashField(CAST(object)),
- Name::kIsNotIntegerIndexMask);
+ return IsNotEqualInWord32<Name::HashFieldTypeBits>(
+ LoadNameRawHashField(CAST(object)),
+ Name::HashFieldType::kIntegerIndex);
},
[=] { return IsSymbolInstanceType(instance_type); });
}
@@ -6800,8 +6828,9 @@ TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(TNode<HeapObject> object) {
TNode<BoolT> CodeStubAssembler::IsFunctionInstanceType(
TNode<Int32T> instance_type) {
- return IsInRange(instance_type, FIRST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE,
- LAST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE);
+ return IsInRange(instance_type,
+ FIRST_JS_FUNCTION_OR_BOUND_FUNCTION_OR_WRAPPED_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_OR_BOUND_FUNCTION_OR_WRAPPED_FUNCTION_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
TNode<Int32T> instance_type) {
@@ -6971,6 +7000,15 @@ TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(TNode<Number> number) {
[=] { return IsHeapNumberUint32(CAST(number)); });
}
+TNode<IntPtrT> CodeStubAssembler::LoadBasicMemoryChunkFlags(
+ TNode<HeapObject> object) {
+ TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
+ TNode<IntPtrT> page = PageFromAddress(object_word);
+ return UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), page,
+ IntPtrConstant(BasicMemoryChunk::kFlagsOffset)));
+}
+
template <typename TIndex>
TNode<BoolT> CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(
TNode<TIndex> element_count, int base_size) {
@@ -8041,7 +8079,8 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
&if_has_cached_index);
// No cached array index. If the string knows that it contains an index,
// then it must be an uncacheable index. Handle this case in the runtime.
- GotoIf(IsClearWord32(raw_hash_field, Name::kIsNotIntegerIndexMask),
+ GotoIf(IsEqualInWord32<Name::HashFieldTypeBits>(
+ raw_hash_field, Name::HashFieldType::kIntegerIndex),
if_bailout);
GotoIf(InstanceTypeEqual(var_instance_type.value(), THIN_STRING_TYPE),
@@ -8451,12 +8490,34 @@ TNode<HeapObject> CodeStubAssembler::LoadName<GlobalDictionary>(
return CAST(LoadObjectField(property_cell, PropertyCell::kNameOffset));
}
+template <>
+TNode<HeapObject> CodeStubAssembler::LoadName<NameToIndexHashTable>(
+ TNode<HeapObject> key) {
+ CSA_DCHECK(this, IsName(key));
+ return key;
+}
+
+// The implementation should be in sync with NameToIndexHashTable::Lookup.
+TNode<IntPtrT> CodeStubAssembler::NameToIndexHashTableLookup(
+ TNode<NameToIndexHashTable> table, TNode<Name> name, Label* not_found) {
+ TVARIABLE(IntPtrT, var_entry);
+ Label index_found(this, {&var_entry});
+ NameDictionaryLookup<NameToIndexHashTable>(table, name, &index_found,
+ &var_entry, not_found,
+ LookupMode::kFindExisting);
+ BIND(&index_found);
+ TNode<Smi> value =
+ CAST(LoadValueByKeyIndex<NameToIndexHashTable>(table, var_entry.value()));
+ return SmiToIntPtr(value);
+}
+
template <typename Dictionary>
void CodeStubAssembler::NameDictionaryLookup(
TNode<Dictionary> dictionary, TNode<Name> unique_name, Label* if_found,
TVariable<IntPtrT>* var_name_index, Label* if_not_found, LookupMode mode) {
static_assert(std::is_same<Dictionary, NameDictionary>::value ||
- std::is_same<Dictionary, GlobalDictionary>::value,
+ std::is_same<Dictionary, GlobalDictionary>::value ||
+ std::is_same<Dictionary, NameToIndexHashTable>::value,
"Unexpected NameDictionary");
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
DCHECK_IMPLIES(mode == kFindInsertionIndex, if_found == nullptr);
@@ -9116,7 +9177,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
var_value = CallGetterIfAccessor(
var_value.value(), object, var_details.value(), context, object,
- next_key, &slow_load, kCallJSGetter);
+ next_key, &slow_load, kCallJSGetterUseCachedName);
Goto(&callback);
BIND(&slow_load);
@@ -9219,6 +9280,70 @@ TNode<NativeContext> CodeStubAssembler::GetCreationContext(
return native_context;
}
+TNode<NativeContext> CodeStubAssembler::GetFunctionRealm(
+ TNode<Context> context, TNode<JSReceiver> receiver, Label* if_bailout) {
+ TVARIABLE(JSReceiver, current);
+ Label loop(this, VariableList({&current}, zone())), is_proxy(this),
+ is_function(this), is_bound_function(this), is_wrapped_function(this),
+ proxy_revoked(this, Label::kDeferred);
+ CSA_DCHECK(this, IsCallable(receiver));
+ current = receiver;
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ TNode<JSReceiver> current_value = current.value();
+ GotoIf(IsJSProxy(current_value), &is_proxy);
+ GotoIf(IsJSFunction(current_value), &is_function);
+ GotoIf(IsJSBoundFunction(current_value), &is_bound_function);
+ GotoIf(IsJSWrappedFunction(current_value), &is_wrapped_function);
+ Goto(if_bailout);
+ }
+
+ BIND(&is_proxy);
+ {
+ TNode<JSProxy> proxy = CAST(current.value());
+ TNode<HeapObject> handler =
+ CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
+ // Proxy is revoked.
+ GotoIfNot(IsJSReceiver(handler), &proxy_revoked);
+ TNode<JSReceiver> target =
+ CAST(LoadObjectField(proxy, JSProxy::kTargetOffset));
+ current = target;
+ Goto(&loop);
+ }
+
+ BIND(&proxy_revoked);
+ { ThrowTypeError(context, MessageTemplate::kProxyRevoked, "apply"); }
+
+ BIND(&is_bound_function);
+ {
+ TNode<JSBoundFunction> bound_function = CAST(current.value());
+ TNode<JSReceiver> target = CAST(LoadObjectField(
+ bound_function, JSBoundFunction::kBoundTargetFunctionOffset));
+ current = target;
+ Goto(&loop);
+ }
+
+ BIND(&is_wrapped_function);
+ {
+ TNode<JSWrappedFunction> wrapped_function = CAST(current.value());
+ TNode<JSReceiver> target = CAST(LoadObjectField(
+ wrapped_function, JSWrappedFunction::kWrappedTargetFunctionOffset));
+ current = target;
+ Goto(&loop);
+ }
+
+ BIND(&is_function);
+ {
+ TNode<JSFunction> function = CAST(current.value());
+ TNode<Context> context =
+ CAST(LoadObjectField(function, JSFunction::kContextOffset));
+ TNode<NativeContext> native_context = LoadNativeContext(context);
+ return native_context;
+ }
+}
+
void CodeStubAssembler::DescriptorLookup(TNode<Name> unique_name,
TNode<DescriptorArray> descriptors,
TNode<Uint32T> bitfield3,
@@ -9587,7 +9712,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
// AccessorPair case.
{
- if (mode == kCallJSGetter) {
+ if (mode == kCallJSGetterUseCachedName ||
+ mode == kCallJSGetterDontUseCachedName) {
Label if_callable(this), if_function_template_info(this);
TNode<AccessorPair> accessor_pair = CAST(value);
TNode<HeapObject> getter =
@@ -9612,10 +9738,15 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
BIND(&if_function_template_info);
{
Label runtime(this, Label::kDeferred);
+ Label use_cached_property(this);
GotoIf(IsSideEffectFreeDebuggingActive(), &runtime);
TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>(
getter, FunctionTemplateInfo::kCachedPropertyNameOffset);
- GotoIfNot(IsTheHole(cached_property_name), if_bailout);
+
+ Label* has_cached_property = mode == kCallJSGetterUseCachedName
+ ? &use_cached_property
+ : if_bailout;
+ GotoIfNot(IsTheHole(cached_property_name), has_cached_property);
TNode<NativeContext> creation_context =
GetCreationContext(CAST(holder), if_bailout);
@@ -9625,6 +9756,14 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
receiver);
Goto(&done);
+ if (mode == kCallJSGetterUseCachedName) {
+ Bind(&use_cached_property);
+
+ var_value = GetProperty(context, holder, cached_property_name);
+
+ Goto(&done);
+ }
+
BIND(&runtime);
{
var_value = CallRuntime(Runtime::kGetProperty, context, holder, name,
@@ -9633,6 +9772,7 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
}
}
} else {
+ DCHECK_EQ(mode, kReturnAccessorPair);
Goto(&done);
}
}
@@ -9704,7 +9844,7 @@ void CodeStubAssembler::TryGetOwnProperty(
Label* if_bailout) {
TryGetOwnProperty(context, receiver, object, map, instance_type, unique_name,
if_found_value, var_value, nullptr, nullptr, if_not_found,
- if_bailout, kCallJSGetter);
+ if_bailout, kCallJSGetterUseCachedName);
}
void CodeStubAssembler::TryGetOwnProperty(
@@ -13327,7 +13467,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
TNode<Name> name = CAST(CallBuiltin(Builtin::kToName, context, key));
switch (mode) {
case kHasProperty:
- GotoIf(IsPrivateSymbol(name), &return_false);
+ GotoIf(IsPrivateSymbol(name), &call_runtime);
result = CAST(
CallBuiltin(Builtin::kProxyHasProperty, context, object, name));
@@ -13815,6 +13955,36 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
UNREACHABLE();
}
+TNode<Number> CodeStubAssembler::BitwiseSmiOp(TNode<Smi> left, TNode<Smi> right,
+ Operation bitwise_op) {
+ switch (bitwise_op) {
+ case Operation::kBitwiseAnd:
+ return SmiAnd(left, right);
+ case Operation::kBitwiseOr:
+ return SmiOr(left, right);
+ case Operation::kBitwiseXor:
+ return SmiXor(left, right);
+ // Smi shift left and logical shift rihgt can have (Heap)Number output, so
+ // perform int32 operation.
+ case Operation::kShiftLeft:
+ case Operation::kShiftRightLogical:
+ return BitwiseOp(SmiToInt32(left), SmiToInt32(right), bitwise_op);
+ // Arithmetic shift right of a Smi can't overflow to the heap number, so
+ // perform int32 operation but don't check for overflow.
+ case Operation::kShiftRight: {
+ TNode<Int32T> left32 = SmiToInt32(left);
+ TNode<Int32T> right32 = SmiToInt32(right);
+ if (!Word32ShiftIsSafe()) {
+ right32 = Word32And(right32, Int32Constant(0x1F));
+ }
+ return ChangeInt32ToTaggedNoOverflow(Word32Sar(left32, right32));
+ }
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
TNode<Context> context, TNode<Object> value, TNode<Oddball> done) {
CSA_DCHECK(this, IsBoolean(done));
@@ -13893,8 +14063,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadCagedPointerFromObject(array_buffer,
- JSArrayBuffer::kBackingStoreOffset);
+ return LoadSandboxedPointerFromObject(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -14024,21 +14194,23 @@ CodeStubAssembler::LoadVariableLengthJSArrayBufferViewByteLength(
}
void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
- TNode<JSArrayBufferView> array, Label* detached_or_oob,
+ TNode<JSArrayBufferView> array_buffer_view, Label* detached_or_oob,
Label* not_detached_nor_oob) {
- TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array_buffer_view);
GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
- GotoIfNot(IsVariableLengthJSArrayBufferView(array), not_detached_nor_oob);
+ GotoIfNot(IsVariableLengthJSArrayBufferView(array_buffer_view),
+ not_detached_nor_oob);
GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
{
TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
- TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
+ TNode<UintPtrT> array_byte_offset =
+ LoadJSArrayBufferViewByteOffset(array_buffer_view);
Label length_tracking(this), not_length_tracking(this);
- Branch(IsLengthTrackingJSArrayBufferView(array), &length_tracking,
- &not_length_tracking);
+ Branch(IsLengthTrackingJSArrayBufferView(array_buffer_view),
+ &length_tracking, &not_length_tracking);
BIND(&length_tracking);
{
@@ -14053,7 +14225,7 @@ void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
// Check if the backing RAB has shrunk so that the buffer is out of
// bounds.
TNode<UintPtrT> array_byte_length =
- LoadJSArrayBufferViewByteLength(array);
+ LoadJSArrayBufferViewByteLength(array_buffer_view);
Branch(UintPtrGreaterThanOrEqual(
buffer_byte_length,
UintPtrAdd(array_byte_offset, array_byte_length)),
@@ -14062,6 +14234,38 @@ void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
}
}
+TNode<BoolT> CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean(
+ TNode<JSArrayBufferView> array_buffer_view) {
+ Label is_detached_or_out_of_bounds(this),
+ not_detached_nor_out_of_bounds(this), end(this);
+ TVARIABLE(BoolT, result);
+
+ IsJSArrayBufferViewDetachedOrOutOfBounds(array_buffer_view,
+ &is_detached_or_out_of_bounds,
+ &not_detached_nor_out_of_bounds);
+ BIND(&is_detached_or_out_of_bounds);
+ {
+ result = BoolConstant(true);
+ Goto(&end);
+ }
+ BIND(&not_detached_nor_out_of_bounds);
+ {
+ result = BoolConstant(false);
+ Goto(&end);
+ }
+ BIND(&end);
+ return result.value();
+}
+
+void CodeStubAssembler::CheckJSTypedArrayIndex(
+ TNode<UintPtrT> index, TNode<JSTypedArray> typed_array,
+ Label* detached_or_out_of_bounds) {
+ TNode<UintPtrT> len = LoadJSTypedArrayLengthAndCheckDetached(
+ typed_array, detached_or_out_of_bounds);
+
+ GotoIf(UintPtrGreaterThanOrEqual(index, len), detached_or_out_of_bounds);
+}
+
// ES #sec-integerindexedobjectbytelength
TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
@@ -14200,19 +14404,12 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
}
TNode<IntPtrT> CodeStubArguments::GetLengthWithoutReceiver() const {
- TNode<IntPtrT> argc = argc_;
- if (kJSArgcIncludesReceiver) {
- argc = assembler_->IntPtrSub(argc, assembler_->IntPtrConstant(1));
- }
- return argc;
+ return assembler_->IntPtrSub(
+ argc_, assembler_->IntPtrConstant(kJSArgcReceiverSlots));
}
TNode<IntPtrT> CodeStubArguments::GetLengthWithReceiver() const {
- TNode<IntPtrT> argc = argc_;
- if (!kJSArgcIncludesReceiver) {
- argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
- }
- return argc;
+ return argc_;
}
TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
@@ -14381,10 +14578,17 @@ TNode<BoolT> CodeStubAssembler::IsAnyPromiseHookEnabled(TNode<Uint32T> flags) {
return IsSetWord32(flags, mask);
}
+TNode<BoolT> CodeStubAssembler::IsIsolatePromiseHookEnabled(
+ TNode<Uint32T> flags) {
+ return IsSetWord32<Isolate::PromiseHookFields::HasIsolatePromiseHook>(flags);
+}
+
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
TNode<BoolT> CodeStubAssembler::IsContextPromiseHookEnabled(
TNode<Uint32T> flags) {
return IsSetWord32<Isolate::PromiseHookFields::HasContextPromiseHook>(flags);
}
+#endif
TNode<BoolT> CodeStubAssembler::
IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(TNode<Uint32T> flags) {
@@ -14412,13 +14616,8 @@ TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
- TNode<ExternalReference> table = ExternalConstant(
-#ifdef V8_EXTERNAL_CODE_SPACE
- ExternalReference::builtins_code_data_container_table(isolate())
-#else
- ExternalReference::builtins_table(isolate())
-#endif // V8_EXTERNAL_CODE_SPACE
- ); // NOLINT(whitespace/parens)
+ TNode<ExternalReference> table =
+ ExternalConstant(ExternalReference::builtins_table(isolate()));
return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}
@@ -14466,6 +14665,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
WASM_EXPORTED_FUNCTION_DATA_TYPE,
WASM_JS_FUNCTION_DATA_TYPE,
ASM_WASM_DATA_TYPE,
+ WASM_ON_FULFILLED_DATA_TYPE,
#endif // V8_ENABLE_WEBASSEMBLY
};
Label check_is_bytecode_array(this);
@@ -14475,6 +14675,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
Label check_is_wasm_function_data(this);
+ Label check_is_wasm_on_fulfilled(this);
Label* case_labels[] = {
&check_is_bytecode_array,
&check_is_baseline_data,
@@ -14488,6 +14689,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
&check_is_wasm_function_data,
&check_is_wasm_function_data,
&check_is_asm_wasm_data,
+ &check_is_wasm_on_fulfilled,
#endif // V8_ENABLE_WEBASSEMBLY
};
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
@@ -14496,7 +14698,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBytecodeArray: Interpret bytecode
BIND(&check_is_bytecode_array);
- sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InterpreterEntryTrampoline));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
Goto(&done);
// IsBaselineData: Execute baseline code
@@ -14510,12 +14712,12 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
BIND(&check_is_uncompiled_data);
- sfi_code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
Goto(if_compile_lazy ? if_compile_lazy : &done);
// IsFunctionTemplateInfo: API call
BIND(&check_is_function_template_info);
- sfi_code = HeapConstant(BUILTIN_CODET(isolate(), HandleApiCall));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
Goto(&done);
// IsInterpreterData: Interpret bytecode
@@ -14539,7 +14741,12 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsAsmWasmData: Instantiate using AsmWasmData
BIND(&check_is_asm_wasm_data);
- sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InstantiateAsmJs));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+ Goto(&done);
+
+ // IsWasmOnFulfilledData: Resume the suspended wasm continuation.
+ BIND(&check_is_wasm_on_fulfilled);
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), WasmResume));
Goto(&done);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -14697,12 +14904,8 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
TNode<RawPtrT> frame, TNode<IntPtrT> argc,
FrameArgumentsArgcType argc_type) {
- if (kJSArgcIncludesReceiver &&
- argc_type == FrameArgumentsArgcType::kCountExcludesReceiver) {
+ if (argc_type == FrameArgumentsArgcType::kCountExcludesReceiver) {
argc = IntPtrAdd(argc, IntPtrConstant(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver &&
- argc_type == FrameArgumentsArgcType::kCountIncludesReceiver) {
- argc = IntPtrSub(argc, IntPtrConstant(1));
}
return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
@@ -14730,6 +14933,19 @@ void CodeStubAssembler::Print(const char* prefix,
CallRuntime(Runtime::kDebugPrint, NoContextConstant(), arg);
}
+IntegerLiteral CodeStubAssembler::ConstexprIntegerLiteralAdd(
+ const IntegerLiteral& lhs, const IntegerLiteral& rhs) {
+ return lhs + rhs;
+}
+IntegerLiteral CodeStubAssembler::ConstexprIntegerLiteralLeftShift(
+ const IntegerLiteral& lhs, const IntegerLiteral& rhs) {
+ return lhs << rhs;
+}
+IntegerLiteral CodeStubAssembler::ConstexprIntegerLiteralBitwiseOr(
+ const IntegerLiteral& lhs, const IntegerLiteral& rhs) {
+ return lhs | rhs;
+}
+
void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
Label ok(this), stack_check_interrupt(this, Label::kDeferred);
@@ -15725,5 +15941,65 @@ void CodeStubAssembler::SwissNameDictionaryAdd(TNode<SwissNameDictionary> table,
}
}
+void CodeStubAssembler::SharedValueBarrier(
+ TNode<Context> context, TVariable<Object>* var_shared_value) {
+ // The barrier ensures that the value can be shared across Isolates.
+ // The fast paths should be kept in sync with Object::Share.
+
+ TNode<Object> value = var_shared_value->value();
+ Label check_in_shared_heap(this), slow(this), skip_barrier(this), done(this);
+
+ // Fast path: Smis are trivially shared.
+ GotoIf(TaggedIsSmi(value), &done);
+ // Fast path: Shared memory features imply shared RO space, so RO objects are
+ // trivially shared.
+ DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
+ TNode<IntPtrT> page_flags = LoadBasicMemoryChunkFlags(CAST(value));
+ GotoIf(WordNotEqual(WordAnd(page_flags,
+ IntPtrConstant(BasicMemoryChunk::READ_ONLY_HEAP)),
+ IntPtrConstant(0)),
+ &skip_barrier);
+
+ // Fast path: Check if the HeapObject is already shared.
+ TNode<Uint16T> value_instance_type =
+ LoadMapInstanceType(LoadMap(CAST(value)));
+ GotoIf(IsSharedStringInstanceType(value_instance_type), &skip_barrier);
+ GotoIf(IsJSSharedStructInstanceType(value_instance_type), &skip_barrier);
+ GotoIf(IsHeapNumberInstanceType(value_instance_type), &check_in_shared_heap);
+ Goto(&slow);
+
+ BIND(&check_in_shared_heap);
+ {
+ Branch(
+ WordNotEqual(WordAnd(page_flags,
+ IntPtrConstant(BasicMemoryChunk::IN_SHARED_HEAP)),
+ IntPtrConstant(0)),
+ &skip_barrier, &slow);
+ }
+
+ // Slow path: Call out to runtime to share primitives and to throw on
+ // non-shared JS objects.
+ BIND(&slow);
+ {
+ *var_shared_value =
+ CallRuntime(Runtime::kSharedValueBarrierSlow, context, value);
+ Goto(&skip_barrier);
+ }
+
+ BIND(&skip_barrier);
+ {
+ CSA_DCHECK(
+ this,
+ WordNotEqual(
+ WordAnd(LoadBasicMemoryChunkFlags(CAST(var_shared_value->value())),
+ IntPtrConstant(BasicMemoryChunk::READ_ONLY_HEAP |
+ BasicMemoryChunk::IN_SHARED_HEAP)),
+ IntPtrConstant(0)));
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 109bd9cfa4..55485d004a 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -13,6 +13,7 @@
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
+#include "src/numbers/integer-literal.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
#include "src/objects/cell.h"
@@ -27,7 +28,7 @@
#include "src/objects/swiss-name-dictionary.h"
#include "src/objects/tagged-index.h"
#include "src/roots/roots.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#include "torque-generated/exported-macros-assembler.h"
namespace v8 {
@@ -616,6 +617,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
+ SMI_ARITHMETIC_BINOP(SmiXor, WordXor, Word32Xor)
#undef SMI_ARITHMETIC_BINOP
TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
@@ -629,28 +631,44 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> TrySmiAbs(TNode<Smi> a, Label* if_overflow);
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
- return BitcastWordToTaggedSigned(
+ TNode<Smi> result = BitcastWordToTaggedSigned(
WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift));
+ // Smi shift have different result to int32 shift when the inputs are not
+ // strictly limited. The CSA_DCHECK is to ensure valid inputs.
+ CSA_DCHECK(
+ this, TaggedEqual(result, BitwiseOp(SmiToInt32(a), Int32Constant(shift),
+ Operation::kShiftLeft)));
+ return result;
}
TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
+ TNode<Smi> result;
if (kTaggedSize == kInt64Size) {
- return BitcastWordToTaggedSigned(
+ result = BitcastWordToTaggedSigned(
WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift),
BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
} else {
// For pointer compressed Smis, we want to make sure that we truncate to
// int32 before shifting, to avoid the values of the top 32-bits from
// leaking into the sign bit of the smi.
- return BitcastWordToTaggedSigned(WordAnd(
+ result = BitcastWordToTaggedSigned(WordAnd(
ChangeInt32ToIntPtr(Word32Shr(
TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
shift)),
BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
}
+ // Smi shift have different result to int32 shift when the inputs are not
+ // strictly limited. The CSA_DCHECK is to ensure valid inputs.
+ CSA_DCHECK(
+ this, TaggedEqual(result, BitwiseOp(SmiToInt32(a), Int32Constant(shift),
+ Operation::kShiftRightLogical)));
+ return result;
}
TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
+ // The number of shift bits is |shift % 64| for 64-bits value and |shift %
+ // 32| for 32-bits value. The DCHECK is to ensure valid inputs.
+ DCHECK_LT(shift, 32);
if (kTaggedSize == kInt64Size) {
return BitcastWordToTaggedSigned(
WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift),
@@ -752,6 +770,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> BitwiseOp(TNode<Word32T> left32, TNode<Word32T> right32,
Operation bitwise_op);
+ TNode<Number> BitwiseSmiOp(TNode<Smi> left32, TNode<Smi> right32,
+ Operation bitwise_op);
// Allocate an object of the given size.
TNode<HeapObject> AllocateInNewSpace(
@@ -803,6 +823,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return IsCodeTMap(LoadMap(object));
}
+ // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
+ // is cached in or moved to CodeT.
TNode<Code> FromCodeT(TNode<CodeT> code) {
#ifdef V8_EXTERNAL_CODE_SPACE
#if V8_TARGET_BIG_ENDIAN
@@ -1048,22 +1070,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Load a caged pointer value from an object.
- TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- int offset) {
- return LoadCagedPointerFromObject(object, IntPtrConstant(offset));
+ TNode<RawPtrT> LoadSandboxedPointerFromObject(TNode<HeapObject> object,
+ int offset) {
+ return LoadSandboxedPointerFromObject(object, IntPtrConstant(offset));
}
- TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset);
+ TNode<RawPtrT> LoadSandboxedPointerFromObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
// Stored a caged pointer value to an object.
- void StoreCagedPointerToObject(TNode<HeapObject> object, int offset,
- TNode<RawPtrT> pointer) {
- StoreCagedPointerToObject(object, IntPtrConstant(offset), pointer);
+ void StoreSandboxedPointerToObject(TNode<HeapObject> object, int offset,
+ TNode<RawPtrT> pointer) {
+ StoreSandboxedPointerToObject(object, IntPtrConstant(offset), pointer);
}
- void StoreCagedPointerToObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset, TNode<RawPtrT> pointer);
+ void StoreSandboxedPointerToObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer);
TNode<RawPtrT> EmptyBackingStoreBufferConstant();
@@ -1071,8 +1094,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// ExternalPointerT-related functionality.
//
- TNode<ExternalPointerT> ChangeUint32ToExternalPointer(TNode<Uint32T> value);
- TNode<Uint32T> ChangeExternalPointerToUint32(TNode<ExternalPointerT> value);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ TNode<ExternalPointerT> ChangeIndexToExternalPointer(TNode<Uint32T> index);
+ TNode<Uint32T> ChangeExternalPointerToIndex(TNode<ExternalPointerT> pointer);
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
// Initialize an external pointer field in an object.
void InitializeExternalPointerField(TNode<HeapObject> object, int offset) {
@@ -1145,14 +1170,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadCagedPointerFromObject(holder,
- JSTypedArray::kExternalPointerOffset);
+ return LoadSandboxedPointerFromObject(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreCagedPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
- value);
+ StoreSandboxedPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+ value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1782,6 +1807,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0);
+ void StoreJSSharedStructInObjectField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<Object> value);
+
+ void StoreJSSharedStructPropertyArrayElement(TNode<PropertyArray> array,
+ TNode<IntPtrT> index,
+ TNode<Object> value) {
+ // JSSharedStructs are allocated in the shared old space, which is currently
+ // collected by stopping the world, so the incremental write barrier is not
+ // needed. They can only store Smis and other HeapObjects in the shared old
+ // space, so the generational write barrier is also not needed.
+ // TODO(v8:12547): Add a safer, shared variant of SKIP_WRITE_BARRIER.
+ StoreFixedArrayOrPropertyArrayElement(array, index, value,
+ UNSAFE_SKIP_WRITE_BARRIER);
+ }
+
// EnsureArrayPushable verifies that receiver with this map is:
// 1. Is not a prototype.
// 2. Is not a dictionary.
@@ -1986,6 +2027,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
Label* if_bailout);
+ TNode<NativeContext> GetFunctionRealm(TNode<Context> context,
+ TNode<JSReceiver> receiver,
+ Label* if_bailout);
TNode<Object> GetConstructor(TNode<Map> map);
TNode<Map> GetInstanceTypeMap(InstanceType instance_type);
@@ -2364,6 +2408,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bigint,
TVariable<BigInt>* var_maybe_bigint,
TVariable<Smi>* var_feedback);
+ void TaggedPointerToWord32OrBigIntWithFeedback(
+ TNode<Context> context, TNode<HeapObject> pointer, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback);
TNode<Int32T> TruncateNumberToWord32(TNode<Number> value);
// Truncate the floating point value of a HeapNumber to an Int32.
@@ -2379,6 +2427,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ChangeFloat32ToTagged(TNode<Float32T> value);
TNode<Number> ChangeFloat64ToTagged(TNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(TNode<Int32T> value);
+ TNode<Number> ChangeInt32ToTaggedNoOverflow(TNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(TNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
@@ -2397,6 +2446,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Numeric>* var_numeric,
TVariable<Smi>* var_feedback);
+ // Ensures that {var_shared_value} is shareable across Isolates, and throws if
+ // not.
+ void SharedValueBarrier(TNode<Context> context,
+ TVariable<Object>* var_shared_value);
+
TNode<WordT> TimesSystemPointerSize(TNode<WordT> value);
TNode<IntPtrT> TimesSystemPointerSize(TNode<IntPtrT> value) {
return Signed(TimesSystemPointerSize(implicit_cast<TNode<WordT>>(value)));
@@ -2540,6 +2594,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSPrimitiveWrapperInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSPrimitiveWrapperMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapper(TNode<HeapObject> object);
+ TNode<BoolT> IsJSSharedStructInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsJSSharedStructMap(TNode<Map> map);
+ TNode<BoolT> IsJSSharedStruct(TNode<HeapObject> object);
+ TNode<BoolT> IsJSSharedStruct(TNode<Object> object);
+ TNode<BoolT> IsJSWrappedFunction(TNode<HeapObject> object);
TNode<BoolT> IsMap(TNode<HeapObject> object);
TNode<BoolT> IsName(TNode<HeapObject> object);
TNode<BoolT> IsNameInstanceType(TNode<Int32T> instance_type);
@@ -2579,6 +2638,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSymbolInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsSharedStringInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsTemporalInstantInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoCachedIndex(TNode<HeapObject> object);
@@ -2594,6 +2655,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
+ TNode<IntPtrT> LoadBasicMemoryChunkFlags(TNode<HeapObject> object);
+
TNode<BoolT> LoadRuntimeFlag(ExternalReference address_of_flag) {
TNode<Word32T> flag_value = UncheckedCast<Word32T>(
Load(MachineType::Uint8(), ExternalConstant(address_of_flag)));
@@ -2611,6 +2674,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ExternalReference::address_of_builtin_subclassing_flag());
}
+ TNode<BoolT> HasSharedStringTableFlag() {
+ return LoadRuntimeFlag(
+ ExternalReference::address_of_shared_string_table_flag());
+ }
+
// True iff |object| is a Smi or a HeapNumber or a BigInt.
TNode<BoolT> IsNumeric(TNode<Object> object);
@@ -2847,7 +2915,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(Word32And(word32, const_mask), const_mask);
}
- // Returns true if the bit field |BitField| in |word32| is equal to a given.
+ // Returns true if the bit field |BitField| in |word32| is equal to a given
// constant |value|. Avoids a shift compared to using DecodeWord32.
template <typename BitField>
TNode<BoolT> IsEqualInWord32(TNode<Word32T> word32,
@@ -2857,6 +2925,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(masked_word32, Int32Constant(BitField::encode(value)));
}
+ // Returns true if the bit field |BitField| in |word32| is not equal to a
+ // given constant |value|. Avoids a shift compared to using DecodeWord32.
+ template <typename BitField>
+ TNode<BoolT> IsNotEqualInWord32(TNode<Word32T> word32,
+ typename BitField::FieldType value) {
+ return Word32BinaryNot(IsEqualInWord32<BitField>(word32, value));
+ }
+
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
TNode<BoolT> IsSetWord(TNode<WordT> word) {
@@ -2995,6 +3071,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Calculate a valid size for the a hash table.
TNode<IntPtrT> HashTableComputeCapacity(TNode<IntPtrT> at_least_space_for);
+ TNode<IntPtrT> NameToIndexHashTableLookup(TNode<NameToIndexHashTable> table,
+ TNode<Name> name, Label* not_found);
+
template <class Dictionary>
TNode<Smi> GetNumberOfElements(TNode<Dictionary> dictionary);
@@ -3102,8 +3181,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bailout);
// Operating mode for TryGetOwnProperty and CallGetterIfAccessor
- // kReturnAccessorPair is used when we're only getting the property descriptor
- enum GetOwnPropertyMode { kCallJSGetter, kReturnAccessorPair };
+ enum GetOwnPropertyMode {
+ // kCallJSGetterDontUseCachedName is used when we want to get the result of
+ // the getter call, and don't use cached_name_property when the getter is
+ // the function template and it has cached_property_name, which would just
+ // bailout for the IC system to create a named property handler
+ kCallJSGetterDontUseCachedName,
+ // kCallJSGetterUseCachedName is used when we want to get the result of
+ // the getter call, and use cached_name_property when the getter is
+ // the function template and it has cached_property_name, which would call
+ // GetProperty rather than bailout for Generic/NoFeedback load
+ kCallJSGetterUseCachedName,
+ // kReturnAccessorPair is used when we're only getting the property
+ // descriptor
+ kReturnAccessorPair
+ };
// Tries to get {object}'s own {unique_name} property value. If the property
// is an accessor then it also calls a getter. If the property is a double
// field it re-wraps value in an immutable heap number. {unique_name} must be
@@ -3620,9 +3712,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
Label* detached_or_out_of_bounds);
- void IsJSArrayBufferViewDetachedOrOutOfBounds(TNode<JSArrayBufferView> array,
- Label* detached_or_oob,
- Label* not_detached_nor_oob);
+ void IsJSArrayBufferViewDetachedOrOutOfBounds(
+ TNode<JSArrayBufferView> array_buffer_view, Label* detached_or_oob,
+ Label* not_detached_nor_oob);
+
+ TNode<BoolT> IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean(
+ TNode<JSArrayBufferView> array_buffer_view);
+
+ void CheckJSTypedArrayIndex(TNode<UintPtrT> index,
+ TNode<JSTypedArray> typed_array,
+ Label* detached_or_out_of_bounds);
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
@@ -3660,10 +3759,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Promise helpers
TNode<Uint32T> PromiseHookFlags();
TNode<BoolT> HasAsyncEventDelegate();
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
TNode<BoolT> IsContextPromiseHookEnabled(TNode<Uint32T> flags);
- TNode<BoolT> IsContextPromiseHookEnabled() {
- return IsContextPromiseHookEnabled(PromiseHookFlags());
- }
+#endif
+ TNode<BoolT> IsIsolatePromiseHookEnabled(TNode<Uint32T> flags);
TNode<BoolT> IsAnyPromiseHookEnabled(TNode<Uint32T> flags);
TNode<BoolT> IsAnyPromiseHookEnabled() {
return IsAnyPromiseHookEnabled(PromiseHookFlags());
@@ -3708,17 +3807,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
FrameArgumentsArgcType argc_type =
FrameArgumentsArgcType::kCountExcludesReceiver);
- inline TNode<Int32T> JSParameterCount(TNode<Int32T> argc_without_receiver) {
- return kJSArgcIncludesReceiver
- ? Int32Add(argc_without_receiver,
- Int32Constant(kJSArgcReceiverSlots))
- : argc_without_receiver;
+ inline TNode<Int32T> JSParameterCount(int argc_without_receiver) {
+ return Int32Constant(argc_without_receiver + kJSArgcReceiverSlots);
}
inline TNode<Word32T> JSParameterCount(TNode<Word32T> argc_without_receiver) {
- return kJSArgcIncludesReceiver
- ? Int32Add(argc_without_receiver,
- Int32Constant(kJSArgcReceiverSlots))
- : argc_without_receiver;
+ return Int32Add(argc_without_receiver, Int32Constant(kJSArgcReceiverSlots));
}
// Support for printf-style debugging
@@ -3742,6 +3835,45 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
bool ConstexprBoolNot(bool value) { return !value; }
+ int31_t ConstexprIntegerLiteralToInt31(const IntegerLiteral& i) {
+ return int31_t(i.To<int32_t>());
+ }
+ int32_t ConstexprIntegerLiteralToInt32(const IntegerLiteral& i) {
+ return i.To<int32_t>();
+ }
+ uint32_t ConstexprIntegerLiteralToUint32(const IntegerLiteral& i) {
+ return i.To<uint32_t>();
+ }
+ int8_t ConstexprIntegerLiteralToInt8(const IntegerLiteral& i) {
+ return i.To<int8_t>();
+ }
+ uint8_t ConstexprIntegerLiteralToUint8(const IntegerLiteral& i) {
+ return i.To<uint8_t>();
+ }
+ uint64_t ConstexprIntegerLiteralToUint64(const IntegerLiteral& i) {
+ return i.To<uint64_t>();
+ }
+ intptr_t ConstexprIntegerLiteralToIntptr(const IntegerLiteral& i) {
+ return i.To<intptr_t>();
+ }
+ uintptr_t ConstexprIntegerLiteralToUintptr(const IntegerLiteral& i) {
+ return i.To<uintptr_t>();
+ }
+ double ConstexprIntegerLiteralToFloat64(const IntegerLiteral& i) {
+ int64_t i_value = i.To<int64_t>();
+ double d_value = static_cast<double>(i_value);
+ CHECK_EQ(i_value, static_cast<int64_t>(d_value));
+ return d_value;
+ }
+ bool ConstexprIntegerLiteralEqual(IntegerLiteral lhs, IntegerLiteral rhs) {
+ return lhs == rhs;
+ }
+ IntegerLiteral ConstexprIntegerLiteralAdd(const IntegerLiteral& lhs,
+ const IntegerLiteral& rhs);
+ IntegerLiteral ConstexprIntegerLiteralLeftShift(const IntegerLiteral& lhs,
+ const IntegerLiteral& rhs);
+ IntegerLiteral ConstexprIntegerLiteralBitwiseOr(const IntegerLiteral& lhs,
+ const IntegerLiteral& rhs);
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; }
@@ -3883,7 +4015,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> CallGetterIfAccessor(
TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
TNode<Context> context, TNode<Object> receiver, TNode<Object> name,
- Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter);
+ Label* if_bailout,
+ GetOwnPropertyMode mode = kCallJSGetterDontUseCachedName);
TNode<IntPtrT> TryToIntptr(TNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type = nullptr);
@@ -4060,10 +4193,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Numeric>* var_numeric,
TVariable<Smi>* var_feedback);
+ enum IsKnownTaggedPointer { kNo, kYes };
template <Object::Conversion conversion>
void TaggedToWord32OrBigIntImpl(TNode<Context> context, TNode<Object> value,
Label* if_number,
TVariable<Word32T>* var_word32,
+ IsKnownTaggedPointer is_known_tagged_pointer,
Label* if_bigint = nullptr,
TVariable<BigInt>* var_maybe_bigint = nullptr,
TVariable<Smi>* var_feedback = nullptr);
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index d603298897..df237d44bd 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -34,7 +34,6 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
@@ -49,6 +48,7 @@
#include "src/logging/counters-scopes.h"
#include "src/logging/log-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
+#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/map.h"
@@ -65,6 +65,10 @@
#include "src/web-snapshot/web-snapshot.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
+#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev.h"
+#endif // V8_ENABLE_MAGLEV
+
namespace v8 {
namespace internal {
@@ -193,31 +197,19 @@ class CompilerTracer : public AllStatic {
}
};
-} // namespace
-
-// Helper that times a scoped region and records the elapsed time.
-struct ScopedTimer {
- explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
- DCHECK_NOT_NULL(location_);
- timer_.Start();
- }
-
- ~ScopedTimer() { *location_ += timer_.Elapsed(); }
-
- base::ElapsedTimer timer_;
- base::TimeDelta* location_;
-};
-
-// static
-void Compiler::LogFunctionCompilation(Isolate* isolate,
- CodeEventListener::LogEventsAndTags tag,
- Handle<Script> script,
- Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> vector,
- Handle<AbstractCode> abstract_code,
- CodeKind kind, double time_taken_ms) {
+void LogFunctionCompilation(Isolate* isolate,
+ CodeEventListener::LogEventsAndTags tag,
+ Handle<Script> script,
+ Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> vector,
+ Handle<AbstractCode> abstract_code, CodeKind kind,
+ double time_taken_ms) {
DCHECK(!abstract_code.is_null());
- DCHECK(!abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy)));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ DCHECK_NE(*abstract_code, FromCodeT(*BUILTIN_CODE(isolate, CompileLazy)));
+ } else {
+ DCHECK(!abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy)));
+ }
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
@@ -251,9 +243,6 @@ void Compiler::LogFunctionCompilation(Isolate* isolate,
case CodeKind::BASELINE:
name = "baseline";
break;
- case CodeKind::TURBOPROP:
- name = "turboprop";
- break;
case CodeKind::TURBOFAN:
name = "optimize";
break;
@@ -282,6 +271,21 @@ void Compiler::LogFunctionCompilation(Isolate* isolate,
*debug_name));
}
+} // namespace
+
+// Helper that times a scoped region and records the elapsed time.
+struct ScopedTimer {
+ explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
+ DCHECK_NOT_NULL(location_);
+ timer_.Start();
+ }
+
+ ~ScopedTimer() { *location_ += timer_.Elapsed(); }
+
+ base::ElapsedTimer timer_;
+ base::TimeDelta* location_;
+};
+
namespace {
ScriptOriginOptions OriginOptionsForEval(Object script) {
@@ -359,7 +363,7 @@ void RecordUnoptimizedFunctionCompilation(
#if V8_ENABLE_WEBASSEMBLY
DCHECK(shared->HasAsmWasmData());
abstract_code =
- Handle<AbstractCode>::cast(BUILTIN_CODE(isolate, InstantiateAsmJs));
+ ToAbstractCode(BUILTIN_CODE(isolate, InstantiateAsmJs), isolate);
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
@@ -369,9 +373,9 @@ void RecordUnoptimizedFunctionCompilation(
time_taken_to_finalize.InMillisecondsF();
Handle<Script> script(Script::cast(shared->script()), isolate);
- Compiler::LogFunctionCompilation(
- isolate, tag, script, shared, Handle<FeedbackVector>(), abstract_code,
- CodeKind::INTERPRETED_FUNCTION, time_taken_ms);
+ LogFunctionCompilation(isolate, tag, script, shared, Handle<FeedbackVector>(),
+ abstract_code, CodeKind::INTERPRETED_FUNCTION,
+ time_taken_ms);
}
} // namespace
@@ -507,7 +511,7 @@ void OptimizedCompilationJob::RecordFunctionCompilation(
Script::cast(compilation_info()->shared_info()->script()), isolate);
Handle<FeedbackVector> feedback_vector(
compilation_info()->closure()->feedback_vector(), isolate);
- Compiler::LogFunctionCompilation(
+ LogFunctionCompilation(
isolate, tag, script, compilation_info()->shared_info(), feedback_vector,
abstract_code, compilation_info()->code_kind(), time_taken_ms);
}
@@ -831,19 +835,18 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
return true;
}
-V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
+V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BytecodeOffset osr_offset,
CodeKind code_kind) {
Isolate* isolate = function->GetIsolate();
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DisallowGarbageCollection no_gc;
- Code code;
+ CodeT code;
if (osr_offset.IsNone() && function->has_feedback_vector()) {
FeedbackVector feedback_vector = function->feedback_vector();
feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->raw_feedback_cell(), function->shared(),
- "GetCodeFromOptimizedCodeCache");
+ function->shared(), "GetCodeFromOptimizedCodeCache");
code = feedback_vector.optimized_code();
} else if (!osr_offset.IsNone()) {
code = function->context()
@@ -858,9 +861,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
DCHECK(function->shared().is_compiled());
DCHECK(CodeKindIsStoredInOptimizedCodeCache(code.kind()));
DCHECK_IMPLIES(!osr_offset.IsNone(), CodeKindCanOSR(code.kind()));
- return Handle<Code>(code, isolate);
+ return Handle<CodeT>(code, isolate);
}
- return MaybeHandle<Code>();
+ return MaybeHandle<CodeT>();
}
void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
@@ -885,16 +888,16 @@ void InsertCodeIntoOptimizedCodeCache(
}
// Cache optimized code.
- Handle<Code> code = compilation_info->code();
Handle<JSFunction> function = compilation_info->closure();
- Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
+ Isolate* isolate = function->GetIsolate();
+ Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<NativeContext> native_context(function->context().native_context(),
- function->GetIsolate());
+ isolate);
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
- handle(function->feedback_vector(), function->GetIsolate());
- FeedbackVector::SetOptimizedCode(vector, code,
- function->raw_feedback_cell());
+ handle(function->feedback_vector(), isolate);
+ FeedbackVector::SetOptimizedCode(vector, code);
} else {
DCHECK(CodeKindCanOSR(kind));
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
@@ -908,7 +911,7 @@ void InsertCodeIntoOptimizedCodeCache(
bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
CompilationHandleScope compilation(isolate, compilation_info);
- CanonicalHandleScope canonical(isolate, compilation_info);
+ CanonicalHandleScopeForTurbofan canonical(isolate, compilation_info);
compilation_info->ReopenHandlesInNewHandleScope(isolate);
return job->PrepareJob(isolate) == CompilationJob::SUCCEEDED;
}
@@ -1004,29 +1007,12 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
// Returns the code object at which execution continues after a concurrent
// optimization job has been started (but not finished).
-Handle<Code> ContinuationForConcurrentOptimization(
+Handle<CodeT> ContinuationForConcurrentOptimization(
Isolate* isolate, Handle<JSFunction> function) {
- Handle<Code> cached_code;
- if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
- DCHECK(!FLAG_turboprop_as_toptier);
- DCHECK(function->NextTier() == CodeKind::TURBOFAN);
- // It is possible that we have marked a closure for TurboFan optimization
- // but the marker is processed by another closure that doesn't have
- // optimized code yet. So heal the closure here and return the optimized
- // code.
- if (!function->HasAttachedOptimizedCode()) {
- DCHECK(function->feedback_vector().has_optimized_code());
- // Release store isn't required here because it was done on store
- // into the feedback vector.
- STATIC_ASSERT(
- FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
- function->set_code(function->feedback_vector().optimized_code());
- }
- return handle(function->code(), isolate);
- } else if (function->shared().HasBaselineCode()) {
+ if (function->shared().HasBaselineCode()) {
CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
- return handle(FromCodeT(baseline_code), isolate);
+ return handle(baseline_code, isolate);
}
DCHECK(function->ActiveTierIsIgnition());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
@@ -1039,7 +1025,111 @@ enum class GetOptimizedCodeResultHandling {
kDiscardForTesting,
};
-MaybeHandle<Code> GetOptimizedCode(
+bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
+ DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
+ switch (code_kind) {
+ case CodeKind::TURBOFAN:
+ return FLAG_opt && shared->PassesFilter(FLAG_turbo_filter);
+ case CodeKind::MAGLEV:
+ // TODO(v8:7700): FLAG_maglev_filter.
+ return FLAG_maglev;
+ default:
+ UNREACHABLE();
+ }
+}
+
+MaybeHandle<CodeT> CompileTurbofan(
+ Isolate* isolate, Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
+ BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
+ GetOptimizedCodeResultHandling result_handling) {
+ VMState<COMPILER> state(isolate);
+ TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
+
+ static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
+
+ DCHECK(!isolate->has_pending_exception());
+ PostponeInterruptsScope postpone(isolate);
+ bool has_script = shared->script().IsScript();
+ // BUG(5946): This DCHECK is necessary to make certain that we won't
+ // tolerate the lack of a script without bytecode.
+ DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
+ std::unique_ptr<OptimizedCompilationJob> job(
+ compiler::Pipeline::NewCompilationJob(isolate, function, kCodeKind,
+ has_script, osr_offset, osr_frame));
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
+
+ if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
+ compilation_info->set_discard_result_for_testing();
+ }
+
+ // Prepare the job and launch concurrent compilation, or compile now.
+ if (mode == ConcurrencyMode::kConcurrent) {
+ if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
+ kCodeKind, function)) {
+ return ContinuationForConcurrentOptimization(isolate, function);
+ }
+ } else {
+ DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
+ if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
+ return ToCodeT(compilation_info->code(), isolate);
+ }
+ }
+
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return {};
+}
+
+MaybeHandle<CodeT> CompileMaglev(
+ Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
+ BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
+ GetOptimizedCodeResultHandling result_handling) {
+#ifdef V8_ENABLE_MAGLEV
+ DCHECK(FLAG_maglev);
+ // TODO(v8:7700): Add missing support.
+ CHECK(osr_offset.IsNone());
+ CHECK(osr_frame == nullptr);
+ CHECK(result_handling == GetOptimizedCodeResultHandling::kDefault);
+
+ // TODO(v8:7700): Tracing, see CompileTurbofan.
+
+ DCHECK(!isolate->has_pending_exception());
+ PostponeInterruptsScope postpone(isolate);
+
+ if (mode == ConcurrencyMode::kNotConcurrent) {
+ function->ClearOptimizationMarker();
+ return Maglev::Compile(isolate, function);
+ }
+
+ DCHECK_EQ(mode, ConcurrencyMode::kConcurrent);
+
+ // TODO(v8:7700): See everything in GetOptimizedCodeLater.
+ // - Tracing,
+ // - timers,
+ // - aborts on memory pressure,
+ // ...
+
+ // Prepare the job.
+ auto job = maglev::MaglevCompilationJob::New(isolate, function);
+ CompilationJob::Status status = job->PrepareJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED); // TODO(v8:7700): Use status.
+
+ // Enqueue it.
+ isolate->maglev_concurrent_dispatcher()->EnqueueJob(std::move(job));
+
+ // Remember that the function is currently being processed.
+ function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
+
+ // The code that triggered optimization continues execution here.
+ return ContinuationForConcurrentOptimization(isolate, function);
+#else // V8_ENABLE_MAGLEV
+ UNREACHABLE();
+#endif // V8_ENABLE_MAGLEV
+}
+
+MaybeHandle<CodeT> GetOptimizedCode(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
JavaScriptFrame* osr_frame = nullptr,
@@ -1053,6 +1143,7 @@ MaybeHandle<Code> GetOptimizedCode(
// don't try to re-optimize.
if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
+ // TODO(v8:7700): Distinguish between Maglev and Turbofan.
if (shared->optimization_disabled() &&
shared->disabled_optimization_reason() == BailoutReason::kNeverOptimize) {
return {};
@@ -1061,12 +1152,12 @@ MaybeHandle<Code> GetOptimizedCode(
// Do not optimize when debugger needs to hook into every call.
if (isolate->debug()->needs_check_on_function_call()) return {};
- // Do not use TurboFan if we need to be able to set break points.
+ // Do not optimize if we need to be able to set break points.
if (shared->HasBreakInfo()) return {};
- // Do not use TurboFan if optimization is disabled or function doesn't pass
+ // Do not optimize if optimization is disabled or function doesn't pass
// turbo_filter.
- if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) return {};
+ if (!ShouldOptimize(code_kind, shared)) return {};
// If code was pending optimization for testing, remove the entry from the
// table that was preventing the bytecode from being flushed.
@@ -1076,7 +1167,7 @@ MaybeHandle<Code> GetOptimizedCode(
// Check the optimized code cache (stored on the SharedFunctionInfo).
if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
- Handle<Code> cached_code;
+ Handle<CodeT> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_offset, code_kind)
.ToHandle(&cached_code)) {
CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
@@ -1085,45 +1176,19 @@ MaybeHandle<Code> GetOptimizedCode(
}
}
- // Reset profiler ticks, function is no longer considered hot.
+ // Reset profiler ticks, the function is no longer considered hot.
+ // TODO(v8:7700): Update for Maglev tiering.
DCHECK(shared->is_compiled());
function->feedback_vector().set_profiler_ticks(0);
- VMState<COMPILER> state(isolate);
- TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
-
- DCHECK(!isolate->has_pending_exception());
- PostponeInterruptsScope postpone(isolate);
- bool has_script = shared->script().IsScript();
- // BUG(5946): This DCHECK is necessary to make certain that we won't
- // tolerate the lack of a script without bytecode.
- DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
- std::unique_ptr<OptimizedCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(isolate, function, code_kind,
- has_script, osr_offset, osr_frame));
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
-
- if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
- compilation_info->set_discard_result_for_testing();
- }
-
- // Prepare the job and launch concurrent compilation, or compile now.
- if (mode == ConcurrencyMode::kConcurrent) {
- if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
- code_kind, function)) {
- return ContinuationForConcurrentOptimization(isolate, function);
- }
+ if (code_kind == CodeKind::TURBOFAN) {
+ return CompileTurbofan(isolate, function, shared, mode, osr_offset,
+ osr_frame, result_handling);
} else {
- DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
- if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
- return compilation_info->code();
- }
+ DCHECK_EQ(code_kind, CodeKind::MAGLEV);
+ return CompileMaglev(isolate, function, mode, osr_offset, osr_frame,
+ result_handling);
}
-
- if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return {};
}
// When --stress-concurrent-inlining is enabled, spawn concurrent jobs in
@@ -1134,6 +1199,9 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
Handle<JSFunction> function,
ConcurrencyMode mode,
CodeKind code_kind) {
+ // TODO(v8:7700): Support Maglev.
+ if (code_kind == CodeKind::MAGLEV) return;
+
DCHECK(FLAG_stress_concurrent_inlining &&
isolate->concurrent_recompilation_enabled() &&
mode == ConcurrencyMode::kNotConcurrent &&
@@ -1340,15 +1408,14 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
}
#ifdef V8_RUNTIME_CALL_STATS
-RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
- ParseInfo* parse_info) {
+RuntimeCallCounterId RuntimeCallCounterIdForCompile(ParseInfo* parse_info) {
if (parse_info->flags().is_toplevel()) {
if (parse_info->flags().is_eval()) {
- return RuntimeCallCounterId::kCompileBackgroundEval;
+ return RuntimeCallCounterId::kCompileEval;
}
- return RuntimeCallCounterId::kCompileBackgroundScript;
+ return RuntimeCallCounterId::kCompileScript;
}
- return RuntimeCallCounterId::kCompileBackgroundFunction;
+ return RuntimeCallCounterId::kCompileFunction;
}
#endif // V8_RUNTIME_CALL_STATS
@@ -1389,8 +1456,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
timer_(isolate->counters()->compile_script_on_background()),
start_position_(0),
end_position_(0),
- function_literal_id_(kFunctionLiteralIdTopLevel),
- language_mode_(flags_.outer_language_mode()) {
+ function_literal_id_(kFunctionLiteralIdTopLevel) {
VMState<PARSER> state(isolate);
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
@@ -1414,8 +1480,7 @@ BackgroundCompileTask::BackgroundCompileTask(
input_shared_info_(shared_info),
start_position_(shared_info->StartPosition()),
end_position_(shared_info->EndPosition()),
- function_literal_id_(shared_info->function_literal_id()),
- language_mode_(flags_.outer_language_mode()) {
+ function_literal_id_(shared_info->function_literal_id()) {
DCHECK(!shared_info->is_toplevel());
character_stream_->Seek(start_position_);
@@ -1460,11 +1525,8 @@ void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
} // namespace
void BackgroundCompileTask::Run() {
- WorkerThreadRuntimeCallStatsScope worker_thread_scope(
- worker_thread_runtime_call_stats_);
-
- LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground,
- worker_thread_scope.Get());
+ DCHECK_NE(ThreadId::Current(), isolate_for_local_isolate_->thread_id());
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&isolate);
LocalHandleScope handle_scope(&isolate);
@@ -1473,13 +1535,20 @@ void BackgroundCompileTask::Run() {
Run(&isolate, &reusable_state);
}
+void BackgroundCompileTask::RunOnMainThread(Isolate* isolate) {
+ LocalHandleScope handle_scope(isolate->main_thread_local_isolate());
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ Run(isolate->main_thread_local_isolate(), &reusable_state);
+}
+
void BackgroundCompileTask::Run(
LocalIsolate* isolate, ReusableUnoptimizedCompileState* reusable_state) {
TimedHistogramScope timer(timer_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
- RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBackgroundCompileTask);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileCompileTask,
+ RuntimeCallStats::CounterMode::kThreadSpecific);
bool toplevel_script_compilation = flags_.is_toplevel();
@@ -1549,13 +1618,10 @@ void BackgroundCompileTask::Run(
function_literal_id_);
parser.UpdateStatistics(script_, &use_counts_, &total_preparse_skipped_);
- // Save the language mode.
- language_mode_ = info.language_mode();
-
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
- RCS_SCOPE(info.runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(&info));
+ RCS_SCOPE(isolate, RuntimeCallCounterIdForCompile(&info),
+ RuntimeCallStats::CounterMode::kThreadSpecific);
MaybeHandle<SharedFunctionInfo> maybe_result;
if (info.literal() != nullptr) {
@@ -1583,9 +1649,6 @@ void BackgroundCompileTask::Run(
outer_function_sfi_ = isolate->heap()->NewPersistentMaybeHandle(maybe_result);
DCHECK(isolate->heap()->ContainsPersistentHandle(script_.location()));
persistent_handles_ = isolate->heap()->DetachPersistentHandles();
-
- // Make sure the language mode didn't change.
- DCHECK_EQ(language_mode_, info.language_mode());
}
MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
@@ -1942,7 +2005,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
DCHECK(is_compiled_scope->is_compiled());
- Handle<Code> code = handle(FromCodeT(shared_info->GetCode()), isolate);
+ Handle<CodeT> code = handle(shared_info->GetCode(), isolate);
// Initialize the feedback cell for this JSFunction and reset the interrupt
// budget for feedback vector allocation even if there is a closure feedback
@@ -1972,7 +2035,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
concurrency_mode, code_kind);
}
- Handle<Code> maybe_code;
+ Handle<CodeT> maybe_code;
if (GetOptimizedCode(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
@@ -1984,7 +2047,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// Install a feedback vector if necessary.
if (code->kind() == CodeKind::BASELINE) {
- JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
}
// Check postconditions on success.
@@ -2039,11 +2102,11 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
CompilerTracer::TraceFinishBaselineCompile(isolate, shared, time_taken_ms);
if (shared->script().IsScript()) {
- Compiler::LogFunctionCompilation(
- isolate, CodeEventListener::FUNCTION_TAG,
- handle(Script::cast(shared->script()), isolate), shared,
- Handle<FeedbackVector>(), Handle<AbstractCode>::cast(code),
- CodeKind::BASELINE, time_taken_ms);
+ LogFunctionCompilation(isolate, CodeEventListener::FUNCTION_TAG,
+ handle(Script::cast(shared->script()), isolate),
+ shared, Handle<FeedbackVector>(),
+ Handle<AbstractCode>::cast(code), CodeKind::BASELINE,
+ time_taken_ms);
}
return true;
}
@@ -2058,7 +2121,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
}
// Baseline code needs a feedback vector.
- JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
CodeT baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
@@ -2068,6 +2131,32 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
}
// static
+bool Compiler::CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ IsCompiledScope* is_compiled_scope) {
+#ifdef V8_ENABLE_MAGLEV
+ // Bytecode must be available for maglev compilation.
+ DCHECK(is_compiled_scope->is_compiled());
+ // TODO(v8:7700): Support concurrent compilation.
+ DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
+
+ // Maglev code needs a feedback vector.
+ JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
+
+ MaybeHandle<CodeT> maybe_code = Maglev::Compile(isolate, function);
+ Handle<CodeT> code;
+ if (!maybe_code.ToHandle(&code)) return false;
+
+ DCHECK_EQ(code->kind(), CodeKind::MAGLEV);
+ function->set_code(*code);
+
+ return true;
+#else
+ return false;
+#endif // V8_ENABLE_MAGLEV
+}
+
+// static
MaybeHandle<SharedFunctionInfo> Compiler::CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
@@ -2093,7 +2182,7 @@ bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
}
// static
-bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
DCHECK(AllowCompilation::IsAllowed(isolate));
@@ -2106,14 +2195,14 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
code_kind);
}
- Handle<Code> code;
+ Handle<CodeT> code;
if (!GetOptimizedCode(isolate, function, mode, code_kind).ToHandle(&code)) {
// Optimization failed, get the existing code. We could have optimized code
// from a lower tier here. Unoptimized code must exist already if we are
// optimizing.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
- DCHECK(function->shared().IsInterpreted());
+ DCHECK(function->shared().HasBytecodeArray());
code = ContinuationForConcurrentOptimization(isolate, function);
}
@@ -2129,7 +2218,6 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
function->ChecksOptimizationMarker());
DCHECK_IMPLIES(function->IsInOptimizationQueue(),
mode == ConcurrencyMode::kConcurrent);
- return true;
}
// static
@@ -2737,7 +2825,7 @@ bool CompilationExceptionIsRangeError(Isolate* isolate, Handle<Object> obj) {
if (!obj->IsJSError(isolate)) return false;
Handle<JSReceiver> js_obj = Handle<JSReceiver>::cast(obj);
Handle<JSReceiver> constructor;
- if (!JSReceiver::GetConstructor(js_obj).ToHandle(&constructor)) {
+ if (!JSReceiver::GetConstructor(isolate, js_obj).ToHandle(&constructor)) {
return false;
}
return *constructor == *isolate->range_error_function();
@@ -2850,7 +2938,8 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
}
}
if (magic_matches) {
- return Compiler::GetSharedFunctionInfoForWebSnapshot(isolate, source);
+ return Compiler::GetSharedFunctionInfoForWebSnapshot(
+ isolate, source, script_details.name_obj);
}
}
@@ -3111,8 +3200,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.CheckCache");
- maybe_result = compilation_cache->LookupScript(source, script_details,
- task->language_mode());
+ maybe_result = compilation_cache->LookupScript(
+ source, script_details, task->flags().outer_language_mode());
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
}
@@ -3133,7 +3222,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
// Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
- compilation_cache->PutScript(source, task->language_mode(), result);
+ compilation_cache->PutScript(source, task->flags().outer_language_mode(),
+ result);
}
}
@@ -3145,7 +3235,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
// static
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForWebSnapshot(
- Isolate* isolate, Handle<String> source) {
+ Isolate* isolate, Handle<String> source,
+ MaybeHandle<Object> maybe_script_name) {
// This script won't hold the functions created from the web snapshot;
// reserving space only for the top-level SharedFunctionInfo is enough.
Handle<WeakFixedArray> shared_function_infos =
@@ -3153,6 +3244,12 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForWebSnapshot(
Handle<Script> script = isolate->factory()->NewScript(source);
script->set_type(Script::TYPE_WEB_SNAPSHOT);
script->set_shared_function_infos(*shared_function_infos);
+ Handle<Object> script_name;
+ if (maybe_script_name.ToHandle(&script_name) && script_name->IsString()) {
+ script->set_name(String::cast(*script_name));
+ } else {
+ script->set_name(*isolate->factory()->empty_string());
+ }
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWebSnapshot();
@@ -3212,10 +3309,9 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Isolate* isolate,
- Handle<JSFunction> function,
- BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame) {
+MaybeHandle<CodeT> Compiler::GetOptimizedCodeForOSR(
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame) {
DCHECK(!osr_offset.IsNone());
DCHECK_NOT_NULL(osr_frame);
return GetOptimizedCode(isolate, function, ConcurrencyMode::kNotConcurrent,
@@ -3299,9 +3395,8 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// deoptimized the code on the feedback vector. So check for any
// deoptimized code just before installing it on the funciton.
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->raw_feedback_cell(), *shared,
- "new function from shared function info");
- Code code = function->feedback_vector().optimized_code();
+ *shared, "new function from shared function info");
+ CodeT code = function->feedback_vector().optimized_code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
@@ -3319,8 +3414,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
!shared->optimization_disabled() &&
!function->HasAvailableOptimizedCode()) {
CompilerTracer::TraceMarkForAlwaysOpt(isolate, function);
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
+ function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
+ ConcurrencyMode::kNotConcurrent);
}
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index f49bd727bc..f34c0a3326 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -77,6 +77,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
+ static MaybeHandle<SharedFunctionInfo> CompileToplevel(
+ ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope);
+
static bool CompileSharedWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
@@ -84,29 +88,24 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
- static bool CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+
+ static bool CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ IsCompiledScope* is_compiled_scope);
+
+ static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
- static MaybeHandle<SharedFunctionInfo> CompileToplevel(
- ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
- IsCompiledScope* is_compiled_scope);
- static void LogFunctionCompilation(Isolate* isolate,
- CodeEventListener::LogEventsAndTags tag,
- Handle<Script> script,
- Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- Handle<AbstractCode> abstract_code,
- CodeKind kind, double time_taken_ms);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
+ CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
+ Isolate* isolate);
+
// Collect source positions for a function that has already been compiled to
// bytecode, but for which source positions were not collected (e.g. because
// they were not immediately needed).
static bool CollectSourcePositions(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
- V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
- CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
- Isolate* isolate);
-
// Finalize and install code from previously run background compile task.
static bool FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
Isolate* isolate,
@@ -215,7 +214,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
const ScriptDetails& script_details, ScriptStreamingData* streaming_data);
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForWebSnapshot(
- Isolate* isolate, Handle<String> source);
+ Isolate* isolate, Handle<String> source, MaybeHandle<Object> script_name);
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
@@ -234,7 +233,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// instead of generating JIT code for a function at all.
// Generate and return optimized code for OSR, or empty handle on failure.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> GetOptimizedCodeForOSR(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
};
@@ -517,6 +516,7 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
TimedHistogram* timer, int max_stack_size);
void Run();
+ void RunOnMainThread(Isolate* isolate);
void Run(LocalIsolate* isolate,
ReusableUnoptimizedCompileState* reusable_state);
@@ -529,7 +529,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
void AbortFunction();
UnoptimizedCompileFlags flags() const { return flags_; }
- LanguageMode language_mode() const { return language_mode_; }
private:
void ReportStatistics(Isolate* isolate);
@@ -561,8 +560,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
int start_position_;
int end_position_;
int function_literal_id_;
-
- LanguageMode language_mode_;
};
// Contains all data which needs to be transmitted between threads for
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index e80d560fd1..88f28b9238 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -27,6 +27,7 @@ enum CpuFeature {
LZCNT,
POPCNT,
INTEL_ATOM,
+ CETSS,
#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
@@ -147,6 +148,7 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
// at runtime in builtins using an extern ref. Other callers should use
// CpuFeatures::SupportWasmSimd128().
static bool supports_wasm_simd_128_;
+ static bool supports_cetss_;
};
} // namespace internal
diff --git a/deps/v8/src/codegen/external-reference-table.h b/deps/v8/src/codegen/external-reference-table.h
index 0bf42477ae..4f8839aa53 100644
--- a/deps/v8/src/codegen/external-reference-table.h
+++ b/deps/v8/src/codegen/external-reference-table.h
@@ -5,8 +5,6 @@
#ifndef V8_CODEGEN_EXTERNAL_REFERENCE_TABLE_H_
#define V8_CODEGEN_EXTERNAL_REFERENCE_TABLE_H_
-#include <vector>
-
#include "src/builtins/accessors.h"
#include "src/builtins/builtins.h"
#include "src/codegen/external-reference.h"
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 075eaf8c09..6d206663ba 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -5,7 +5,7 @@
#include "src/codegen/external-reference.h"
#include "include/v8-fast-api-calls.h"
-#include "src/api/api.h"
+#include "src/api/api-inl.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
@@ -214,40 +214,33 @@ ExternalReference ExternalReference::builtins_table(Isolate* isolate) {
return ExternalReference(isolate->builtin_table());
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-ExternalReference ExternalReference::builtins_code_data_container_table(
- Isolate* isolate) {
- return ExternalReference(isolate->builtin_code_data_container_table());
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
ExternalReference ExternalReference::handle_scope_implementer_address(
Isolate* isolate) {
return ExternalReference(isolate->handle_scope_implementer_address());
}
-#ifdef V8_CAGED_POINTERS
-ExternalReference ExternalReference::virtual_memory_cage_base_address() {
- return ExternalReference(GetProcessWideVirtualMemoryCage()->base_address());
+#ifdef V8_SANDBOXED_POINTERS
+ExternalReference ExternalReference::sandbox_base_address() {
+ return ExternalReference(GetProcessWideSandbox()->base_address());
}
-ExternalReference ExternalReference::virtual_memory_cage_end_address() {
- return ExternalReference(GetProcessWideVirtualMemoryCage()->end_address());
+ExternalReference ExternalReference::sandbox_end_address() {
+ return ExternalReference(GetProcessWideSandbox()->end_address());
}
ExternalReference ExternalReference::empty_backing_store_buffer() {
- return ExternalReference(GetProcessWideVirtualMemoryCage()
+ return ExternalReference(GetProcessWideSandbox()
->constants()
.empty_backing_store_buffer_address());
}
-#endif // V8_CAGED_POINTERS
+#endif // V8_SANDBOXED_POINTERS
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
ExternalReference ExternalReference::external_pointer_table_address(
Isolate* isolate) {
return ExternalReference(isolate->external_pointer_table_address());
}
-#endif
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
@@ -565,6 +558,10 @@ ExternalReference ExternalReference::address_of_runtime_stats_flag() {
return ExternalReference(&TracingFlags::runtime_stats);
}
+ExternalReference ExternalReference::address_of_shared_string_table_flag() {
+ return ExternalReference(&FLAG_shared_string_table);
+}
+
ExternalReference ExternalReference::address_of_load_from_stack_count(
const char* function_name) {
return ExternalReference(
@@ -667,6 +664,11 @@ ExternalReference ExternalReference::address_of_wasm_int32_overflow_as_float() {
reinterpret_cast<Address>(&wasm_int32_overflow_as_float));
}
+ExternalReference ExternalReference::supports_cetss_address() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&CpuFeatures::supports_cetss_));
+}
+
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
@@ -1377,9 +1379,9 @@ FUNCTION_REFERENCE(
js_finalization_registry_remove_cell_from_unregister_token_map,
JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap)
-#ifdef V8_HEAP_SANDBOX
-FUNCTION_REFERENCE(external_pointer_table_grow_table_function,
- ExternalPointerTable::GrowTable)
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+FUNCTION_REFERENCE(external_pointer_table_allocate_entry,
+ ExternalPointerTable::AllocateEntry)
#endif
bool operator==(ExternalReference lhs, ExternalReference rhs) {
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index a0c27d207e..7715aa6a81 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -79,24 +79,16 @@ class StatsCounter;
V(thread_in_wasm_flag_address_address, \
"Isolate::thread_in_wasm_flag_address_address") \
V(javascript_execution_assert, "javascript_execution_assert") \
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_SANDBOXED_EXTERNAL_POINTERS(V)
-#ifdef V8_EXTERNAL_CODE_SPACE
-#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
- V(builtins_code_data_container_table, "builtins_code_data_container_table")
-#else
-#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V)
-#endif // V8_EXTERNAL_CODE_SPACE
-
-#ifdef V8_HEAP_SANDBOX
-#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V) \
- V(external_pointer_table_address, \
- "Isolate::external_pointer_table_address(" \
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_SANDBOXED_EXTERNAL_POINTERS(V) \
+ V(external_pointer_table_address, \
+ "Isolate::external_pointer_table_address(" \
")")
#else
-#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
-#endif // V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_SANDBOXED_EXTERNAL_POINTERS(V)
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
#define EXTERNAL_REFERENCE_LIST(V) \
V(abort_with_reason, "abort_with_reason") \
@@ -112,6 +104,7 @@ class StatsCounter;
"FLAG_mock_arraybuffer_allocator") \
V(address_of_one_half, "LDoubleConstant::one_half") \
V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
+ V(address_of_shared_string_table_flag, "FLAG_shared_string_table") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
@@ -257,6 +250,7 @@ class StatsCounter;
V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
+ V(supports_cetss_address, "CpuFeatures::supports_cetss_address") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
@@ -306,8 +300,8 @@ class StatsCounter;
V(re_experimental_match_for_call_from_js, \
"ExperimentalRegExp::MatchForCallFromJs") \
EXTERNAL_REFERENCE_LIST_INTL(V) \
- EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V) \
- EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
+ EXTERNAL_REFERENCE_LIST_SANDBOX(V) \
+ EXTERNAL_REFERENCE_LIST_SANDBOXED_EXTERNAL_POINTERS(V)
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
@@ -318,22 +312,22 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST_INTL(V)
#endif // V8_INTL_SUPPORT
-#ifdef V8_CAGED_POINTERS
-#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V) \
- V(virtual_memory_cage_base_address, "V8VirtualMemoryCage::base()") \
- V(virtual_memory_cage_end_address, "V8VirtualMemoryCage::end()") \
+#ifdef V8_SANDBOXED_POINTERS
+#define EXTERNAL_REFERENCE_LIST_SANDBOX(V) \
+ V(sandbox_base_address, "Sandbox::base()") \
+ V(sandbox_end_address, "Sandbox::end()") \
V(empty_backing_store_buffer, "EmptyBackingStoreBuffer()")
#else
-#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V)
-#endif // V8_CAGED_POINTERS
+#define EXTERNAL_REFERENCE_LIST_SANDBOX(V)
+#endif // V8_SANDBOXED_POINTERS
-#ifdef V8_HEAP_SANDBOX
-#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V) \
- V(external_pointer_table_grow_table_function, \
- "ExternalPointerTable::GrowTable")
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+#define EXTERNAL_REFERENCE_LIST_SANDBOXED_EXTERNAL_POINTERS(V) \
+ V(external_pointer_table_allocate_entry, \
+ "ExternalPointerTable::AllocateEntry")
#else
-#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
-#endif // V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_SANDBOXED_EXTERNAL_POINTERS(V)
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 389640e89a..8a19b091d7 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -43,7 +43,7 @@
#if V8_LIBC_MSVCRT
#include <intrin.h> // _xgetbv()
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <sys/sysctl.h>
#endif
@@ -96,7 +96,7 @@ V8_INLINE uint64_t xgetbv(unsigned int xcr) {
}
bool OSHasAVXSupport() {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
// caused by ISRs, so we detect that here and disable AVX in that case.
char buffer[128];
@@ -112,7 +112,7 @@ bool OSHasAVXSupport() {
*period_pos = '\0';
long kernel_version_major = strtol(buffer, nullptr, 10); // NOLINT
if (kernel_version_major <= 13) return false;
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
// Check whether OS claims to support AVX.
uint64_t feature_mask = xgetbv(0); // XCR_XFEATURE_ENABLED_MASK
return (feature_mask & 0x6) == 0x6;
@@ -369,7 +369,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
index 4b20fd7bca..2da91a0f59 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -24,8 +24,8 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data,
int nof_expected_args) {
RegList allocatable_regs = data->allocatable_registers();
- if (nof_expected_args >= 1) DCHECK(allocatable_regs | esi.bit());
- if (nof_expected_args >= 2) DCHECK(allocatable_regs | edi.bit());
+ if (nof_expected_args >= 1) DCHECK(allocatable_regs.has(esi));
+ if (nof_expected_args >= 2) DCHECK(allocatable_regs.has(edi));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
@@ -60,6 +60,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return eax; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return edx;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return ecx; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return edx; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return ecx; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return edi;
@@ -105,7 +135,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(ecx); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(eax); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -115,6 +145,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // edi : the source
+ // eax : the excluded property count
+ return RegisterArray(edi, eax);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // edi : the source
+ // eax : the excluded property count
+ // ecx : the excluded property base
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// eax : number of arguments (on the stack)
// edi : the target to call
@@ -223,6 +269,11 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ return RegisterArray(eax, edx, ecx);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(edx, // kApiFunctionAddress
ecx, // kArgc
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index b4824736b9..0678ad31c5 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -428,22 +428,14 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- ASM_CODE_COMMENT(this);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- push(Register::from_code(i));
- }
+ for (Register reg : registers) {
+ push(reg);
}
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- ASM_CODE_COMMENT(this);
- for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
- if ((registers >> i) & 1u) {
- pop(Register::from_code(i));
- }
+ for (Register reg : base::Reversed(registers)) {
+ pop(reg);
}
}
@@ -1303,7 +1295,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Extra words are the receiver (if not already included in argc) and the
// return address (if a jump).
int extra_words = type == InvokeType::kCall ? 0 : 1;
- if (!kJSArgcIncludesReceiver) extra_words++;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
@@ -1466,14 +1457,6 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
mov(destination, Operand(destination, Context::SlotOffset(index)));
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the lowest encoding,
- // which means that lowest encodings are furthest away from
- // the stack pointer.
- DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return kNumSafepointRegisters - reg_code - 1;
-}
-
void TurboAssembler::Ret() { ret(0); }
void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index e1b7e15363..97ab71753f 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -655,10 +655,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void StackOverflowCheck(Register num_args, Register scratch,
Label* stack_overflow, bool include_receiver = false);
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
private:
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
@@ -670,13 +666,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue();
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class CommonFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index 37a5783ded..e06a372ea6 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_IA32_REGISTER_IA32_H_
#define V8_CODEGEN_IA32_REGISTER_IA32_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -67,7 +66,7 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
+static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -82,7 +81,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleCode {
@@ -112,17 +111,6 @@ constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// Note that the bit values must match those used in actual instruction encoding
constexpr int kNumRegs = 8;
-// Caller-saved registers
-constexpr RegList kJSCallerSaved =
- Register::ListOf(eax, ecx, edx,
- ebx, // used as caller-saved register in JavaScript code
- edi); // callee function
-
-constexpr int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-constexpr int kNumSafepointRegisters = 8;
-
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
diff --git a/deps/v8/src/codegen/ia32/reglist-ia32.h b/deps/v8/src/codegen/ia32/reglist-ia32.h
new file mode 100644
index 0000000000..e8f3448dd9
--- /dev/null
+++ b/deps/v8/src/codegen/ia32/reglist-ia32.h
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_IA32_REGLIST_IA32_H_
+#define V8_CODEGEN_IA32_REGLIST_IA32_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+// Caller-saved registers
+constexpr RegList kJSCallerSaved = {
+ eax, ecx, edx,
+ ebx, // used as caller-saved register in JavaScript code
+ edi}; // callee function
+
+constexpr int kNumJSCallerSaved = 5;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_IA32_REGLIST_IA32_H_
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
index d5a8ccf6e4..349874e2b7 100644
--- a/deps/v8/src/codegen/interface-descriptors-inl.h
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -9,7 +9,7 @@
#include "src/base/logging.h"
#include "src/codegen/interface-descriptors.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#if V8_TARGET_ARCH_X64
#include "src/codegen/x64/interface-descriptors-x64-inl.h"
@@ -217,19 +217,24 @@ constexpr Register WriteBarrierDescriptor::ValueRegister() {
constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters(
Register object, Register slot_address) {
DCHECK(!AreAliased(object, slot_address));
- RegList saved_registers = 0;
+ RegList saved_registers;
#if V8_TARGET_ARCH_X64
// Only push clobbered registers.
- if (object != ObjectRegister()) saved_registers |= ObjectRegister().bit();
+ if (object != ObjectRegister()) saved_registers.set(ObjectRegister());
if (slot_address != no_reg && slot_address != SlotAddressRegister()) {
- saved_registers |= SlotAddressRegister().bit();
+ saved_registers.set(SlotAddressRegister());
}
+#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64 || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+ if (object != ObjectRegister()) saved_registers.set(ObjectRegister());
+ // The slot address is always clobbered.
+ saved_registers.set(SlotAddressRegister());
#else
// TODO(cbruni): Enable callee-saved registers for other platforms.
// This is a temporary workaround to prepare code for callee-saved registers.
constexpr auto allocated_registers = registers();
for (size_t i = 0; i < allocated_registers.size(); ++i) {
- saved_registers |= allocated_registers[i].bit();
+ saved_registers.set(allocated_registers[i]);
}
#endif
return saved_registers;
@@ -457,6 +462,37 @@ constexpr auto LoadWithVectorDescriptor::registers() {
}
// static
+constexpr auto KeyedLoadBaselineDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), SlotRegister());
+}
+
+// static
+constexpr auto KeyedLoadDescriptor::registers() {
+ return KeyedLoadBaselineDescriptor::registers();
+}
+
+// static
+constexpr auto KeyedLoadWithVectorDescriptor::registers() {
+ return RegisterArray(KeyedLoadBaselineDescriptor::ReceiverRegister(),
+ KeyedLoadBaselineDescriptor::NameRegister(),
+ KeyedLoadBaselineDescriptor::SlotRegister(),
+ VectorRegister());
+}
+
+// static
+constexpr auto KeyedHasICBaselineDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), SlotRegister());
+}
+
+// static
+constexpr auto KeyedHasICWithVectorDescriptor::registers() {
+ return RegisterArray(KeyedHasICBaselineDescriptor::ReceiverRegister(),
+ KeyedHasICBaselineDescriptor::NameRegister(),
+ KeyedHasICBaselineDescriptor::SlotRegister(),
+ VectorRegister());
+}
+
+// static
constexpr auto StoreWithVectorDescriptor::registers() {
return RegisterArray(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(),
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index a30299011d..5f7d09fb2a 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -19,16 +19,16 @@ void CallInterfaceDescriptorData::InitializeRegisters(
#ifdef DEBUG
{
// Make sure that the registers are all valid, and don't alias each other.
- RegList reglist = 0;
+ RegList reglist;
for (int i = 0; i < register_parameter_count; ++i) {
Register reg = registers[i];
DCHECK(reg.is_valid());
- DCHECK_EQ(reglist & reg.bit(), 0);
+ DCHECK(!reglist.has(reg));
DCHECK_NE(reg, kRootRegister);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
DCHECK_NE(reg, kPtrComprCageBaseRegister);
#endif
- reglist = CombineRegLists(reglist, reg.bit());
+ reglist.set(reg);
}
}
#endif
@@ -145,8 +145,8 @@ void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
DCHECK_EQ(ObjectRegister(), kJSFunctionRegister);
// We need a certain set of registers by default:
RegList allocatable_regs = data->allocatable_registers();
- DCHECK(allocatable_regs | kContextRegister.bit());
- DCHECK(allocatable_regs | kReturnRegister0.bit());
+ DCHECK(allocatable_regs.has(kContextRegister));
+ DCHECK(allocatable_regs.has(kReturnRegister0));
VerifyArgumentRegisterCount(data, 4);
}
#endif // DEBUG
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index b083246fad..3e10c6dcd7 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -9,7 +9,7 @@
#include "src/base/logging.h"
#include "src/codegen/machine-type.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -21,111 +21,120 @@ namespace internal {
BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Abort) \
- V(Allocate) \
- V(ApiCallback) \
- V(ApiGetter) \
- V(ArrayConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(AsyncFunctionStackParameter) \
- V(BigIntToI32Pair) \
- V(BigIntToI64) \
- V(BinaryOp) \
- V(BinaryOp_Baseline) \
- V(BinaryOp_WithFeedback) \
- V(CallForwardVarargs) \
- V(CallFunctionTemplate) \
- V(CallTrampoline) \
- V(CallTrampoline_Baseline) \
- V(CallTrampoline_Baseline_Compact) \
- V(CallTrampoline_WithFeedback) \
- V(CallVarargs) \
- V(CallWithArrayLike) \
- V(CallWithArrayLike_WithFeedback) \
- V(CallWithSpread) \
- V(CallWithSpread_Baseline) \
- V(CallWithSpread_WithFeedback) \
- V(CEntry1ArgvOnStack) \
- V(CloneObjectBaseline) \
- V(CloneObjectWithVector) \
- V(Compare) \
- V(Compare_Baseline) \
- V(Compare_WithFeedback) \
- V(ConstructForwardVarargs) \
- V(ConstructStub) \
- V(ConstructVarargs) \
- V(ConstructWithArrayLike) \
- V(ConstructWithArrayLike_WithFeedback) \
- V(Construct_WithFeedback) \
- V(Construct_Baseline) \
- V(ConstructWithSpread) \
- V(ConstructWithSpread_Baseline) \
- V(ConstructWithSpread_WithFeedback) \
- V(ContextOnly) \
- V(CppBuiltinAdaptor) \
- V(DynamicCheckMaps) \
- V(DynamicCheckMapsWithFeedbackVector) \
- V(FastNewObject) \
- V(ForInPrepare) \
- V(GetIteratorStackParameter) \
- V(GetProperty) \
- V(GrowArrayElements) \
- V(I32PairToBigInt) \
- V(I64ToBigInt) \
- V(InterpreterCEntry1) \
- V(InterpreterCEntry2) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsThenCall) \
- V(InterpreterPushArgsThenConstruct) \
- V(JSTrampoline) \
- V(BaselineOutOfLinePrologue) \
- V(BaselineLeaveFrame) \
- V(Load) \
- V(LoadBaseline) \
- V(LoadGlobal) \
- V(LoadGlobalBaseline) \
- V(LoadGlobalNoFeedback) \
- V(LoadGlobalWithVector) \
- V(LoadNoFeedback) \
- V(LoadWithVector) \
- V(LoadWithReceiverAndVector) \
- V(LoadWithReceiverBaseline) \
- V(LookupBaseline) \
- V(NoContext) \
- V(ResumeGenerator) \
- V(SuspendGeneratorBaseline) \
- V(ResumeGeneratorBaseline) \
- V(RunMicrotasks) \
- V(RunMicrotasksEntry) \
- V(SingleParameterOnStack) \
- V(Store) \
- V(StoreBaseline) \
- V(StoreGlobal) \
- V(StoreGlobalBaseline) \
- V(StoreGlobalWithVector) \
- V(StoreTransition) \
- V(StoreWithVector) \
- V(StringAt) \
- V(StringAtAsString) \
- V(StringSubstring) \
- IF_TSAN(V, TSANStore) \
- IF_TSAN(V, TSANLoad) \
- V(TypeConversion) \
- V(TypeConversionNoContext) \
- V(TypeConversion_Baseline) \
- V(Typeof) \
- V(UnaryOp_Baseline) \
- V(UnaryOp_WithFeedback) \
- V(Void) \
- V(WasmFloat32ToNumber) \
- V(WasmFloat64ToNumber) \
- V(WasmI32AtomicWait32) \
- V(WasmI64AtomicWait32) \
- V(WriteBarrier) \
- BUILTIN_LIST_TFS(V) \
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Abort) \
+ V(Allocate) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(ArrayConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(AsyncFunctionStackParameter) \
+ V(BigIntToI32Pair) \
+ V(BigIntToI64) \
+ V(BinaryOp) \
+ V(BinaryOp_Baseline) \
+ V(BinarySmiOp_Baseline) \
+ V(BinaryOp_WithFeedback) \
+ V(CallForwardVarargs) \
+ V(CallFunctionTemplate) \
+ V(CopyDataPropertiesWithExcludedProperties) \
+ V(CopyDataPropertiesWithExcludedPropertiesOnStack) \
+ V(CallTrampoline) \
+ V(CallTrampoline_Baseline) \
+ V(CallTrampoline_Baseline_Compact) \
+ V(CallTrampoline_WithFeedback) \
+ V(CallVarargs) \
+ V(CallWithArrayLike) \
+ V(CallWithArrayLike_WithFeedback) \
+ V(CallWithSpread) \
+ V(CallWithSpread_Baseline) \
+ V(CallWithSpread_WithFeedback) \
+ V(CEntry1ArgvOnStack) \
+ V(CloneObjectBaseline) \
+ V(CloneObjectWithVector) \
+ V(Compare) \
+ V(Compare_Baseline) \
+ V(Compare_WithFeedback) \
+ V(ConstructForwardVarargs) \
+ V(ConstructStub) \
+ V(ConstructVarargs) \
+ V(ConstructWithArrayLike) \
+ V(ConstructWithArrayLike_WithFeedback) \
+ V(Construct_WithFeedback) \
+ V(Construct_Baseline) \
+ V(ConstructWithSpread) \
+ V(ConstructWithSpread_Baseline) \
+ V(ConstructWithSpread_WithFeedback) \
+ V(ContextOnly) \
+ V(CppBuiltinAdaptor) \
+ V(DynamicCheckMaps) \
+ V(DynamicCheckMapsWithFeedbackVector) \
+ V(FastNewObject) \
+ V(ForInPrepare) \
+ V(GetIteratorStackParameter) \
+ V(GetProperty) \
+ V(GrowArrayElements) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsThenCall) \
+ V(InterpreterPushArgsThenConstruct) \
+ V(JSTrampoline) \
+ V(BaselineOutOfLinePrologue) \
+ V(BaselineLeaveFrame) \
+ V(Load) \
+ V(LoadBaseline) \
+ V(LoadGlobal) \
+ V(LoadGlobalBaseline) \
+ V(LoadGlobalNoFeedback) \
+ V(LoadGlobalWithVector) \
+ V(LoadNoFeedback) \
+ V(LoadWithVector) \
+ V(KeyedLoad) \
+ V(KeyedLoadBaseline) \
+ V(KeyedLoadWithVector) \
+ V(KeyedHasICBaseline) \
+ V(KeyedHasICWithVector) \
+ V(LoadWithReceiverAndVector) \
+ V(LoadWithReceiverBaseline) \
+ V(LookupBaseline) \
+ V(NoContext) \
+ V(ResumeGenerator) \
+ V(SuspendGeneratorBaseline) \
+ V(ResumeGeneratorBaseline) \
+ V(RunMicrotasks) \
+ V(RunMicrotasksEntry) \
+ V(SingleParameterOnStack) \
+ V(Store) \
+ V(StoreBaseline) \
+ V(StoreGlobal) \
+ V(StoreGlobalBaseline) \
+ V(StoreGlobalWithVector) \
+ V(StoreTransition) \
+ V(StoreWithVector) \
+ V(StringAt) \
+ V(StringAtAsString) \
+ V(StringSubstring) \
+ IF_TSAN(V, TSANStore) \
+ IF_TSAN(V, TSANLoad) \
+ V(TypeConversion) \
+ V(TypeConversionNoContext) \
+ V(TypeConversion_Baseline) \
+ V(Typeof) \
+ V(UnaryOp_Baseline) \
+ V(UnaryOp_WithFeedback) \
+ V(Void) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI64AtomicWait32) \
+ V(WasmSuspend) \
+ V(WriteBarrier) \
+ BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
enum class StackArgumentOrder {
@@ -204,11 +213,11 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
StackArgumentOrder stack_order() const { return stack_order_; }
void RestrictAllocatableRegisters(const Register* registers, size_t num) {
- DCHECK_EQ(allocatable_registers_, 0);
+ DCHECK(allocatable_registers_.is_empty());
for (size_t i = 0; i < num; ++i) {
- allocatable_registers_ |= registers[i].bit();
+ allocatable_registers_.set(registers[i]);
}
- DCHECK_GT(NumRegs(allocatable_registers_), 0);
+ DCHECK(!allocatable_registers_.is_empty());
}
RegList allocatable_registers() const { return allocatable_registers_; }
@@ -240,7 +249,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// Specifying the set of registers that could be used by the register
// allocator. Currently, it's only used by RecordWrite code stub.
- RegList allocatable_registers_ = 0;
+ RegList allocatable_registers_;
// |registers_params_| defines registers that are used for parameter passing.
// |machine_types_| defines machine types for resulting values and incomping
@@ -725,7 +734,7 @@ class NoContextDescriptor
static constexpr auto registers();
};
-// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
+// LoadDescriptor is used by all stubs that implement Load ICs.
class LoadDescriptor : public StaticCallInterfaceDescriptor<LoadDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot)
@@ -931,6 +940,80 @@ class LoadWithVectorDescriptor
static constexpr auto registers();
};
+class KeyedLoadBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<KeyedLoadBaselineDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(KeyedLoadBaselineDescriptor)
+
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register SlotRegister();
+
+ static constexpr auto registers();
+};
+
+class KeyedLoadDescriptor
+ : public StaticCallInterfaceDescriptor<KeyedLoadDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(KeyedLoadDescriptor)
+
+ static constexpr auto registers();
+};
+
+class KeyedLoadWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<KeyedLoadWithVectorDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(KeyedLoadWithVectorDescriptor)
+
+ static constexpr inline Register VectorRegister();
+
+ static constexpr auto registers();
+};
+
+class KeyedHasICBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<KeyedHasICBaselineDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(KeyedHasICBaselineDescriptor)
+
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register SlotRegister();
+
+ static constexpr auto registers();
+};
+
+class KeyedHasICWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<KeyedHasICWithVectorDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(KeyedHasICWithVectorDescriptor)
+
+ static constexpr inline Register VectorRegister();
+
+ static constexpr auto registers();
+};
+
// Like LoadWithVectorDescriptor, except we pass the receiver (the object which
// should be used as the receiver for accessor function calls) and the lookup
// start object separately.
@@ -1170,6 +1253,31 @@ class CallTrampolineDescriptor
static constexpr inline auto registers();
};
+class CopyDataPropertiesWithExcludedPropertiesDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CopyDataPropertiesWithExcludedPropertiesDescriptor> {
+ public:
+ DEFINE_PARAMETERS_VARARGS(kSource, kExcludedPropertyCount)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource
+ MachineType::AnyTagged()) // kExcludedPropertyCount
+ DECLARE_DESCRIPTOR(CopyDataPropertiesWithExcludedPropertiesDescriptor)
+
+ static constexpr inline auto registers();
+};
+
+class CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kSource, kExcludedPropertyCount, kExcludedPropertyBase)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource
+ MachineType::IntPtr(),
+ MachineType::IntPtr()) // kExcludedPropertyCount
+ DECLARE_DESCRIPTOR(CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor)
+
+ static constexpr inline auto registers();
+};
+
class CallVarargsDescriptor
: public StaticCallInterfaceDescriptor<CallVarargsDescriptor> {
public:
@@ -1468,6 +1576,18 @@ class BinaryOp_BaselineDescriptor
static constexpr inline auto registers();
};
+class BinarySmiOp_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<BinarySmiOp_BaselineDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::TaggedSigned(), // kRight
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(BinarySmiOp_BaselineDescriptor)
+
+ static constexpr inline auto registers();
+};
+
// This desciptor is shared among String.p.charAt/charCodeAt/codePointAt
// as they all have the same interface.
class StringAtDescriptor final
@@ -1787,6 +1907,16 @@ class WasmFloat64ToNumberDescriptor final
#endif
};
+class WasmSuspendDescriptor final
+ : public StaticCallInterfaceDescriptor<WasmSuspendDescriptor> {
+ public:
+ DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(1, kArg0, kArg1)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
+ MachineType::AnyTagged(), // value
+ MachineType::AnyTagged()) // value
+ DECLARE_DESCRIPTOR(WasmSuspendDescriptor)
+};
+
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
: public StaticCallInterfaceDescriptor<I64ToBigIntDescriptor> {
public:
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
index d212bec035..ee3877e530 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -167,7 +167,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(t7.bit() | t6.bit()) {
+ scratch_register_list_({t7, t6}) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_trampoline_pool_end_ = 0;
@@ -220,7 +220,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -2361,14 +2361,12 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
-
- return Register::from_code(index);
+ return available_->PopFirst();
}
-bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+bool UseScratchRegisterScope::hasAvailable() const {
+ return !available_->is_empty();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
index 63fe001d22..a25f105c70 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -1091,13 +1091,13 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
bool hasAvailable() const;
void Include(const RegList& list) { *available_ |= list; }
- void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Exclude(const RegList& list) { available_->clear(list); }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Include(list);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Exclude(list);
}
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
index 394c5dc6ab..b5a9daeb32 100644
--- a/deps/v8/src/codegen/loong64/constants-loong64.h
+++ b/deps/v8/src/codegen/loong64/constants-loong64.h
@@ -337,8 +337,8 @@ enum Opcode : uint32_t {
SUB_D = 0x23U << 15,
SLT = 0x24U << 15,
SLTU = 0x25U << 15,
- MASKNEZ = 0x26U << 15,
- MASKEQZ = 0x27U << 15,
+ MASKEQZ = 0x26U << 15,
+ MASKNEZ = 0x27U << 15,
NOR = 0x28U << 15,
AND = 0x29U << 15,
OR = 0x2aU << 15,
diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
index f0fc3ac473..5b4e8c8e71 100644
--- a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -24,21 +24,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
- if (argc >= 5) DCHECK(allocatable_regs | a4.bit());
- if (argc >= 6) DCHECK(allocatable_regs | a5.bit());
- if (argc >= 7) DCHECK(allocatable_regs | a6.bit());
- if (argc >= 8) DCHECK(allocatable_regs | a7.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(a0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(a1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(a2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(a3));
+ if (argc >= 5) DCHECK(allocatable_regs.has(a4));
+ if (argc >= 6) DCHECK(allocatable_regs.has(a5));
+ if (argc >= 7) DCHECK(allocatable_regs.has(a6));
+ if (argc >= 8) DCHECK(allocatable_regs.has(a7));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(a1, a5, a4, a2, a0, a3);
+ return RegisterArray(a1, a5, a4, a2, a0, a3, kContextRegister);
}
// static
@@ -64,6 +64,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return a0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return a1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return a4;
@@ -106,7 +136,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -116,6 +146,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ // a2 : the excluded property base
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
@@ -225,6 +271,14 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // a0: left operand
+ // a1: right operand
+ // a2: feedback slot
+ return RegisterArray(a0, a1, a2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
// a1 : kApiFunctionAddress
// a2 : kArgc
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index cccfa6294c..982c4b7eb8 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -51,22 +51,13 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -74,25 +65,17 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -100,26 +83,17 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
- }
-
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
return bytes;
}
@@ -158,6 +132,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -168,7 +143,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
+ // of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
if (FLAG_debug_code) {
@@ -190,29 +165,18 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
+ ASM_CODE_COMMENT(this);
RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
MaybeSaveRegisters(registers);
@@ -272,6 +236,7 @@ void TurboAssembler::CallRecordWriteStub(
Register scratch = temps.Acquire();
li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
+ RecordComment("]");
} else {
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
@@ -281,6 +246,7 @@ void TurboAssembler::CallRecordWriteStub(
void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(dst_object, dst_slot);
// If `offset` is a register, it cannot overlap with `object`.
DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
@@ -1290,7 +1256,7 @@ void TurboAssembler::MultiPush(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
@@ -1299,17 +1265,17 @@ void TurboAssembler::MultiPush(RegList regs) {
}
void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
- DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK((regs1 & regs2).is_empty());
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs1 & (1 << i)) != 0) {
+ if ((regs1.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs2 & (1 << i)) != 0) {
+ if ((regs2.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
@@ -1318,25 +1284,25 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
}
void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
- DCHECK_EQ(regs1 & regs2, 0);
- DCHECK_EQ(regs1 & regs3, 0);
- DCHECK_EQ(regs2 & regs3, 0);
+ DCHECK((regs1 & regs2).is_empty());
+ DCHECK((regs1 & regs3).is_empty());
+ DCHECK((regs2 & regs3).is_empty());
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs1 & (1 << i)) != 0) {
+ if ((regs1.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs2 & (1 << i)) != 0) {
+ if ((regs2.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs3 & (1 << i)) != 0) {
+ if ((regs3.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
@@ -1348,7 +1314,7 @@ void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
@@ -1357,17 +1323,17 @@ void TurboAssembler::MultiPop(RegList regs) {
}
void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
- DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK((regs1 & regs2).is_empty());
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs2 & (1 << i)) != 0) {
+ if ((regs2.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs1 & (1 << i)) != 0) {
+ if ((regs1.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
@@ -1376,25 +1342,25 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
}
void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
- DCHECK_EQ(regs1 & regs2, 0);
- DCHECK_EQ(regs1 & regs3, 0);
- DCHECK_EQ(regs2 & regs3, 0);
+ DCHECK((regs1 & regs2).is_empty());
+ DCHECK((regs1 & regs3).is_empty());
+ DCHECK((regs2 & regs3).is_empty());
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs3 & (1 << i)) != 0) {
+ if ((regs3.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs2 & (1 << i)) != 0) {
+ if ((regs2.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs1 & (1 << i)) != 0) {
+ if ((regs1.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
@@ -1402,24 +1368,24 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushFPU(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
Sub_d(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
-void TurboAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
@@ -1938,16 +1904,16 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- maskeqz(scratch, rj, rk);
- masknez(rd, rd, rk);
+ masknez(scratch, rj, rk);
+ maskeqz(rd, rd, rk);
or_(rd, rd, scratch);
}
void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- masknez(scratch, rj, rk);
- maskeqz(rd, rd, rk);
+ maskeqz(scratch, rj, rk);
+ masknez(rd, rd, rk);
or_(rd, rd, scratch);
}
@@ -2037,12 +2003,12 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
- maskeqz(dest, dest, condition);
+ masknez(dest, dest, condition);
}
void TurboAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
- masknez(dest, dest, condition);
+ maskeqz(dest, dest, condition);
}
void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
@@ -2069,6 +2035,7 @@ void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
// TODO(LOONG_dev): Optimize like arm64, use simd instruction
void TurboAssembler::Popcnt_w(Register rd, Register rj) {
+ ASM_CODE_COMMENT(this);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -2112,6 +2079,7 @@ void TurboAssembler::Popcnt_w(Register rd, Register rj) {
}
void TurboAssembler::Popcnt_d(Register rd, Register rj) {
+ ASM_CODE_COMMENT(this);
int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
@@ -2538,6 +2506,7 @@ void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ld_d(destination,
@@ -2628,6 +2597,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t7, cc_always, rj, rk);
bind(&skip);
+ RecordComment("]");
return;
}
@@ -2659,6 +2629,7 @@ void TurboAssembler::Call(Register target, Condition cond, Register rj,
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
+ ASM_CODE_COMMENT(this);
if (lower_limit != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2727,6 +2698,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t7, cond, rj, rk);
bind(&skip);
+ RecordComment("]");
return;
}
@@ -2737,6 +2709,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
@@ -2760,16 +2733,18 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(Builtin builtin) {
RecordCommentForOffHeapTrampoline(builtin);
Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::PatchAndJump(Address target) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
pcaddi(scratch, 4);
@@ -2782,6 +2757,7 @@ void TurboAssembler::PatchAndJump(Address target) {
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
@@ -2968,6 +2944,7 @@ void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
// JavaScript invokes.
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
ExternalReference limit =
@@ -2985,6 +2962,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Register scratch2,
Label* stack_overflow) {
+ ASM_CODE_COMMENT(this);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -3002,6 +2980,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
+ ASM_CODE_COMMENT(this);
Label regular_invoke;
// a0: actual arguments count
@@ -3043,11 +3022,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Sub_d(t0, t0, Operand(1));
Add_d(src, src, Operand(kSystemPointerSize));
Add_d(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- Branch(&copy, gt, t0, Operand(zero_reg));
- } else {
- Branch(&copy, ge, t0, Operand(zero_reg));
- }
+ Branch(&copy, gt, t0, Operand(zero_reg));
}
// Fill remaining expected arguments with undefined values.
@@ -3162,6 +3137,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -3184,6 +3160,7 @@ void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -3218,6 +3195,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
void TurboAssembler::AddOverflow_d(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3250,6 +3228,7 @@ void TurboAssembler::AddOverflow_d(Register dst, Register left,
void TurboAssembler::SubOverflow_d(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3282,6 +3261,7 @@ void TurboAssembler::SubOverflow_d(Register dst, Register left,
void TurboAssembler::MulOverflow_w(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3313,6 +3293,7 @@ void TurboAssembler::MulOverflow_w(Register dst, Register left,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -3332,6 +3313,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -3356,7 +3338,6 @@ void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
-
And(out, in, Operand(~kWeakHeapObjectMask));
}
@@ -3365,6 +3346,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t dummy_stats_counter_
// field.
@@ -3380,6 +3362,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t dummy_stats_counter_
// field.
@@ -3482,6 +3465,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Push(ra, fp);
Move(fp, sp);
@@ -3495,6 +3479,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
addi_d(sp, fp, 2 * kPointerSize);
Ld_d(ra, MemOperand(fp, 1 * kPointerSize));
Ld_d(fp, MemOperand(fp, 0 * kPointerSize));
@@ -3502,6 +3487,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -3581,6 +3567,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
@@ -3649,6 +3636,7 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::AssertStackIsAligned() {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -3696,6 +3684,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3706,6 +3695,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3716,6 +3706,7 @@ void TurboAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -3731,6 +3722,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -3747,6 +3739,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertCallableFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -3764,6 +3757,7 @@ void MacroAssembler::AssertCallableFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -3777,6 +3771,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -3804,6 +3799,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -3817,6 +3813,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -3836,6 +3833,7 @@ void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -3855,6 +3853,7 @@ void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -3874,6 +3873,7 @@ void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -3908,6 +3908,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = ActivationFrameAlignment();
// Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
@@ -3935,6 +3936,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t7, function);
CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
@@ -3942,6 +3944,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@@ -4045,6 +4048,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
temps.Include(t8);
Register scratch = temps.Acquire();
@@ -4057,19 +4061,13 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -4083,6 +4081,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld_d(t7,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
@@ -4101,6 +4100,7 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -4145,11 +4145,13 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 3d82b87a47..734e7cf931 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -308,7 +308,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPush(RegList regs);
void MultiPush(RegList regs1, RegList regs2);
void MultiPush(RegList regs1, RegList regs2, RegList regs3);
- void MultiPushFPU(RegList regs);
+ void MultiPushFPU(DoubleRegList regs);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -355,7 +355,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPop(RegList regs1, RegList regs2);
void MultiPop(RegList regs1, RegList regs2, RegList regs3);
- void MultiPopFPU(RegList regs);
+ void MultiPopFPU(DoubleRegList regs);
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rj, const Operand& rk); \
@@ -1056,8 +1056,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeType type);
- friend class CommonFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h
index 7d9d88c1f0..8e8fb282e0 100644
--- a/deps/v8/src/codegen/loong64/register-loong64.h
+++ b/deps/v8/src/codegen/loong64/register-loong64.h
@@ -6,8 +6,7 @@
#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
#include "src/codegen/loong64/constants-loong64.h"
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -48,74 +47,6 @@ namespace internal {
// encoding.
const int kNumRegs = 32;
-const RegList kJSCallerSaved = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // a4
- 1 << 9 | // a5
- 1 << 10 | // a6
- 1 << 11 | // a7
- 1 << 12 | // t0
- 1 << 13 | // t1
- 1 << 14 | // t2
- 1 << 15 | // t3
- 1 << 16 | // t4
- 1 << 17 | // t5
- 1 << 20; // t8
-
-const int kNumJSCallerSaved = 15;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 22 | // fp
- 1 << 23 | // s0
- 1 << 24 | // s1
- 1 << 25 | // s2
- 1 << 26 | // s3
- 1 << 27 | // s4
- 1 << 28 | // s5
- 1 << 29 | // s6 (roots in Javascript code)
- 1 << 30 | // s7 (cp in Javascript code)
- 1 << 31; // s8
-
-const int kNumCalleeSaved = 10;
-
-const RegList kCalleeSavedFPU = 1 << 24 | // f24
- 1 << 25 | // f25
- 1 << 26 | // f26
- 1 << 27 | // f27
- 1 << 28 | // f28
- 1 << 29 | // f29
- 1 << 30 | // f30
- 1 << 31; // f31
-
-const int kNumCalleeSavedFPU = 8;
-
-const RegList kCallerSavedFPU = 1 << 0 | // f0
- 1 << 1 | // f1
- 1 << 2 | // f2
- 1 << 3 | // f3
- 1 << 4 | // f4
- 1 << 5 | // f5
- 1 << 6 | // f6
- 1 << 7 | // f7
- 1 << 8 | // f8
- 1 << 9 | // f9
- 1 << 10 | // f10
- 1 << 11 | // f11
- 1 << 12 | // f12
- 1 << 13 | // f13
- 1 << 14 | // f14
- 1 << 15 | // f15
- 1 << 16 | // f16
- 1 << 17 | // f17
- 1 << 18 | // f18
- 1 << 19 | // f19
- 1 << 20 | // f20
- 1 << 21 | // f21
- 1 << 22 | // f22
- 1 << 23; // f23
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -177,7 +108,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
diff --git a/deps/v8/src/codegen/loong64/reglist-loong64.h b/deps/v8/src/codegen/loong64/reglist-loong64.h
new file mode 100644
index 0000000000..ab70ee2f6e
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/reglist-loong64.h
@@ -0,0 +1,50 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can b in the
+// LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_REGLIST_LOONG64_H_
+#define V8_CODEGEN_LOONG64_REGLIST_LOONG64_H_
+
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+const RegList kJSCallerSaved = {a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3, t4, t5, t8};
+
+const int kNumJSCallerSaved = 15;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = {fp, // fp
+ s0, // s0
+ s1, // s1
+ s2, // s2
+ s3, // s3
+ s4, // s4
+ s5, // s5
+ s6, // s6 (roots in Javascript code)
+ s7, // s7 (cp in Javascript code)
+ s8}; // s8
+
+const int kNumCalleeSaved = 10;
+
+const DoubleRegList kCalleeSavedFPU = {f24, f25, f26, f27, f28, f29, f30, f31};
+
+const int kNumCalleeSavedFPU = 8;
+
+const DoubleRegList kCallerSavedFPU = {f0, f1, f2, f3, f4, f5, f6, f7,
+ f8, f9, f10, f11, f12, f13, f14, f15,
+ f16, f17, f18, f19, f20, f21, f22, f23};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_REGLIST_LOONG64_H_
diff --git a/deps/v8/src/codegen/machine-type.cc b/deps/v8/src/codegen/machine-type.cc
index 5679563bd1..2059f05b87 100644
--- a/deps/v8/src/codegen/machine-type.cc
+++ b/deps/v8/src/codegen/machine-type.cc
@@ -57,8 +57,8 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepCompressed";
case MachineRepresentation::kMapWord:
return "kRepMapWord";
- case MachineRepresentation::kCagedPointer:
- return "kRepCagedPointer";
+ case MachineRepresentation::kSandboxedPointer:
+ return "kRepSandboxedPointer";
}
UNREACHABLE();
}
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 981ac9783f..2a16e0d347 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -7,6 +7,7 @@
#include <iosfwd>
+#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -41,8 +42,8 @@ enum class MachineRepresentation : uint8_t {
kCompressedPointer, // (compressed) HeapObject
kCompressed, // (compressed) Object (Smi or HeapObject)
// A 64-bit pointer encoded in a way (e.g. as offset) that guarantees it will
- // point into the virtual memory cage.
- kCagedPointer,
+ // point into the sandbox.
+ kSandboxedPointer,
// FP and SIMD representations must be last, and in order of increasing size.
kFloat32,
kFloat64,
@@ -225,8 +226,8 @@ class MachineType {
return MachineType(MachineRepresentation::kCompressed,
MachineSemantic::kAny);
}
- constexpr static MachineType CagedPointer() {
- return MachineType(MachineRepresentation::kCagedPointer,
+ constexpr static MachineType SandboxedPointer() {
+ return MachineType(MachineRepresentation::kSandboxedPointer,
MachineSemantic::kNone);
}
constexpr static MachineType Bool() {
@@ -267,13 +268,42 @@ class MachineType {
return MachineType::AnyCompressed();
case MachineRepresentation::kCompressedPointer:
return MachineType::CompressedPointer();
- case MachineRepresentation::kCagedPointer:
- return MachineType::CagedPointer();
+ case MachineRepresentation::kSandboxedPointer:
+ return MachineType::SandboxedPointer();
default:
UNREACHABLE();
}
}
+ static MachineType TypeForCType(const CTypeInfo& type) {
+ switch (type.GetType()) {
+ case CTypeInfo::Type::kVoid:
+ return MachineType::AnyTagged();
+ case CTypeInfo::Type::kBool:
+ return MachineType::Bool();
+ case CTypeInfo::Type::kInt32:
+ return MachineType::Int32();
+ case CTypeInfo::Type::kUint32:
+ return MachineType::Uint32();
+ case CTypeInfo::Type::kInt64:
+ return MachineType::Int64();
+ case CTypeInfo::Type::kAny:
+ static_assert(
+ sizeof(AnyCType) == kInt64Size,
+ "CTypeInfo::Type::kAny is assumed to be of size 64 bits.");
+ return MachineType::Int64();
+ case CTypeInfo::Type::kUint64:
+ return MachineType::Uint64();
+ case CTypeInfo::Type::kFloat32:
+ return MachineType::Float32();
+ case CTypeInfo::Type::kFloat64:
+ return MachineType::Float64();
+ case CTypeInfo::Type::kV8Value:
+ case CTypeInfo::Type::kApiObject:
+ return MachineType::AnyTagged();
+ }
+ }
+
constexpr bool LessThanOrEqualPointerSize() const {
return ElementSizeLog2Of(this->representation()) <= kSystemPointerSizeLog2;
}
@@ -310,6 +340,10 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep >= MachineRepresentation::kFirstFPRepresentation;
}
+inline bool IsSimd128(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kSimd128;
+}
+
inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged ||
rep == MachineRepresentation::kTaggedPointer ||
@@ -362,7 +396,7 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
return kTaggedSizeLog2;
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
return kSystemPointerSizeLog2;
default:
UNREACHABLE();
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 267281396a..788651e6fc 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -290,8 +290,7 @@ const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
- : AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(at.bit()) {
+ : AssemblerBase(options, std::move(buffer)), scratch_register_list_({at}) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_trampoline_pool_end_ = 0;
@@ -344,7 +343,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -3841,14 +3840,12 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
-
- return Register::from_code(index);
+ return available_->PopFirst();
}
-bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+bool UseScratchRegisterScope::hasAvailable() const {
+ return !available_->is_empty();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 0acee5e39d..628a8bc652 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -1903,13 +1903,13 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
bool hasAvailable() const;
void Include(const RegList& list) { *available_ |= list; }
- void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Exclude(const RegList& list) { available_->clear(list); }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Include(list);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Exclude(list);
}
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
index bdd168d831..6b5a791cff 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -24,17 +24,17 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(a0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(a1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(a2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(a3));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(a1, t1, t0, a0, a2, v0, a3);
+ return RegisterArray(a1, t1, t0, a0, a2, v0, a3, kContextRegister);
}
// static
@@ -60,6 +60,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return a0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return a1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return t0;
@@ -105,7 +135,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -115,6 +145,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ // a2 : the excluded property base
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
@@ -222,6 +268,12 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return RegisterArray(a0, a1, a2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
// a1 : kApiFunctionAddress
// a2 : kArgc
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index ea4639c37c..53c2217d52 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -51,22 +51,13 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -74,25 +65,17 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -100,26 +83,17 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
- }
-
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
return bytes;
}
@@ -159,12 +133,6 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Addu(fp, sp, Operand(offset));
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- return kSafepointRegisterStackIndexMap[reg_code];
-}
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -174,6 +142,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -185,7 +154,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
+ // of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
Addu(dst, object, Operand(offset - kHeapObjectTag));
@@ -212,30 +181,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address));
RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
@@ -304,6 +262,7 @@ void TurboAssembler::CallRecordWriteStub(
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9);
+ RecordComment("]");
} else {
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
@@ -1415,12 +1374,12 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
}
void TurboAssembler::MultiPush(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
}
@@ -1431,7 +1390,7 @@ void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
@@ -1439,24 +1398,24 @@ void TurboAssembler::MultiPop(RegList regs) {
addiu(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushFPU(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
-void TurboAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
@@ -2656,6 +2615,7 @@ void TurboAssembler::Ctz(Register rd, Register rs) {
}
void TurboAssembler::Popcnt(Register rd, Register rs) {
+ ASM_CODE_COMMENT(this);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -3622,6 +3582,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
lw(destination,
@@ -3800,6 +3761,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t9, 0, cond, rs, rt, bd);
+ RecordComment("]");
return;
}
@@ -3814,6 +3776,7 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
+ ASM_CODE_COMMENT(this);
if (lower_limit != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3937,6 +3900,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9, 0, cond, rs, rt, bd);
+ RecordComment("]");
return;
}
@@ -3946,6 +3910,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
@@ -3968,17 +3933,19 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(Builtin builtin) {
RecordCommentForOffHeapTrampoline(builtin);
Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips32r6) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, ra);
@@ -3997,6 +3964,7 @@ void TurboAssembler::PatchAndJump(Address target) {
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
@@ -4332,6 +4300,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
// JavaScript invokes.
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
ExternalReference limit =
@@ -4349,6 +4318,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Register scratch2,
Label* stack_overflow) {
+ ASM_CODE_COMMENT(this);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -4366,6 +4336,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
+ ASM_CODE_COMMENT(this);
Label regular_invoke;
// a0: actual arguments count
@@ -4407,11 +4378,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Subu(t0, t0, Operand(1));
Addu(src, src, Operand(kSystemPointerSize));
Addu(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- Branch(&copy, gt, t0, Operand(zero_reg));
- } else {
- Branch(&copy, ge, t0, Operand(zero_reg));
- }
+ Branch(&copy, gt, t0, Operand(zero_reg));
}
// Fill remaining expected arguments with undefined values.
@@ -4523,6 +4490,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -4545,6 +4513,7 @@ void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -4579,6 +4548,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
void TurboAssembler::AddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -4609,6 +4579,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left,
void TurboAssembler::SubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -4639,6 +4610,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left,
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -4668,6 +4640,7 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4687,6 +4660,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -4721,6 +4695,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
li(scratch2, ExternalReference::Create(counter));
lw(scratch1, MemOperand(scratch2));
Addu(scratch1, scratch1, Operand(value));
@@ -4733,6 +4708,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
li(scratch2, ExternalReference::Create(counter));
lw(scratch1, MemOperand(scratch2));
Subu(scratch1, scratch1, Operand(value));
@@ -4832,6 +4808,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Push(ra, fp);
Move(fp, sp);
@@ -4845,6 +4822,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
addiu(sp, fp, 2 * kPointerSize);
lw(ra, MemOperand(fp, 1 * kPointerSize));
lw(fp, MemOperand(fp, 0 * kPointerSize));
@@ -4852,6 +4830,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -4933,6 +4912,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
@@ -4998,6 +4978,7 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::AssertStackIsAligned() {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5035,6 +5016,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
void MacroAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5045,6 +5027,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5055,6 +5038,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5070,6 +5054,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5086,6 +5071,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertCallableFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5103,6 +5089,7 @@ void MacroAssembler::AssertCallableFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5116,6 +5103,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5143,6 +5131,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5156,6 +5145,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -5205,6 +5195,7 @@ void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -5254,6 +5245,7 @@ void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -5304,6 +5296,7 @@ void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst,
void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -5370,6 +5363,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers a0..a3.
@@ -5400,6 +5394,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
// Linux/MIPS convention demands that register t9 contains
// the address of the function being call in case of
// Position independent code
@@ -5410,6 +5405,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}
@@ -5524,6 +5520,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
+ ASM_CODE_COMMENT(this);
And(scratch, object, Operand(~kPageAlignmentMask));
lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
@@ -5533,19 +5530,13 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -5574,6 +5565,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Lw(t9,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
@@ -5591,6 +5583,7 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -5634,10 +5627,12 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index f2491fcf19..dc31b6e1b8 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -350,7 +350,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
- void MultiPushFPU(RegList regs);
+ void MultiPushFPU(DoubleRegList regs);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -398,7 +398,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
- void MultiPopFPU(RegList regs);
+ void MultiPopFPU(DoubleRegList regs);
// Load Scaled Address instructions. Parameter sa (shift argument) must be
// between [1, 31] (inclusive). On pre-r6 architectures the scratch register
@@ -1164,13 +1164,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeType type);
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class CommonFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index 7fd259bf9b..f2ed9786c6 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -6,8 +6,7 @@
#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_
#include "src/codegen/mips/constants-mips.h"
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -47,101 +46,6 @@ namespace internal {
// encoding.
const int kNumRegs = 32;
-const RegList kJSCallerSaved = 1 << 2 | // v0
- 1 << 3 | // v1
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // t0
- 1 << 9 | // t1
- 1 << 10 | // t2
- 1 << 11 | // t3
- 1 << 12 | // t4
- 1 << 13 | // t5
- 1 << 14 | // t6
- 1 << 15; // t7
-
-const int kNumJSCallerSaved = 14;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 16 | // s0
- 1 << 17 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 30; // fp/s8
-
-const int kNumCalleeSaved = 9;
-
-const RegList kCalleeSavedFPU = 1 << 20 | // f20
- 1 << 22 | // f22
- 1 << 24 | // f24
- 1 << 26 | // f26
- 1 << 28 | // f28
- 1 << 30; // f30
-
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU = 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 24;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-const int kUndefIndex = -1;
-// Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
- kUndefIndex, // at
- 0, // v0
- 1, // v1
- 2, // a0
- 3, // a1
- 4, // a2
- 5, // a3
- 6, // t0
- 7, // t1
- 8, // t2
- 9, // t3
- 10, // t4
- 11, // t5
- 12, // t6
- 13, // t7
- 14, // s0
- 15, // s1
- 16, // s2
- 17, // s3
- 18, // s4
- 19, // s5
- 20, // s6
- 21, // s7
- kUndefIndex, // t8
- kUndefIndex, // t9
- kUndefIndex, // k0
- kUndefIndex, // k1
- kUndefIndex, // gp
- kUndefIndex, // sp
- 22, // fp
- kUndefIndex};
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -209,7 +113,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
diff --git a/deps/v8/src/codegen/mips/reglist-mips.h b/deps/v8/src/codegen/mips/reglist-mips.h
new file mode 100644
index 0000000000..5c458858f6
--- /dev/null
+++ b/deps/v8/src/codegen/mips/reglist-mips.h
@@ -0,0 +1,48 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS_REGLIST_MIPS_H_
+#define V8_CODEGEN_MIPS_REGLIST_MIPS_H_
+
+#include "src/codegen/mips/constants-mips.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, t0,
+ t1, t2, t3, t4, t5, t6, t7};
+
+const int kNumJSCallerSaved = 14;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = {s0, // s0
+ s1, // s1
+ s2, // s2
+ s3, // s3
+ s4, // s4
+ s5, // s5
+ s6, // s6 (roots in Javascript code)
+ s7, // s7 (cp in Javascript code)
+ fp}; // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30};
+
+const int kNumCalleeSavedFPU = 6;
+
+const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8,
+ f10, f12, f14, f16, f18};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_MIPS_REGLIST_MIPS_H_
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 9f5b34e956..14f3734812 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -265,7 +265,7 @@ const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(at.bit() | s0.bit()) {
+ scratch_register_list_({at, s0}) {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
EnableCpuFeature(MIPS_SIMD);
}
@@ -321,7 +321,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -3991,14 +3991,12 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
-
- return Register::from_code(index);
+ return available_->PopFirst();
}
-bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+bool UseScratchRegisterScope::hasAvailable() const {
+ return !available_->is_empty();
+}
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
uint8_t laneidx) {
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index f17d47e990..2bfb6e2dfd 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -1933,13 +1933,13 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
bool hasAvailable() const;
void Include(const RegList& list) { *available_ |= list; }
- void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Exclude(const RegList& list) { available_->clear(list); }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Include(list);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Exclude(list);
}
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
index 0c92898939..7d6ba8bc73 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -24,21 +24,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
- if (argc >= 5) DCHECK(allocatable_regs | a4.bit());
- if (argc >= 6) DCHECK(allocatable_regs | a5.bit());
- if (argc >= 7) DCHECK(allocatable_regs | a6.bit());
- if (argc >= 8) DCHECK(allocatable_regs | a7.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(a0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(a1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(a2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(a3));
+ if (argc >= 5) DCHECK(allocatable_regs.has(a4));
+ if (argc >= 6) DCHECK(allocatable_regs.has(a5));
+ if (argc >= 7) DCHECK(allocatable_regs.has(a6));
+ if (argc >= 8) DCHECK(allocatable_regs.has(a7));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(a1, a5, a4, a0, a2, v0, a3);
+ return RegisterArray(a1, a5, a4, a0, a2, v0, a3, kContextRegister);
}
// static
@@ -64,6 +64,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return a0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return a1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return a4;
@@ -106,7 +136,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -116,6 +146,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ // a2 : the excluded property base
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
@@ -225,6 +271,14 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // a0: left operand
+ // a1: right operand
+ // a2: feedback slot
+ return RegisterArray(a0, a1, a2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
// a1 : kApiFunctionAddress
// a2 : kArgc
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 291d6d5b6a..2d13884be3 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -51,22 +51,12 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -74,25 +64,16 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -100,26 +81,17 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
- }
-
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += list.Count() * kPointerSize;
return bytes;
}
@@ -157,12 +129,6 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Daddu(fp, sp, Operand(offset));
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- return kSafepointRegisterStackIndexMap[reg_code];
-}
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -172,6 +138,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -183,7 +150,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
+ // of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
Daddu(dst, object, Operand(offset - kHeapObjectTag));
@@ -210,30 +177,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address));
RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
@@ -301,6 +257,7 @@ void TurboAssembler::CallRecordWriteStub(
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9);
+ RecordComment("]");
} else {
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
@@ -1960,12 +1917,12 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
}
void TurboAssembler::MultiPush(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kPointerSize;
Dsubu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kPointerSize;
Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
@@ -1976,7 +1933,7 @@ void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
@@ -1984,24 +1941,24 @@ void TurboAssembler::MultiPop(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushFPU(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
Dsubu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
-void TurboAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
@@ -2009,24 +1966,24 @@ void TurboAssembler::MultiPopFPU(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushMSA(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+void TurboAssembler::MultiPushMSA(DoubleRegList regs) {
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
Dsubu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kSimd128Size;
st_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
-void TurboAssembler::MultiPopMSA(RegList regs) {
+void TurboAssembler::MultiPopMSA(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
ld_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kSimd128Size;
}
@@ -3265,6 +3222,7 @@ void TurboAssembler::Dctz(Register rd, Register rs) {
}
void TurboAssembler::Popcnt(Register rd, Register rs) {
+ ASM_CODE_COMMENT(this);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -3316,6 +3274,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
}
void TurboAssembler::Dpopcnt(Register rd, Register rs) {
+ ASM_CODE_COMMENT(this);
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
@@ -4266,6 +4225,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ld(destination,
@@ -4352,6 +4312,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t9, cond, rs, rt, bd);
+ RecordComment("]");
return;
}
}
@@ -4393,6 +4354,7 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
+ ASM_CODE_COMMENT(this);
if (lower_limit != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4428,6 +4390,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9, cond, rs, rt, bd);
+ RecordComment("]");
return;
}
}
@@ -4438,6 +4401,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
@@ -4459,17 +4423,19 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(Builtin builtin) {
RecordCommentForOffHeapTrampoline(builtin);
Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips64r6) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, ra);
@@ -4488,6 +4454,7 @@ void TurboAssembler::PatchAndJump(Address target) {
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
@@ -4860,6 +4827,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
// JavaScript invokes.
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
ExternalReference limit =
@@ -4877,6 +4845,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Register scratch2,
Label* stack_overflow) {
+ ASM_CODE_COMMENT(this);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -4894,6 +4863,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
+ ASM_CODE_COMMENT(this);
Label regular_invoke;
// a0: actual arguments count
@@ -4935,11 +4905,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Dsubu(t0, t0, Operand(1));
Daddu(src, src, Operand(kSystemPointerSize));
Daddu(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- Branch(&copy, gt, t0, Operand(zero_reg));
- } else {
- Branch(&copy, ge, t0, Operand(zero_reg));
- }
+ Branch(&copy, gt, t0, Operand(zero_reg));
}
// Fill remaining expected arguments with undefined values.
@@ -5052,6 +5018,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -5074,6 +5041,7 @@ void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -5108,6 +5076,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
void TurboAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -5138,6 +5107,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left,
void TurboAssembler::DsubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -5168,6 +5138,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left,
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
@@ -5197,6 +5168,7 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -5216,6 +5188,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -5250,6 +5223,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t dummy_stats_counter_
// field.
@@ -5265,6 +5239,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t dummy_stats_counter_
// field.
@@ -5367,6 +5342,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Push(ra, fp);
Move(fp, sp);
@@ -5380,6 +5356,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
daddiu(sp, fp, 2 * kPointerSize);
Ld(ra, MemOperand(fp, 1 * kPointerSize));
Ld(fp, MemOperand(fp, 0 * kPointerSize));
@@ -5387,6 +5364,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -5466,6 +5444,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
@@ -5535,6 +5514,7 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::AssertStackIsAligned() {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5584,6 +5564,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5594,6 +5575,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5604,6 +5586,7 @@ void TurboAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5619,6 +5602,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5635,6 +5619,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertCallableFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5652,6 +5637,7 @@ void MacroAssembler::AssertCallableFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5665,6 +5651,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5692,6 +5679,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5705,6 +5693,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -5755,6 +5744,7 @@ void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_s(dst, src1);
return;
@@ -5805,6 +5795,7 @@ void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -5854,6 +5845,7 @@ void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
+ ASM_CODE_COMMENT(this);
if (src1 == src2) {
Move_d(dst, src1);
return;
@@ -5920,6 +5912,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = ActivationFrameAlignment();
// n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
@@ -5952,6 +5945,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, function);
CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
@@ -5959,6 +5953,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@@ -6068,6 +6063,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
+ ASM_CODE_COMMENT(this);
And(scratch, object, Operand(~kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
@@ -6077,19 +6073,13 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -6118,6 +6108,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t9,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
@@ -6136,6 +6127,7 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -6179,11 +6171,13 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index bcb11adf69..43f28ac40e 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -370,8 +370,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
- void MultiPushFPU(RegList regs);
- void MultiPushMSA(RegList regs);
+ void MultiPushFPU(DoubleRegList regs);
+ void MultiPushMSA(DoubleRegList regs);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -418,8 +418,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
- void MultiPopFPU(RegList regs);
- void MultiPopMSA(RegList regs);
+ void MultiPopFPU(DoubleRegList regs);
+ void MultiPopMSA(DoubleRegList regs);
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
@@ -1243,13 +1243,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeType type);
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class CommonFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 1fbe3ec7ac..6c9980f50d 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -6,8 +6,7 @@
#define V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
#include "src/codegen/mips64/constants-mips64.h"
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -46,101 +45,6 @@ namespace internal {
// encoding.
const int kNumRegs = 32;
-const RegList kJSCallerSaved = 1 << 2 | // v0
- 1 << 3 | // v1
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // a4
- 1 << 9 | // a5
- 1 << 10 | // a6
- 1 << 11 | // a7
- 1 << 12 | // t0
- 1 << 13 | // t1
- 1 << 14 | // t2
- 1 << 15; // t3
-
-const int kNumJSCallerSaved = 14;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 16 | // s0
- 1 << 17 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 30; // fp/s8
-
-const int kNumCalleeSaved = 9;
-
-const RegList kCalleeSavedFPU = 1 << 20 | // f20
- 1 << 22 | // f22
- 1 << 24 | // f24
- 1 << 26 | // f26
- 1 << 28 | // f28
- 1 << 30; // f30
-
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU = 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 24;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-const int kUndefIndex = -1;
-// Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
- kUndefIndex, // at
- 0, // v0
- 1, // v1
- 2, // a0
- 3, // a1
- 4, // a2
- 5, // a3
- 6, // a4
- 7, // a5
- 8, // a6
- 9, // a7
- 10, // t0
- 11, // t1
- 12, // t2
- 13, // t3
- 14, // s0
- 15, // s1
- 16, // s2
- 17, // s3
- 18, // s4
- 19, // s5
- 20, // s6
- 21, // s7
- kUndefIndex, // t8
- kUndefIndex, // t9
- kUndefIndex, // k0
- kUndefIndex, // k1
- kUndefIndex, // gp
- kUndefIndex, // sp
- 22, // fp
- kUndefIndex};
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -209,7 +113,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum MSARegisterCode {
diff --git a/deps/v8/src/codegen/mips64/reglist-mips64.h b/deps/v8/src/codegen/mips64/reglist-mips64.h
new file mode 100644
index 0000000000..7d97da5c18
--- /dev/null
+++ b/deps/v8/src/codegen/mips64/reglist-mips64.h
@@ -0,0 +1,48 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS64_REGLIST_MIPS64_H_
+#define V8_CODEGEN_MIPS64_REGLIST_MIPS64_H_
+
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, a4,
+ a5, a6, a7, t0, t1, t2, t3};
+
+const int kNumJSCallerSaved = 14;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = {s0, // s0
+ s1, // s1
+ s2, // s2
+ s3, // s3
+ s4, // s4
+ s5, // s5
+ s6, // s6 (roots in Javascript code)
+ s7, // s7 (cp in Javascript code)
+ fp}; // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30};
+
+const int kNumCalleeSavedFPU = 6;
+
+const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8,
+ f10, f12, f14, f16, f18};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_MIPS64_REGLIST_MIPS64_H_
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index d0c4ed52e6..7678298ab3 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -66,22 +66,17 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
- if (IsTurboprop() || FLAG_concurrent_inlining) {
- set_concurrent_inlining();
- }
-
switch (code_kind_) {
case CodeKind::TURBOFAN:
+ set_called_with_code_start_register();
+ set_switch_jump_table();
+ if (FLAG_analyze_environment_liveness) {
+ set_analyze_environment_liveness();
+ }
if (FLAG_function_context_specialization) {
set_function_context_specializing();
}
if (FLAG_turbo_splitting) set_splitting();
- V8_FALLTHROUGH;
- case CodeKind::TURBOPROP:
- set_called_with_code_start_register();
- set_switch_jump_table();
- // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
- if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;
case CodeKind::BYTECODE_HANDLER:
set_called_with_code_start_register();
@@ -104,6 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_JS_FUNCTION:
break;
case CodeKind::BASELINE:
+ case CodeKind::MAGLEV:
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::REGEXP:
UNREACHABLE();
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index d92964c796..5f1308fc4e 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -68,9 +68,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(TraceTurboAllocation, trace_turbo_allocation, 14) \
V(TraceHeapBroker, trace_heap_broker, 15) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
- V(ConcurrentInlining, concurrent_inlining, 17) \
- V(DiscardResultForTesting, discard_result_for_testing, 18) \
- V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
+ V(DiscardResultForTesting, discard_result_for_testing, 17) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 18)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@@ -154,7 +153,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsOptimizing() const {
return CodeKindIsOptimizedJSFunction(code_kind());
}
- bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
#if V8_ENABLE_WEBASSEMBLY
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index b65fe2e729..9543f5f4b1 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -226,7 +226,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(ip.bit()),
+ scratch_register_list_({ip}),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -276,7 +276,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -2109,11 +2109,7 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
- DCHECK_NE(*available, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
- Register reg = Register::from_code(index);
- *available &= ~reg.bit();
- return reg;
+ return available->PopFirst();
}
} // namespace internal
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index ea82539afb..fe21a1c8ad 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -43,7 +43,6 @@
#include <stdio.h>
#include <memory>
-#include <vector>
#include "src/base/numbers/double.h"
#include "src/codegen/assembler.h"
@@ -1514,7 +1513,9 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
// Check if we have registers available to acquire.
- bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ bool CanAcquire() const {
+ return !assembler_->GetScratchRegisterList()->is_empty();
+ }
private:
friend class Assembler;
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 4f17f08969..37593003e1 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -431,9 +431,15 @@ using Instr = uint32_t;
/* signalling */ \
V(xscvspdpn, XSCVSPDPN, 0xF000052C)
-#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
- /* Vector Byte-Reverse Quadword */ \
- V(xxbrq, XXBRQ, 0xF01F076C)
+#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
+ /* Vector Byte-Reverse Quadword */ \
+ V(xxbrq, XXBRQ, 0xF01F076C) \
+ /* Vector Byte-Reverse Doubleword */ \
+ V(xxbrd, XXBRD, 0xF017076C) \
+ /* Vector Byte-Reverse Word */ \
+ V(xxbrw, XXBRW, 0xF00F076C) \
+ /* Vector Byte-Reverse Halfword */ \
+ V(xxbrh, XXBRH, 0xF007076C)
#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
/* VSX Scalar Square Root Double-Precision */ \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index f3359d3ca8..15e673b1db 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -24,21 +24,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | r3.bit());
- if (argc >= 2) DCHECK(allocatable_regs | r4.bit());
- if (argc >= 3) DCHECK(allocatable_regs | r5.bit());
- if (argc >= 4) DCHECK(allocatable_regs | r6.bit());
- if (argc >= 5) DCHECK(allocatable_regs | r7.bit());
- if (argc >= 6) DCHECK(allocatable_regs | r8.bit());
- if (argc >= 7) DCHECK(allocatable_regs | r9.bit());
- if (argc >= 8) DCHECK(allocatable_regs | r10.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(r3));
+ if (argc >= 2) DCHECK(allocatable_regs.has(r4));
+ if (argc >= 3) DCHECK(allocatable_regs.has(r5));
+ if (argc >= 4) DCHECK(allocatable_regs.has(r6));
+ if (argc >= 5) DCHECK(allocatable_regs.has(r7));
+ if (argc >= 6) DCHECK(allocatable_regs.has(r8));
+ if (argc >= 7) DCHECK(allocatable_regs.has(r9));
+ if (argc >= 8) DCHECK(allocatable_regs.has(r10));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(r4, r8, r7, r5, r3);
+ return RegisterArray(r4, r8, r7, r5, r3, r6, kContextRegister);
}
// static
@@ -64,6 +64,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return r3; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return r4;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return r5; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return r6;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return r4; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return r5; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return r6;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return r7;
@@ -110,7 +140,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(r6); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -120,6 +150,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // r4 : the source
+ // r3 : the excluded property count
+ return RegisterArray(r4, r3);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // r4 : the source
+ // r3 : the excluded property count
+ // r5 : the excluded property base
+ return RegisterArray(r4, r3, r5);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// r3 : number of arguments (on the stack)
// r4 : the target to call
@@ -225,6 +271,12 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(r4, // kApiFunctionAddress
r5, // kArgc
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 724cedc1c2..2727749295 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -58,19 +58,10 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kSystemPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kStackSavedSavedFPSizeInBytes;
@@ -82,23 +73,14 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPushF64AndV128(kCallerSavedDoubles, kCallerSavedDoubles);
+ MultiPushF64AndV128(kCallerSavedDoubles, kCallerSavedSimd128s);
bytes += kStackSavedSavedFPSizeInBytes;
}
@@ -109,24 +91,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPopF64AndV128(kCallerSavedDoubles, kCallerSavedDoubles);
+ MultiPopF64AndV128(kCallerSavedDoubles, kCallerSavedSimd128s);
bytes += kStackSavedSavedFPSizeInBytes;
}
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
return bytes;
}
@@ -405,12 +377,12 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
void TurboAssembler::MultiPush(RegList regs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
subi(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kSystemPointerSize;
StoreU64(ToRegister(i), MemOperand(location, stack_offset));
}
@@ -421,7 +393,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
LoadU64(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kSystemPointerSize;
}
@@ -429,13 +401,13 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(dregs);
+void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+ int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
subi(location, location, Operand(stack_offset));
for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
stack_offset -= kDoubleSize;
stfd(dreg, MemOperand(location, stack_offset));
@@ -443,26 +415,27 @@ void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(dregs);
+void TurboAssembler::MultiPushV128(Simd128RegList simd_regs,
+ Register location) {
+ int16_t num_to_push = simd_regs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
subi(location, location, Operand(stack_offset));
for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
- if ((dregs & (1 << i)) != 0) {
- Simd128Register dreg = Simd128Register::from_code(i);
+ if ((simd_regs.bits() & (1 << i)) != 0) {
+ Simd128Register simd_reg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
li(ip, Operand(stack_offset));
- StoreSimd128(dreg, MemOperand(location, ip));
+ StoreSimd128(simd_reg, MemOperand(location, ip));
}
}
}
-void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
lfd(dreg, MemOperand(location, stack_offset));
stack_offset += kDoubleSize;
@@ -471,21 +444,22 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
+void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
- if ((dregs & (1 << i)) != 0) {
- Simd128Register dreg = Simd128Register::from_code(i);
+ if ((simd_regs.bits() & (1 << i)) != 0) {
+ Simd128Register simd_reg = Simd128Register::from_code(i);
li(ip, Operand(stack_offset));
- LoadSimd128(dreg, MemOperand(location, ip));
+ LoadSimd128(simd_reg, MemOperand(location, ip));
stack_offset += kSimd128Size;
}
}
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64AndV128(RegList dregs, RegList simd_regs,
+void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
+ Simd128RegList simd_regs,
Register location) {
MultiPushDoubles(dregs);
#if V8_ENABLE_WEBASSEMBLY
@@ -507,20 +481,21 @@ void TurboAssembler::MultiPushF64AndV128(RegList dregs, RegList simd_regs,
// We still need to allocate empty space on the stack even if we
// are not pushing Simd registers (see kFixedFrameSizeFromFp).
addi(sp, sp,
- Operand(-static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
bind(&simd_pushed);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
MultiPushV128(simd_regs);
} else {
addi(sp, sp,
- Operand(-static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
}
}
#endif
}
-void TurboAssembler::MultiPopF64AndV128(RegList dregs, RegList simd_regs,
+void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
+ Simd128RegList simd_regs,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -535,14 +510,14 @@ void TurboAssembler::MultiPopF64AndV128(RegList dregs, RegList simd_regs,
b(&simd_popped);
bind(&pop_empty_simd);
addi(sp, sp,
- Operand(static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
bind(&simd_popped);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
MultiPopV128(simd_regs);
} else {
addi(sp, sp,
- Operand(static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
}
}
#endif
@@ -687,25 +662,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
@@ -1500,13 +1463,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
sub(sp, sp, r0);
// Update stack pointer.
addi(dest, sp, Operand(-kSystemPointerSize));
- if (!kJSArgcIncludesReceiver) {
- addi(r0, actual_parameter_count, Operand(1));
- } else {
- mr(r0, actual_parameter_count);
- cmpi(r0, Operand::Zero());
- ble(&skip);
- }
+ mr(r0, actual_parameter_count);
+ cmpi(r0, Operand::Zero());
+ ble(&skip);
mtctr(r0);
bind(&copy);
@@ -3409,19 +3368,13 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -3605,7 +3558,7 @@ void TurboAssembler::ByteReverseU32(Register dst, Register val,
ZeroExtWord32(dst, scratch);
}
-void TurboAssembler::ByteReverseU64(Register dst, Register val) {
+void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) {
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
brd(dst, val);
return;
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 200015bd85..76ed4c2018 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -557,15 +557,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
- void MultiPushDoubles(RegList dregs, Register location = sp);
- void MultiPopDoubles(RegList dregs, Register location = sp);
+ void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
+ void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
- void MultiPushV128(RegList dregs, Register location = sp);
- void MultiPopV128(RegList dregs, Register location = sp);
+ void MultiPushV128(Simd128RegList dregs, Register location = sp);
+ void MultiPopV128(Simd128RegList dregs, Register location = sp);
- void MultiPushF64AndV128(RegList dregs, RegList simd_regs,
+ void MultiPushF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs,
Register location = sp);
- void MultiPopF64AndV128(RegList dregs, RegList simd_regs,
+ void MultiPopF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs,
Register location = sp);
// Calculate how much stack space (in bytes) are required to store caller
@@ -614,7 +614,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ByteReverseU16(Register dst, Register val, Register scratch);
void ByteReverseU32(Register dst, Register val, Register scratch);
- void ByteReverseU64(Register dst, Register val);
+ void ByteReverseU64(Register dst, Register val, Register = r0);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index 68adfdb155..e5aebf1f8a 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_PPC_REGISTER_PPC_H_
#define V8_CODEGEN_PPC_REGISTER_PPC_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -61,86 +60,6 @@ namespace internal {
V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 3 | // r3 a1
- 1 << 4 | // r4 a2
- 1 << 5 | // r5 a3
- 1 << 6 | // r6 a4
- 1 << 7 | // r7 a5
- 1 << 8 | // r8 a6
- 1 << 9 | // r9 a7
- 1 << 10 | // r10 a8
- 1 << 11;
-
-const int kNumJSCallerSaved = 9;
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0
-int JSCallerSavedCode(int n);
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved = 1 << 14 | // r14
- 1 << 15 | // r15
- 1 << 16 | // r16
- 1 << 17 | // r17
- 1 << 18 | // r18
- 1 << 19 | // r19
- 1 << 20 | // r20
- 1 << 21 | // r21
- 1 << 22 | // r22
- 1 << 23 | // r23
- 1 << 24 | // r24
- 1 << 25 | // r25
- 1 << 26 | // r26
- 1 << 27 | // r27
- 1 << 28 | // r28
- 1 << 29 | // r29
- 1 << 30 | // r20
- 1 << 31; // r31
-
-const int kNumCalleeSaved = 18;
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7 | // d7
- 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13; // d13
-
-const int kNumCallerSavedDoubles = 14;
-
-const RegList kCalleeSavedDoubles = 1 << 14 | // d14
- 1 << 15 | // d15
- 1 << 16 | // d16
- 1 << 17 | // d17
- 1 << 18 | // d18
- 1 << 19 | // d19
- 1 << 20 | // d20
- 1 << 21 | // d21
- 1 << 22 | // d22
- 1 << 23 | // d23
- 1 << 24 | // d24
- 1 << 25 | // d25
- 1 << 26 | // d26
- 1 << 27 | // d27
- 1 << 28 | // d28
- 1 << 29 | // d29
- 1 << 30 | // d30
- 1 << 31; // d31
-
-const int kNumCalleeSavedDoubles = 18;
-
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
@@ -199,7 +118,7 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
+static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -219,7 +138,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
@@ -249,7 +168,7 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
-static_assert(sizeof(DoubleRegister) == sizeof(int),
+static_assert(sizeof(DoubleRegister) <= sizeof(int),
"DoubleRegister can efficiently be passed by value");
using FloatRegister = DoubleRegister;
@@ -292,7 +211,7 @@ class Simd128Register
explicit constexpr Simd128Register(int code) : RegisterBase(code) {}
};
ASSERT_TRIVIALLY_COPYABLE(Simd128Register);
-static_assert(sizeof(Simd128Register) == sizeof(int),
+static_assert(sizeof(Simd128Register) <= sizeof(int),
"Simd128Register can efficiently be passed by value");
#define DECLARE_SIMD128_REGISTER(R) \
diff --git a/deps/v8/src/codegen/ppc/reglist-ppc.h b/deps/v8/src/codegen/ppc/reglist-ppc.h
new file mode 100644
index 0000000000..7d3d88941b
--- /dev/null
+++ b/deps/v8/src/codegen/ppc/reglist-ppc.h
@@ -0,0 +1,63 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PPC_REGLIST_PPC_H_
+#define V8_CODEGEN_PPC_REGLIST_PPC_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+using Simd128RegList = RegListBase<Simd128Register>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = {r3, // a1
+ r4, // a2
+ r5, // a3
+ r6, // a4
+ r7, // a5
+ r8, // a6
+ r9, // a7
+ r10, // a8
+ r11};
+
+const int kNumJSCallerSaved = 9;
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedReg(0) returns r0.code() == 0
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = {r14, r15, r16, r17, r18, r19, r20, r21, r22,
+ r23, r24, r25, r26, r27, r28, r29, r30, fp};
+
+const int kNumCalleeSaved = 18;
+
+const DoubleRegList kCallerSavedDoubles = {d0, d1, d2, d3, d4, d5, d6,
+ d7, d8, d9, d10, d11, d12, d13};
+
+const Simd128RegList kCallerSavedSimd128s = {v0, v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13};
+
+const int kNumCallerSavedDoubles = 14;
+
+const DoubleRegList kCalleeSavedDoubles = {d14, d15, d16, d17, d18, d19,
+ d20, d21, d22, d23, d24, d25,
+ d26, d27, d28, d29, d30, d31};
+
+const int kNumCalleeSavedDoubles = 18;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_PPC_REGLIST_PPC_H_
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index d5ea2879da..c9c3a98407 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -1,12 +1,11 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
+// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_REGISTER_ARCH_H_
#define V8_CODEGEN_REGISTER_ARCH_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@@ -32,39 +31,4 @@
#error Unknown architecture.
#endif
-namespace v8 {
-namespace internal {
-
-constexpr int AddArgumentPaddingSlots(int argument_count) {
- return argument_count + ArgumentPaddingSlots(argument_count);
-}
-
-constexpr bool ShouldPadArguments(int argument_count) {
- return ArgumentPaddingSlots(argument_count) != 0;
-}
-
-#ifdef DEBUG
-struct CountIfValidRegisterFunctor {
- template <typename RegType>
- constexpr int operator()(int count, RegType reg) const {
- return count + (reg.is_valid() ? 1 : 0);
- }
-};
-
-template <typename RegType, typename... RegTypes,
- // All arguments must be either Register or DoubleRegister.
- typename = typename std::enable_if<
- base::is_same<Register, RegType, RegTypes...>::value ||
- base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
-inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
- int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
- int num_given_regs =
- base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
- return num_different_regs < num_given_regs;
-}
-#endif
-
-} // namespace internal
-} // namespace v8
-
#endif // V8_CODEGEN_REGISTER_ARCH_H_
diff --git a/deps/v8/src/codegen/register-base.h b/deps/v8/src/codegen/register-base.h
new file mode 100644
index 0000000000..b6cbc963e3
--- /dev/null
+++ b/deps/v8/src/codegen/register-base.h
@@ -0,0 +1,85 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_REGISTER_BASE_H_
+#define V8_CODEGEN_REGISTER_BASE_H_
+
+#include "src/base/bits.h"
+#include "src/base/bounds.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+namespace internal {
+
+// Base type for CPU Registers.
+//
+// 1) We would prefer to use an enum for registers, but enum values are
+// assignment-compatible with int, which has caused code-generation bugs.
+//
+// 2) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the class in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+template <typename SubType, int kAfterLastRegister>
+class RegisterBase {
+ public:
+ static constexpr int8_t kCode_no_reg = -1;
+ static constexpr int8_t kNumRegisters = kAfterLastRegister;
+
+ static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
+
+ static constexpr SubType from_code(int8_t code) {
+ DCHECK(base::IsInRange(static_cast<int>(code), 0, kNumRegisters - 1));
+ return SubType{code};
+ }
+
+ constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
+
+ constexpr int8_t code() const {
+ DCHECK(is_valid());
+ return reg_code_;
+ }
+
+ inline constexpr bool operator==(SubType other) const {
+ return reg_code_ == other.reg_code_;
+ }
+ inline constexpr bool operator!=(SubType other) const {
+ return reg_code_ != other.reg_code_;
+ }
+
+ // Used to print the name of some special registers.
+ static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
+
+ protected:
+ explicit constexpr RegisterBase(int code) : reg_code_(code) {}
+
+ private:
+ int8_t reg_code_;
+ STATIC_ASSERT(kAfterLastRegister <= kMaxInt8);
+};
+
+template <typename RegType,
+ typename = decltype(RegisterName(std::declval<RegType>()))>
+inline std::ostream& operator<<(std::ostream& os, RegType reg) {
+ return os << RegisterName(reg);
+}
+
+// Helper macros to define a {RegisterName} method based on a macro list
+// containing all names.
+#define DEFINE_REGISTER_NAMES_NAME(name) #name,
+#define DEFINE_REGISTER_NAMES(RegType, LIST) \
+ inline const char* RegisterName(RegType reg) { \
+ static constexpr const char* Names[] = {LIST(DEFINE_REGISTER_NAMES_NAME)}; \
+ STATIC_ASSERT(arraysize(Names) == RegType::kNumRegisters); \
+ return reg.is_valid() ? Names[reg.code()] : "invalid"; \
+ }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_REGISTER_BASE_H_
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index 2fc97e2fec..91f06aec5b 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -3,9 +3,10 @@
// found in the LICENSE file.
#include "src/codegen/register-configuration.h"
+
#include "src/base/lazy-instance.h"
#include "src/codegen/cpu-features.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/common/globals.h"
namespace v8 {
@@ -18,6 +19,10 @@ static const int kMaxAllocatableGeneralRegisterCount =
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount =
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
+#if V8_TARGET_ARCH_RISCV64
+static const int kMaxAllocatableSIMD128RegisterCount =
+ ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
+#endif
static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
@@ -33,6 +38,13 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
#endif // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE
+#if V8_TARGET_ARCH_RISCV64
+static const int kAllocatableSIMD128Codes[] = {
+#define REGISTER_CODE(R) kVRCode_##R,
+ ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+#endif // V8_TARGET_ARCH_RISCV64
+
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
@@ -42,6 +54,15 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
+static int get_num_simd128_registers() {
+ return
+#if V8_TARGET_ARCH_RISCV64
+ Simd128Register::kNumRegisters;
+#else
+ 0;
+#endif // V8_TARGET_ARCH_RISCV64
+}
+
// Callers on architectures other than Arm expect this to be be constant
// between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
@@ -77,6 +98,15 @@ static int get_num_allocatable_double_registers() {
#undef REGISTER_COUNT
+static int get_num_allocatable_simd128_registers() {
+ return
+#if V8_TARGET_ARCH_RISCV64
+ kMaxAllocatableSIMD128RegisterCount;
+#else
+ 0;
+#endif
+}
+
// Callers on architectures other than Arm expect this to be be constant
// between build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
@@ -89,16 +119,24 @@ static const int* get_allocatable_double_codes() {
#endif
}
+static const int* get_allocatable_simd128_codes() {
+ return
+#if V8_TARGET_ARCH_RISCV64
+ kAllocatableSIMD128Codes;
+#else
+ kAllocatableDoubleCodes;
+#endif
+}
+
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- kMaxAllocatableGeneralRegisterCount,
- get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
- get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
- }
+ kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
+ get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount,
+ get_num_allocatable_double_registers(),
+ get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes,
+ get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
@@ -114,12 +152,12 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
std::unique_ptr<int[]> allocatable_general_register_codes,
std::unique_ptr<char const*[]> allocatable_general_register_names)
: RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- num_allocatable_general_registers,
+ kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
+ get_num_simd128_registers(), num_allocatable_general_registers,
get_num_allocatable_double_registers(),
+ get_num_allocatable_simd128_registers(),
allocatable_general_register_codes.get(),
- get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
+ get_allocatable_double_codes(), get_allocatable_simd128_codes()),
allocatable_general_register_codes_(
std::move(allocatable_general_register_codes)),
allocatable_general_register_names_(
@@ -152,13 +190,13 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
- int num = NumRegs(registers);
+ int num = registers.Count();
std::unique_ptr<int[]> codes{new int[num]};
std::unique_ptr<char const* []> names { new char const*[num] };
int counter = 0;
for (int i = 0; i < Default()->num_allocatable_general_registers(); ++i) {
auto reg = Register::from_code(Default()->GetAllocatableGeneralCode(i));
- if (reg.bit() & registers) {
+ if (registers.has(reg)) {
DCHECK(counter < num);
codes[counter] = reg.code();
names[counter] = RegisterName(Register::from_code(i));
@@ -171,18 +209,20 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
}
RegisterConfiguration::RegisterConfiguration(
- int num_general_registers, int num_double_registers,
+ AliasingKind fp_aliasing_kind, int num_general_registers,
+ int num_double_registers, int num_simd128_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
- const int* allocatable_general_codes, const int* allocatable_double_codes,
- AliasingKind fp_aliasing_kind)
+ int num_allocatable_simd128_registers, const int* allocatable_general_codes,
+ const int* allocatable_double_codes,
+ const int* independent_allocatable_simd128_codes)
: num_general_registers_(num_general_registers),
num_float_registers_(0),
num_double_registers_(num_double_registers),
- num_simd128_registers_(0),
+ num_simd128_registers_(num_simd128_registers),
num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers),
- num_allocatable_simd128_registers_(0),
+ num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
allocatable_general_codes_mask_(0),
allocatable_float_codes_mask_(0),
allocatable_double_codes_mask_(0),
@@ -200,7 +240,7 @@ RegisterConfiguration::RegisterConfiguration(
allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
}
- if (fp_aliasing_kind_ == COMBINE) {
+ if (fp_aliasing_kind_ == AliasingKind::kCombine) {
num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
? num_double_registers_ * 2
: kMaxFPRegisters;
@@ -227,8 +267,7 @@ RegisterConfiguration::RegisterConfiguration(
}
last_simd128_code = next_simd128_code;
}
- } else {
- DCHECK(fp_aliasing_kind_ == OVERLAP);
+ } else if (fp_aliasing_kind_ == AliasingKind::kOverlap) {
num_float_registers_ = num_simd128_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
num_allocatable_double_registers_;
@@ -238,6 +277,21 @@ RegisterConfiguration::RegisterConfiguration(
}
allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
allocatable_double_codes_mask_;
+ } else {
+ DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent);
+ DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
+ num_float_registers_ = num_double_registers_;
+ num_allocatable_float_registers_ = num_allocatable_double_registers_;
+ for (int i = 0; i < num_allocatable_float_registers_; ++i) {
+ allocatable_float_codes_[i] = allocatable_double_codes_[i];
+ }
+ allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
+ for (int i = 0; i < num_allocatable_simd128_registers; i++) {
+ allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
+ }
+ for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
+ allocatable_simd128_codes_mask_ |= (1 << allocatable_simd128_codes_[i]);
+ }
}
}
@@ -250,7 +304,7 @@ STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int* alias_base_index) const {
- DCHECK(fp_aliasing_kind_ == COMBINE);
+ DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) {
*alias_base_index = index;
@@ -276,7 +330,7 @@ int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int other_index) const {
- DCHECK(fp_aliasing_kind_ == COMBINE);
+ DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) {
return index == other_index;
diff --git a/deps/v8/src/codegen/register-configuration.h b/deps/v8/src/codegen/register-configuration.h
index cdf9ddae35..537079cdd6 100644
--- a/deps/v8/src/codegen/register-configuration.h
+++ b/deps/v8/src/codegen/register-configuration.h
@@ -14,17 +14,8 @@
namespace v8 {
namespace internal {
-// An architecture independent representation of the sets of registers available
-// for instruction creation.
class V8_EXPORT_PRIVATE RegisterConfiguration {
public:
- enum AliasingKind {
- // Registers alias a single register of every other size (e.g. Intel).
- OVERLAP,
- // Registers alias two registers of the next smaller size (e.g. ARM).
- COMBINE
- };
-
// Architecture independent maxes.
static constexpr int kMaxGeneralRegisters = 32;
static constexpr int kMaxFPRegisters = 32;
@@ -40,12 +31,14 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
- RegisterConfiguration(int num_general_registers, int num_double_registers,
- int num_allocatable_general_registers,
- int num_allocatable_double_registers,
- const int* allocatable_general_codes,
- const int* allocatable_double_codes,
- AliasingKind fp_aliasing_kind);
+ RegisterConfiguration(
+ AliasingKind fp_aliasing_kind, int num_general_registers,
+ int num_double_registers, int num_simd128_registers,
+ int num_allocatable_general_registers,
+ int num_allocatable_double_registers,
+ int num_allocatable_simd128_registers,
+ const int* allocatable_general_codes, const int* allocatable_double_codes,
+ const int* independent_allocatable_simd128_codes = nullptr);
int num_general_registers() const { return num_general_registers_; }
int num_float_registers() const { return num_float_registers_; }
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 49f67ceb1d..28dc5981fe 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -1,91 +1,46 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_REGISTER_H_
#define V8_CODEGEN_REGISTER_H_
-#include "src/base/bounds.h"
+#include "src/codegen/register-arch.h"
#include "src/codegen/reglist.h"
namespace v8 {
-
namespace internal {
-// Base type for CPU Registers.
-//
-// 1) We would prefer to use an enum for registers, but enum values are
-// assignment-compatible with int, which has caused code-generation bugs.
-//
-// 2) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the class in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-template <typename SubType, int kAfterLastRegister>
-class RegisterBase {
- public:
- static constexpr int kCode_no_reg = -1;
- static constexpr int kNumRegisters = kAfterLastRegister;
-
- static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
-
- static constexpr SubType from_code(int code) {
- DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
- return SubType{code};
- }
-
- template <typename... Register>
- static constexpr RegList ListOf(Register... regs) {
- return CombineRegLists(regs.bit()...);
- }
-
- constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
-
- constexpr int code() const {
- DCHECK(is_valid());
- return reg_code_;
- }
+constexpr int AddArgumentPaddingSlots(int argument_count) {
+ return argument_count + ArgumentPaddingSlots(argument_count);
+}
- constexpr RegList bit() const {
- return is_valid() ? RegList{1} << code() : RegList{};
- }
+constexpr bool ShouldPadArguments(int argument_count) {
+ return ArgumentPaddingSlots(argument_count) != 0;
+}
- inline constexpr bool operator==(SubType other) const {
- return reg_code_ == other.reg_code_;
- }
- inline constexpr bool operator!=(SubType other) const {
- return reg_code_ != other.reg_code_;
+#ifdef DEBUG
+struct CountIfValidRegisterFunctor {
+ template <typename RegType>
+ constexpr int operator()(int count, RegType reg) const {
+ return count + (reg.is_valid() ? 1 : 0);
}
-
- // Used to print the name of some special registers.
- static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
-
- protected:
- explicit constexpr RegisterBase(int code) : reg_code_(code) {}
-
- private:
- int reg_code_;
};
-template <typename RegType,
- typename = decltype(RegisterName(std::declval<RegType>()))>
-inline std::ostream& operator<<(std::ostream& os, RegType reg) {
- return os << RegisterName(reg);
+template <typename RegType, typename... RegTypes,
+ // All arguments must be either Register or DoubleRegister.
+ typename = typename std::enable_if<
+ base::is_same<Register, RegType, RegTypes...>::value ||
+ base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
+inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
+ int num_different_regs = RegListBase<RegType>{first_reg, regs...}.Count();
+ int num_given_regs =
+ base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
+ return num_different_regs < num_given_regs;
}
-
-// Helper macros to define a {RegisterName} method based on a macro list
-// containing all names.
-#define DEFINE_REGISTER_NAMES_NAME(name) #name,
-#define DEFINE_REGISTER_NAMES(RegType, LIST) \
- inline const char* RegisterName(RegType reg) { \
- static constexpr const char* Names[] = {LIST(DEFINE_REGISTER_NAMES_NAME)}; \
- STATIC_ASSERT(arraysize(Names) == RegType::kNumRegisters); \
- return reg.is_valid() ? Names[reg.code()] : "invalid"; \
- }
+#endif
} // namespace internal
} // namespace v8
+
#endif // V8_CODEGEN_REGISTER_H_
diff --git a/deps/v8/src/codegen/reglist-base.h b/deps/v8/src/codegen/reglist-base.h
new file mode 100644
index 0000000000..6fc67cd304
--- /dev/null
+++ b/deps/v8/src/codegen/reglist-base.h
@@ -0,0 +1,232 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_REGLIST_BASE_H_
+#define V8_CODEGEN_REGLIST_BASE_H_
+
+#include <cstdint>
+#include <initializer_list>
+
+#include "src/base/bits.h"
+#include "src/base/iterator.h"
+#include "src/base/template-utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Register;
+
+template <typename RegisterT>
+class RegListBase {
+ using num_registers_sized_storage_t = typename std::conditional<
+ RegisterT::kNumRegisters <= 16, uint16_t,
+ typename std::conditional<RegisterT::kNumRegisters <= 32, uint32_t,
+ uint64_t>::type>::type;
+ STATIC_ASSERT(RegisterT::kNumRegisters <= 64);
+
+ public:
+ class Iterator;
+ class ReverseIterator;
+
+#ifdef V8_TARGET_ARCH_ARM64
+ // On ARM64 the sp register has the special value 63 (kSPRegInternalCode)
+ using storage_t = typename std::conditional<
+ std::is_same<RegisterT, v8::internal::Register>::value, uint64_t,
+ num_registers_sized_storage_t>::type;
+#else
+ using storage_t = num_registers_sized_storage_t;
+#endif
+
+ constexpr RegListBase() = default;
+ constexpr RegListBase(std::initializer_list<RegisterT> regs) {
+ for (RegisterT reg : regs) {
+ set(reg);
+ }
+ }
+
+ constexpr void set(RegisterT reg) {
+ if (!reg.is_valid()) return;
+ regs_ |= storage_t{1} << reg.code();
+ }
+
+ constexpr void clear(RegisterT reg) {
+ if (!reg.is_valid()) return;
+ regs_ &= ~(storage_t{1} << reg.code());
+ }
+
+ constexpr bool has(RegisterT reg) const {
+ if (!reg.is_valid()) return false;
+ return (regs_ & (storage_t{1} << reg.code())) != 0;
+ }
+
+ constexpr void clear(RegListBase other) { regs_ &= ~other.regs_; }
+
+ constexpr bool is_empty() const { return regs_ == 0; }
+
+ constexpr unsigned Count() const {
+ return base::bits::CountPopulation(regs_);
+ }
+
+ constexpr RegListBase operator&(const RegListBase other) const {
+ return RegListBase(regs_ & other.regs_);
+ }
+
+ constexpr RegListBase operator|(const RegListBase other) const {
+ return RegListBase(regs_ | other.regs_);
+ }
+
+ constexpr RegListBase operator^(const RegListBase other) const {
+ return RegListBase(regs_ ^ other.regs_);
+ }
+
+ constexpr RegListBase operator-(const RegListBase other) const {
+ return RegListBase(regs_ & ~other.regs_);
+ }
+
+ constexpr RegListBase operator|(const RegisterT reg) const {
+ return *this | RegListBase{reg};
+ }
+
+ constexpr RegListBase operator-(const RegisterT reg) const {
+ return *this - RegListBase{reg};
+ }
+
+ constexpr RegListBase& operator&=(const RegListBase other) {
+ regs_ &= other.regs_;
+ return *this;
+ }
+
+ constexpr RegListBase& operator|=(const RegListBase other) {
+ regs_ |= other.regs_;
+ return *this;
+ }
+
+ constexpr bool operator==(const RegListBase other) const {
+ return regs_ == other.regs_;
+ }
+ constexpr bool operator!=(const RegListBase other) const {
+ return regs_ != other.regs_;
+ }
+
+ constexpr RegisterT first() const {
+ DCHECK(!is_empty());
+ int first_code = base::bits::CountTrailingZerosNonZero(regs_);
+ return RegisterT::from_code(first_code);
+ }
+
+ constexpr RegisterT last() const {
+ DCHECK(!is_empty());
+ int last_code =
+ 8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
+ return RegisterT::from_code(last_code);
+ }
+
+ constexpr RegisterT PopFirst() {
+ RegisterT reg = first();
+ clear(reg);
+ return reg;
+ }
+
+ constexpr storage_t bits() const { return regs_; }
+
+ inline Iterator begin() const;
+ inline Iterator end() const;
+
+ inline ReverseIterator rbegin() const;
+ inline ReverseIterator rend() const;
+
+ static RegListBase FromBits(storage_t bits) { return RegListBase(bits); }
+
+ template <storage_t bits>
+ static constexpr RegListBase FromBits() {
+ return RegListBase{bits};
+ }
+
+ private:
+ // Unchecked constructor. Only use for valid bits.
+ explicit constexpr RegListBase(storage_t bits) : regs_(bits) {}
+
+ storage_t regs_ = 0;
+};
+
+template <typename RegisterT>
+class RegListBase<RegisterT>::Iterator
+ : public base::iterator<std::forward_iterator_tag, RegisterT> {
+ public:
+ RegisterT operator*() { return remaining_.first(); }
+ Iterator& operator++() {
+ remaining_.clear(remaining_.first());
+ return *this;
+ }
+ bool operator==(Iterator other) { return remaining_ == other.remaining_; }
+ bool operator!=(Iterator other) { return remaining_ != other.remaining_; }
+
+ private:
+ explicit Iterator(RegListBase<RegisterT> remaining) : remaining_(remaining) {}
+ friend class RegListBase;
+
+ RegListBase<RegisterT> remaining_;
+};
+
+template <typename RegisterT>
+class RegListBase<RegisterT>::ReverseIterator
+ : public base::iterator<std::forward_iterator_tag, RegisterT> {
+ public:
+ RegisterT operator*() { return remaining_.last(); }
+ ReverseIterator& operator++() {
+ remaining_.clear(remaining_.last());
+ return *this;
+ }
+ bool operator==(ReverseIterator other) {
+ return remaining_ == other.remaining_;
+ }
+ bool operator!=(ReverseIterator other) {
+ return remaining_ != other.remaining_;
+ }
+
+ private:
+ explicit ReverseIterator(RegListBase<RegisterT> remaining)
+ : remaining_(remaining) {}
+ friend class RegListBase;
+
+ RegListBase<RegisterT> remaining_;
+};
+
+template <typename RegisterT>
+typename RegListBase<RegisterT>::Iterator RegListBase<RegisterT>::begin()
+ const {
+ return Iterator{*this};
+}
+template <typename RegisterT>
+typename RegListBase<RegisterT>::Iterator RegListBase<RegisterT>::end() const {
+ return Iterator{RegListBase<RegisterT>{}};
+}
+
+template <typename RegisterT>
+typename RegListBase<RegisterT>::ReverseIterator
+RegListBase<RegisterT>::rbegin() const {
+ return ReverseIterator{*this};
+}
+template <typename RegisterT>
+typename RegListBase<RegisterT>::ReverseIterator RegListBase<RegisterT>::rend()
+ const {
+ return ReverseIterator{RegListBase<RegisterT>{}};
+}
+
+template <typename RegisterT>
+inline std::ostream& operator<<(std::ostream& os,
+ RegListBase<RegisterT> reglist) {
+ os << "{";
+ for (bool first = true; !reglist.is_empty(); first = false) {
+ RegisterT reg = reglist.first();
+ reglist.clear(reg);
+ os << (first ? "" : ", ") << reg;
+ }
+ return os << "}";
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_REGLIST_BASE_H_
diff --git a/deps/v8/src/codegen/reglist.h b/deps/v8/src/codegen/reglist.h
index 4f1d35267d..eb9ff45163 100644
--- a/deps/v8/src/codegen/reglist.h
+++ b/deps/v8/src/codegen/reglist.h
@@ -5,39 +5,39 @@
#ifndef V8_CODEGEN_REGLIST_H_
#define V8_CODEGEN_REGLIST_H_
-#include <cstdint>
-
-#include "src/base/bits.h"
-#include "src/base/template-utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Register configurations.
-#if V8_TARGET_ARCH_ARM64
-using RegList = uint64_t;
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/reglist-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/reglist-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/reglist-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/reglist-arm.h"
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#include "src/codegen/ppc/reglist-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/reglist-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/reglist-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/reglist-loong64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/reglist-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/reglist-riscv64.h"
#else
-using RegList = uint32_t;
+#error Unknown architecture.
#endif
-// Get the number of registers in a given register list.
-constexpr int NumRegs(RegList list) {
- return base::bits::CountPopulation(list);
-}
+namespace v8 {
+namespace internal {
-namespace detail {
-// Combine two RegLists by building the union of the contained registers.
-// TODO(clemensb): Replace by constexpr lambda once we have C++17.
-constexpr RegList CombineRegListsHelper(RegList list1, RegList list2) {
- return list1 | list2;
-}
-} // namespace detail
+static constexpr RegList kEmptyRegList = {};
-// Combine several RegLists by building the union of the contained registers.
-template <typename... RegLists>
-constexpr RegList CombineRegLists(RegLists... lists) {
- return base::fold(detail::CombineRegListsHelper, 0, lists...);
-}
+#define LIST_REG(V) V,
+static constexpr RegList kAllocatableGeneralRegisters = {
+ ALLOCATABLE_GENERAL_REGISTERS(LIST_REG) Register::no_reg()};
+#undef LIST_REG
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 6057eca4a1..d1b4ed2b92 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -476,7 +476,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
- os << " (" << Deoptimizer::MessageFor(type, false)
+ os << " (" << Deoptimizer::MessageFor(type)
<< " deoptimization bailout)";
}
} else if (IsConstPool(rmode_)) {
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 9304f012d0..a5809286ef 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,9 +57,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
-#ifdef CAN_USE_RVV_INSTRUCTIONS
+#if (defined CAN_USE_RVV_INSTRUCTIONS) || (defined USE_SIMULATOR)
answer |= 1u << RISCV_SIMD;
-#endif // def CAN_USE_RVV_INSTRUCTIONS
+#endif // def CAN_USE_RVV_INSTRUCTIONS || USE_SIMULATOR
return answer;
}
@@ -72,6 +72,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+ if (cpu.has_rvv()) supported_ |= 1u << RISCV_SIMD;
// Set a static value on whether SIMD is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
// at runtime in builtins using an extern ref. Other callers should use
@@ -213,7 +214,7 @@ Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
VU(this),
- scratch_register_list_(t3.bit() | t5.bit()),
+ scratch_register_list_({t3, t5}),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -270,7 +271,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -1269,6 +1270,16 @@ void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
((Nf << kRvvNfShift) & kRvvNfMask);
emit(instr);
}
+// vmv_xs vcpop_m vfirst_m
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ uint8_t vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
// ----- Instruction class templates match those in the compiler
void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
@@ -2495,7 +2506,7 @@ void Assembler::vmv_vi(VRegister vd, uint8_t simm5) {
}
void Assembler::vmv_xs(Register rd, VRegister vs2) {
- GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, v0, vs2, NoMask);
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
}
void Assembler::vmv_sx(VRegister vd, Register rs1) {
@@ -2578,6 +2589,12 @@ void Assembler::vid_v(VRegister vd, MaskType mask) {
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
}
+#define DEFINE_OPFWV(name, funct6) \
+ void Assembler::name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
#define DEFINE_OPFRED(name, funct6) \
void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
@@ -2616,6 +2633,12 @@ void Assembler::vid_v(VRegister vd, MaskType mask) {
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
}
+#define DEFINE_OPFWF(name, funct6) \
+ void Assembler::name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
#define DEFINE_OPFVV_FMA(name, funct6) \
void Assembler::name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
MaskType mask) { \
@@ -2761,6 +2784,24 @@ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+// Vector Widening Floating-Point Add/Subtract Instructions
+DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+// Vector Widening Floating-Point Reduction Instructions
+DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+// Vector Widening Floating-Point Multiply
+DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
@@ -2788,6 +2829,16 @@ DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+// Vector Widening Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
// Vector Narrowing Fixed-Point Clip Instructions
DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
@@ -2808,7 +2859,9 @@ DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
#undef DEFINE_OPFVV
+#undef DEFINE_OPFWV
#undef DEFINE_OPFVF
+#undef DEFINE_OPFWF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE
@@ -3115,6 +3168,14 @@ void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
+void Assembler::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
+}
+
+void Assembler::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
+}
+
// Privileged
void Assembler::uret() {
GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010));
@@ -3867,14 +3928,17 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
+ DCHECK(!available_->is_empty());
+ int index =
+ static_cast<int>(base::bits::CountTrailingZeros32(available_->bits()));
+ *available_ &= RegList::FromBits(~(1U << index));
return Register::from_code(index);
}
-bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+bool UseScratchRegisterScope::hasAvailable() const {
+ return !available_->is_empty();
+}
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 63e5dde19e..2b0d262369 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -664,25 +664,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
}
- void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
- TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
-
- void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
- TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
-
- inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
- TailAgnosticType tail = tu,
- MaskAgnosticType mask = mu) {
- vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
- }
-
- inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
- MaskAgnosticType mask = mu) {
- vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
- }
-
- void vsetvl(Register rd, Register rs1, Register rs2);
-
void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask = NoMask);
void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
@@ -788,6 +769,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
+#define DEFINE_OPFWV(name, funct6) \
+ void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
#define DEFINE_OPFRED(name, funct6) \
void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
@@ -796,6 +781,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask = NoMask);
+#define DEFINE_OPFWF(name, funct6) \
+ void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
#define DEFINE_OPFVV_FMA(name, funct6) \
void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
MaskType mask = NoMask);
@@ -919,6 +908,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+ DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+ // Vector Widening Floating-Point Reduction Instructions
+ DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+ DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+ // Vector Widening Floating-Point Multiply
+ DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+ DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
@@ -952,6 +959,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
// Vector Narrowing Fixed-Point Clip Instructions
DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
@@ -974,7 +991,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef DEFINE_OPMVV
#undef DEFINE_OPMVX
#undef DEFINE_OPFVV
+#undef DEFINE_OPFWV
#undef DEFINE_OPFVF
+#undef DEFINE_OPFWF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE
@@ -1001,6 +1020,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
+ DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
+ DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
#undef DEFINE_VFUNARY
void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
@@ -1017,6 +1038,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
vfsngjx_vv(dst, src, src, mask);
}
+ void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
// Privileged
void uret();
void sret();
@@ -1293,6 +1318,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
constpool_.RecordEntry(data, rmode);
}
+ friend class VectorUnit;
class VectorUnit {
public:
inline int32_t sew() const { return 2 ^ (sew_ + 3); }
@@ -1445,6 +1471,25 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
private:
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1644,7 +1689,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
uint8_t Nf);
-
+ // vmv_xs vcpop_m vfirst_m
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, uint8_t vs1,
+ VRegister vs2, MaskType mask);
// Labels.
void print(const Label* L);
void bind_to(Label* L, int pos);
@@ -1747,13 +1794,15 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
void Include(const RegList& list) { *available_ |= list; }
- void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Exclude(const RegList& list) {
+ *available_ &= RegList::FromBits(~list.bits());
+ }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Include(list);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
- RegList list(reg1.bit() | reg2.bit());
+ RegList list({reg1, reg2});
Exclude(list);
}
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index b5afe9b1df..67856b771b 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -957,7 +957,7 @@ enum Opcode : uint32_t {
VFCLASS_V = 0b10000,
VFSQRT_V = 0b00000,
- VFSQRT7_V = 0b00100,
+ VFRSQRT7_V = 0b00100,
VFREC7_V = 0b00101,
VFADD_FUNCT6 = 0b000000,
@@ -976,6 +976,35 @@ enum Opcode : uint32_t {
RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ VFWADD_FUNCT6 = 0b110000,
+ RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_FUNCT6 = 0b110010,
+ RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFWADD_W_FUNCT6 = 0b110100,
+ RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_W_FUNCT6 = 0b110110,
+ RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Reduction Instructions
+ VFWREDUSUM_FUNCT6 = 0b110001,
+ RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift),
+
+ VFWREDOSUM_FUNCT6 = 0b110011,
+ RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Multiply
+ VFWMUL_FUNCT6 = 0b111000,
+ RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+
VMFEQ_FUNCT6 = 0b011000,
RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
@@ -1053,6 +1082,23 @@ enum Opcode : uint32_t {
RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ VFWMACC_FUNCT6 = 0b111100,
+ RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMACC_FUNCT6 = 0b111101,
+ RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWMSAC_FUNCT6 = 0b111110,
+ RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMSAC_FUNCT6 = 0b111111,
+ RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+
VNCLIP_FUNCT6 = 0b101111,
RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
index d86cbef0c0..62587b74f9 100644
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -25,21 +25,22 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
- if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
- if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
- if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
- if (argc >= 5) DCHECK(allocatable_regs | a4.bit());
- if (argc >= 6) DCHECK(allocatable_regs | a5.bit());
- if (argc >= 7) DCHECK(allocatable_regs | a6.bit());
- if (argc >= 8) DCHECK(allocatable_regs | a7.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(a0));
+ if (argc >= 2) DCHECK(allocatable_regs.has(a1));
+ if (argc >= 3) DCHECK(allocatable_regs.has(a2));
+ if (argc >= 4) DCHECK(allocatable_regs.has(a3));
+ if (argc >= 5) DCHECK(allocatable_regs.has(a4));
+ if (argc >= 6) DCHECK(allocatable_regs.has(a5));
+ if (argc >= 7) DCHECK(allocatable_regs.has(a6));
+ if (argc >= 8) DCHECK(allocatable_regs.has(a7));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(a1, a5, a4, a2, a0, a3);
+ // TODO(Yuxiang): Remove a7 which is just there for padding.
+ return RegisterArray(a1, a5, a4, a2, a0, a3, kContextRegister, a7);
}
// static
@@ -65,6 +66,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return a0; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return a1;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return a3;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return a4;
@@ -107,7 +138,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -117,6 +148,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // a1 : the source
+ // a0 : the excluded property count
+ // a2 : the excluded property base
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
@@ -234,6 +281,14 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ // a0: left operand
+ // a1: right operand
+ // a2: feedback slot
+ return RegisterArray(a0, a1, a2);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(a1, // kApiFunctionAddress
a2, // kArgc
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 8b3b76da32..3efba1211e 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -48,22 +48,13 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kSystemPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -72,24 +63,15 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
return bytes;
@@ -100,23 +82,13 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
- bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ bytes += kCallerSavedFPU.Count() * kDoubleSize;
}
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
return bytes;
}
@@ -205,25 +177,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
@@ -296,6 +256,7 @@ void TurboAssembler::CallRecordWriteStub(
Register scratch = temps.Acquire();
li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
+ RecordComment("]");
} else {
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
@@ -1692,20 +1653,19 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
}
}
-static RegList t_regs = Register::ListOf(t0, t1, t2, t3, t4, t5, t6);
-static RegList a_regs = Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7);
-static RegList s_regs =
- Register::ListOf(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11);
+static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6};
+static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7};
+static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11};
void TurboAssembler::MultiPush(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
#define TEST_AND_PUSH_REG(reg) \
- if ((regs & reg.bit()) != 0) { \
+ if (regs.has(reg)) { \
stack_offset -= kSystemPointerSize; \
Sd(reg, MemOperand(sp, stack_offset)); \
- regs &= ~reg.bit(); \
+ regs.clear(reg); \
}
#define T_REGS(V) V(t6) V(t5) V(t4) V(t3) V(t2) V(t1) V(t0)
@@ -1723,17 +1683,17 @@ void TurboAssembler::MultiPush(RegList regs) {
TEST_AND_PUSH_REG(sp);
TEST_AND_PUSH_REG(gp);
TEST_AND_PUSH_REG(tp);
- if ((regs & s_regs) != 0) {
+ if (!(regs & s_regs).is_empty()) {
S_REGS(TEST_AND_PUSH_REG)
}
- if ((regs & a_regs) != 0) {
+ if (!(regs & a_regs).is_empty()) {
A_REGS(TEST_AND_PUSH_REG)
}
- if ((regs & t_regs) != 0) {
+ if (!(regs & t_regs).is_empty()) {
T_REGS(TEST_AND_PUSH_REG)
}
- DCHECK_EQ(regs, 0);
+ DCHECK(regs.is_empty());
#undef TEST_AND_PUSH_REG
#undef T_REGS
@@ -1745,10 +1705,10 @@ void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
#define TEST_AND_POP_REG(reg) \
- if ((regs & reg.bit()) != 0) { \
+ if (regs.has(reg)) { \
Ld(reg, MemOperand(sp, stack_offset)); \
stack_offset += kSystemPointerSize; \
- regs &= ~reg.bit(); \
+ regs.clear(reg); \
}
#define T_REGS(V) V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6)
@@ -1757,13 +1717,13 @@ void TurboAssembler::MultiPop(RegList regs) {
V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) V(s10) V(s11)
// MultiPop pops from the stack in reverse order as MultiPush
- if ((regs & t_regs) != 0) {
+ if (!(regs & t_regs).is_empty()) {
T_REGS(TEST_AND_POP_REG)
}
- if ((regs & a_regs) != 0) {
+ if (!(regs & a_regs).is_empty()) {
A_REGS(TEST_AND_POP_REG)
}
- if ((regs & s_regs) != 0) {
+ if (!(regs & s_regs).is_empty()) {
S_REGS(TEST_AND_POP_REG)
}
TEST_AND_POP_REG(tp);
@@ -1772,7 +1732,7 @@ void TurboAssembler::MultiPop(RegList regs) {
TEST_AND_POP_REG(fp);
TEST_AND_POP_REG(ra);
- DCHECK_EQ(regs, 0);
+ DCHECK(regs.is_empty());
addi(sp, sp, stack_offset);
@@ -1782,24 +1742,24 @@ void TurboAssembler::MultiPop(RegList regs) {
#undef A_REGS
}
-void TurboAssembler::MultiPushFPU(RegList regs) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
Sub64(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
StoreDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
-void TurboAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
LoadDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
@@ -3306,6 +3266,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t6, cond, rs, rt);
+ RecordComment("]");
return;
}
@@ -3385,6 +3346,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
RecordCommentForOffHeapTrampoline(builtin);
li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t6, cond, rs, rt);
+ RecordComment("]");
return;
}
@@ -3786,11 +3748,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Sub64(t0, t0, Operand(1));
Add64(src, src, Operand(kSystemPointerSize));
Add64(dest, dest, Operand(kSystemPointerSize));
- if (kJSArgcIncludesReceiver) {
- Branch(&copy, gt, t0, Operand(zero_reg));
- } else {
- Branch(&copy, ge, t0, Operand(zero_reg));
- }
+ Branch(&copy, gt, t0, Operand(zero_reg));
}
// Fill remaining expected arguments with undefined values.
@@ -4153,6 +4111,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
void TurboAssembler::MulOverflow32(Register dst, Register left,
const Operand& right, Register overflow) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
@@ -4178,6 +4137,7 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All parameters are on the stack. a0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4197,6 +4157,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -4207,6 +4168,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
+ ASM_CODE_COMMENT(this);
PrepareCEntryFunction(builtin);
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, builtin_exit_frame);
@@ -4216,6 +4178,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
// Ld a Address from a constant pool.
// Record a value into constant pool.
+ ASM_CODE_COMMENT(this);
if (!FLAG_riscv_constant_pool) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
} else {
@@ -4229,6 +4192,7 @@ void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
+ ASM_CODE_COMMENT(this);
Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
And(out, in, Operand(~kWeakHeapObjectMask));
}
@@ -4238,6 +4202,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t
// dummy_stats_counter_ field.
@@ -4253,6 +4218,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
// This operation has to be exactly 32-bit wide in case the external
// reference table redirects the counter to a uint32_t
// dummy_stats_counter_ field.
@@ -4335,11 +4301,13 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void TurboAssembler::LoadMap(Register destination, Register object) {
+ ASM_CODE_COMMENT(this);
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
+ ASM_CODE_COMMENT(this);
LoadMap(dst, cp);
LoadTaggedPointerField(
dst, FieldMemOperand(
@@ -4348,6 +4316,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(type)));
@@ -4357,6 +4326,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4372,6 +4342,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
addi(sp, fp, 2 * kSystemPointerSize);
Ld(ra, MemOperand(fp, 1 * kSystemPointerSize));
Ld(fp, MemOperand(fp, 0 * kSystemPointerSize));
@@ -4379,6 +4350,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -4431,12 +4403,15 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
// The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters;
- int space = kNumOfSavedRegisters * kDoubleSize;
+ int space = kNumCallerSavedFPU * kDoubleSize;
Sub64(sp, sp, Operand(space));
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(i);
- StoreDouble(reg, MemOperand(sp, i * kDoubleSize));
+ int count = 0;
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (kCallerSavedFPU.bits() & (1 << i)) {
+ FPURegister reg = FPURegister::from_code(i);
+ StoreDouble(reg, MemOperand(sp, count * kDoubleSize));
+ count++;
+ }
}
}
@@ -4461,19 +4436,23 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
- // Remember: we only need to restore every 2nd double FPU value.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ // Remember: we only need to restore kCallerSavedFPU.
Sub64(scratch, fp,
Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumOfSavedRegisters * kDoubleSize));
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(2 * i);
- LoadDouble(reg, MemOperand(scratch, i * kDoubleSize));
+ kNumCallerSavedFPU * kDoubleSize));
+ int cout = 0;
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (kCalleeSavedFPU.bits() & (1 << i)) {
+ FPURegister reg = FPURegister::from_code(i);
+ LoadDouble(reg, MemOperand(scratch, cout * kDoubleSize));
+ cout++;
+ }
}
}
@@ -4532,6 +4511,7 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::AssertStackIsAligned() {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -4552,6 +4532,7 @@ void MacroAssembler::AssertStackIsAligned() {
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ ASM_CODE_COMMENT(this);
if (SmiValuesAre32Bits()) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
@@ -4566,6 +4547,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
void TurboAssembler::SmiToInt32(Register smi) {
+ ASM_CODE_COMMENT(this);
if (FLAG_enable_slow_asserts) {
AssertSmi(smi);
}
@@ -4574,6 +4556,7 @@ void TurboAssembler::SmiToInt32(Register smi) {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4582,6 +4565,7 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK_EQ(0, kSmiTag);
@@ -4591,6 +4575,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4600,6 +4585,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4609,6 +4595,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
DCHECK(object != kScratchReg);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
@@ -4626,6 +4613,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4661,6 +4649,7 @@ void MacroAssembler::AssertCallableFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4675,6 +4664,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4703,6 +4693,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -4772,21 +4763,25 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
+ ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
}
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
+ ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
}
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
+ ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
}
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
+ ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
}
@@ -4811,6 +4806,7 @@ int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = ActivationFrameAlignment();
// Up to eight simple arguments in a0..a7, fa0..fa7.
@@ -4863,6 +4859,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
+ ASM_CODE_COMMENT(this);
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -4907,10 +4904,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Register scratch = t2;
auipc(pc_scratch, 0);
- // TODO(RISCV): Does this need an offset? It seems like this should be the
- // PC of the call, but MIPS does not seem to do that.
- // https://github.com/v8-riscv/v8/issues/378
-
// See x64 code for reasoning about how to address the isolate data fields.
if (root_array_available()) {
Sd(pc_scratch, MemOperand(kRootRegister,
@@ -4959,19 +4952,13 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -4994,6 +4981,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t6,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
@@ -5019,7 +5007,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
-
+ ASM_CODE_COMMENT(this);
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_off_heap, out;
@@ -5058,11 +5046,13 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
@@ -5110,38 +5100,34 @@ void TurboAssembler::StoreTaggedField(const Register& value,
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressTaggedSigned");
+ ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
if (FLAG_debug_code) {
// Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
Add64(destination, destination,
Operand(((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32));
}
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
Add64(destination, kPtrComprCageBaseRegister, destination);
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
And(destination, source, Operand(0xFFFFFFFF));
Add64(destination, kPtrComprCageBaseRegister, Operand(destination));
- RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressAnyTagged");
+ ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
Add64(destination, kPtrComprCageBaseRegister, destination);
- RecordComment("]");
}
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 89d88f7af2..1b04c73e6a 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -358,7 +358,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
- void MultiPushFPU(RegList regs);
+ void MultiPushFPU(DoubleRegList regs);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -407,7 +407,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
- void MultiPopFPU(RegList regs);
+ void MultiPopFPU(DoubleRegList regs);
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
@@ -1107,7 +1107,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
- // save_doubles - saves FPU registers on stack, currently disabled.
+ // save_doubles - saves FPU registers on stack.
// stack_space - extra stack space.
void EnterExitFrame(bool save_doubles, int stack_space = 0,
StackFrame::Type frame_type = StackFrame::EXIT);
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 2b1e4d3d65..fa5ffe4043 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
#include "src/codegen/riscv64/constants-riscv64.h"
namespace v8 {
@@ -55,10 +54,11 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define UNALLOACTABLE_VECTOR_REGISTERS(V) \
- V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
- V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
- V(v24) V(v25)
+#define ALLOCATABLE_SIMD128_REGISTERS(V) \
+ V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
+ V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) V(v16) \
+ V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v26) \
+ V(v27) V(v28) V(v29) V(v30) V(v31)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
@@ -77,83 +77,6 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
// encoding.
const int kNumRegs = 32;
-const RegList kJSCallerSaved = 1 << 5 | // t0
- 1 << 6 | // t1
- 1 << 7 | // t2
- 1 << 10 | // a0
- 1 << 11 | // a1
- 1 << 12 | // a2
- 1 << 13 | // a3
- 1 << 14 | // a4
- 1 << 15 | // a5
- 1 << 16 | // a6
- 1 << 17 | // a7
- 1 << 29; // t4
-
-const int kNumJSCallerSaved = 12;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 8 | // fp/s0
- 1 << 9 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3 scratch register
- 1 << 20 | // s4 scratch register 2
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 24 | // s8
- 1 << 25 | // s9
- 1 << 26 | // s10
- 1 << 27; // s11
-
-const int kNumCalleeSaved = 12;
-
-const RegList kCalleeSavedFPU = 1 << 8 | // fs0
- 1 << 9 | // fs1
- 1 << 18 | // fs2
- 1 << 19 | // fs3
- 1 << 20 | // fs4
- 1 << 21 | // fs5
- 1 << 22 | // fs6
- 1 << 23 | // fs7
- 1 << 24 | // fs8
- 1 << 25 | // fs9
- 1 << 26 | // fs10
- 1 << 27; // fs11
-
-const int kNumCalleeSavedFPU = 12;
-
-const RegList kCallerSavedFPU = 1 << 0 | // ft0
- 1 << 1 | // ft1
- 1 << 2 | // ft2
- 1 << 3 | // ft3
- 1 << 4 | // ft4
- 1 << 5 | // ft5
- 1 << 6 | // ft6
- 1 << 7 | // ft7
- 1 << 10 | // fa0
- 1 << 11 | // fa1
- 1 << 12 | // fa2
- 1 << 13 | // fa3
- 1 << 14 | // fa4
- 1 << 15 | // fa5
- 1 << 16 | // fa6
- 1 << 17 | // fa7
- 1 << 28 | // ft8
- 1 << 29 | // ft9
- 1 << 30 | // ft10
- 1 << 31; // ft11
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 32;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
const int kUndefIndex = -1;
// Map with indexes on stack that corresponds to codes of saved registers.
const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
@@ -251,7 +174,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
@@ -296,12 +219,7 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
// this cl, in order to facilitate modification, it is assumed that the vector
// register and floating point register are shared.
VRegister toV() const {
- DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
- // FIXME(riscv): Because V0 is a special mask reg, so can't allocate it.
- // And v8 is unallocated so we replace v0 with v8
- if (code() == 0) {
- return VRegister(8);
- }
+ DCHECK(base::IsInRange(static_cast<int>(code()), 0, kVRAfterLast - 1));
return VRegister(code());
}
@@ -379,8 +297,8 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
-constexpr VRegister kSimd128ScratchReg = v26;
-constexpr VRegister kSimd128ScratchReg2 = v27;
+constexpr VRegister kSimd128ScratchReg = v24;
+constexpr VRegister kSimd128ScratchReg2 = v23;
constexpr VRegister kSimd128ScratchReg3 = v8;
constexpr VRegister kSimd128RegZero = v25;
diff --git a/deps/v8/src/codegen/riscv64/reglist-riscv64.h b/deps/v8/src/codegen/riscv64/reglist-riscv64.h
new file mode 100644
index 0000000000..363dd46181
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/reglist-riscv64.h
@@ -0,0 +1,64 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
+#define V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+const RegList kJSCallerSaved = {t0, t1, t2, a0, a1, a2, a3, a4, a5, a6, a7, t4};
+
+const int kNumJSCallerSaved = 12;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = {fp, // fp/s0
+ s1, // s1
+ s2, // s2
+ s3, // s3 scratch register
+ s4, // s4 scratch register 2
+ s5, // s5
+ s6, // s6 (roots in Javascript code)
+ s7, // s7 (cp in Javascript code)
+ s8, // s8
+ s9, // s9
+ s10, // s10
+ s11}; // s11
+
+const int kNumCalleeSaved = 12;
+
+const DoubleRegList kCalleeSavedFPU = {fs0, fs1, fs2, fs3, fs4, fs5,
+ fs6, fs7, fs8, fs9, fs10, fs11};
+
+const int kNumCalleeSavedFPU = kCalleeSavedFPU.Count();
+
+const DoubleRegList kCallerSavedFPU = {ft0, ft1, ft2, ft3, ft4, ft5, ft6,
+ ft7, fa0, fa1, fa2, fa3, fa4, fa5,
+ fa6, fa7, ft8, ft9, ft10, ft11};
+
+const int kNumCallerSavedFPU = kCallerSavedFPU.Count();
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 1283c87317..1a2e3526a9 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -375,8 +375,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
- : AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(ip.bit()) {
+ : AssemblerBase(options, std::move(buffer)), scratch_register_list_({ip}) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_bound_pos_ = 0;
relocations_.reserve(128);
@@ -414,7 +413,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -885,11 +884,7 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
Register UseScratchRegisterScope::Acquire() {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
- DCHECK_NE(*available, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
- Register reg = Register::from_code(index);
- *available &= ~reg.bit();
- return reg;
+ return available->PopFirst();
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index cfc65f70d4..4df3df75cc 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -49,7 +49,6 @@
#include <fcntl.h>
#include <unistd.h>
-#include <vector>
#include "src/codegen/assembler.h"
#include "src/codegen/external-reference.h"
@@ -1490,7 +1489,9 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
// Check if we have registers available to acquire.
- bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ bool CanAcquire() const {
+ return !assembler_->GetScratchRegisterList()->is_empty();
+ }
private:
friend class Assembler;
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index 398637c40a..9864ff4db9 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -24,21 +24,21 @@ template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
- if (argc >= 1) DCHECK(allocatable_regs | r2.bit());
- if (argc >= 2) DCHECK(allocatable_regs | r3.bit());
- if (argc >= 3) DCHECK(allocatable_regs | r4.bit());
- if (argc >= 4) DCHECK(allocatable_regs | r5.bit());
- if (argc >= 5) DCHECK(allocatable_regs | r6.bit());
- if (argc >= 6) DCHECK(allocatable_regs | r7.bit());
- if (argc >= 7) DCHECK(allocatable_regs | r8.bit());
- if (argc >= 8) DCHECK(allocatable_regs | r9.bit());
+ if (argc >= 1) DCHECK(allocatable_regs.has(r2));
+ if (argc >= 2) DCHECK(allocatable_regs.has(r3));
+ if (argc >= 3) DCHECK(allocatable_regs.has(r4));
+ if (argc >= 4) DCHECK(allocatable_regs.has(r5));
+ if (argc >= 5) DCHECK(allocatable_regs.has(r6));
+ if (argc >= 6) DCHECK(allocatable_regs.has(r7));
+ if (argc >= 7) DCHECK(allocatable_regs.has(r8));
+ if (argc >= 8) DCHECK(allocatable_regs.has(r9));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(r3, r7, r6, r4, r2);
+ return RegisterArray(r3, r7, r6, r4, r2, r5, kContextRegister);
}
// static
@@ -64,6 +64,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return r2; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return r3;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return r4; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return r5;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return r3; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return r4; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return r5;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return r6;
@@ -110,7 +140,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(r5); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r2); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -120,6 +150,22 @@ constexpr auto CallTrampolineDescriptor::registers() {
}
// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // r3 : the source
+ // r2 : the excluded property count
+ return RegisterArray(r3, r2);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // r3 : the source
+ // r2 : the excluded property count
+ // r4 : the excluded property base
+ return RegisterArray(r3, r2, r4);
+}
+
+// static
constexpr auto CallVarargsDescriptor::registers() {
// r2 : number of arguments (on the stack)
// r3 : the target to call
@@ -223,6 +269,11 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ return RegisterArray(r2, r3, r4);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(r3, // kApiFunctionAddress
r4, // kArgc
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 7080e89eec..79a0130de2 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -284,19 +284,10 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kSystemPointerSize;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kStackSavedSavedFPSizeInBytes;
@@ -308,20 +299,11 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushF64OrV128(kCallerSavedDoubles);
@@ -339,20 +321,10 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += kStackSavedSavedFPSizeInBytes;
}
- RegList exclusions = 0;
- if (exclusion1 != no_reg) {
- exclusions |= exclusion1.bit();
- if (exclusion2 != no_reg) {
- exclusions |= exclusion2.bit();
- if (exclusion3 != no_reg) {
- exclusions |= exclusion3.bit();
- }
- }
- }
-
- RegList list = kJSCallerSaved & ~exclusions;
+ RegList exclusions = {exclusion1, exclusion2, exclusion3};
+ RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kSystemPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
return bytes;
}
@@ -578,6 +550,10 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
}
+void TurboAssembler::Move(Register dst, const MemOperand& src) {
+ LoadU64(dst, src);
+}
+
// Wrapper around Assembler::mvc (SS-a format)
void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length) {
@@ -653,12 +629,12 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
void TurboAssembler::MultiPush(RegList regs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
SubS64(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
stack_offset -= kSystemPointerSize;
StoreU64(ToRegister(i), MemOperand(location, stack_offset));
}
@@ -669,7 +645,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
+ if ((regs.bits() & (1 << i)) != 0) {
LoadU64(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kSystemPointerSize;
}
@@ -677,13 +653,13 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(dregs);
+void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+ int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
SubS64(location, location, Operand(stack_offset));
for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
stack_offset -= kDoubleSize;
StoreF64(dreg, MemOperand(location, stack_offset));
@@ -691,13 +667,13 @@ void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
- int16_t num_to_push = base::bits::CountPopulation(dregs);
+void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register location) {
+ int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
SubS64(location, location, Operand(stack_offset));
for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
StoreV128(dreg, MemOperand(location, stack_offset), r0);
@@ -705,11 +681,11 @@ void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
LoadF64(dreg, MemOperand(location, stack_offset));
stack_offset += kDoubleSize;
@@ -718,11 +694,11 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
+void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
- if ((dregs & (1 << i)) != 0) {
+ if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
LoadV128(dreg, MemOperand(location, stack_offset), r0);
stack_offset += kSimd128Size;
@@ -731,7 +707,8 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64OrV128(RegList dregs, Register location) {
+void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
+ Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
@@ -749,14 +726,14 @@ void TurboAssembler::MultiPushF64OrV128(RegList dregs, Register location) {
MultiPushDoubles(dregs);
// We still need to allocate empty space on the stack as if
// Simd rgeisters were saved (see kFixedFrameSizeFromFp).
- lay(sp, MemOperand(sp, -(NumRegs(dregs) * kDoubleSize)));
+ lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
bind(&simd_pushed);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
MultiPushV128(dregs);
} else {
MultiPushDoubles(dregs);
- lay(sp, MemOperand(sp, -(NumRegs(dregs) * kDoubleSize)));
+ lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
}
}
#else
@@ -764,7 +741,7 @@ void TurboAssembler::MultiPushF64OrV128(RegList dregs, Register location) {
#endif
}
-void TurboAssembler::MultiPopF64OrV128(RegList dregs, Register location) {
+void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
@@ -779,14 +756,14 @@ void TurboAssembler::MultiPopF64OrV128(RegList dregs, Register location) {
b(&simd_popped);
bind(&pop_doubles);
// Simd not supported, only pop double registers.
- lay(sp, MemOperand(sp, NumRegs(dregs) * kDoubleSize));
+ lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
MultiPopDoubles(dregs);
bind(&simd_popped);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
MultiPopV128(dregs);
} else {
- lay(sp, MemOperand(sp, NumRegs(dregs) * kDoubleSize));
+ lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
MultiPopDoubles(dregs);
}
}
@@ -891,6 +868,16 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
agr(destination, kRootRegister);
RecordComment("]");
}
+
+void TurboAssembler::LoadTaggedSignedField(Register destination,
+ MemOperand field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ LoadU64(destination, field_operand);
+ }
+}
+
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register slot_address,
LinkRegisterStatus lr_status,
@@ -933,25 +920,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPush(regs);
+ if (registers.is_empty()) return;
+ MultiPush(registers);
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- MultiPop(regs);
+ if (registers.is_empty()) return;
+ MultiPop(registers);
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
@@ -1090,6 +1065,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ ASM_CODE_COMMENT(this);
int fp_delta = 0;
CleanseP(r14);
if (marker_reg.is_valid()) {
@@ -1443,6 +1419,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
+ ASM_CODE_COMMENT(this);
// We create a stack frame with:
// Return Addr <-- old sp
// Old FP <-- new fp
@@ -1450,14 +1427,19 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
// type
// CodeObject <-- new sp
- mov(ip, Operand(StackFrame::TypeToMarker(type)));
- PushCommonFrame(ip);
+ Register scratch = no_reg;
+ if (!StackFrame::IsJavaScript(type)) {
+ scratch = ip;
+ mov(scratch, Operand(StackFrame::TypeToMarker(type)));
+ }
+ PushCommonFrame(scratch);
#if V8_ENABLE_WEBASSEMBLY
if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+ ASM_CODE_COMMENT(this);
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
LoadU64(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
@@ -1698,11 +1680,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lay(dest, MemOperand(dest, kSystemPointerSize));
SubS64(num, num, Operand(1));
bind(&check);
- if (kJSArgcIncludesReceiver) {
- b(gt, &copy);
- } else {
- b(ge, &copy);
- }
+ b(gt, &copy);
}
// Fill remaining expected arguments with undefined values.
@@ -2438,19 +2416,13 @@ void TurboAssembler::CheckPageFlag(
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
+ RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
- if (regs & candidate.bit()) continue;
+ if (regs.has(candidate)) continue;
return candidate;
}
UNREACHABLE();
@@ -2923,6 +2895,25 @@ void TurboAssembler::AddU64(Register dst, const Operand& imm) {
algfi(dst, imm);
}
+void TurboAssembler::AddU64(Register dst, Register src1, Register src2) {
+ if (dst != src2 && dst != src1) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ algrk(dst, src1, src2);
+ } else {
+ lgr(dst, src1);
+ algr(dst, src2);
+ }
+ } else if (dst != src2) {
+ // dst == src1
+ DCHECK(dst == src1);
+ algr(dst, src2);
+ } else {
+ // dst == src2
+ DCHECK(dst == src2);
+ algr(dst, src1);
+ }
+}
+
// Add Logical 32-bit (Register-Memory)
void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
@@ -4736,6 +4727,19 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::BuiltinEntrySlotOffset(builtin));
+}
+
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@@ -5104,8 +5108,6 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
}
// Simd Support.
-#define kScratchDoubleReg d13
-
void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(3));
}
@@ -5135,69 +5137,70 @@ void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
}
void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
}
void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
}
void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
}
void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
- vlgv(r0, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
- lghr(dst, r0);
+ uint8_t imm_lane_idx, Register scratch) {
+ vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
+ lghr(dst, scratch);
}
void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
+ uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
- uint8_t imm_lane_idx) {
- vlgv(r0, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
- lgbr(dst, r0);
+ uint8_t imm_lane_idx, Register scratch) {
+ vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
+ lgbr(dst, scratch);
}
void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
- DoubleRegister src2,
- uint8_t imm_lane_idx) {
- vlgv(r0, src2, MemOperand(r0, 0), Condition(3));
+ DoubleRegister src2, uint8_t imm_lane_idx,
+ Register scratch) {
+ vlgv(scratch, src2, MemOperand(r0, 0), Condition(3));
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
- vlvg(dst, r0, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
+ vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
- DoubleRegister src2,
- uint8_t imm_lane_idx) {
- vlgv(r0, src2, MemOperand(r0, 0), Condition(2));
+ DoubleRegister src2, uint8_t imm_lane_idx,
+ Register scratch) {
+ vlgv(scratch, src2, MemOperand(r0, 0), Condition(2));
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
- vlvg(dst, r0, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
+ vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx) {
+ Register src2, uint8_t imm_lane_idx,
+ Register) {
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
@@ -5205,7 +5208,8 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx) {
+ Register src2, uint8_t imm_lane_idx,
+ Register) {
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
@@ -5213,7 +5217,8 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx) {
+ Register src2, uint8_t imm_lane_idx,
+ Register) {
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
@@ -5221,13 +5226,76 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx) {
+ Register src2, uint8_t imm_lane_idx,
+ Register) {
if (src1 != dst) {
vlr(dst, src1, Condition(0), Condition(0), Condition(0));
}
vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
+void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) {
+ vno(dst, src, src, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
+ vx(dst, src, src, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
+ vceq(dst, src, src, Condition(0), Condition(3));
+}
+
+void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register mask) {
+ vsel(dst, src1, src2, mask, Condition(0), Condition(0));
+}
+
+#define SIMD_UNOP_LIST_VRR_A(V) \
+ V(F64x2Abs, vfpso, 2, 0, 3) \
+ V(F64x2Neg, vfpso, 0, 0, 3) \
+ V(F64x2Sqrt, vfsq, 0, 0, 3) \
+ V(F64x2Ceil, vfi, 6, 0, 3) \
+ V(F64x2Floor, vfi, 7, 0, 3) \
+ V(F64x2Trunc, vfi, 5, 0, 3) \
+ V(F64x2NearestInt, vfi, 4, 0, 3) \
+ V(F32x4Abs, vfpso, 2, 0, 2) \
+ V(F32x4Neg, vfpso, 0, 0, 2) \
+ V(F32x4Sqrt, vfsq, 0, 0, 2) \
+ V(F32x4Ceil, vfi, 6, 0, 2) \
+ V(F32x4Floor, vfi, 7, 0, 2) \
+ V(F32x4Trunc, vfi, 5, 0, 2) \
+ V(F32x4NearestInt, vfi, 4, 0, 2) \
+ V(I64x2Abs, vlp, 0, 0, 3) \
+ V(I64x2Neg, vlc, 0, 0, 3) \
+ V(I64x2SConvertI32x4Low, vupl, 0, 0, 2) \
+ V(I64x2SConvertI32x4High, vuph, 0, 0, 2) \
+ V(I64x2UConvertI32x4Low, vupll, 0, 0, 2) \
+ V(I64x2UConvertI32x4High, vuplh, 0, 0, 2) \
+ V(I32x4Abs, vlp, 0, 0, 2) \
+ V(I32x4Neg, vlc, 0, 0, 2) \
+ V(I32x4SConvertI16x8Low, vupl, 0, 0, 1) \
+ V(I32x4SConvertI16x8High, vuph, 0, 0, 1) \
+ V(I32x4UConvertI16x8Low, vupll, 0, 0, 1) \
+ V(I32x4UConvertI16x8High, vuplh, 0, 0, 1) \
+ V(I16x8Abs, vlp, 0, 0, 1) \
+ V(I16x8Neg, vlc, 0, 0, 1) \
+ V(I16x8SConvertI8x16Low, vupl, 0, 0, 0) \
+ V(I16x8SConvertI8x16High, vuph, 0, 0, 0) \
+ V(I16x8UConvertI8x16Low, vupll, 0, 0, 0) \
+ V(I16x8UConvertI8x16High, vuplh, 0, 0, 0) \
+ V(I8x16Abs, vlp, 0, 0, 0) \
+ V(I8x16Neg, vlc, 0, 0, 0) \
+ V(I8x16Popcnt, vpopct, 0, 0, 0)
+
+#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
+ op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \
+ }
+SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
+#undef EMIT_SIMD_UNOP_VRR_A
+#undef SIMD_UNOP_LIST_VRR_A
+
#define SIMD_BINOP_LIST_VRR_B(V) \
V(I64x2Eq, vceq, 0, 3) \
V(I64x2GtS, vch, 0, 3) \
@@ -5250,43 +5318,53 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
#undef EMIT_SIMD_BINOP_VRR_B
#undef SIMD_BINOP_LIST_VRR_B
-#define SIMD_BINOP_LIST_VRR_C(V) \
- V(F64x2Add, vfa, 0, 0, 3) \
- V(F64x2Sub, vfs, 0, 0, 3) \
- V(F64x2Mul, vfm, 0, 0, 3) \
- V(F64x2Div, vfd, 0, 0, 3) \
- V(F64x2Min, vfmin, 1, 0, 3) \
- V(F64x2Max, vfmax, 1, 0, 3) \
- V(F64x2Eq, vfce, 0, 0, 3) \
- V(F32x4Add, vfa, 0, 0, 2) \
- V(F32x4Sub, vfs, 0, 0, 2) \
- V(F32x4Mul, vfm, 0, 0, 2) \
- V(F32x4Div, vfd, 0, 0, 2) \
- V(F32x4Min, vfmin, 1, 0, 2) \
- V(F32x4Max, vfmax, 1, 0, 2) \
- V(F32x4Eq, vfce, 0, 0, 2) \
- V(I64x2Add, va, 0, 0, 3) \
- V(I64x2Sub, vs, 0, 0, 3) \
- V(I32x4Add, va, 0, 0, 2) \
- V(I32x4Sub, vs, 0, 0, 2) \
- V(I32x4Mul, vml, 0, 0, 2) \
- V(I32x4MinS, vmn, 0, 0, 2) \
- V(I32x4MinU, vmnl, 0, 0, 2) \
- V(I32x4MaxS, vmx, 0, 0, 2) \
- V(I32x4MaxU, vmxl, 0, 0, 2) \
- V(I16x8Add, va, 0, 0, 1) \
- V(I16x8Sub, vs, 0, 0, 1) \
- V(I16x8Mul, vml, 0, 0, 1) \
- V(I16x8MinS, vmn, 0, 0, 1) \
- V(I16x8MinU, vmnl, 0, 0, 1) \
- V(I16x8MaxS, vmx, 0, 0, 1) \
- V(I16x8MaxU, vmxl, 0, 0, 1) \
- V(I8x16Add, va, 0, 0, 0) \
- V(I8x16Sub, vs, 0, 0, 0) \
- V(I8x16MinS, vmn, 0, 0, 0) \
- V(I8x16MinU, vmnl, 0, 0, 0) \
- V(I8x16MaxS, vmx, 0, 0, 0) \
- V(I8x16MaxU, vmxl, 0, 0, 0)
+#define SIMD_BINOP_LIST_VRR_C(V) \
+ V(F64x2Add, vfa, 0, 0, 3) \
+ V(F64x2Sub, vfs, 0, 0, 3) \
+ V(F64x2Mul, vfm, 0, 0, 3) \
+ V(F64x2Div, vfd, 0, 0, 3) \
+ V(F64x2Min, vfmin, 1, 0, 3) \
+ V(F64x2Max, vfmax, 1, 0, 3) \
+ V(F64x2Eq, vfce, 0, 0, 3) \
+ V(F64x2Pmin, vfmin, 3, 0, 3) \
+ V(F64x2Pmax, vfmax, 3, 0, 3) \
+ V(F32x4Add, vfa, 0, 0, 2) \
+ V(F32x4Sub, vfs, 0, 0, 2) \
+ V(F32x4Mul, vfm, 0, 0, 2) \
+ V(F32x4Div, vfd, 0, 0, 2) \
+ V(F32x4Min, vfmin, 1, 0, 2) \
+ V(F32x4Max, vfmax, 1, 0, 2) \
+ V(F32x4Eq, vfce, 0, 0, 2) \
+ V(F32x4Pmin, vfmin, 3, 0, 2) \
+ V(F32x4Pmax, vfmax, 3, 0, 2) \
+ V(I64x2Add, va, 0, 0, 3) \
+ V(I64x2Sub, vs, 0, 0, 3) \
+ V(I32x4Add, va, 0, 0, 2) \
+ V(I32x4Sub, vs, 0, 0, 2) \
+ V(I32x4Mul, vml, 0, 0, 2) \
+ V(I32x4MinS, vmn, 0, 0, 2) \
+ V(I32x4MinU, vmnl, 0, 0, 2) \
+ V(I32x4MaxS, vmx, 0, 0, 2) \
+ V(I32x4MaxU, vmxl, 0, 0, 2) \
+ V(I16x8Add, va, 0, 0, 1) \
+ V(I16x8Sub, vs, 0, 0, 1) \
+ V(I16x8Mul, vml, 0, 0, 1) \
+ V(I16x8MinS, vmn, 0, 0, 1) \
+ V(I16x8MinU, vmnl, 0, 0, 1) \
+ V(I16x8MaxS, vmx, 0, 0, 1) \
+ V(I16x8MaxU, vmxl, 0, 0, 1) \
+ V(I16x8RoundingAverageU, vavgl, 0, 0, 1) \
+ V(I8x16Add, va, 0, 0, 0) \
+ V(I8x16Sub, vs, 0, 0, 0) \
+ V(I8x16MinS, vmn, 0, 0, 0) \
+ V(I8x16MinU, vmnl, 0, 0, 0) \
+ V(I8x16MaxS, vmx, 0, 0, 0) \
+ V(I8x16MaxU, vmxl, 0, 0, 0) \
+ V(I8x16RoundingAverageU, vavgl, 0, 0, 0) \
+ V(S128And, vn, 0, 0, 0) \
+ V(S128Or, vo, 0, 0, 0) \
+ V(S128Xor, vx, 0, 0, 0) \
+ V(S128AndNot, vnc, 0, 0, 0)
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
@@ -5311,35 +5389,98 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
V(I8x16ShrS, vesrav, 0) \
V(I8x16ShrU, vesrlv, 0)
-#define EMIT_SIMD_SHIFT(name, op, c1) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
- Register src2) { \
- vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
- vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
- op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
- Condition(c1)); \
- } \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
- const Operand& src2) { \
- mov(ip, src2); \
- name(dst, src1, ip); \
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2, Simd128Register scratch) { \
+ vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(scratch, scratch, Operand(0), Condition(c1)); \
+ op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2, Register scratch1, \
+ Simd128Register scratch2) { \
+ mov(scratch1, src2); \
+ name(dst, src1, scratch1, scratch2); \
}
SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
+#define SIMD_EXT_MUL_LIST(V) \
+ V(I64x2ExtMulLowI32x4S, vme, vmo, vmrl, 2) \
+ V(I64x2ExtMulHighI32x4S, vme, vmo, vmrh, 2) \
+ V(I64x2ExtMulLowI32x4U, vmle, vmlo, vmrl, 2) \
+ V(I64x2ExtMulHighI32x4U, vmle, vmlo, vmrh, 2) \
+ V(I32x4ExtMulLowI16x8S, vme, vmo, vmrl, 1) \
+ V(I32x4ExtMulHighI16x8S, vme, vmo, vmrh, 1) \
+ V(I32x4ExtMulLowI16x8U, vmle, vmlo, vmrl, 1) \
+ V(I32x4ExtMulHighI16x8U, vmle, vmlo, vmrh, 1) \
+ V(I16x8ExtMulLowI8x16S, vme, vmo, vmrl, 0) \
+ V(I16x8ExtMulHighI8x16S, vme, vmo, vmrh, 0) \
+ V(I16x8ExtMulLowI8x16U, vmle, vmlo, vmrl, 0) \
+ V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0)
+
+#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Simd128Register src2, Simd128Register scratch) { \
+ mul_even(scratch, src1, src2, Condition(0), Condition(0), \
+ Condition(mode)); \
+ mul_odd(dst, src1, src2, Condition(0), Condition(0), Condition(mode)); \
+ merge(dst, scratch, dst, Condition(0), Condition(0), Condition(mode + 1)); \
+ }
+SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
+#undef EMIT_SIMD_EXT_MUL
+#undef SIMD_EXT_MUL_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+ V(I64x2AllTrue, 3) \
+ V(I32x4AllTrue, 2) \
+ V(I16x8AllTrue, 1) \
+ V(I8x16AllTrue, 0)
+
+#define EMIT_SIMD_ALL_TRUE(name, mode) \
+ void TurboAssembler::name(Register dst, Simd128Register src, \
+ Register scratch1, Simd128Register scratch2) { \
+ mov(scratch1, Operand(1)); \
+ xgr(dst, dst); \
+ vx(scratch2, scratch2, scratch2, Condition(0), Condition(0), \
+ Condition(2)); \
+ vceq(scratch2, src, scratch2, Condition(0), Condition(mode)); \
+ vtm(scratch2, scratch2, Condition(0), Condition(0), Condition(0)); \
+ locgr(Condition(8), dst, scratch1); \
+ }
+SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
+#undef EMIT_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma, vfma, 3) \
+ V(F64x2Qfms, vfnms, 3) \
+ V(F32x4Qfma, vfma, 2) \
+ V(F32x4Qfms, vfnms, 2)
+
+#define EMIT_SIMD_QFM(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Simd128Register src2, Simd128Register src3) { \
+ op(dst, src2, src3, src1, Condition(c1), Condition(0)); \
+ }
+SIMD_QFM_LIST(EMIT_SIMD_QFM)
+#undef EMIT_SIMD_QFM
+#undef SIMD_QFM_LIST
+
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
- Simd128Register src2) {
- Register scratch_1 = r0;
- Register scratch_2 = r1;
+ Simd128Register src2, Register scratch1,
+ Register scratch2, Register scratch3) {
+ Register scratch_1 = scratch1;
+ Register scratch_2 = scratch2;
for (int i = 0; i < 2; i++) {
vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
vlgv(scratch_2, src2, MemOperand(r0, i), Condition(3));
MulS64(scratch_1, scratch_2);
- scratch_1 = r1;
- scratch_2 = ip;
+ scratch_1 = scratch2;
+ scratch_2 = scratch3;
}
- vlvgp(dst, r0, r1);
+ vlvgp(dst, scratch1, scratch2);
}
void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
@@ -5401,10 +5542,10 @@ void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
- Simd128Register src2) {
- vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(2));
+ Simd128Register src2, Simd128Register scratch) {
+ vceq(scratch, src1, src2, Condition(0), Condition(2));
vchl(dst, src1, src2, Condition(0), Condition(2));
- vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
+ vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
@@ -5421,10 +5562,10 @@ void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
- Simd128Register src2) {
- vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(1));
+ Simd128Register src2, Simd128Register scratch) {
+ vceq(scratch, src1, src2, Condition(0), Condition(1));
vchl(dst, src1, src2, Condition(0), Condition(1));
- vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
+ vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1));
}
void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
@@ -5441,11 +5582,419 @@ void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
}
void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
- Simd128Register src2) {
- vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(0));
+ Simd128Register src2, Simd128Register scratch) {
+ vceq(scratch, src1, src2, Condition(0), Condition(0));
vchl(dst, src1, src2, Condition(0), Condition(0));
- vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
+ vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src,
+ Register scratch1, Simd128Register scratch2) {
+ mov(scratch1, Operand(0x8080808080800040));
+ vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
+ vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
+ vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
+}
+
+void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src,
+ Register scratch1, Simd128Register scratch2) {
+ mov(scratch1, Operand(0x8080808000204060));
+ vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
+ vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
+ vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
+}
+
+void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src,
+ Register scratch1, Simd128Register scratch2) {
+ mov(scratch1, Operand(0x10203040506070));
+ vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
+ vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
+ vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
+}
+
+void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
+ Simd128Register src) {
+ vupl(dst, src, Condition(0), Condition(0), Condition(2));
+ vcdg(dst, dst, Condition(4), Condition(0), Condition(3));
+}
+
+void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
+ Simd128Register src) {
+ vupll(dst, src, Condition(0), Condition(0), Condition(2));
+ vcdlg(dst, dst, Condition(4), Condition(0), Condition(3));
+}
+
+void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src,
+ Register scratch1, Register scratch2,
+ Simd128Register scratch3) {
+ mov(scratch1, Operand(0x4048505860687078));
+ mov(scratch2, Operand(0x8101820283038));
+ vlvgp(scratch3, scratch2, scratch1);
+ vbperm(scratch3, src, scratch3, Condition(0), Condition(0), Condition(0));
+ vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1));
+}
+
+void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
+ Register scratch) {
+ mov(dst, Operand(1));
+ xgr(scratch, scratch);
+ vtm(src, src, Condition(0), Condition(0), Condition(0));
+ locgr(Condition(8), dst, scratch);
+}
+
+#define CONVERT_FLOAT_TO_INT32(convert, dst, src, scratch1, scratch2) \
+ for (int index = 0; index < 4; index++) { \
+ vlgv(scratch2, src, MemOperand(r0, index), Condition(2)); \
+ MovIntToFloat(scratch1, scratch2); \
+ convert(scratch2, scratch1, kRoundToZero); \
+ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
+ }
+void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2) {
+ // NaN to 0.
+ vfce(scratch1, src, src, Condition(0), Condition(0), Condition(2));
+ vn(dst, src, scratch1, Condition(0), Condition(0), Condition(0));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vcgd(dst, dst, Condition(5), Condition(0), Condition(2));
+ } else {
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32, dst, dst, scratch1, scratch2)
+ }
+}
+
+void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2) {
+ // vclgd or ConvertFloat32ToUnsignedInt32 will convert NaN to 0, negative to 0
+ // automatically.
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vclgd(dst, src, Condition(5), Condition(0), Condition(2));
+ } else {
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32, dst, src, scratch1,
+ scratch2)
+ }
}
+#undef CONVERT_FLOAT_TO_INT32
+
+#define CONVERT_INT32_TO_FLOAT(convert, dst, src, scratch1, scratch2) \
+ for (int index = 0; index < 4; index++) { \
+ vlgv(scratch2, src, MemOperand(r0, index), Condition(2)); \
+ convert(scratch1, scratch2); \
+ MovFloatToInt(scratch2, scratch1); \
+ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
+ }
+void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vcdg(dst, src, Condition(4), Condition(0), Condition(2));
+ } else {
+ CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2)
+ }
+}
+void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vcdlg(dst, src, Condition(4), Condition(0), Condition(2));
+ } else {
+ CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, dst, src, scratch1,
+ scratch2)
+ }
+}
+#undef CONVERT_INT32_TO_FLOAT
+
+void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst,
+ Simd128Register src1,
+ Simd128Register src2) {
+ vpks(dst, src2, src1, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
+ Simd128Register src1,
+ Simd128Register src2) {
+ vpks(dst, src2, src1, Condition(0), Condition(1));
+}
+
+#define VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, mode) \
+ vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), \
+ Condition(0), Condition(mode)); \
+ vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \
+ Condition(mode)); \
+ vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode));
+void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
+ Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ // treat inputs as signed, and saturate to unsigned (negative to 0).
+ VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 2)
+ vpkls(dst, dst, scratch, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
+ Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ // treat inputs as signed, and saturate to unsigned (negative to 0).
+ VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 1)
+ vpkls(dst, dst, scratch, Condition(0), Condition(1));
+}
+#undef VECTOR_PACK_UNSIGNED
+
+#define BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, op, extract_high, \
+ extract_low, mode) \
+ DCHECK(dst != scratch1 && dst != scratch2); \
+ DCHECK(dst != src1 && dst != src2); \
+ extract_high(scratch1, src1, Condition(0), Condition(0), Condition(mode)); \
+ extract_high(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
+ op(dst, scratch1, scratch2, Condition(0), Condition(0), \
+ Condition(mode + 1)); \
+ extract_low(scratch1, src1, Condition(0), Condition(0), Condition(mode)); \
+ extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
+ op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \
+ Condition(mode + 1));
+void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 1)
+ vpks(dst, dst, scratch1, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 1)
+ vpks(dst, dst, scratch1, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 1)
+ vpkls(dst, dst, scratch1, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 1)
+ // negative intermediate values to 0.
+ vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
+ Condition(0));
+ vmx(dst, kDoubleRegZero, dst, Condition(0), Condition(0), Condition(2));
+ vmx(scratch1, kDoubleRegZero, scratch1, Condition(0), Condition(0),
+ Condition(2));
+ vpkls(dst, dst, scratch1, Condition(0), Condition(2));
+}
+
+void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 0)
+ vpks(dst, dst, scratch1, Condition(0), Condition(1));
+}
+
+void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 0)
+ vpks(dst, dst, scratch1, Condition(0), Condition(1));
+}
+
+void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 0)
+ vpkls(dst, dst, scratch1, Condition(0), Condition(1));
+}
+
+void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 0)
+ // negative intermediate values to 0.
+ vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
+ Condition(0));
+ vmx(dst, kDoubleRegZero, dst, Condition(0), Condition(0), Condition(1));
+ vmx(scratch1, kDoubleRegZero, scratch1, Condition(0), Condition(0),
+ Condition(1));
+ vpkls(dst, dst, scratch1, Condition(0), Condition(1));
+}
+#undef BINOP_EXTRACT
+
+void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4) {
+ Register holder = scratch3;
+ for (int index = 0; index < 2; ++index) {
+ vlgv(scratch2, src, MemOperand(scratch2, index + 2), Condition(2));
+ MovIntToFloat(scratch1, scratch2);
+ ldebr(scratch1, scratch1);
+ MovDoubleToInt64(holder, scratch1);
+ holder = scratch4;
+ }
+ vlvgp(dst, scratch3, scratch4);
+}
+
+void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4) {
+ Register holder = scratch3;
+ for (int index = 0; index < 2; ++index) {
+ vlgv(scratch2, src, MemOperand(r0, index), Condition(3));
+ MovInt64ToDouble(scratch1, scratch2);
+ ledbr(scratch1, scratch1);
+ MovFloatToInt(holder, scratch1);
+ holder = scratch4;
+ }
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ vlvg(dst, scratch3, MemOperand(r0, 2), Condition(2));
+ vlvg(dst, scratch4, MemOperand(r0, 3), Condition(2));
+}
+
+#define EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, lane_size, mul_even, \
+ mul_odd) \
+ CHECK_NE(src, scratch2); \
+ vrepi(scratch2, Operand(1), Condition(lane_size)); \
+ mul_even(scratch1, src, scratch2, Condition(0), Condition(0), \
+ Condition(lane_size)); \
+ mul_odd(scratch2, src, scratch2, Condition(0), Condition(0), \
+ Condition(lane_size)); \
+ va(dst, scratch1, scratch2, Condition(0), Condition(0), \
+ Condition(lane_size + 1));
+void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo)
+}
+
+void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch,
+ Simd128Register scratch2) {
+ vx(scratch, scratch, scratch, Condition(0), Condition(0), Condition(3));
+ vsum(dst, src, scratch, Condition(0), Condition(0), Condition(1));
+}
+
+void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo)
+}
+
+void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vmle, vmlo)
+}
+#undef EXT_ADD_PAIRWISE
+
+void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ // NaN to 0.
+ vlr(scratch, src, Condition(0), Condition(0), Condition(0));
+ vfce(scratch, scratch, scratch, Condition(0), Condition(0), Condition(3));
+ vn(scratch, src, scratch, Condition(0), Condition(0), Condition(0));
+ vcgd(scratch, scratch, Condition(5), Condition(0), Condition(3));
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ vpks(dst, dst, scratch, Condition(0), Condition(3));
+}
+
+void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ vclgd(scratch, src, Condition(5), Condition(0), Condition(3));
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ vpkls(dst, dst, scratch, Condition(0), Condition(3));
+}
+
+void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2) {
+ mov(scratch1, Operand(low));
+ mov(scratch2, Operand(high));
+ vlvgp(dst, scratch2, scratch1);
+}
+
+void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Register scratch1,
+ Register scratch2, Simd128Register scratch3,
+ Simd128Register scratch4) {
+ DCHECK(!AreAliased(src1, src2, scratch3, scratch4));
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ vrepi(scratch3, Operand(31), Condition(0));
+ vmnl(scratch4, src2, scratch3, Condition(0), Condition(0), Condition(0));
+ // Input needs to be reversed.
+ vlgv(scratch1, src1, MemOperand(r0, 0), Condition(3));
+ vlgv(scratch2, src1, MemOperand(r0, 1), Condition(3));
+ lrvgr(scratch1, scratch1);
+ lrvgr(scratch2, scratch2);
+ vlvgp(dst, scratch2, scratch1);
+ // Clear scratch.
+ vx(scratch3, scratch3, scratch3, Condition(0), Condition(0), Condition(0));
+ vperm(dst, dst, scratch3, scratch4, Condition(0), Condition(0));
+}
+
+void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, uint64_t high,
+ uint64_t low, Register scratch1,
+ Register scratch2, Simd128Register scratch3) {
+ mov(scratch1, Operand(low));
+ mov(scratch2, Operand(high));
+ vlvgp(scratch3, scratch2, scratch1);
+ vperm(dst, src1, src2, scratch3, Condition(0), Condition(0));
+}
+
+void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1));
+ vmo(dst, src1, src2, Condition(0), Condition(0), Condition(1));
+ va(dst, scratch, dst, Condition(0), Condition(0), Condition(2));
+}
+
+#define Q15_MUL_ROAUND(accumulator, src1, src2, const_val, scratch, unpack) \
+ unpack(scratch, src1, Condition(0), Condition(0), Condition(1)); \
+ unpack(accumulator, src2, Condition(0), Condition(0), Condition(1)); \
+ vml(accumulator, scratch, accumulator, Condition(0), Condition(0), \
+ Condition(2)); \
+ va(accumulator, accumulator, const_val, Condition(0), Condition(0), \
+ Condition(2)); \
+ vrepi(scratch, Operand(15), Condition(2)); \
+ vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \
+ Condition(2));
+void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch1,
+ Simd128Register scratch2,
+ Simd128Register scratch3) {
+ DCHECK(!AreAliased(src1, src2, scratch1, scratch2, scratch3));
+ vrepi(scratch1, Operand(0x4000), Condition(2));
+ Q15_MUL_ROAUND(scratch2, src1, src2, scratch1, scratch3, vupl)
+ Q15_MUL_ROAUND(dst, src1, src2, scratch1, scratch3, vuph)
+ vpks(dst, dst, scratch2, Condition(0), Condition(2));
+}
+#undef Q15_MUL_ROAUND
// Vector LE Load and Transform instructions.
#ifdef V8_TARGET_BIG_ENDIAN
@@ -5463,16 +6012,16 @@ void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
V(16x8, vlbrrep, LoadU16LE, 1) \
V(8x16, vlrep, LoadU8, 0)
-#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
- vector_instr(dst, mem, Condition(condition)); \
- return; \
- } \
- scalar_instr(r1, mem); \
- vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
- vrep(dst, dst, Operand(0), Condition(condition)); \
+#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE( \
+ Simd128Register dst, const MemOperand& mem, Register scratch) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(scratch, mem); \
+ vlvg(dst, scratch, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
}
LOAD_SPLAT_LIST(LOAD_SPLAT)
#undef LOAD_SPLAT
@@ -5486,40 +6035,41 @@ LOAD_SPLAT_LIST(LOAD_SPLAT)
V(8x8U, vuplh, 0) \
V(8x8S, vuph, 0)
-#define LOAD_EXTEND(name, unpack_instr, condition) \
- void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
- vlebrg(kScratchDoubleReg, mem, Condition(0)); \
- } else { \
- LoadU64LE(r1, mem); \
- vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
- } \
- unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
- Condition(condition)); \
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE( \
+ Simd128Register dst, const MemOperand& mem, Register scratch) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vlebrg(dst, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(scratch, mem); \
+ vlvg(dst, scratch, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, dst, Condition(0), Condition(0), Condition(condition)); \
}
LOAD_EXTEND_LIST(LOAD_EXTEND)
#undef LOAD_EXTEND
#undef LOAD_EXTEND
-void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
vlebrf(dst, mem, Condition(3));
return;
}
- LoadU32LE(r1, mem);
- vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+ LoadU32LE(scratch, mem);
+ vlvg(dst, scratch, MemOperand(r0, 3), Condition(2));
}
-void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
vlebrg(dst, mem, Condition(1));
return;
}
- LoadU64LE(r1, mem);
- vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+ LoadU64LE(scratch, mem);
+ vlvg(dst, scratch, MemOperand(r0, 1), Condition(3));
}
#define LOAD_LANE_LIST(V) \
@@ -5528,15 +6078,16 @@ void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
V(16, vlebrh, LoadU16LE, 1) \
V(8, vleb, LoadU8, 0)
-#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
- const MemOperand& mem, int lane) { \
- if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
- vector_instr(dst, mem, Condition(lane)); \
- return; \
- } \
- scalar_instr(r1, mem); \
- vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane, \
+ Register scratch) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(scratch, mem); \
+ vlvg(dst, scratch, MemOperand(r0, lane), Condition(condition)); \
}
LOAD_LANE_LIST(LOAD_LANE)
#undef LOAD_LANE
@@ -5548,15 +6099,16 @@ LOAD_LANE_LIST(LOAD_LANE)
V(16, vstebrh, StoreU16LE, 1) \
V(8, vsteb, StoreU8, 0)
-#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
- const MemOperand& mem, int lane) { \
- if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
- vector_instr(src, mem, Condition(lane)); \
- return; \
- } \
- vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
- scalar_instr(r1, mem); \
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane, \
+ Register scratch) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(scratch, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(scratch, mem); \
}
STORE_LANE_LIST(STORE_LANE)
#undef STORE_LANE
@@ -5580,8 +6132,6 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
LoadU64(destination, MemOperand(kRootRegister, offset));
}
-#undef kScratchDoubleReg
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index aa2e0ef5b8..f5abeb9860 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -124,6 +124,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
void LoadCodeObjectEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
@@ -136,6 +138,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Register dst, ExternalReference reference);
+ void Move(Register dst, const MemOperand& src);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -172,14 +175,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
- void MultiPushDoubles(RegList dregs, Register location = sp);
- void MultiPopDoubles(RegList dregs, Register location = sp);
+ void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
+ void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
- void MultiPushV128(RegList dregs, Register location = sp);
- void MultiPopV128(RegList dregs, Register location = sp);
+ void MultiPushV128(DoubleRegList dregs, Register location = sp);
+ void MultiPopV128(DoubleRegList dregs, Register location = sp);
- void MultiPushF64OrV128(RegList dregs, Register location = sp);
- void MultiPopF64OrV128(RegList dregs, Register location = sp);
+ void MultiPushF64OrV128(DoubleRegList dregs, Register location = sp);
+ void MultiPopF64OrV128(DoubleRegList dregs, Register location = sp);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -238,6 +241,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Add Logical (Register - Immediate)
void AddU32(Register dst, const Operand& imm);
void AddU64(Register dst, const Operand& imm);
+ void AddU64(Register dst, Register src1, Register src2);
// Add Logical (Register - Mem)
void AddU32(Register dst, const MemOperand& opnd);
@@ -395,26 +399,46 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
// Vector LE Load and Transform instructions.
- void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem);
- void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem);
- void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem);
- void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem);
- void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem);
- void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem);
- void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem);
- void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane);
- void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane);
- void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane);
- void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane);
- void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
- void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
- void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
- void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& me,
+ Register scratch);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -1078,131 +1102,322 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I16x8Splat(Simd128Register dst, Register src);
void I8x16Splat(Simd128Register dst, Register src);
void F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register = r0);
void F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
- uint8_t imm_lane_idx);
- void I64x2ExtractLane(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
- void I32x4ExtractLane(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register = r0);
+ void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
+ Register = r0);
+ void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
+ Register = r0);
void I16x8ExtractLaneU(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register = r0);
void I16x8ExtractLaneS(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register scratch);
void I8x16ExtractLaneU(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register = r0);
void I8x16ExtractLaneS(Register dst, Simd128Register src,
- uint8_t imm_lane_idx);
+ uint8_t imm_lane_idx, Register scratch);
void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
- DoubleRegister src2, uint8_t imm_lane_idx);
+ DoubleRegister src2, uint8_t imm_lane_idx,
+ Register scratch);
void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
- DoubleRegister src2, uint8_t imm_lane_idx);
+ DoubleRegister src2, uint8_t imm_lane_idx,
+ Register scratch);
void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx);
+ Register src2, uint8_t imm_lane_idx, Register = r0);
void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx);
+ Register src2, uint8_t imm_lane_idx, Register = r0);
void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx);
+ Register src2, uint8_t imm_lane_idx, Register = r0);
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
- Register src2, uint8_t imm_lane_idx);
-
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, Simd128Register) \
- V(F64x2Sub, Simd128Register) \
- V(F64x2Mul, Simd128Register) \
- V(F64x2Div, Simd128Register) \
- V(F64x2Min, Simd128Register) \
- V(F64x2Max, Simd128Register) \
- V(F64x2Eq, Simd128Register) \
- V(F64x2Ne, Simd128Register) \
- V(F64x2Lt, Simd128Register) \
- V(F64x2Le, Simd128Register) \
- V(F32x4Add, Simd128Register) \
- V(F32x4Sub, Simd128Register) \
- V(F32x4Mul, Simd128Register) \
- V(F32x4Div, Simd128Register) \
- V(F32x4Min, Simd128Register) \
- V(F32x4Max, Simd128Register) \
- V(F32x4Eq, Simd128Register) \
- V(F32x4Ne, Simd128Register) \
- V(F32x4Lt, Simd128Register) \
- V(F32x4Le, Simd128Register) \
- V(I64x2Add, Simd128Register) \
- V(I64x2Sub, Simd128Register) \
- V(I64x2Mul, Simd128Register) \
- V(I64x2Eq, Simd128Register) \
- V(I64x2Ne, Simd128Register) \
- V(I64x2GtS, Simd128Register) \
- V(I64x2GeS, Simd128Register) \
- V(I64x2Shl, Register) \
- V(I64x2ShrS, Register) \
- V(I64x2ShrU, Register) \
- V(I64x2Shl, const Operand&) \
- V(I64x2ShrS, const Operand&) \
- V(I64x2ShrU, const Operand&) \
- V(I32x4Add, Simd128Register) \
- V(I32x4Sub, Simd128Register) \
- V(I32x4Mul, Simd128Register) \
- V(I32x4Eq, Simd128Register) \
- V(I32x4Ne, Simd128Register) \
- V(I32x4GtS, Simd128Register) \
- V(I32x4GeS, Simd128Register) \
- V(I32x4GtU, Simd128Register) \
- V(I32x4GeU, Simd128Register) \
- V(I32x4MinS, Simd128Register) \
- V(I32x4MinU, Simd128Register) \
- V(I32x4MaxS, Simd128Register) \
- V(I32x4MaxU, Simd128Register) \
- V(I32x4Shl, Register) \
- V(I32x4ShrS, Register) \
- V(I32x4ShrU, Register) \
- V(I32x4Shl, const Operand&) \
- V(I32x4ShrS, const Operand&) \
- V(I32x4ShrU, const Operand&) \
- V(I16x8Add, Simd128Register) \
- V(I16x8Sub, Simd128Register) \
- V(I16x8Mul, Simd128Register) \
- V(I16x8Eq, Simd128Register) \
- V(I16x8Ne, Simd128Register) \
- V(I16x8GtS, Simd128Register) \
- V(I16x8GeS, Simd128Register) \
- V(I16x8GtU, Simd128Register) \
- V(I16x8GeU, Simd128Register) \
- V(I16x8MinS, Simd128Register) \
- V(I16x8MinU, Simd128Register) \
- V(I16x8MaxS, Simd128Register) \
- V(I16x8MaxU, Simd128Register) \
- V(I16x8Shl, Register) \
- V(I16x8ShrS, Register) \
- V(I16x8ShrU, Register) \
- V(I16x8Shl, const Operand&) \
- V(I16x8ShrS, const Operand&) \
- V(I16x8ShrU, const Operand&) \
- V(I8x16Add, Simd128Register) \
- V(I8x16Sub, Simd128Register) \
- V(I8x16Eq, Simd128Register) \
- V(I8x16Ne, Simd128Register) \
- V(I8x16GtS, Simd128Register) \
- V(I8x16GeS, Simd128Register) \
- V(I8x16GtU, Simd128Register) \
- V(I8x16GeU, Simd128Register) \
- V(I8x16MinS, Simd128Register) \
- V(I8x16MinU, Simd128Register) \
- V(I8x16MaxS, Simd128Register) \
- V(I8x16MaxU, Simd128Register) \
- V(I8x16Shl, Register) \
- V(I8x16ShrS, Register) \
- V(I8x16ShrU, Register) \
- V(I8x16Shl, const Operand&) \
- V(I8x16ShrS, const Operand&) \
- V(I8x16ShrU, const Operand&)
-
-#define PROTOTYPE_SIMD_BINOP(name, stype) \
- void name(Simd128Register dst, Simd128Register src1, stype src2);
+ Register src2, uint8_t imm_lane_idx, Register = r0);
+ void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Register scratch1, Register scratch2, Register scratch3);
+ void I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Simd128Register scratch);
+ void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Simd128Register scratch);
+ void I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Simd128Register scratch);
+ void I64x2BitMask(Register dst, Simd128Register src, Register scratch1,
+ Simd128Register scratch2);
+ void I32x4BitMask(Register dst, Simd128Register src, Register scratch1,
+ Simd128Register scratch2);
+ void I16x8BitMask(Register dst, Simd128Register src, Register scratch1,
+ Simd128Register scratch2);
+ void I8x16BitMask(Register dst, Simd128Register src, Register scratch1,
+ Register scratch2, Simd128Register scratch3);
+ void V128AnyTrue(Register dst, Simd128Register src, Register scratch);
+ void I32x4SConvertF32x4(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2);
+ void I32x4UConvertF32x4(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2);
+ void F32x4SConvertI32x4(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2);
+ void F32x4UConvertI32x4(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2);
+ void I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2);
+ void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2);
+ void I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register scratch);
+ void I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register scratch);
+ void F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4);
+ void F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4);
+ void I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch);
+ void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src,
+ Simd128Register scratch);
+ void I8x16Swizzle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Register scratch1, Register scratch2,
+ Simd128Register scratch3, Simd128Register scratch4);
+ void S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2);
+ void I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2,
+ Simd128Register scratch3);
+ void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register scratch);
+ void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register scratch1,
+ Simd128Register scratch2, Simd128Register scratch3);
+ void S128Select(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register mask);
+
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define PROTOTYPE_SIMD_SHIFT(name) \
+ void name(Simd128Register dst, Simd128Register src1, Register src2, \
+ Simd128Register scratch); \
+ void name(Simd128Register dst, Simd128Register src1, const Operand& src2, \
+ Register scratch1, Simd128Register scratch2);
+ SIMD_SHIFT_LIST(PROTOTYPE_SIMD_SHIFT)
+#undef PROTOTYPE_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4Sqrt) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(I64x2Abs) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2Neg) \
+ V(I32x4Abs) \
+ V(I32x4Neg) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I16x8Abs) \
+ V(I16x8Neg) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I8x16Abs) \
+ V(I8x16Neg) \
+ V(I8x16Popcnt) \
+ V(S128Not) \
+ V(S128Zero) \
+ V(S128AllOnes)
+
+#define PROTOTYPE_SIMD_UNOP(name) \
+ void name(Simd128Register dst, Simd128Register src);
+ SIMD_UNOP_LIST(PROTOTYPE_SIMD_UNOP)
+#undef PROTOTYPE_SIMD_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I16x8RoundingAverageU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor) \
+ V(S128AndNot)
+
+#define PROTOTYPE_SIMD_BINOP(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define SIMD_EXT_MUL_LIST(V) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U)
+
+#define PROTOTYPE_SIMD_EXT_MUL(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
+ Simd128Register scratch);
+ SIMD_EXT_MUL_LIST(PROTOTYPE_SIMD_EXT_MUL)
+#undef PROTOTYPE_SIMD_EXT_MUL
+#undef SIMD_EXT_MUL_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue)
+
+#define PROTOTYPE_SIMD_ALL_TRUE(name) \
+ void name(Register dst, Simd128Register src, Register scratch1, \
+ Simd128Register scratch2);
+ SIMD_ALL_TRUE_LIST(PROTOTYPE_SIMD_ALL_TRUE)
+#undef PROTOTYPE_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms)
+
+#define PROTOTYPE_SIMD_QFM(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
+ Simd128Register src3);
+ SIMD_QFM_LIST(PROTOTYPE_SIMD_QFM)
+#undef PROTOTYPE_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+#define SIMD_ADD_SUB_SAT_LIST(V) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU)
+
+#define PROTOTYPE_SIMD_ADD_SUB_SAT(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
+ Simd128Register scratch1, Simd128Register scratch2);
+ SIMD_ADD_SUB_SAT_LIST(PROTOTYPE_SIMD_ADD_SUB_SAT)
+#undef PROTOTYPE_SIMD_ADD_SUB_SAT
+#undef SIMD_ADD_SUB_SAT_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U)
+
+#define PROTOTYPE_SIMD_EXT_ADD_PAIRWISE(name) \
+ void name(Simd128Register dst, Simd128Register src, \
+ Simd128Register scratch1, Simd128Register scratch2);
+ SIMD_EXT_ADD_PAIRWISE_LIST(PROTOTYPE_SIMD_EXT_ADD_PAIRWISE)
+#undef PROTOTYPE_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
// ---------------------------------------------------------------------------
// Pointer compression Support
@@ -1221,6 +1436,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
+ void LoadTaggedSignedField(Register destination, MemOperand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 6e3b6a3e2b..b3e5a49f2d 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_S390_REGISTER_S390_H_
#define V8_CODEGEN_S390_REGISTER_S390_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -36,56 +35,6 @@ namespace internal {
V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
- 1 << 3 | // r3 a2
- 1 << 4 | // r4 a3
- 1 << 5; // r5 a4
-
-const int kNumJSCallerSaved = 5;
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
- 1 << 6 | // r6 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 7 | // r7 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 8 | // r8 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 9 | // r9 (HandleScope logic in MacroAssembler)
- 1 << 10 | // r10 (Roots register in Javascript)
- 1 << 11 | // r11 (fp in Javascript)
- 1 << 12 | // r12 (ip in Javascript)
- 1 << 13; // r13 (cp in Javascript)
-// 1 << 15; // r15 (sp in Javascript)
-
-const int kNumCalleeSaved = 8;
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7; // d7
-
-const int kNumCallerSavedDoubles = 8;
-
-const RegList kCalleeSavedDoubles = 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13 | // d12
- 1 << 14 | // d12
- 1 << 15; // d13
-
-const int kNumCalleeSavedDoubles = 8;
-
// The following constants describe the stack frame linkage area as
// defined by the ABI.
@@ -154,7 +103,7 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
+static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -173,7 +122,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
@@ -204,7 +153,7 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
-static_assert(sizeof(DoubleRegister) == sizeof(int),
+static_assert(sizeof(DoubleRegister) <= sizeof(int),
"DoubleRegister can efficiently be passed by value");
using FloatRegister = DoubleRegister;
diff --git a/deps/v8/src/codegen/s390/reglist-s390.h b/deps/v8/src/codegen/s390/reglist-s390.h
new file mode 100644
index 0000000000..7f557dc597
--- /dev/null
+++ b/deps/v8/src/codegen/s390/reglist-s390.h
@@ -0,0 +1,58 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_S390_REGLIST_S390_H_
+#define V8_CODEGEN_S390_REGLIST_S390_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = {r1, r2, // r2 a1
+ r3, // r3 a2
+ r4, // r4 a3
+ r5}; // r5 a4
+
+const int kNumJSCallerSaved = 5;
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = {r6, // r6 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ r7, // r7 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ r8, // r8 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ r9, // r9 (HandleScope logic in MacroAssembler)
+ r10, // r10 (Roots register in Javascript)
+ fp, // r11 (fp in Javascript)
+ ip, // r12 (ip in Javascript)
+ r13}; // r13 (cp in Javascript)
+// r15; // r15 (sp in Javascript)
+
+const int kNumCalleeSaved = 8;
+
+const DoubleRegList kCallerSavedDoubles = {d0, d1, d2, d3, d4, d5, d6, d7};
+
+const int kNumCallerSavedDoubles = 8;
+
+const DoubleRegList kCalleeSavedDoubles = {d8, d9, d10, d11,
+ d12, d13, d14, d15};
+
+const int kNumCalleeSavedDoubles = 8;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_S390_REGLIST_S390_H_
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 1d08a3b4d7..5b7b618fd6 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -109,10 +109,10 @@ void SafepointTable::Print(std::ostream& os) const {
}
}
-Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler) {
+SafepointTableBuilder::Safepoint SafepointTableBuilder::DefineSafepoint(
+ Assembler* assembler) {
entries_.push_back(EntryBuilder(zone_, assembler->pc_offset_for_safepoint()));
- EntryBuilder& new_entry = entries_.back();
- return Safepoint(new_entry.stack_indexes, &new_entry.register_indexes);
+ return SafepointTableBuilder::Safepoint(&entries_.back(), this);
}
int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
@@ -131,6 +131,8 @@ int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
}
void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
+ DCHECK_LT(max_stack_index_, tagged_slots_size);
+
#ifdef DEBUG
int last_pc = -1;
int last_trampoline = -1;
@@ -151,7 +153,10 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
#endif // DEBUG
RemoveDuplicates();
- TrimEntries(&tagged_slots_size);
+
+ // The encoding is compacted by translating stack slot indices s.t. they
+ // start at 0. See also below.
+ tagged_slots_size -= min_stack_index();
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// We cannot emit a const pool within the safepoint table.
@@ -161,14 +166,14 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(Code::kMetadataAlignment);
assembler->RecordComment(";;; Safepoint table.");
- offset_ = assembler->pc_offset();
+ safepoint_table_offset_ = assembler->pc_offset();
// Compute the required sizes of the fields.
int used_register_indexes = 0;
STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
- int max_pc = -1;
+ int max_pc = SafepointEntry::kNoTrampolinePC;
STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
- int max_deopt_index = -1;
+ int max_deopt_index = SafepointEntry::kNoDeoptIndex;
for (const EntryBuilder& entry : entries_) {
used_register_indexes |= entry.register_indexes;
max_pc = std::max(max_pc, std::max(entry.pc, entry.trampoline));
@@ -186,7 +191,10 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
};
bool has_deopt_data = max_deopt_index != -1;
int register_indexes_size = value_to_bytes(used_register_indexes);
- // Add 1 so all values are non-negative.
+ // Add 1 so all values (including kNoDeoptIndex and kNoTrampolinePC) are
+ // non-negative.
+ STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
+ STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
int pc_size = value_to_bytes(max_pc + 1);
int deopt_index_size = value_to_bytes(max_deopt_index + 1);
int tagged_slots_bytes =
@@ -224,22 +232,30 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
for (const EntryBuilder& entry : entries_) {
emit_bytes(entry.pc, pc_size);
if (has_deopt_data) {
- // Add 1 so all values are non-negative.
+ // Add 1 so all values (including kNoDeoptIndex and kNoTrampolinePC) are
+ // non-negative.
+ STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
+ STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
emit_bytes(entry.deopt_index + 1, deopt_index_size);
emit_bytes(entry.trampoline + 1, pc_size);
}
emit_bytes(entry.register_indexes, register_indexes_size);
}
- // Emit bitmaps of tagged stack slots.
+ // Emit bitmaps of tagged stack slots. Note the slot list is reversed in the
+ // encoding.
+ // TODO(jgruber): Avoid building a reversed copy of the bit vector.
ZoneVector<uint8_t> bits(tagged_slots_bytes, 0, zone_);
for (const EntryBuilder& entry : entries_) {
std::fill(bits.begin(), bits.end(), 0);
// Run through the indexes and build a bitmap.
for (int idx : *entry.stack_indexes) {
- DCHECK_GT(tagged_slots_size, idx);
- int index = tagged_slots_size - 1 - idx;
+ // The encoding is compacted by translating stack slot indices s.t. they
+ // start at 0. See also above.
+ const int adjusted_idx = idx - min_stack_index();
+ DCHECK_GT(tagged_slots_size, adjusted_idx);
+ int index = tagged_slots_size - 1 - adjusted_idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
bits[byte_index] |= (1u << bit_index);
@@ -261,17 +277,8 @@ void SafepointTableBuilder::RemoveDuplicates() {
const EntryBuilder& entry2) {
if (entry1.deopt_index != entry2.deopt_index) return false;
DCHECK_EQ(entry1.trampoline, entry2.trampoline);
-
- ZoneChunkList<int>* indexes1 = entry1.stack_indexes;
- ZoneChunkList<int>* indexes2 = entry2.stack_indexes;
- if (indexes1->size() != indexes2->size()) return false;
- if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
- return false;
- }
-
- if (entry1.register_indexes != entry2.register_indexes) return false;
-
- return true;
+ return entry1.register_indexes == entry2.register_indexes &&
+ entry1.stack_indexes->Equals(*entry2.stack_indexes);
};
auto remaining_it = entries_.begin();
@@ -289,27 +296,5 @@ void SafepointTableBuilder::RemoveDuplicates() {
entries_.Rewind(remaining);
}
-void SafepointTableBuilder::TrimEntries(int* tagged_slots_size) {
- int min_index = *tagged_slots_size;
- if (min_index == 0) return; // Early exit: nothing to trim.
-
- for (auto& entry : entries_) {
- for (int idx : *entry.stack_indexes) {
- DCHECK_GT(*tagged_slots_size, idx); // Validity check.
- if (idx >= min_index) continue;
- if (idx == 0) return; // Early exit: nothing to trim.
- min_index = idx;
- }
- }
-
- DCHECK_LT(0, min_index);
- *tagged_slots_size -= min_index;
- for (auto& entry : entries_) {
- for (int& idx : *entry.stack_indexes) {
- idx -= min_index;
- }
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 4201d5fc2f..49848d56ae 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -10,6 +10,7 @@
#include "src/base/memory.h"
#include "src/common/assert-scope.h"
#include "src/utils/allocation.h"
+#include "src/utils/bit-vector.h"
#include "src/utils/utils.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
@@ -35,10 +36,10 @@ class SafepointEntry {
tagged_register_indexes_(tagged_register_indexes),
tagged_slots_(tagged_slots),
trampoline_pc_(trampoline_pc) {
- DCHECK(is_valid());
+ DCHECK(is_initialized());
}
- bool is_valid() const { return tagged_slots_.begin() != nullptr; }
+ bool is_initialized() const { return tagged_slots_.begin() != nullptr; }
bool operator==(const SafepointEntry& other) const {
return pc_ == other.pc_ && deopt_index_ == other.deopt_index_ &&
@@ -49,7 +50,7 @@ class SafepointEntry {
void Reset() {
*this = SafepointEntry{};
- DCHECK(!is_valid());
+ DCHECK(!is_initialized());
}
int pc() const { return pc_; }
@@ -57,22 +58,22 @@ class SafepointEntry {
int trampoline_pc() const { return trampoline_pc_; }
bool has_deoptimization_index() const {
- DCHECK(is_valid());
+ DCHECK(is_initialized());
return deopt_index_ != kNoDeoptIndex;
}
int deoptimization_index() const {
- DCHECK(is_valid() && has_deoptimization_index());
+ DCHECK(is_initialized() && has_deoptimization_index());
return deopt_index_;
}
uint32_t tagged_register_indexes() const {
- DCHECK(is_valid());
+ DCHECK(is_initialized());
return tagged_register_indexes_;
}
base::Vector<const uint8_t> tagged_slots() const {
- DCHECK(is_valid());
+ DCHECK(is_initialized());
return tagged_slots_;
}
@@ -84,6 +85,8 @@ class SafepointEntry {
int trampoline_pc_ = kNoTrampolinePC;
};
+// A wrapper class for accessing the safepoint table embedded into the Code
+// object.
class SafepointTable {
public:
// The isolate and pc arguments are used for figuring out whether pc
@@ -113,6 +116,10 @@ class SafepointTable {
int deopt_index = SafepointEntry::kNoDeoptIndex;
int trampoline_pc = SafepointEntry::kNoTrampolinePC;
if (has_deopt_data()) {
+ STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
+ STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
+ // `-1` to restore the original value, see also
+ // SafepointTableBuilder::Emit.
deopt_index = read_bytes(&entry_ptr, deopt_index_size()) - 1;
trampoline_pc = read_bytes(&entry_ptr, pc_size()) - 1;
DCHECK(deopt_index >= 0 || deopt_index == SafepointEntry::kNoDeoptIndex);
@@ -196,41 +203,58 @@ class SafepointTable {
friend class SafepointEntry;
};
-class Safepoint {
- public:
- void DefinePointerSlot(int index) { stack_indexes_->push_back(index); }
-
- void DefineRegister(int reg_code) {
- // Make sure the recorded index is always less than 31, so that we don't
- // generate {kNoDeoptIndex} by accident.
- DCHECK_LT(reg_code, 31);
- *register_indexes_ |= 1u << reg_code;
- }
-
+class SafepointTableBuilder {
private:
- Safepoint(ZoneChunkList<int>* stack_indexes, uint32_t* register_indexes)
- : stack_indexes_(stack_indexes), register_indexes_(register_indexes) {}
- ZoneChunkList<int>* const stack_indexes_;
- uint32_t* register_indexes_;
-
- friend class SafepointTableBuilder;
-};
+ struct EntryBuilder {
+ int pc;
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
+ int trampoline = SafepointEntry::kNoTrampolinePC;
+ GrowableBitVector* stack_indexes;
+ uint32_t register_indexes = 0;
+ EntryBuilder(Zone* zone, int pc)
+ : pc(pc), stack_indexes(zone->New<GrowableBitVector>()) {}
+ };
-class SafepointTableBuilder {
public:
explicit SafepointTableBuilder(Zone* zone) : entries_(zone), zone_(zone) {}
SafepointTableBuilder(const SafepointTableBuilder&) = delete;
SafepointTableBuilder& operator=(const SafepointTableBuilder&) = delete;
- bool emitted() const { return offset_ != -1; }
+ bool emitted() const {
+ return safepoint_table_offset_ != kNoSafepointTableOffset;
+ }
- // Get the offset of the emitted safepoint table in the code.
- int GetCodeOffset() const {
+ int safepoint_table_offset() const {
DCHECK(emitted());
- return offset_;
+ return safepoint_table_offset_;
}
+ class Safepoint {
+ public:
+ void DefineTaggedStackSlot(int index) {
+ // Note it is only valid to specify stack slots here that are *not* in
+ // the fixed part of the frame (e.g. argc, target, context, stored rbp,
+ // return address). Frame iteration handles the fixed part of the frame
+ // with custom code, see CommonFrame::IterateCompiledFrame.
+ entry_->stack_indexes->Add(index, table_->zone_);
+ table_->UpdateMinMaxStackIndex(index);
+ }
+
+ void DefineTaggedRegister(int reg_code) {
+ DCHECK_LT(reg_code,
+ kBitsPerByte * sizeof(EntryBuilder::register_indexes));
+ entry_->register_indexes |= 1u << reg_code;
+ }
+
+ private:
+ friend class SafepointTableBuilder;
+ Safepoint(EntryBuilder* entry, SafepointTableBuilder* table)
+ : entry_(entry), table_(table) {}
+ EntryBuilder* const entry_;
+ SafepointTableBuilder* const table_;
+ };
+
// Define a new safepoint for the current position in the body.
Safepoint DefineSafepoint(Assembler* assembler);
@@ -246,32 +270,35 @@ class SafepointTableBuilder {
int deopt_index);
private:
- struct EntryBuilder {
- int pc;
- int deopt_index;
- int trampoline;
- ZoneChunkList<int>* stack_indexes;
- uint32_t register_indexes;
- EntryBuilder(Zone* zone, int pc)
- : pc(pc),
- deopt_index(SafepointEntry::kNoDeoptIndex),
- trampoline(SafepointEntry::kNoTrampolinePC),
- stack_indexes(zone->New<ZoneChunkList<int>>(
- zone, ZoneChunkList<int>::StartMode::kSmall)),
- register_indexes(0) {}
- };
-
// Remove consecutive identical entries.
void RemoveDuplicates();
- // Try to trim entries by removing trailing zeros (and shrinking
- // {bits_per_entry}).
- void TrimEntries(int* bits_per_entry);
+ void UpdateMinMaxStackIndex(int index) {
+#ifdef DEBUG
+ max_stack_index_ = std::max(max_stack_index_, index);
+#endif // DEBUG
+ min_stack_index_ = std::min(min_stack_index_, index);
+ }
- ZoneChunkList<EntryBuilder> entries_;
+ int min_stack_index() const {
+ return min_stack_index_ == std::numeric_limits<int>::max()
+ ? 0
+ : min_stack_index_;
+ }
+
+ static constexpr int kNoSafepointTableOffset = -1;
- int offset_ = -1;
+ // Tracks the min/max stack slot index over all entries. We need the minimum
+ // index when encoding the actual table since we shift all unused lower
+ // indices out of the encoding. Tracking the indices during safepoint
+ // construction means we don't have to iterate again later.
+#ifdef DEBUG
+ int max_stack_index_ = 0;
+#endif // DEBUG
+ int min_stack_index_ = std::numeric_limits<int>::max();
+ ZoneChunkList<EntryBuilder> entries_;
+ int safepoint_table_offset_ = kNoSafepointTableOffset;
Zone* const zone_;
};
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 4dd54fd6f0..93ec1ae54f 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -6,7 +6,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h
index d6d8b5da0f..8098ca8ada 100644
--- a/deps/v8/src/codegen/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -127,9 +127,12 @@ using MachineSignature = Signature<MachineType>;
template <typename T>
size_t hash_value(const Signature<T>& sig) {
- size_t hash = base::hash_combine(sig.parameter_count(), sig.return_count());
- for (const T& t : sig.all()) hash = base::hash_combine(hash, t);
- return hash;
+ // Hash over all contained representations, plus the parameter count to
+ // differentiate signatures with the same representation array but different
+ // parameter/return count.
+ size_t seed = base::hash_value(sig.parameter_count());
+ for (T rep : sig.all()) seed = base::hash_combine(seed, base::hash<T>{}(rep));
+ return seed;
}
template <typename T, size_t kNumReturns = 0, size_t kNumParams = 0>
diff --git a/deps/v8/src/codegen/source-position.cc b/deps/v8/src/codegen/source-position.cc
index fa24127682..e08f2d11a4 100644
--- a/deps/v8/src/codegen/source-position.cc
+++ b/deps/v8/src/codegen/source-position.cc
@@ -62,24 +62,40 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
Handle<Code> code) const {
Isolate* isolate = code->GetIsolate();
- Handle<DeoptimizationData> deopt_data(
- DeoptimizationData::cast(code->deoptimization_data()), isolate);
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code->deoptimization_data());
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
- InliningPosition inl =
- deopt_data->InliningPositions().get(pos.InliningId());
+ InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());
Handle<SharedFunctionInfo> function(
- deopt_data->GetInlinedFunction(inl.inlined_function_id), isolate);
+ deopt_data.GetInlinedFunction(inl.inlined_function_id), isolate);
stack.push_back(SourcePositionInfo(pos, function));
pos = inl.position;
}
Handle<SharedFunctionInfo> function(
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()), isolate);
+ SharedFunctionInfo::cast(deopt_data.SharedFunctionInfo()), isolate);
stack.push_back(SourcePositionInfo(pos, function));
return stack;
}
+SourcePositionInfo SourcePosition::FirstInfo(Handle<Code> code) const {
+ DisallowGarbageCollection no_gc;
+ Isolate* isolate = code->GetIsolate();
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code->deoptimization_data());
+ SourcePosition pos = *this;
+ if (pos.isInlined()) {
+ InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());
+ Handle<SharedFunctionInfo> function(
+ deopt_data.GetInlinedFunction(inl.inlined_function_id), isolate);
+ return SourcePositionInfo(pos, function);
+ }
+ Handle<SharedFunctionInfo> function(
+ SharedFunctionInfo::cast(deopt_data.SharedFunctionInfo()), isolate);
+ return SourcePositionInfo(pos, function);
+}
+
void SourcePosition::Print(std::ostream& out,
SharedFunctionInfo function) const {
Script::PositionInfo pos;
diff --git a/deps/v8/src/codegen/source-position.h b/deps/v8/src/codegen/source-position.h
index 9ec845f907..d8982d6475 100644
--- a/deps/v8/src/codegen/source-position.h
+++ b/deps/v8/src/codegen/source-position.h
@@ -84,6 +84,7 @@ class SourcePosition final {
std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
std::vector<SourcePositionInfo> InliningStack(
OptimizedCompilationInfo* cinfo) const;
+ SourcePositionInfo FirstInfo(Handle<Code> code) const;
void Print(std::ostream& out, Code code) const;
void PrintJson(std::ostream& out) const;
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index 4dfb9a1741..bffaa5c326 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -35,9 +35,9 @@ struct RawPtrT : WordT {
static constexpr MachineType kMachineType = MachineType::Pointer();
};
-// A RawPtrT that is guaranteed to point into the virtual memory cage.
-struct CagedPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::CagedPointer();
+// A RawPtrT that is guaranteed to point into the sandbox.
+struct SandboxedPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::SandboxedPointer();
};
template <class To>
@@ -84,11 +84,16 @@ struct UintPtrT : WordT {
static constexpr MachineType kMachineType = MachineType::UintPtr();
};
+// An index into the external pointer table.
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+struct ExternalPointerT : Uint32T {
+ static constexpr MachineType kMachineType = MachineType::Uint32();
+};
+#else
struct ExternalPointerT : UntaggedT {
- static const MachineRepresentation kMachineRepresentation =
- MachineType::PointerRepresentation();
static constexpr MachineType kMachineType = MachineType::Pointer();
};
+#endif
struct Float32T : UntaggedT {
static const MachineRepresentation kMachineRepresentation =
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index e1546f71ca..24a237c16a 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/external-reference-encoder.h"
+#include "src/common/globals.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
@@ -50,12 +51,6 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- isolate()->builtins()->IsBuiltinCodeDataContainerHandle(
- object, &builtin)) {
- // Similar to roots, builtins may be loaded from the builtins table.
- LoadRootRelative(destination,
- RootRegisterOffsetForBuiltinCodeDataContainer(builtin));
} else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
// Similar to roots, builtins may be loaded from the builtins table.
LoadRootRelative(destination, RootRegisterOffsetForBuiltin(builtin));
@@ -107,12 +102,6 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinCodeDataContainer(
- Builtin builtin) {
- return IsolateData::BuiltinCodeDataContainerSlotOffset(builtin);
-}
-
-// static
intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 7403aa1bfd..ad255ff1ac 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -80,7 +80,6 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
static int32_t RootRegisterOffsetForBuiltin(Builtin builtin);
- static int32_t RootRegisterOffsetForBuiltinCodeDataContainer(Builtin builtin);
// Returns the root-relative offset to reference.address().
static intptr_t RootRegisterOffsetForExternalReference(
@@ -96,7 +95,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
-#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
// Minimum page size. We must touch memory once per page when expanding the
// stack, to avoid access violations.
static constexpr int kStackPageSize = 4 * KB;
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index cb2f67850a..dd74e93f10 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -38,9 +38,9 @@ void Assembler::emitw(uint16_t x) {
// TODO(ishell): Rename accordingly once RUNTIME_ENTRY is renamed.
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
- DCHECK_NE(options().code_range_start, 0);
+ DCHECK_NE(options().code_range_base, 0);
RecordRelocInfo(rmode);
- uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
+ uint32_t offset = static_cast<uint32_t>(entry - options().code_range_base);
emitl(offset);
}
@@ -264,7 +264,7 @@ int Assembler::deserialization_special_target_size(
return kSpecialTargetSize;
}
-Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
+Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}
@@ -273,7 +273,7 @@ Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
}
Address Assembler::runtime_entry_at(Address pc) {
- return ReadUnalignedValue<int32_t>(pc) + options().code_range_start;
+ return ReadUnalignedValue<int32_t>(pc) + options().code_range_base;
}
// -----------------------------------------------------------------------------
@@ -322,7 +322,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Object obj(DecompressTaggedPointer(cage_base, compressed));
// Embedding of compressed Code objects must not happen when external code
// space is enabled, because CodeDataContainers must be used instead.
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode(cage_base));
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
+ !IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);
}
DCHECK(IsFullEmbeddedObject(rmode_) || IsDataEmbeddedObject(rmode_));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 0fdeee7685..df15db18cc 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -11,7 +11,7 @@
#if V8_LIBC_MSVCRT
#include <intrin.h> // _xgetbv()
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <sys/sysctl.h>
#endif
@@ -32,6 +32,8 @@ namespace internal {
namespace {
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
V8_INLINE uint64_t xgetbv(unsigned int xcr) {
#if V8_LIBC_MSVCRT
return _xgetbv(xcr);
@@ -47,7 +49,7 @@ V8_INLINE uint64_t xgetbv(unsigned int xcr) {
}
bool OSHasAVXSupport() {
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
// caused by ISRs, so we detect that here and disable AVX in that case.
char buffer[128];
@@ -63,12 +65,14 @@ bool OSHasAVXSupport() {
*period_pos = '\0';
long kernel_version_major = strtol(buffer, nullptr, 10); // NOLINT
if (kernel_version_major <= 13) return false;
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
// Check whether OS claims to support AVX.
uint64_t feature_mask = xgetbv(0); // XCR_XFEATURE_ENABLED_MASK
return (feature_mask & 0x6) == 0x6;
}
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
} // namespace
bool CpuFeatures::SupportsWasmSimd128() {
@@ -80,12 +84,14 @@ bool CpuFeatures::SupportsWasmSimd128() {
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
CHECK(cpu.has_cmov()); // CMOV support is mandatory.
- // Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
if (cpu.has_sse42()) SetSupported(SSE4_2);
if (cpu.has_sse41()) SetSupported(SSE4_1);
if (cpu.has_ssse3()) SetSupported(SSSE3);
@@ -125,6 +131,11 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// at runtime in builtins using an extern ref. Other callers should use
// CpuFeatures::SupportWasmSimd128().
CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
+
+ if (cpu.has_cetss()) SetSupported(CETSS);
+ // The static variable is used for codegen of certain CETSS instructions.
+ CpuFeatures::supports_cetss_ = IsSupported(CETSS);
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
}
void CpuFeatures::PrintTarget() {}
@@ -396,7 +407,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
const int safepoint_table_offset =
(safepoint_table_builder == kNoSafepointTable)
? handler_table_offset2
- : safepoint_table_builder->GetCodeOffset();
+ : safepoint_table_builder->safepoint_table_offset();
const int reloc_info_offset =
static_cast<int>(reloc_info_writer.pos() - buffer_->start());
CodeDesc::Initialize(desc, this, safepoint_table_offset,
@@ -998,9 +1009,9 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) {
emit_runtime_entry(entry, rmode);
}
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<CodeT> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(target->IsExecutable());
+ DCHECK(FromCodeT(*target).IsExecutable());
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -1431,7 +1442,7 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
emitl(static_cast<int32_t>(entry));
}
-void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode) {
if (cc == always) {
jmp(target, rmode);
return;
@@ -1524,7 +1535,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}
-void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::jmp(Handle<CodeT> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1001 #32-bit disp.
@@ -2107,6 +2118,15 @@ void Assembler::pushfq() {
emit(0x9C);
}
+void Assembler::incsspq(Register number_of_words) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(number_of_words);
+ emit(0x0F);
+ emit(0xAE);
+ emit(0xE8 | number_of_words.low_bits());
+}
+
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
DCHECK(is_uint16(imm16));
@@ -3741,6 +3761,16 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit(imm8);
}
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2, byte imm8) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNoPrefix, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+ emit(imm8);
+}
+
#define VPD(SIMDRegister, length) \
void Assembler::vpd(byte op, SIMDRegister dst, SIMDRegister src1, \
SIMDRegister src2) { \
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 2c89157979..1d2d07ffdd 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -456,7 +456,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- inline Handle<Code> code_target_object_handle_at(Address pc);
+ inline Handle<CodeT> code_target_object_handle_at(Address pc);
inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
inline Address runtime_entry_at(Address pc);
@@ -544,6 +544,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void popq(Register dst);
void popq(Operand dst);
+ void incsspq(Register number_of_words);
+
void leave();
// Moves
@@ -802,7 +804,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void near_call(intptr_t disp, RelocInfo::Mode rmode);
void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
- void call(Handle<Code> target,
+ void call(Handle<CodeT> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Call near absolute indirect, address in register
@@ -813,7 +815,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Use a 32-bit signed displacement.
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(Handle<Code> target, RelocInfo::Mode rmode);
+ void jmp(Handle<CodeT> target, RelocInfo::Mode rmode);
void jmp(Address entry, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
@@ -827,7 +829,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, Address entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode);
// Floating-point operations
void fld(int i);
@@ -1594,6 +1596,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vps(0xC6, dst, src1, src2, imm8);
}
+ void vshufps(YMMRegister dst, YMMRegister src1, YMMRegister src2, byte imm8) {
+ vps(0xC6, dst, src1, src2, imm8);
+ }
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
void vmovaps(YMMRegister dst, YMMRegister src) { vps(0x28, dst, ymm0, src); }
@@ -1809,6 +1814,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vpd(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index a016f6f9ef..af9b2e1cf2 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -24,10 +24,10 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data,
int nof_expected_args) {
RegList allocatable_regs = data->allocatable_registers();
- if (nof_expected_args >= 1) DCHECK(allocatable_regs | arg_reg_1.bit());
- if (nof_expected_args >= 2) DCHECK(allocatable_regs | arg_reg_2.bit());
- if (nof_expected_args >= 3) DCHECK(allocatable_regs | arg_reg_3.bit());
- if (nof_expected_args >= 4) DCHECK(allocatable_regs | arg_reg_4.bit());
+ if (nof_expected_args >= 1) DCHECK(allocatable_regs.has(arg_reg_1));
+ if (nof_expected_args >= 2) DCHECK(allocatable_regs.has(arg_reg_2));
+ if (nof_expected_args >= 3) DCHECK(allocatable_regs.has(arg_reg_3));
+ if (nof_expected_args >= 4) DCHECK(allocatable_regs.has(arg_reg_4));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
@@ -88,6 +88,36 @@ constexpr Register LoadDescriptor::SlotRegister() { return rax; }
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
// static
+constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
+ return rdx;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return rcx; }
+
+// static
+constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
+ return rbx;
+}
+
+// static
+constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
+ return kInterpreterAccumulatorRegister;
+}
+// static
+constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return rdx; }
+// static
+constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return rcx; }
+
+// static
+constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
+ return rbx;
+}
+
+// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return rdi;
@@ -131,7 +161,7 @@ constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
// static
-constexpr auto TypeofDescriptor::registers() { return RegisterArray(rbx); }
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(rax); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
@@ -139,6 +169,21 @@ constexpr auto CallTrampolineDescriptor::registers() {
// rdi : the target to call
return RegisterArray(rdi, rax);
}
+// static
+constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
+ // rdi : the source
+ // rax : the excluded property count
+ return RegisterArray(rdi, rax);
+}
+
+// static
+constexpr auto
+CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
+ // rdi : the source
+ // rax : the excluded property count
+ // rcx : the excluded property base
+ return RegisterArray(rdi, rax, rcx);
+}
// static
constexpr auto CallVarargsDescriptor::registers() {
@@ -248,6 +293,11 @@ constexpr auto BinaryOp_BaselineDescriptor::registers() {
}
// static
+constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
+ return RegisterArray(rax, rdx, rbx);
+}
+
+// static
constexpr auto ApiCallbackDescriptor::registers() {
return RegisterArray(rdx, // api function address
rcx, // argument count (not including receiver)
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 6ac8017ca8..4e351a2f56 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -27,7 +27,7 @@
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#include "src/snapshot/snapshot.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
@@ -376,40 +376,40 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::EncodeCagedPointer(Register value) {
+void TurboAssembler::EncodeSandboxedPointer(Register value) {
ASM_CODE_COMMENT(this);
-#ifdef V8_CAGED_POINTERS
+#ifdef V8_SANDBOXED_POINTERS
subq(value, kPtrComprCageBaseRegister);
- shlq(value, Immediate(kCagedPointerShift));
+ shlq(value, Immediate(kSandboxedPointerShift));
#else
UNREACHABLE();
#endif
}
-void TurboAssembler::DecodeCagedPointer(Register value) {
+void TurboAssembler::DecodeSandboxedPointer(Register value) {
ASM_CODE_COMMENT(this);
-#ifdef V8_CAGED_POINTERS
- shrq(value, Immediate(kCagedPointerShift));
+#ifdef V8_SANDBOXED_POINTERS
+ shrq(value, Immediate(kSandboxedPointerShift));
addq(value, kPtrComprCageBaseRegister);
#else
UNREACHABLE();
#endif
}
-void TurboAssembler::LoadCagedPointerField(Register destination,
- Operand field_operand) {
+void TurboAssembler::LoadSandboxedPointerField(Register destination,
+ Operand field_operand) {
ASM_CODE_COMMENT(this);
movq(destination, field_operand);
- DecodeCagedPointer(destination);
+ DecodeSandboxedPointer(destination);
}
-void TurboAssembler::StoreCagedPointerField(Operand dst_field_operand,
- Register value) {
+void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
+ Register value) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(value, kScratchRegister));
DCHECK(!dst_field_operand.AddressUsesRegister(kScratchRegister));
movq(kScratchRegister, value);
- EncodeCagedPointer(kScratchRegister);
+ EncodeSandboxedPointer(kScratchRegister);
movq(dst_field_operand, kScratchRegister);
}
@@ -417,7 +417,8 @@ void TurboAssembler::LoadExternalPointerField(
Register destination, Operand field_operand, ExternalPointerTag tag,
Register scratch, IsolateRootLocation isolateRootLocation) {
DCHECK(!AreAliased(destination, scratch));
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ DCHECK_NE(kExternalPointerNullTag, tag);
DCHECK(!field_operand.AddressUsesRegister(scratch));
if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
DCHECK(root_array_available_);
@@ -431,33 +432,24 @@ void TurboAssembler::LoadExternalPointerField(
Internals::kExternalPointerTableBufferOffset));
}
movl(destination, field_operand);
+ shrq(destination, Immediate(kExternalPointerIndexShift));
movq(destination, Operand(scratch, destination, times_8, 0));
- if (tag != 0) {
- movq(scratch, Immediate64(~tag));
- andq(destination, scratch);
- }
+ movq(scratch, Immediate64(~tag));
+ andq(destination, scratch);
#else
movq(destination, field_operand);
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers == 0) return;
- DCHECK_GT(NumRegs(registers), 0);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- pushq(Register::from_code(i));
- }
+ for (Register reg : registers) {
+ pushq(reg);
}
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers == 0) return;
- DCHECK_GT(NumRegs(registers), 0);
- for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
- if ((registers >> i) & 1u) {
- popq(Register::from_code(i));
- }
+ for (Register reg : base::Reversed(registers)) {
+ popq(reg);
}
}
@@ -524,7 +516,7 @@ void TurboAssembler::CallRecordWriteStub(
if (options().inline_offheap_trampolines) {
CallBuiltin(builtin);
} else {
- Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
}
@@ -552,7 +544,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
- Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -593,7 +585,7 @@ void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
- Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -757,7 +749,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
- Handle<Code> code =
+ Handle<CodeT> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -786,8 +778,9 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<CodeT> code =
+ CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1467,7 +1460,7 @@ void TurboAssembler::Move(Register dst, Smi source) {
int value = source.value();
if (value == 0) {
xorl(dst, dst);
- } else if (SmiValuesAre32Bits() || value < 0) {
+ } else if (SmiValuesAre32Bits()) {
Move(dst, source.ptr(), RelocInfo::NO_INFO);
} else {
uint32_t uvalue = static_cast<uint32_t>(source.ptr());
@@ -1807,10 +1800,11 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
jmp(kScratchRegister);
}
-void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc) {
- DCHECK_IMPLIES(options().isolate_independent_code,
- Builtins::IsIsolateIndependentBuiltin(*code_object));
+ DCHECK_IMPLIES(
+ options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code_object)));
if (options().inline_offheap_trampolines) {
Builtin builtin = Builtin::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
@@ -1851,9 +1845,11 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
call(kScratchRegister);
}
-void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
- DCHECK_IMPLIES(options().isolate_independent_code,
- Builtins::IsIsolateIndependentBuiltin(*code_object));
+void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ DCHECK_IMPLIES(
+ options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code_object)));
if (options().inline_offheap_trampolines) {
Builtin builtin = Builtin::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
@@ -1915,6 +1911,14 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ LoadExternalPointerField(
+ destination,
+ FieldOperand(code_object, CodeDataContainer::kCodeEntryPointOffset),
+ kCodeEntryPointTag, kScratchRegister);
+ return;
+ }
+
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -2275,6 +2279,23 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
+void TurboAssembler::IncsspqIfSupported(Register number_of_words,
+ Register scratch) {
+ // Optimized code can validate at runtime whether the cpu supports the
+ // incsspq instruction, so it shouldn't use this method.
+ CHECK(isolate()->IsGeneratingEmbeddedBuiltins());
+ DCHECK_NE(number_of_words, scratch);
+ Label not_supported;
+ ExternalReference supports_cetss =
+ ExternalReference::supports_cetss_address();
+ Operand supports_cetss_operand =
+ ExternalReferenceAsOperand(supports_cetss, scratch);
+ cmpb(supports_cetss_operand, Immediate(0));
+ j(equal, &not_supported, Label::kNear);
+ incsspq(number_of_words);
+ bind(&not_supported);
+}
+
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
LoadMap(map, heap_object);
@@ -2619,11 +2640,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
leaq(kScratchRegister,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
- // Extra words are the receiver (if not already included in argc) and the
- // return address (if a jump).
+ // Extra words are for the return address (if a jump).
int extra_words =
type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
- if (!kJSArgcIncludesReceiver) extra_words++;
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
Move(current, 0);
@@ -2736,7 +2755,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// On Windows and on macOS, we cannot increment the stack size by more than
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 262162ded0..f1aba1355c 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -67,6 +67,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Ret();
+ // Call incsspq with {number_of_words} only if the cpu supports it.
+ // NOTE: This shouldn't be embedded in optimized code, since the check
+ // for CPU support would be redundant (we could check at compiler time).
+ void IncsspqIfSupported(Register number_of_words, Register scratch);
+
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
@@ -365,7 +370,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Call(Register reg) { call(reg); }
void Call(Operand op);
- void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void Call(Handle<CodeT> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
@@ -396,6 +401,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Helper functions that dispatch either to Call/JumpCodeObject or to
// Call/JumpCodeDataContainerObject.
+ // TODO(v8:11880): remove since CodeT targets are now default.
void LoadCodeTEntry(Register destination, Register code);
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
@@ -403,7 +409,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
+ void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc = always);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
@@ -468,7 +474,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
-#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
void AllocateStackSpace(Register bytes_scratch);
void AllocateStackSpace(int bytes);
#else
@@ -593,22 +599,22 @@ class V8_EXPORT_PRIVATE TurboAssembler
void DecompressAnyTagged(Register destination, Operand field_operand);
// ---------------------------------------------------------------------------
- // V8 Heap sandbox support
+ // V8 Sandbox support
- // Transform a CagedPointer from/to its encoded form, which is used when the
- // pointer is stored on the heap and ensures that the pointer will always
- // point into the virtual memory cage.
- void EncodeCagedPointer(Register value);
- void DecodeCagedPointer(Register value);
+ // Transform a SandboxedPointer from/to its encoded form, which is used when
+ // the pointer is stored on the heap and ensures that the pointer will always
+ // point into the sandbox.
+ void EncodeSandboxedPointer(Register value);
+ void DecodeSandboxedPointer(Register value);
- // Load and decode a CagedPointer from the heap.
- void LoadCagedPointerField(Register destination, Operand field_operand);
- // Encode and store a CagedPointer to the heap.
- void StoreCagedPointerField(Operand dst_field_operand, Register value);
+ // Load and decode a SandboxedPointer from the heap.
+ void LoadSandboxedPointerField(Register destination, Operand field_operand);
+ // Encode and store a SandboxedPointer to the heap.
+ void StoreSandboxedPointerField(Operand dst_field_operand, Register value);
enum class IsolateRootLocation { kInScratchRegister, kInRootRegister };
// Loads a field containing off-heap pointer and does necessary decoding
- // if V8 heap sandbox is enabled.
+ // if sandboxed external pointers are enabled.
void LoadExternalPointerField(Register destination, Operand field_operand,
ExternalPointerTag tag, Register scratch,
IsolateRootLocation isolateRootLocation =
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index f36763f2e4..a9c563317e 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -5,8 +5,7 @@
#ifndef V8_CODEGEN_X64_REGISTER_X64_H_
#define V8_CODEGEN_X64_REGISTER_X64_H_
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
+#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
@@ -75,7 +74,7 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
+static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");
#define DECLARE_REGISTER(R) \
@@ -86,23 +85,6 @@ constexpr Register no_reg = Register::no_reg();
constexpr int kNumRegs = 16;
-constexpr RegList kJSCallerSaved =
- Register::ListOf(rax, rcx, rdx,
- rbx, // used as a caller-saved register in JavaScript code
- rdi); // callee function
-
-constexpr RegList kCallerSaved =
-#ifdef V8_TARGET_OS_WIN
- Register::ListOf(rax, rcx, rdx, r8, r9, r10, r11);
-#else
- Register::ListOf(rax, rcx, rdx, rdi, rsi, r8, r9, r10, r11);
-#endif // V8_TARGET_OS_WIN
-
-constexpr int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-constexpr int kNumSafepointRegisters = 16;
-
#ifdef V8_TARGET_OS_WIN
// Windows calling convention
constexpr Register arg_reg_1 = rcx;
@@ -179,7 +161,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
@@ -215,7 +197,7 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
};
ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
-static_assert(sizeof(XMMRegister) == sizeof(int),
+static_assert(sizeof(XMMRegister) <= sizeof(int),
"XMMRegister can efficiently be passed by value");
class YMMRegister : public XMMRegister {
@@ -231,7 +213,7 @@ class YMMRegister : public XMMRegister {
};
ASSERT_TRIVIALLY_COPYABLE(YMMRegister);
-static_assert(sizeof(YMMRegister) == sizeof(int),
+static_assert(sizeof(YMMRegister) <= sizeof(int),
"YMMRegister can efficiently be passed by value");
using FloatRegister = XMMRegister;
diff --git a/deps/v8/src/codegen/x64/reglist-x64.h b/deps/v8/src/codegen/x64/reglist-x64.h
new file mode 100644
index 0000000000..6c7be82440
--- /dev/null
+++ b/deps/v8/src/codegen/x64/reglist-x64.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_X64_REGLIST_X64_H_
+#define V8_CODEGEN_X64_REGLIST_X64_H_
+
+#include "src/base/macros.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+constexpr RegList kJSCallerSaved = {
+ rax, rcx, rdx,
+ rbx, // used as a caller-saved register in JavaScript code
+ rdi}; // callee function
+
+constexpr RegList kCallerSaved =
+#ifdef V8_TARGET_OS_WIN
+ {rax, rcx, rdx, r8, r9, r10, r11};
+#else
+ {rax, rcx, rdx, rdi, rsi, r8, r9, r10, r11};
+#endif // V8_TARGET_OS_WIN
+
+constexpr int kNumJSCallerSaved = 5;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_X64_REGLIST_X64_H_
diff --git a/deps/v8/src/common/allow-deprecated.h b/deps/v8/src/common/allow-deprecated.h
new file mode 100644
index 0000000000..8a512366c3
--- /dev/null
+++ b/deps/v8/src/common/allow-deprecated.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMMON_ALLOW_DEPRECATED_H_
+#define V8_COMMON_ALLOW_DEPRECATED_H_
+
+#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) || \
+ defined(V8_DEPRECATION_WARNINGS)
+
+#if defined(V8_CC_MSVC)
+
+#define START_ALLOW_USE_DEPRECATED() \
+ __pragma(warning(push)) __pragma(warning(disable : 4996))
+
+#define END_ALLOW_USE_DEPRECATED() __pragma(warning(pop))
+
+#else // !defined(V8_CC_MSVC)
+
+#define START_ALLOW_USE_DEPRECATED() \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+
+#define END_ALLOW_USE_DEPRECATED() _Pragma("GCC diagnostic pop")
+
+#endif // !defined(V8_CC_MSVC)
+
+#else // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) ||
+ // defined(V8_DEPRECATION_WARNINGS))
+
+#define START_ALLOW_USE_DEPRECATED()
+#define END_ALLOW_USE_DEPRECATED()
+
+#endif // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) ||
+ // defined(V8_DEPRECATION_WARNINGS))
+
+#endif // V8_COMMON_ALLOW_DEPRECATED_H_
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 4d17867542..f26b0a06e2 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -30,11 +30,6 @@ class RecursiveMutex;
namespace internal {
-constexpr int KB = 1024;
-constexpr int MB = KB * 1024;
-constexpr int GB = MB * 1024;
-constexpr int64_t TB = static_cast<int64_t>(GB) * 1024;
-
// Determine whether we are running in a simulated environment.
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
@@ -357,6 +352,11 @@ STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
// This type defines raw storage type for external (or off-V8 heap) pointers
// stored on V8 heap.
constexpr int kExternalPointerSize = sizeof(ExternalPointer_t);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+STATIC_ASSERT(kExternalPointerSize == kTaggedSize);
+#else
+STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+#endif
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
@@ -918,6 +918,8 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+enum PageSize { kRegular, kLarge };
+
enum class CodeFlushMode {
kFlushBytecode,
kFlushBaselineCode,
@@ -1153,6 +1155,10 @@ inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
UNREACHABLE();
}
+// Threshold calculated using a microbenckmark.
+// https://chromium-review.googlesource.com/c/v8/v8/+/3429210
+constexpr int kScopeInfoMaxInlinedLocalNamesSize = 75;
+
enum ScopeType : uint8_t {
CLASS_SCOPE, // The scope introduced by a class.
EVAL_SCOPE, // The top-level scope for an eval source.
@@ -1509,34 +1515,6 @@ class CompareOperationFeedback {
};
};
-enum class Operation {
- // Binary operations.
- kAdd,
- kSubtract,
- kMultiply,
- kDivide,
- kModulus,
- kExponentiate,
- kBitwiseAnd,
- kBitwiseOr,
- kBitwiseXor,
- kShiftLeft,
- kShiftRight,
- kShiftRightLogical,
- // Unary operations.
- kBitwiseNot,
- kNegate,
- kIncrement,
- kDecrement,
- // Compare operations.
- kEqual,
- kStrictEqual,
- kLessThan,
- kLessThanOrEqual,
- kGreaterThan,
- kGreaterThanOrEqual,
-};
-
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
@@ -1603,16 +1581,17 @@ inline std::ostream& operator<<(std::ostream& os, CollectionKind kind) {
UNREACHABLE();
}
-// Flags for the runtime function kDefineDataPropertyInLiteral. A property can
-// be enumerable or not, and, in case of functions, the function name
-// can be set or not.
-enum class DataPropertyInLiteralFlag {
+// Flags for the runtime function kDefineKeyedOwnPropertyInLiteral. A property
+// can be enumerable or not, and, in case of functions, the function name can be
+// set or not.
+enum class DefineKeyedOwnPropertyInLiteralFlag {
kNoFlags = 0,
kDontEnum = 1 << 0,
kSetFunctionName = 1 << 1
};
-using DataPropertyInLiteralFlags = base::Flags<DataPropertyInLiteralFlag>;
-DEFINE_OPERATORS_FOR_FLAGS(DataPropertyInLiteralFlags)
+using DefineKeyedOwnPropertyInLiteralFlags =
+ base::Flags<DefineKeyedOwnPropertyInLiteralFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(DefineKeyedOwnPropertyInLiteralFlags)
enum ExternalArrayType {
kExternalInt8Array = 1,
@@ -1649,67 +1628,40 @@ enum class OptimizationMarker : int32_t {
// some processing needs to be done.
kNone = 0b000,
kInOptimizationQueue = 0b001,
- kCompileOptimized = 0b010,
- kCompileOptimizedConcurrent = 0b011,
- kLogFirstExecution = 0b100,
- kLastOptimizationMarker = kLogFirstExecution
+ kCompileMaglev_NotConcurrent = 0b010,
+ kCompileMaglev_Concurrent = 0b011,
+ kCompileTurbofan_NotConcurrent = 0b100,
+ kCompileTurbofan_Concurrent = 0b101,
+ kLastOptimizationMarker = kCompileTurbofan_Concurrent,
};
// For kNone or kInOptimizationQueue we don't need any special processing.
// To check both cases using a single mask, we expect the kNone to be 0 and
// kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
-STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b000 &&
+STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
- 0b001);
+ 0b01);
STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
0b111);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
-inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
- return marker == OptimizationMarker::kInOptimizationQueue;
-}
-
-inline bool IsCompileOptimizedMarker(OptimizationMarker marker) {
- return marker == OptimizationMarker::kCompileOptimized ||
- marker == OptimizationMarker::kCompileOptimizedConcurrent;
-}
-
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
- case OptimizationMarker::kLogFirstExecution:
- return os << "OptimizationMarker::kLogFirstExecution";
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
- case OptimizationMarker::kCompileOptimized:
- return os << "OptimizationMarker::kCompileOptimized";
- case OptimizationMarker::kCompileOptimizedConcurrent:
- return os << "OptimizationMarker::kCompileOptimizedConcurrent";
+ case OptimizationMarker::kCompileMaglev_NotConcurrent:
+ return os << "OptimizationMarker::kCompileMaglev_NotConcurrent";
+ case OptimizationMarker::kCompileMaglev_Concurrent:
+ return os << "OptimizationMarker::kCompileMaglev_Concurrent";
+ case OptimizationMarker::kCompileTurbofan_NotConcurrent:
+ return os << "OptimizationMarker::kCompileTurbofan_NotConcurrent";
+ case OptimizationMarker::kCompileTurbofan_Concurrent:
+ return os << "OptimizationMarker::kCompileTurbofan_Concurrent";
case OptimizationMarker::kInOptimizationQueue:
return os << "OptimizationMarker::kInOptimizationQueue";
}
}
-enum class OptimizationTier {
- kNone = 0b00,
- kMidTier = 0b01,
- kTopTier = 0b10,
- kLastOptimizationTier = kTopTier
-};
-static constexpr uint32_t kNoneOrMidTierMask = 0b10;
-static constexpr uint32_t kNoneMask = 0b11;
-
-inline std::ostream& operator<<(std::ostream& os,
- const OptimizationTier& tier) {
- switch (tier) {
- case OptimizationTier::kNone:
- return os << "OptimizationTier::kNone";
- case OptimizationTier::kMidTier:
- return os << "OptimizationTier::kMidTier";
- case OptimizationTier::kTopTier:
- return os << "OptimizationTier::kTopTier";
- }
-}
-
enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
enum class CallFeedbackContent { kTarget, kReceiver };
@@ -1721,26 +1673,48 @@ inline std::ostream& operator<<(std::ostream& os,
case SpeculationMode::kDisallowSpeculation:
return os << "SpeculationMode::kDisallowSpeculation";
}
- UNREACHABLE();
- return os;
}
enum class BlockingBehavior { kBlock, kDontBlock };
-enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
-
-#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
- C(Handler, handler) \
- C(CEntryFP, c_entry_fp) \
- C(CFunction, c_function) \
- C(Context, context) \
- C(PendingException, pending_exception) \
- C(PendingHandlerContext, pending_handler_context) \
- C(PendingHandlerEntrypoint, pending_handler_entrypoint) \
- C(PendingHandlerConstantPool, pending_handler_constant_pool) \
- C(PendingHandlerFP, pending_handler_fp) \
- C(PendingHandlerSP, pending_handler_sp) \
- C(ExternalCaughtException, external_caught_exception) \
+enum class ConcurrencyMode : uint8_t { kNotConcurrent, kConcurrent };
+
+inline const char* ToString(ConcurrencyMode mode) {
+ switch (mode) {
+ case ConcurrencyMode::kNotConcurrent:
+ return "ConcurrencyMode::kNotConcurrent";
+ case ConcurrencyMode::kConcurrent:
+ return "ConcurrencyMode::kConcurrent";
+ }
+}
+inline std::ostream& operator<<(std::ostream& os, ConcurrencyMode mode) {
+ return os << ToString(mode);
+}
+
+// An architecture independent representation of the sets of registers available
+// for instruction creation.
+enum class AliasingKind {
+ // Registers alias a single register of every other size (e.g. Intel).
+ kOverlap,
+ // Registers alias two registers of the next smaller size (e.g. ARM).
+ kCombine,
+ // SIMD128 Registers are independent of every other size (e.g Riscv)
+ kIndependent
+};
+
+#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
+ C(Handler, handler) \
+ C(CEntryFP, c_entry_fp) \
+ C(CFunction, c_function) \
+ C(Context, context) \
+ C(PendingException, pending_exception) \
+ C(PendingHandlerContext, pending_handler_context) \
+ C(PendingHandlerEntrypoint, pending_handler_entrypoint) \
+ C(PendingHandlerConstantPool, pending_handler_constant_pool) \
+ C(PendingHandlerFP, pending_handler_fp) \
+ C(PendingHandlerSP, pending_handler_sp) \
+ C(NumFramesAbovePendingHandler, num_frames_above_pending_handler) \
+ C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
enum IsolateAddressId {
@@ -1760,7 +1734,7 @@ enum IsolateAddressId {
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
V(TrapFuncSigMismatch) \
- V(TrapDataSegmentDropped) \
+ V(TrapDataSegmentOutOfBounds) \
V(TrapElemSegmentDropped) \
V(TrapTableOutOfBounds) \
V(TrapRethrowNull) \
@@ -1769,8 +1743,6 @@ enum IsolateAddressId {
V(TrapArrayOutOfBounds) \
V(TrapArrayTooLarge)
-enum WasmRttSubMode { kCanonicalize, kFresh };
-
enum KeyedAccessLoadMode {
STANDARD_LOAD,
LOAD_IGNORE_OUT_OF_BOUNDS,
@@ -1820,15 +1792,8 @@ constexpr int kSwissNameDictionaryInitialCapacity = 4;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
-#ifdef V8_INCLUDE_RECEIVER_IN_ARGC
-constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = 1;
constexpr uint16_t kDontAdaptArgumentsSentinel = 0;
-#else
-constexpr bool kJSArgcIncludesReceiver = false;
-constexpr int kJSArgcReceiverSlots = 0;
-constexpr uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
-#endif
// Helper to get the parameter count for functions with JS linkage.
inline constexpr int JSParameterCount(int param_count_without_receiver) {
@@ -1925,15 +1890,17 @@ enum class StringTransitionStrategy {
} // namespace internal
-// Tag dispatching support for acquire loads and release stores.
+// Tag dispatching support for atomic loads and stores.
struct AcquireLoadTag {};
struct RelaxedLoadTag {};
struct ReleaseStoreTag {};
struct RelaxedStoreTag {};
+struct SeqCstAccessTag {};
static constexpr AcquireLoadTag kAcquireLoad;
static constexpr RelaxedLoadTag kRelaxedLoad;
static constexpr ReleaseStoreTag kReleaseStore;
static constexpr RelaxedStoreTag kRelaxedStore;
+static constexpr SeqCstAccessTag kSeqCstAccess;
} // namespace v8
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index de75463362..e5d4e91544 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -18,6 +18,7 @@ namespace internal {
T(DebuggerLoading, "Error loading debugger") \
T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
T(DeletePrivateField, "Private fields can not be deleted") \
+ T(PlaceholderOnly, "%") \
T(UncaughtException, "Uncaught %") \
T(Unsupported, "Not supported") \
T(WrongServiceType, "Internal error, wrong service type: %") \
@@ -55,10 +56,12 @@ namespace internal {
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
+ T(CallShadowRealmFunctionThrown, "Called throwing ShadowRealm function") \
T(CallSiteExpectsFunction, \
"CallSite expects wasm object as first or function as second argument, " \
"got <%, %>") \
T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
+ T(CannotBeShared, "% cannot be shared") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
T(CannotPreventExt, "Cannot prevent extensions") \
T(CannotFreeze, "Cannot freeze") \
@@ -85,6 +88,7 @@ namespace internal {
T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property %, object is not extensible") \
T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
+ T(DoNotUse, "Do not use %; %") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueNotConstructor, \
"Class extends value % is not a constructor or null") \
@@ -104,6 +108,7 @@ namespace internal {
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
T(InvalidArgument, "invalid_argument") \
+ T(InvalidArgumentForTemporal, "Invalid argument for Temporal %") \
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
@@ -146,6 +151,7 @@ namespace internal {
T(NotConstructor, "% is not a constructor") \
T(NotDateObject, "this is not a Date object.") \
T(NotGeneric, "% requires that 'this' be a %") \
+ T(NotCallable, "% is not a function") \
T(NotCallableOrIterable, \
"% is not a function or its return value is not iterable") \
T(NotCallableOrAsyncIterable, \
@@ -345,19 +351,22 @@ namespace internal {
T(Invalid, "Invalid % : %") \
T(InvalidArrayLength, "Invalid array length") \
T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
+ T(InvalidCalendar, "Invalid calendar specified: %") \
T(InvalidCodePoint, "Invalid code point %") \
T(InvalidCountValue, "Invalid count value") \
T(InvalidDataViewAccessorOffset, \
"Offset is outside the bounds of the DataView") \
- T(InvalidDataViewLength, "Invalid DataView length") \
+ T(InvalidDataViewLength, "Invalid DataView length %") \
T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
T(InvalidHint, "Invalid hint: %") \
T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
T(InvalidLanguageTag, "Invalid language tag: %") \
T(InvalidWeakMapKey, "Invalid value used as weak map key") \
T(InvalidWeakSetValue, "Invalid value used in weak set") \
+ T(InvalidShadowRealmEvaluateSourceText, "Invalid value used as source text") \
T(InvalidStringLength, "Invalid string length") \
T(InvalidTimeValue, "Invalid time value") \
+ T(InvalidTimeValueForTemporal, "Invalid time value for Temporal %") \
T(InvalidTimeZone, "Invalid time zone specified: %") \
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayIndex, "Invalid typed array index") \
@@ -380,6 +389,8 @@ namespace internal {
T(ToPrecisionFormatRange, \
"toPrecision() argument must be between 1 and 100") \
T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
+ T(StructFieldCountOutOfRange, \
+ "Struct field count out of range (maximum of 999 allowed)") \
T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
T(TypedArraySetSourceTooLarge, "Source is too large") \
T(TypedArrayTooLargeToSort, \
@@ -585,7 +596,7 @@ namespace internal {
T(WasmTrapFuncSigMismatch, "null function or function signature mismatch") \
T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \
T(WasmTrapJSTypeError, "type incompatibility when transforming from/to JS") \
- T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
+ T(WasmTrapDataSegmentOutOfBounds, "data segment out of bounds") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
T(WasmTrapRethrowNull, "rethrowing null value") \
T(WasmTrapNullDereference, "dereferencing a null pointer") \
diff --git a/deps/v8/src/common/operation.h b/deps/v8/src/common/operation.h
new file mode 100644
index 0000000000..74682d9046
--- /dev/null
+++ b/deps/v8/src/common/operation.h
@@ -0,0 +1,59 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMMON_OPERATION_H_
+#define V8_COMMON_OPERATION_H_
+
+#include <ostream>
+
+#define ARITHMETIC_OPERATION_LIST(V) \
+ V(Add) \
+ V(Subtract) \
+ V(Multiply) \
+ V(Divide) \
+ V(Modulus) \
+ V(Exponentiate) \
+ V(BitwiseAnd) \
+ V(BitwiseOr) \
+ V(BitwiseXor) \
+ V(ShiftLeft) \
+ V(ShiftRight) \
+ V(ShiftRightLogical)
+
+#define UNARY_OPERATION_LIST(V) \
+ V(BitwiseNot) \
+ V(Negate) \
+ V(Increment) \
+ V(Decrement)
+
+#define COMPARISON_OPERATION_LIST(V) \
+ V(Equal) \
+ V(StrictEqual) \
+ V(LessThan) \
+ V(LessThanOrEqual) \
+ V(GreaterThan) \
+ V(GreaterThanOrEqual)
+
+#define OPERATION_LIST(V) \
+ ARITHMETIC_OPERATION_LIST(V) \
+ UNARY_OPERATION_LIST(V) \
+ COMPARISON_OPERATION_LIST(V)
+
+enum class Operation {
+#define DEFINE_OP(name) k##name,
+ OPERATION_LIST(DEFINE_OP)
+#undef DEFINE_OP
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Operation& operation) {
+ switch (operation) {
+#define CASE(name) \
+ case Operation::k##name: \
+ return os << #name;
+ OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
+
+#endif // V8_COMMON_OPERATION_H_
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index fc366b2b53..f5991ddcda 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -6,7 +6,6 @@
#define V8_COMMON_PTR_COMPR_INL_H_
#include "include/v8-internal.h"
-#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate-inl.h"
@@ -68,11 +67,6 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
-STATIC_ASSERT(kPtrComprCageReservationSize ==
- Internals::kPtrComprCageReservationSize);
-STATIC_ASSERT(kPtrComprCageBaseAlignment ==
- Internals::kPtrComprCageBaseAlignment);
-
#else
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 1d5668208a..58d1460290 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -1,24 +1,55 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
+// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMMON_PTR_COMPR_H_
#define V8_COMMON_PTR_COMPR_H_
+#include "src/base/memory.h"
#include "src/common/globals.h"
-#ifdef V8_COMPRESS_POINTERS
-
namespace v8 {
namespace internal {
-// See v8:7703 for details about how pointer compression works.
-constexpr size_t kPtrComprCageReservationSize = size_t{4} * GB;
-constexpr size_t kPtrComprCageBaseAlignment = size_t{4} * GB;
+// Accessors for fields that may be unaligned due to pointer compression.
+
+template <typename V>
+static inline V ReadMaybeUnalignedValue(Address p) {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(V) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ // Bug(v8:8875) Double fields may be unaligned.
+ constexpr bool unaligned_double_field =
+ std::is_same<V, double>::value && kDoubleSize > kTaggedSize;
+ if (unaligned_double_field || v8_pointer_compression_unaligned) {
+ return base::ReadUnalignedValue<V>(p);
+ } else {
+ return base::Memory<V>(p);
+ }
+}
+
+template <typename V>
+static inline void WriteMaybeUnalignedValue(Address p, V value) {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(V) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ // Bug(v8:8875) Double fields may be unaligned.
+ constexpr bool unaligned_double_field =
+ std::is_same<V, double>::value && kDoubleSize > kTaggedSize;
+ if (unaligned_double_field || v8_pointer_compression_unaligned) {
+ base::WriteUnalignedValue<V>(p, value);
+ } else {
+ base::Memory<V>(p) = value;
+ }
+}
} // namespace internal
} // namespace v8
-#endif // V8_COMPRESS_POINTERS
-
#endif // V8_COMMON_PTR_COMPR_H_
diff --git a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
index bcfb1f6c00..8e611c3785 100644
--- a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
@@ -12,6 +12,7 @@
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/parked-scope.h"
@@ -23,7 +24,7 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/roots/roots.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
@@ -266,7 +267,7 @@ bool LazyCompileDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
}
if (job->state == Job::State::kPendingToRunOnForeground) {
- job->task->Run();
+ job->task->RunOnMainThread(isolate_);
job->state = Job::State::kFinalizingNow;
}
@@ -400,11 +401,7 @@ void LazyCompileDispatcher::DoBackgroundWork(JobDelegate* delegate) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompileDispatcherDoBackgroundWork");
- WorkerThreadRuntimeCallStatsScope worker_thread_scope(
- worker_thread_runtime_call_stats_);
-
- LocalIsolate isolate(isolate_, ThreadKind::kBackground,
- worker_thread_scope.Get());
+ LocalIsolate isolate(isolate_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&isolate);
LocalHandleScope handle_scope(&isolate);
@@ -459,7 +456,7 @@ void LazyCompileDispatcher::DoBackgroundWork(JobDelegate* delegate) {
while (!delegate->ShouldYield()) {
Job* job = nullptr;
{
- base::MutexGuard lock(&job_dispose_mutex_);
+ base::MutexGuard lock(&mutex_);
if (jobs_to_dispose_.empty()) break;
job = jobs_to_dispose_.back();
jobs_to_dispose_.pop_back();
@@ -541,13 +538,8 @@ void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
void LazyCompileDispatcher::DeleteJob(Job* job) {
DCHECK(job->state == Job::State::kFinalized);
-#ifdef DEBUG
- {
- base::MutexGuard lock(&mutex_);
- all_jobs_.erase(job);
- }
-#endif
- delete job;
+ base::MutexGuard lock(&mutex_);
+ DeleteJob(job, lock);
}
void LazyCompileDispatcher::DeleteJob(Job* job, const base::MutexGuard&) {
@@ -555,7 +547,6 @@ void LazyCompileDispatcher::DeleteJob(Job* job, const base::MutexGuard&) {
#ifdef DEBUG
all_jobs_.erase(job);
#endif
- base::MutexGuard lock(&job_dispose_mutex_);
jobs_to_dispose_.push_back(job);
if (jobs_to_dispose_.size() == 1) {
num_jobs_for_background_++;
diff --git a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
index 423e700e7f..ce5919dc51 100644
--- a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_DISPATCHER_LAZY_COMPILE_DISPATCHER_H_
#include <cstdint>
-#include <map>
#include <memory>
#include <unordered_set>
#include <utility>
@@ -220,14 +219,17 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
std::unordered_set<Job*> all_jobs_;
#endif
+ // A queue of jobs to delete on the background thread(s). Jobs in this queue
+ // are considered dead as far as the rest of the system is concerned, so they
+ // won't be pointed to by any SharedFunctionInfo and won't be in the all_jobs
+ // set above.
+ std::vector<Job*> jobs_to_dispose_;
+
// If not nullptr, then the main thread waits for the task processing
// this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
Job* main_thread_blocking_on_job_;
base::ConditionVariable main_thread_blocking_signal_;
- mutable base::Mutex job_dispose_mutex_;
- std::vector<Job*> jobs_to_dispose_;
-
// Test support.
base::AtomicValue<bool> block_for_testing_;
base::Semaphore semaphore_for_testing_;
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 45f3684fb6..873a8d982f 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -58,18 +58,11 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
private:
// v8::Task overrides.
void RunInternal() override {
-#ifdef V8_RUNTIME_CALL_STATS
- WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
- worker_thread_runtime_call_stats_);
- LocalIsolate local_isolate(isolate_, ThreadKind::kBackground,
- runtime_call_stats_scope.Get());
-#else // V8_RUNTIME_CALL_STATS
LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
-#endif // V8_RUNTIME_CALL_STATS
DCHECK(local_isolate.heap()->IsParked());
{
- RCS_SCOPE(runtime_call_stats_scope.Get(),
+ RCS_SCOPE(&local_isolate,
RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 84f254dd56..511ca78722 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,25 +1,19 @@
jgruber@chromium.org
mslekova@chromium.org
-mvstanton@chromium.org
nicohartmann@chromium.org
-sigurds@chromium.org
+tebbi@chromium.org
+thibaudm@chromium.org
per-file wasm-*=ahaas@chromium.org
-per-file wasm-*=bbudge@chromium.org
per-file wasm-*=clemensb@chromium.org
per-file wasm-*=gdeepti@chromium.org
per-file wasm-*=jkummerow@chromium.org
per-file wasm-*=manoskouk@chromium.org
-per-file wasm-*=thibaudm@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
per-file machine-operator.*=ahaas@chromium.org
-per-file machine-operator.*=bbudge@chromium.org
per-file machine-operator.*=gdeepti@chromium.org
-per-file machine-operator.*=zhin@chromium.org
per-file opcodes.*=ahaas@chromium.org
-per-file opcodes.*=bbudge@chromium.org
per-file opcodes.*=gdeepti@chromium.org
-per-file opcodes.*=zhin@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index f929b98b0c..2c437d3535 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -243,9 +243,12 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectFunction() {
- FieldAccess access = {kTaggedBase, JSGeneratorObject::kFunctionOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Function(), MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kFunctionOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ Type::CallableFunction(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -421,9 +424,9 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
JSTypedArray::kExternalPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
-#ifdef V8_CAGED_POINTERS
- Type::CagedPointer(),
- MachineType::CagedPointer(),
+#ifdef V8_SANDBOXED_POINTERS
+ Type::SandboxedPointer(),
+ MachineType::SandboxedPointer(),
#else
Type::ExternalPointer(),
MachineType::Pointer(),
@@ -442,9 +445,9 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
-#ifdef V8_CAGED_POINTERS
- Type::CagedPointer(),
- MachineType::CagedPointer(),
+#ifdef V8_SANDBOXED_POINTERS
+ Type::SandboxedPointer(),
+ MachineType::SandboxedPointer(),
#else
Type::ExternalPointer(),
MachineType::Pointer(),
@@ -753,13 +756,12 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
ExternalString::kResourceDataOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
ConstFieldInfo::None(),
false,
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
kExternalStringResourceDataTag,
#endif
};
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 35a7838004..67283d9da1 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -730,8 +730,7 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
}
} else {
DescriptorArray descriptors = *map.instance_descriptors().object();
- *index_out = descriptors.Search(*name.object(), *map.object(),
- broker()->is_concurrent_inlining());
+ *index_out = descriptors.Search(*name.object(), *map.object(), true);
if (index_out->is_found()) {
*details_out = descriptors.GetDetails(*index_out);
}
@@ -744,10 +743,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode) const {
CHECK(name.IsUniqueName());
- // Dictionary property const tracking is unsupported when concurrent inlining
- // is enabled.
- CHECK_IMPLIES(V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
- !broker()->is_concurrent_inlining());
+ // Dictionary property const tracking is unsupported with concurrent inlining.
+ CHECK(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
@@ -911,26 +908,19 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// Walk up the prototype chain.
- if (!broker()->is_concurrent_inlining()) {
- if (!map.TrySerializePrototype(NotConcurrentInliningTag{broker()})) {
- return Invalid();
- }
- }
-
// Load the map's prototype's map to guarantee that every time we use it,
// we use the same Map.
- base::Optional<HeapObjectRef> prototype = map.prototype();
- if (!prototype.has_value()) return Invalid();
+ HeapObjectRef prototype = map.prototype();
- MapRef map_prototype_map = prototype->map();
+ MapRef map_prototype_map = prototype.map();
if (!map_prototype_map.object()->IsJSObjectMap()) {
// Don't allow proxies on the prototype chain.
- if (!prototype->IsNull()) {
- DCHECK(prototype->object()->IsJSProxy());
+ if (!prototype.IsNull()) {
+ DCHECK(prototype.object()->IsJSProxy());
return Invalid();
}
- DCHECK(prototype->IsNull());
+ DCHECK(prototype.IsNull());
if (dictionary_prototype_on_chain) {
// TODO(v8:11248) See earlier comment about
@@ -954,7 +944,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
}
- holder = prototype->AsJSObject();
+ holder = prototype.AsJSObject();
map = map_prototype_map;
if (!CanInlinePropertyAccess(map, access_mode)) {
@@ -1129,11 +1119,9 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
PropertyAttributes attrs) const {
// Check if the {map} has a data transition with the given {name}.
Map transition =
- TransitionsAccessor(isolate(), map.object(),
- broker()->is_concurrent_inlining())
+ TransitionsAccessor(isolate(), *map.object(), true)
.SearchTransition(*name.object(), PropertyKind::kData, attrs);
if (transition.is_null()) return Invalid();
-
base::Optional<MapRef> maybe_transition_map =
TryMakeRef(broker(), transition);
if (!maybe_transition_map.has_value()) return Invalid();
@@ -1202,11 +1190,6 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
unrecorded_dependencies.push_back(
dependencies()->TransitionDependencyOffTheRecord(transition_map));
- if (!broker()->is_concurrent_inlining()) {
- transition_map.SerializeBackPointer(
- NotConcurrentInliningTag{broker()}); // For BuildPropertyStore.
- }
-
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index dd3e6801e5..ab929915e1 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -195,8 +195,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -514,7 +516,7 @@ void CodeGenerator::AssembleDeconstructFrame() {
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- __ ldm(ia, fp, lr.bit() | fp.bit());
+ __ ldm(ia, fp, {lr, fp});
}
frame_access_state()->SetFrameAccessToSP();
}
@@ -3548,8 +3550,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ b(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3656,24 +3659,24 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fp.is_empty()) {
frame->AlignSavedCalleeRegisterSlots();
}
- if (saves_fp != 0) {
+ if (!saves_fp.is_empty()) {
// Save callee-saved FP registers.
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
- uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
- uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- DCHECK_EQ((last - first + 1), base::bits::CountPopulation(saves_fp));
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp.bits()) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp.bits());
+ DCHECK_EQ((last - first + 1), saves_fp.Count());
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
- frame->AllocateSavedCalleeRegisterSlots(base::bits::CountPopulation(saves));
+ frame->AllocateSavedCalleeRegisterSlots(saves.Count());
}
}
@@ -3731,7 +3734,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -3770,25 +3773,21 @@ void CodeGenerator::AssembleConstructFrame() {
#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= saves.Count();
required_slots -= frame()->GetReturnSlotCount();
- required_slots -= 2 * base::bits::CountPopulation(saves_fp);
+ required_slots -= 2 * saves_fp.Count();
if (required_slots > 0) {
__ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
- if (saves_fp != 0) {
+ if (!saves_fp.is_empty()) {
// Save callee-saved FP registers.
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
- uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
- uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- DCHECK_EQ((last - first + 1), base::bits::CountPopulation(saves_fp));
- __ vstm(db_w, sp, DwVfpRegister::from_code(first),
- DwVfpRegister::from_code(last));
+ __ vstm(db_w, sp, saves_fp.first(), saves_fp.last());
}
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
}
@@ -3809,18 +3808,15 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ ldm(ia_w, sp, saves);
}
// Restore FP registers.
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fp.is_empty()) {
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
- uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
- uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
- DwVfpRegister::from_code(last));
+ __ vldm(ia_w, sp, saves_fp.first(), saves_fp.last());
}
unwinding_info_writer_.MarkBlockWillExit();
@@ -3865,7 +3861,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// Get the actual argument count.
__ ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
}
AssembleDeconstructFrame();
}
@@ -3875,22 +3871,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
if (parameter_slots > 1) {
- if (kJSArgcIncludesReceiver) {
- __ cmp(argc_reg, Operand(parameter_slots));
- __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
- } else {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
- __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC,
- lt);
- }
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 3ad4e720c4..4adcc3e9c6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -626,7 +626,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -664,7 +664,7 @@ ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
return kArmVst1S128;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -2588,10 +2588,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I64x2UConvertI32x4Low, kArmI64x2UConvertI32x4Low) \
V(I64x2UConvertI32x4High, kArmI64x2UConvertI32x4High) \
V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4) \
+ V(I32x4RelaxedTruncF32x4S, kArmI32x4SConvertF32x4) \
V(I32x4SConvertI16x8Low, kArmI32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High, kArmI32x4SConvertI16x8High) \
V(I32x4Neg, kArmI32x4Neg) \
V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \
+ V(I32x4RelaxedTruncF32x4U, kArmI32x4UConvertF32x4) \
V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \
V(I32x4Abs, kArmI32x4Abs) \
@@ -2640,7 +2642,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Sub, kArmF32x4Sub) \
V(F32x4Mul, kArmF32x4Mul) \
V(F32x4Min, kArmF32x4Min) \
+ V(F32x4RelaxedMin, kArmF32x4Min) \
V(F32x4Max, kArmF32x4Max) \
+ V(F32x4RelaxedMax, kArmF32x4Max) \
V(F32x4Eq, kArmF32x4Eq) \
V(F32x4Ne, kArmF32x4Ne) \
V(F32x4Lt, kArmF32x4Lt) \
@@ -2891,6 +2895,22 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
#if V8_ENABLE_WEBASSEMBLY
namespace {
@@ -3132,6 +3152,14 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
VisitF64x2PminOrPMax(this, kArmF64x2Pmax, node);
}
+void InstructionSelector::VisitF64x2RelaxedMin(Node* node) {
+ VisitF64x2Pmin(node);
+}
+
+void InstructionSelector::VisitF64x2RelaxedMax(Node* node) {
+ VisitF64x2Pmax(node);
+}
+
#define EXT_MUL_LIST(V) \
V(I16x8ExtMulLowI8x16S, kArmVmullLow, NeonS8) \
V(I16x8ExtMulHighI8x16S, kArmVmullHigh, NeonS8) \
@@ -3229,6 +3257,14 @@ void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
g.UseFixed(node->InputAt(0), q0));
}
+void InstructionSelector::VisitI32x4RelaxedTruncF64x2SZero(Node* node) {
+ VisitI32x4TruncSatF64x2SZero(node);
+}
+
+void InstructionSelector::VisitI32x4RelaxedTruncF64x2UZero(Node* node) {
+ VisitI32x4TruncSatF64x2UZero(node);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 9e3e819edc..3f1842f64e 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -255,6 +255,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
+ // Access below the stack pointer is not expected in arm64 and is actively
+ // prevented at run time in the simulator.
+ DCHECK_IMPLIES(offset.from_stack_pointer(), offset.offset() >= 0);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -287,8 +290,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, ne,
exit());
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -878,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
+ __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ Debug("kArchAbortCSADcheck", 0, BREAK);
@@ -1894,8 +1899,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
- case kArm64LdrDecodeCagedPointer:
- __ LoadCagedPointerField(i.OutputRegister(), i.MemoryOperand());
+ case kArm64LdrDecodeSandboxedPointer:
+ __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1910,8 +1915,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
- case kArm64StrEncodeCagedPointer:
- __ StoreCagedPointerField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
+ case kArm64StrEncodeSandboxedPointer:
+ __ StoreSandboxedPointerField(i.InputOrZeroRegister64(0),
+ i.MemoryOperand(1));
break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2905,8 +2911,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ B(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -2998,17 +3005,17 @@ void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- call_descriptor->CalleeSavedFPRegisters());
+ CPURegList saves_fp =
+ CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
+ DCHECK(saves_fp.bits() == CPURegList::GetCalleeSavedV().bits());
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kSystemPointerSize));
}
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- call_descriptor->CalleeSavedRegisters());
+ CPURegList saves =
+ CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
saved_count = saves.Count();
if (saved_count != 0) {
frame->AllocateSavedCalleeRegisterSlots(saved_count);
@@ -3025,11 +3032,11 @@ void CodeGenerator::AssembleConstructFrame() {
int required_slots =
frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- call_descriptor->CalleeSavedRegisters());
+ CPURegList saves =
+ CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
DCHECK_EQ(saves.Count() % 2, 0);
- CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- call_descriptor->CalleeSavedFPRegisters());
+ CPURegList saves_fp =
+ CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
DCHECK_EQ(saves_fp.Count() % 2, 0);
// The number of return slots should be even after aligning the Frame.
const int returns = frame()->GetReturnSlotCount();
@@ -3184,7 +3191,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Save FP registers.
DCHECK_IMPLIES(saves_fp.Count() != 0,
- saves_fp.list() == CPURegList::GetCalleeSavedV().list());
+ saves_fp.bits() == CPURegList::GetCalleeSavedV().bits());
__ PushCPURegList(saves_fp);
// Save registers.
@@ -3204,13 +3211,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
// Restore registers.
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- call_descriptor->CalleeSavedRegisters());
+ CPURegList saves =
+ CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
__ PopCPURegList<TurboAssembler::kAuthLR>(saves);
// Restore fp registers.
- CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- call_descriptor->CalleeSavedFPRegisters());
+ CPURegList saves_fp =
+ CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
__ PopCPURegList(saves_fp);
unwinding_info_writer_.MarkBlockWillExit();
@@ -3254,7 +3261,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// Get the actual argument count.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
__ Ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
}
AssembleDeconstructFrame();
@@ -3264,10 +3271,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- if (!kJSArgcIncludesReceiver) {
- __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
- }
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
if (parameter_slots > 1) {
__ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 46d5314f4f..f52c999106 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -201,8 +201,8 @@ namespace compiler {
V(Arm64LdarDecompressAnyTagged) \
V(Arm64StrCompressTagged) \
V(Arm64StlrCompressTagged) \
- V(Arm64LdrDecodeCagedPointer) \
- V(Arm64StrEncodeCagedPointer) \
+ V(Arm64LdrDecodeSandboxedPointer) \
+ V(Arm64StrEncodeSandboxedPointer) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64Sxtl) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 3cffa51e90..a21f454c8b 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -317,7 +317,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdarDecompressTaggedSigned:
case kArm64LdarDecompressTaggedPointer:
case kArm64LdarDecompressAnyTagged:
- case kArm64LdrDecodeCagedPointer:
+ case kArm64LdrDecodeSandboxedPointer:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -341,7 +341,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Str:
case kArm64StrCompressTagged:
case kArm64StlrCompressTagged:
- case kArm64StrEncodeCagedPointer:
+ case kArm64StrEncodeSandboxedPointer:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 7f34b6594e..a63fb8d9e5 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -839,8 +839,8 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- case MachineRepresentation::kCagedPointer:
- opcode = kArm64LdrDecodeCagedPointer;
+ case MachineRepresentation::kSandboxedPointer:
+ opcode = kArm64LdrDecodeSandboxedPointer;
immediate_mode = kLoadStoreImm64;
break;
case MachineRepresentation::kSimd128:
@@ -943,8 +943,8 @@ void InstructionSelector::VisitStore(Node* node) {
immediate_mode =
COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64;
break;
- case MachineRepresentation::kCagedPointer:
- opcode = kArm64StrEncodeCagedPointer;
+ case MachineRepresentation::kSandboxedPointer:
+ opcode = kArm64StrEncodeSandboxedPointer;
immediate_mode = kLoadStoreImm64;
break;
case MachineRepresentation::kWord64:
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index eaa39ccb82..56b62ec186 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -90,7 +90,7 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}
- Handle<Code> InputCode(size_t index) {
+ Handle<CodeT> InputCode(size_t index) {
return ToCode(instr_->InputAt(index));
}
@@ -168,7 +168,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}
- Handle<Code> ToCode(InstructionOperand* op) {
+ Handle<CodeT> ToCode(InstructionOperand* op) {
return ToConstant(op).ToCode();
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 367a5ae38b..e03f6d843e 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -498,7 +498,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
tasm()->JumpIfEqual(input, begin->first, begin->second);
++begin;
}
- AssembleArchJump(def_block);
+ AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
return;
}
auto middle = begin + (end - begin) / 2;
@@ -509,6 +509,11 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
}
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target))
+ AssembleArchJumpRegardlessOfAssemblyOrder(target);
+}
+
base::OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
@@ -581,7 +586,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
}
void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
- Safepoint safepoint = safepoints()->DefineSafepoint(tasm());
+ auto safepoint = safepoints()->DefineSafepoint(tasm());
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
@@ -593,7 +598,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
// we also don't need to worry about them, since the GC has special
// knowledge about those fields anyway.
if (index < frame_header_offset) continue;
- safepoint.DefinePointerSlot(index);
+ safepoint.DefineTaggedStackSlot(index);
}
}
}
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 18de20f92c..5fd34a41f4 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -165,8 +165,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
- size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
- size_t GetHandlerTableOffset() const { return handler_table_offset_; }
+ size_t handler_table_offset() const { return handler_table_offset_; }
const ZoneVector<int>& block_starts() const { return block_starts_; }
const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
@@ -246,6 +245,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
CodeGenResult AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(RpoNumber target);
+ void AssembleArchJumpRegardlessOfAssemblyOrder(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
// Generates special branch for deoptimization condition.
diff --git a/deps/v8/src/compiler/backend/gap-resolver.cc b/deps/v8/src/compiler/backend/gap-resolver.cc
index e9aeb2fb2c..d6c3d009ea 100644
--- a/deps/v8/src/compiler/backend/gap-resolver.cc
+++ b/deps/v8/src/compiler/backend/gap-resolver.cc
@@ -22,7 +22,7 @@ namespace {
// aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
ParallelMove* moves) {
- DCHECK(!kSimpleFPAliasing);
+ DCHECK(kFPAliasing == AliasingKind::kCombine);
// Splitting is only possible when the slot size is the same as float size.
DCHECK_EQ(kSystemPointerSize, kFloatSize);
const LocationOperand& src_loc = LocationOperand::cast(move->source());
@@ -104,7 +104,8 @@ void GapResolver::Resolve(ParallelMove* moves) {
i++;
source_kinds.Add(GetKind(move->source()));
destination_kinds.Add(GetKind(move->destination()));
- if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
+ if (kFPAliasing == AliasingKind::kCombine &&
+ move->destination().IsFPRegister()) {
fp_reps |= RepresentationBit(
LocationOperand::cast(move->destination()).representation());
}
@@ -119,7 +120,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
return;
}
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
@@ -166,8 +167,8 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
move->SetPending();
// We may need to split moves between FP locations differently.
- const bool is_fp_loc_move =
- !kSimpleFPAliasing && destination.IsFPLocationOperand();
+ const bool is_fp_loc_move = kFPAliasing == AliasingKind::kCombine &&
+ destination.IsFPLocationOperand();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 84cb574821..2730728cdd 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -328,8 +328,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
exit());
__ lea(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -3736,8 +3738,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ jmp(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3996,14 +3999,9 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
+ if (!saves.is_empty()) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
- int pushed = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- ++pushed;
- }
- frame->AllocateSavedCalleeRegisterSlots(pushed);
+ frame->AllocateSavedCalleeRegisterSlots(saves.Count());
}
}
@@ -4093,17 +4091,17 @@ void CodeGenerator::AssembleConstructFrame() {
#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are created below.
- required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= saves.Count();
required_slots -= frame()->GetReturnSlotCount();
if (required_slots > 0) {
__ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
- if (saves != 0) { // Save callee-saved registers.
+ if (!saves.is_empty()) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (((1 << i) & saves)) __ push(Register::from_code(i));
+ for (Register reg : base::Reversed(saves)) {
+ __ push(reg);
}
}
@@ -4118,14 +4116,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const RegList saves = call_descriptor->CalleeSavedRegisters();
// Restore registers.
- if (saves != 0) {
+ if (!saves.is_empty()) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
__ add(esp, Immediate(returns * kSystemPointerSize));
}
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
+ for (Register reg : saves) {
+ __ pop(reg);
}
}
@@ -4169,7 +4166,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// Get the actual argument count.
__ mov(argc_reg, Operand(ebp, StandardFrameConstants::kArgCOffset));
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
}
AssembleDeconstructFrame();
}
@@ -4182,21 +4179,14 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- if (kJSArgcIncludesReceiver) {
- __ cmp(argc_reg, Immediate(parameter_slots));
- } else {
- int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
- }
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
+ __ cmp(argc_reg, Immediate(parameter_slots));
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
@@ -4208,16 +4198,15 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ ret(static_cast<int>(pop_size));
} else {
Register scratch_reg = ecx;
- DCHECK_EQ(0u,
- call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
}
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & pop_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(pop_reg));
int pop_size = static_cast<int>(parameter_slots * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 6f92f491e0..3a4fb705b6 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -276,7 +276,7 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -632,7 +632,7 @@ ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index fc04b37ec3..0544dd5340 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -30,6 +30,14 @@ namespace v8 {
namespace internal {
namespace compiler {
+Smi NumberConstantToSmi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kNumberConstant);
+ const double d = OpParameter<double>(node->op());
+ Smi smi = Smi::FromInt(static_cast<int32_t>(d));
+ CHECK_EQ(smi.value(), d);
+ return smi;
+}
+
InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
@@ -501,11 +509,17 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
- case IrOpcode::kNumberConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
case IrOpcode::kDelayedStringConstant:
return g->UseImmediate(input);
+ case IrOpcode::kNumberConstant:
+ if (rep == MachineRepresentation::kWord32) {
+ Smi smi = NumberConstantToSmi(input);
+ return g->UseImmediate(static_cast<int32_t>(smi.ptr()));
+ } else {
+ return g->UseImmediate(input);
+ }
case IrOpcode::kCompressedHeapConstant:
case IrOpcode::kHeapConstant: {
if (!CanBeTaggedOrCompressedPointer(rep)) {
@@ -2785,16 +2799,18 @@ void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && \
+ !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
+ // && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 &&
+ // !V8_TARGET_ARCH_RISCV64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
UNIMPLEMENTED();
}
@@ -2824,6 +2840,12 @@ void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
+ // && !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
+ !V8_TARGET_ARCH_RISCV64
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
+ // && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 7167ef75eb..45d4de79c0 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -18,6 +18,8 @@
#include "src/compiler/schedule.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
+#include "src/execution/isolate-utils-inl.h"
+#include "src/objects/instance-type-inl.h"
#include "src/utils/ostreams.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -79,13 +81,13 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
- const bool kComplexFPAliasing = !kSimpleFPAliasing &&
+ const bool kCombineFPAliasing = kFPAliasing == AliasingKind::kCombine &&
this->IsFPLocationOperand() &&
other.IsFPLocationOperand();
const bool kComplexS128SlotAliasing =
(this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
(other.IsSimd128StackSlot() && this->IsAnyStackSlot());
- if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
+ if (!kCombineFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other);
}
const LocationOperand& loc = *LocationOperand::cast(this);
@@ -96,7 +98,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation();
- if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
+ if (kCombineFPAliasing && !kComplexS128SlotAliasing) {
if (rep == other_rep) return EqualsCanonicalized(other);
if (kind == LocationOperand::REGISTER) {
// FP register-register interference.
@@ -124,7 +126,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
bool LocationOperand::IsCompatible(LocationOperand* op) {
if (IsRegister() || IsStackSlot()) {
return op->IsRegister() || op->IsStackSlot();
- } else if (kSimpleFPAliasing) {
+ } else if (kFPAliasing != AliasingKind::kCombine) {
// A backend may choose to generate the same instruction sequence regardless
// of the FP representation. As a result, we can relax the compatibility and
// allow a Double to be moved in a Float for example. However, this is only
@@ -160,8 +162,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
<< ")";
case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(="
- << DoubleRegister::from_code(
- unalloc->fixed_register_index())
+ << (unalloc->IsSimd128Register()
+ ? i::RegisterName((Simd128Register::from_code(
+ unalloc->fixed_register_index())))
+ : i::RegisterName(DoubleRegister::from_code(
+ unalloc->fixed_register_index())))
<< ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
@@ -176,7 +181,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
}
}
case InstructionOperand::CONSTANT:
- return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
+ return os << "[constant:v" << ConstantOperand::cast(op).virtual_register()
<< "]";
case InstructionOperand::IMMEDIATE: {
ImmediateOperand imm = ImmediateOperand::cast(op);
@@ -259,8 +264,8 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
case MachineRepresentation::kCompressed:
os << "|c";
break;
- case MachineRepresentation::kCagedPointer:
- os << "|cg";
+ case MachineRepresentation::kSandboxedPointer:
+ os << "|sb";
break;
case MachineRepresentation::kMapWord:
UNREACHABLE();
@@ -282,7 +287,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& mo) {
if (!mo.source().Equals(mo.destination())) {
os << " = " << mo.source();
}
- return os << ";";
+ return os;
}
bool ParallelMove::IsRedundant() const {
@@ -294,8 +299,8 @@ bool ParallelMove::IsRedundant() const {
void ParallelMove::PrepareInsertAfter(
MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
- bool no_aliasing =
- kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
+ bool no_aliasing = kFPAliasing != AliasingKind::kCombine ||
+ !move->destination().IsFPLocationOperand();
MoveOperands* replacement = nullptr;
MoveOperands* eliminated = nullptr;
for (MoveOperands* curr : *this) {
@@ -371,11 +376,11 @@ bool Instruction::AreMovesRedundant() const {
void Instruction::Print() const { StdoutStream{} << *this << std::endl; }
std::ostream& operator<<(std::ostream& os, const ParallelMove& pm) {
- const char* space = "";
+ const char* delimiter = "";
for (MoveOperands* move : pm) {
if (move->IsEliminated()) continue;
- os << space << *move;
- space = " ";
+ os << delimiter << *move;
+ delimiter = "; ";
}
return os;
}
@@ -551,9 +556,11 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}
-Handle<Code> Constant::ToCode() const {
+Handle<CodeT> Constant::ToCode() const {
DCHECK_EQ(kHeapObject, type());
- Handle<Code> value(reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
+ Handle<CodeT> value(
+ reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
+ DCHECK(value->IsCodeT(GetPtrComprCageBaseSlow(*value)));
return value;
}
@@ -931,7 +938,7 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kSimd128:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
return rep;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 37a8209b6b..89394b2c24 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -5,15 +5,13 @@
#ifndef V8_COMPILER_BACKEND_INSTRUCTION_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_H_
-#include <deque>
#include <iosfwd>
#include <map>
-#include <set>
#include "src/base/compiler-specific.h"
#include "src/base/numbers/double.h"
#include "src/codegen/external-reference.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/codegen/source-position.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
@@ -142,6 +140,9 @@ class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
// APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
+ bool operator==(InstructionOperand& other) const { return Equals(other); }
+ bool operator!=(InstructionOperand& other) const { return !Equals(other); }
+
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
@@ -553,7 +554,7 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kTagged:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
return true;
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
@@ -694,12 +695,19 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) {
- if (kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kOverlap) {
// We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64;
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ if (IsSimd128Register()) {
+ canonical = MachineRepresentation::kSimd128;
+ } else {
+ canonical = MachineRepresentation::kFloat64;
+ }
} else {
// We need to distinguish FP register operands of different reps when
- // aliasing is not simple (e.g. ARM).
+ // aliasing is AliasingKind::kCombine (e.g. ARM).
+ DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
canonical = LocationOperand::cast(this)->representation();
}
}
@@ -1170,7 +1178,7 @@ class V8_EXPORT_PRIVATE Constant final {
}
Handle<HeapObject> ToHeapObject() const;
- Handle<Code> ToCode() const;
+ Handle<CodeT> ToCode() const;
const StringConstantBase* ToDelayedStringConstant() const;
private:
@@ -1695,6 +1703,12 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return (representation_mask() & kFPRepMask) != 0;
}
+ bool HasSimd128VirtualRegisters() const {
+ constexpr int kSimd128RepMask =
+ RepresentationBit(MachineRepresentation::kSimd128);
+ return (representation_mask() & kSimd128RepMask) != 0;
+ }
+
Instruction* GetBlockStart(RpoNumber rpo) const;
using const_iterator = InstructionDeque::const_iterator;
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
index 3796b29b9c..77a00c51b8 100644
--- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -164,8 +164,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -441,9 +443,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size, \
sign_extend); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
+ __ BranchShort(&exit, ne, i.TempRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
size); \
@@ -962,11 +964,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kLoong64Div_w:
__ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kLoong64Div_wu:
__ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kLoong64Mod_w:
__ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -979,11 +981,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kLoong64Div_d:
__ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kLoong64Div_du:
__ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kLoong64Mod_d:
__ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -1938,8 +1940,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ Branch(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -2178,17 +2181,17 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- int count = base::bits::CountPopulation(saves_fpu);
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
+ int count = saves_fpu.Count();
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- int count = base::bits::CountPopulation(saves);
+ if (!saves.is_empty()) {
+ int count = saves.Count();
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -2246,7 +2249,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -2290,20 +2293,20 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
- required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= saves.Count();
+ required_slots -= saves_fpu.Count();
required_slots -= returns;
if (required_slots > 0) {
__ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
}
- if (saves_fpu != 0) {
+ if (!saves_fpu.is_empty()) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
}
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
__ MultiPush(saves);
}
@@ -2324,13 +2327,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore GP registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
__ MultiPopFPU(saves_fpu);
}
@@ -2382,9 +2385,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_count).
- if (!kJSArgcIncludesReceiver) {
- __ Add_d(t0, t0, Operand(1)); // Also pop the receiver.
- }
if (parameter_slots > 1) {
__ li(t1, parameter_slots);
__ slt(t2, t0, t1);
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
index 10d22fcaa2..4f03f99acd 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -467,7 +467,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kSimd128:
@@ -546,7 +546,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kSimd128:
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index dfefbe0649..6d70841fd7 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -4,6 +4,8 @@
#include "src/compiler/backend/mid-tier-register-allocator.h"
+#include <ostream>
+
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -76,6 +78,7 @@ class BlockState final {
private:
RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_;
+ RegisterState* simd128_registers_in_state_;
DeferredBlocksRegion* deferred_blocks_region_;
@@ -90,6 +93,8 @@ RegisterState* BlockState::register_in_state(RegisterKind kind) {
return general_registers_in_state_;
case RegisterKind::kDouble:
return double_registers_in_state_;
+ case RegisterKind::kSimd128:
+ return simd128_registers_in_state_;
}
}
@@ -104,6 +109,10 @@ void BlockState::set_register_in_state(RegisterState* register_state,
DCHECK_NULL(double_registers_in_state_);
double_registers_in_state_ = register_state;
break;
+ case RegisterKind::kSimd128:
+ DCHECK_NULL(simd128_registers_in_state_);
+ simd128_registers_in_state_ = register_state;
+ break;
}
}
@@ -178,7 +187,8 @@ class RegisterIndex final {
}
uintptr_t ToBit(MachineRepresentation rep) const {
- if (kSimpleFPAliasing || rep != MachineRepresentation::kSimd128) {
+ if (kFPAliasing != AliasingKind::kCombine ||
+ rep != MachineRepresentation::kSimd128) {
return 1ull << ToInt();
} else {
DCHECK_EQ(rep, MachineRepresentation::kSimd128);
@@ -999,7 +1009,7 @@ void RegisterState::Register::CommitAtMerge() {
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
- CHECK_GT(num_commits_required_, 0);
+ DCHECK_GT(num_commits_required_, 0);
}
void RegisterState::Register::Commit(AllocatedOperand allocated_op,
@@ -1066,7 +1076,7 @@ void RegisterState::Register::Spill(AllocatedOperand allocated_op,
bool is_shared = is_shared_;
Reset();
is_shared_ = is_shared;
- CHECK_IMPLIES(is_shared_, was_spilled_while_shared());
+ DCHECK_IMPLIES(is_shared_, was_spilled_while_shared());
}
void RegisterState::Register::SpillPhiGapMove(
@@ -1280,6 +1290,10 @@ class RegisterBitVector {
public:
RegisterBitVector() : bits_(0) {}
+ bool operator==(const RegisterBitVector& other) const {
+ return bits_ == other.bits_;
+ }
+
bool Contains(RegisterIndex reg, MachineRepresentation rep) const {
return bits_ & reg.ToBit(rep);
}
@@ -1313,6 +1327,7 @@ class RegisterBitVector {
bool IsEmpty() const { return bits_ == 0; }
private:
+ friend std::ostream& operator<<(std::ostream&, RegisterBitVector);
explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {}
static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
@@ -1320,6 +1335,10 @@ class RegisterBitVector {
uintptr_t bits_;
};
+std::ostream& operator<<(std::ostream& os, RegisterBitVector register_bits) {
+ return os << std::hex << register_bits.bits_ << std::dec;
+}
+
// A SinglePassRegisterAllocator is a fast register allocator that does a single
// pass through the instruction stream without performing any live-range
// analysis beforehand. It deals with a single RegisterKind, either general or
@@ -1413,7 +1432,8 @@ class SinglePassRegisterAllocator final {
// Spill a register in a previously processed successor block when merging
// state into the current block.
- void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg);
+ void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg,
+ MachineRepresentation rep);
// Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
// on entry to a |successor| block.
@@ -1421,7 +1441,7 @@ class SinglePassRegisterAllocator final {
VirtualRegisterData& virtual_register,
RpoNumber successor, RegisterState* succ_state);
- // Update the virtual register data with the data in register_state()
+ // Update the virtual register data with the data in register_state_.
void UpdateVirtualRegisterState();
// Returns true if |virtual_register| is defined after use position |pos| at
@@ -1455,6 +1475,8 @@ class SinglePassRegisterAllocator final {
MachineRepresentation rep, InstructionOperand* operand,
UsePosition pos);
void SpillRegister(RegisterIndex reg);
+ void SpillRegisterAndPotentialSimdSibling(RegisterIndex reg,
+ MachineRepresentation rep);
void SpillRegisterForVirtualRegister(int virtual_register);
// Pre-emptively spill the register at the exit of deferred blocks such that
@@ -1512,6 +1534,28 @@ class SinglePassRegisterAllocator final {
bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
RegisterIndex reg);
+ // If {if kFPAliasing kind is COMBINE}, two FP registers alias one SIMD
+ // register. This returns the index of the higher aliasing FP register from
+ // the SIMD register index (which is the same as the lower register index).
+ RegisterIndex simdSibling(RegisterIndex reg) const {
+ CHECK_EQ(kFPAliasing, AliasingKind::kCombine); // Statically evaluated.
+ RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1};
+#ifdef DEBUG
+ // Check that {reg} is indeed the lower SIMD half and {sibling} is the
+ // upper half.
+ int double_reg_base_code;
+ DCHECK_EQ(2, data_->config()->GetAliases(
+ MachineRepresentation::kSimd128,
+ ToRegCode(reg, MachineRepresentation::kSimd128),
+ MachineRepresentation::kFloat64, &double_reg_base_code));
+ DCHECK_EQ(reg, FromRegCode(double_reg_base_code,
+ MachineRepresentation::kFloat64));
+ DCHECK_EQ(sibling, FromRegCode(double_reg_base_code + 1,
+ MachineRepresentation::kFloat64));
+#endif // DEBUG
+ return sibling;
+ }
+
// Returns a RegisterBitVector representing the allocated registers in
// reg_state.
RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state);
@@ -1519,20 +1563,10 @@ class SinglePassRegisterAllocator final {
// Check the consistency of reg->vreg and vreg->reg mappings if a debug build.
void CheckConsistency();
- bool HasRegisterState() const { return register_state_; }
- RegisterState* register_state() const {
- DCHECK(HasRegisterState());
- return register_state_;
- }
-
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
- return data()->VirtualRegisterDataFor(virtual_register);
+ return data_->VirtualRegisterDataFor(virtual_register);
}
- int num_allocatable_registers() const { return num_allocatable_registers_; }
- const InstructionBlock* current_block() const { return current_block_; }
- MidTierRegisterAllocationData* data() const { return data_; }
-
// Virtual register to register mapping.
ZoneVector<RegisterIndex> virtual_register_to_reg_;
@@ -1555,7 +1589,7 @@ class SinglePassRegisterAllocator final {
RegisterBitVector allocated_registers_bits_;
RegisterBitVector same_input_output_registers_bits_;
- // These fields are only used when kSimpleFPAliasing == false.
+ // These fields are only used when kFPAliasing == COMBINE.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
@@ -1586,9 +1620,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
reg_code_to_index_[reg_code] = RegisterIndex(i);
}
- // If the architecture has non-simple FP aliasing, initialize float and
+ // If the architecture has COMBINE FP aliasing, initialize float and
// simd128 specific register details.
- if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
+ if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
const RegisterConfiguration* config = data->config();
// Float registers.
@@ -1621,7 +1655,11 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
CHECK_EQ(2, config->GetAliases(MachineRepresentation::kSimd128, reg_code,
MachineRepresentation::kFloat64,
&double_reg_base_code));
- RegisterIndex double_reg(reg_code_to_index_[double_reg_base_code]);
+ RegisterIndex double_reg{reg_code_to_index_[double_reg_base_code]};
+ // We later rely on the fact that the two aliasing double registers are at
+ // consecutive indexes.
+ DCHECK_EQ(double_reg.ToInt() + 1,
+ reg_code_to_index_[double_reg_base_code + 1].ToInt());
simd128_reg_code_to_index_->at(reg_code) = double_reg;
index_to_simd128_reg_code_->at(double_reg.ToInt()) = reg_code;
}
@@ -1629,7 +1667,7 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
}
int SinglePassRegisterAllocator::VirtualRegisterForRegister(RegisterIndex reg) {
- return register_state()->VirtualRegisterForRegister(reg);
+ return register_state_->VirtualRegisterForRegister(reg);
}
RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
@@ -1639,8 +1677,8 @@ RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
}
void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
- if (!HasRegisterState()) return;
- for (RegisterIndex reg : *register_state()) {
+ if (!register_state_) return;
+ for (RegisterIndex reg : *register_state_) {
SpillRegisterForDeferred(reg, instr_index);
}
}
@@ -1652,7 +1690,7 @@ void SinglePassRegisterAllocator::EndInstruction() {
}
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
- DCHECK(!HasRegisterState());
+ DCHECK_NULL(register_state_);
DCHECK_NULL(current_block_);
DCHECK(in_use_at_instr_start_bits_.IsEmpty());
DCHECK(in_use_at_instr_end_bits_.IsEmpty());
@@ -1680,25 +1718,25 @@ void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
// If we didn't allocate any registers of this kind, or we have reached the
// start, nothing to do here.
- if (!HasRegisterState() || block->PredecessorCount() == 0) {
+ if (!register_state_ || block->PredecessorCount() == 0) {
current_block_ = nullptr;
return;
}
if (block->PredecessorCount() > 1) {
- register_state()->AddSharedUses(
- static_cast<int>(block->PredecessorCount()) - 1);
+ register_state_->AddSharedUses(static_cast<int>(block->PredecessorCount()) -
+ 1);
}
- BlockState& block_state = data()->block_state(block->rpo_number());
- block_state.set_register_in_state(register_state(), kind());
+ BlockState& block_state = data_->block_state(block->rpo_number());
+ block_state.set_register_in_state(register_state_, kind());
// Remove virtual register to register mappings and clear register state.
// We will update the register state when starting the next block.
while (!allocated_registers_bits_.IsEmpty()) {
RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
VirtualRegisterData& vreg_data =
- data()->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
+ data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
FreeRegister(reg, vreg_data.vreg(), vreg_data.rep());
}
current_block_ = nullptr;
@@ -1706,10 +1744,10 @@ void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
}
void SinglePassRegisterAllocator::CloneStateFrom(RpoNumber successor) {
- BlockState& block_state = data()->block_state(successor);
+ BlockState& block_state = data_->block_state(successor);
RegisterState* successor_registers = block_state.register_in_state(kind());
if (successor_registers != nullptr) {
- if (data()->GetBlock(successor)->PredecessorCount() == 1) {
+ if (data_->GetBlock(successor)->PredecessorCount() == 1) {
// Avoids cloning for successors where we are the only predecessor.
register_state_ = successor_registers;
} else {
@@ -1722,7 +1760,7 @@ void SinglePassRegisterAllocator::CloneStateFrom(RpoNumber successor) {
void SinglePassRegisterAllocator::MergeStateFrom(
const InstructionBlock::Successors& successors) {
for (RpoNumber successor : successors) {
- BlockState& block_state = data()->block_state(successor);
+ BlockState& block_state = data_->block_state(successor);
RegisterState* successor_registers = block_state.register_in_state(kind());
if (successor_registers == nullptr) {
continue;
@@ -1753,47 +1791,76 @@ void SinglePassRegisterAllocator::MergeStateFrom(
if (processed_regs.Contains(reg, rep)) continue;
processed_regs.Add(reg, rep);
- if (register_state()->IsAllocated(reg)) {
- if (successor_registers->Equals(reg, register_state())) {
- // Both match, keep the merged register data.
- register_state()->CommitAtMerge(reg);
- } else {
- // Try to find a new register for this successor register in the
- // merge block, and add a gap move on entry of the successor block.
- RegisterIndex new_reg =
- RegisterForVirtualRegister(virtual_register);
- if (!new_reg.is_valid()) {
- new_reg = ChooseFreeRegister(
- allocated_registers_bits_.Union(succ_allocated_regs), rep);
- } else if (new_reg != reg) {
- // Spill the |new_reg| in the successor block to be able to use it
- // for this gap move. It would be spilled anyway since it contains
- // a different virtual register than the merge block.
- SpillRegisterAtMerge(successor_registers, new_reg);
- }
-
- if (new_reg.is_valid()) {
- MoveRegisterOnMerge(new_reg, reg, vreg_data, successor,
- successor_registers);
- processed_regs.Add(new_reg, rep);
- } else {
- SpillRegisterAtMerge(successor_registers, reg);
- }
- }
- } else {
+ bool reg_in_use = register_state_->IsAllocated(reg);
+ // For COMBINE FP aliasing, the register is also "in use" if the
+ // FP register for the upper half is allocated.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ rep == MachineRepresentation::kSimd128) {
+ reg_in_use |= register_state_->IsAllocated(simdSibling(reg));
+ }
+ // Similarly (but the other way around), the register might be the upper
+ // half of a SIMD register that is allocated.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ (rep == MachineRepresentation::kFloat64 ||
+ rep == MachineRepresentation::kFloat32)) {
+ int simd_reg_code;
+ CHECK_EQ(1, data_->config()->GetAliases(
+ rep, ToRegCode(reg, rep),
+ MachineRepresentation::kSimd128, &simd_reg_code));
+ // Sanity check: The SIMD reg code should be the shifted FP reg code.
+ DCHECK_EQ(simd_reg_code,
+ ToRegCode(reg, rep) >>
+ (rep == MachineRepresentation::kFloat64 ? 1 : 2));
+ RegisterIndex simd_reg =
+ FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
+ reg_in_use |=
+ simd_reg.is_valid() && register_state_->IsAllocated(simd_reg) &&
+ VirtualRegisterDataFor(VirtualRegisterForRegister(simd_reg))
+ .rep() == MachineRepresentation::kSimd128;
+ }
+
+ if (!reg_in_use) {
DCHECK(successor_registers->IsAllocated(reg));
if (RegisterForVirtualRegister(virtual_register).is_valid()) {
// If we already hold the virtual register in a different register
// then spill this register in the sucessor block to avoid
// invalidating the 1:1 vreg<->reg mapping.
// TODO(rmcilroy): Add a gap move to avoid spilling.
- SpillRegisterAtMerge(successor_registers, reg);
- } else {
- // Register is free in our current register state, so merge the
- // successor block's register details into it.
- register_state()->CopyFrom(reg, successor_registers);
- AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
+ SpillRegisterAtMerge(successor_registers, reg, rep);
+ continue;
}
+ // Register is free in our current register state, so merge the
+ // successor block's register details into it.
+ register_state_->CopyFrom(reg, successor_registers);
+ AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
+ continue;
+ }
+
+ // Register is in use in the current register state.
+ if (successor_registers->Equals(reg, register_state_)) {
+ // Both match, keep the merged register data.
+ register_state_->CommitAtMerge(reg);
+ continue;
+ }
+ // Try to find a new register for this successor register in the
+ // merge block, and add a gap move on entry of the successor block.
+ RegisterIndex new_reg = RegisterForVirtualRegister(virtual_register);
+ if (!new_reg.is_valid()) {
+ new_reg = ChooseFreeRegister(
+ allocated_registers_bits_.Union(succ_allocated_regs), rep);
+ } else if (new_reg != reg) {
+ // Spill the |new_reg| in the successor block to be able to use it
+ // for this gap move. It would be spilled anyway since it contains
+ // a different virtual register than the merge block.
+ SpillRegisterAtMerge(successor_registers, new_reg, rep);
+ }
+
+ if (new_reg.is_valid()) {
+ MoveRegisterOnMerge(new_reg, reg, vreg_data, successor,
+ successor_registers);
+ processed_regs.Add(new_reg, rep);
+ } else {
+ SpillRegisterAtMerge(successor_registers, reg, rep);
}
}
}
@@ -1813,26 +1880,66 @@ RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
return allocated_regs;
}
-void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
- RegisterIndex reg) {
- DCHECK_NE(reg_state, register_state());
+void SinglePassRegisterAllocator::SpillRegisterAtMerge(
+ RegisterState* reg_state, RegisterIndex reg, MachineRepresentation rep) {
+ DCHECK_NE(reg_state, register_state_);
if (reg_state->IsAllocated(reg)) {
int virtual_register = reg_state->VirtualRegisterForRegister(reg);
VirtualRegisterData& vreg_data =
- data()->VirtualRegisterDataFor(virtual_register);
+ data_->VirtualRegisterDataFor(virtual_register);
AllocatedOperand allocated = AllocatedOperandForReg(reg, vreg_data.rep());
- reg_state->Spill(reg, allocated, current_block(), data());
+ reg_state->Spill(reg, allocated, current_block_, data_);
+ }
+ // Also spill the "simd sibling" register if we want to use {reg} for SIMD.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ rep == MachineRepresentation::kSimd128) {
+ RegisterIndex sibling = simdSibling(reg);
+ if (reg_state->IsAllocated(sibling)) {
+ int virtual_register = reg_state->VirtualRegisterForRegister(sibling);
+ VirtualRegisterData& vreg_data =
+ data_->VirtualRegisterDataFor(virtual_register);
+ AllocatedOperand allocated =
+ AllocatedOperandForReg(sibling, vreg_data.rep());
+ reg_state->Spill(sibling, allocated, current_block_, data_);
+ }
+ }
+ // Similarly, spill the whole SIMD register if we want to use a part of it.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ (rep == MachineRepresentation::kFloat64 ||
+ rep == MachineRepresentation::kFloat32)) {
+ int simd_reg_code;
+ CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep),
+ MachineRepresentation::kSimd128,
+ &simd_reg_code));
+ // Sanity check: The SIMD register code should be the shifted {reg_code}.
+ DCHECK_EQ(simd_reg_code,
+ ToRegCode(reg, rep) >>
+ (rep == MachineRepresentation::kFloat64 ? 1 : 2));
+ RegisterIndex simd_reg =
+ FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
+ DCHECK(!simd_reg.is_valid() || simd_reg == reg ||
+ simdSibling(simd_reg) == reg);
+ if (simd_reg.is_valid() && reg_state->IsAllocated(simd_reg)) {
+ int virtual_register = reg_state->VirtualRegisterForRegister(simd_reg);
+ VirtualRegisterData& vreg_data =
+ data_->VirtualRegisterDataFor(virtual_register);
+ if (vreg_data.rep() == MachineRepresentation::kSimd128) {
+ AllocatedOperand allocated =
+ AllocatedOperandForReg(simd_reg, vreg_data.rep());
+ reg_state->Spill(simd_reg, allocated, current_block_, data_);
+ }
+ }
}
}
void SinglePassRegisterAllocator::MoveRegisterOnMerge(
RegisterIndex from, RegisterIndex to, VirtualRegisterData& virtual_register,
RpoNumber successor, RegisterState* succ_state) {
- int instr_index = data()->GetBlock(successor)->first_instruction_index();
+ int instr_index = data_->GetBlock(successor)->first_instruction_index();
MoveOperands* move =
- data()->AddPendingOperandGapMove(instr_index, Instruction::START);
+ data_->AddPendingOperandGapMove(instr_index, Instruction::START);
succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register.rep()),
- &move->destination(), data());
+ &move->destination(), data_);
AllocatePendingUse(from, virtual_register, &move->source(), true,
instr_index);
}
@@ -1840,13 +1947,13 @@ void SinglePassRegisterAllocator::MoveRegisterOnMerge(
void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
// Update to the new register state and update vreg_to_register map and
// resetting any shared registers that were spilled by another block.
- DCHECK(HasRegisterState());
- for (RegisterIndex reg : *register_state()) {
- register_state()->ResetIfSpilledWhileShared(reg);
+ DCHECK_NOT_NULL(register_state_);
+ for (RegisterIndex reg : *register_state_) {
+ register_state_->ResetIfSpilledWhileShared(reg);
int virtual_register = VirtualRegisterForRegister(reg);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
MachineRepresentation rep =
- data()->VirtualRegisterDataFor(virtual_register).rep();
+ data_->VirtualRegisterDataFor(virtual_register).rep();
AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
}
}
@@ -1855,31 +1962,38 @@ void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
void SinglePassRegisterAllocator::CheckConsistency() {
#ifdef DEBUG
- for (int virtual_register = 0;
- virtual_register < data()->code()->VirtualRegisterCount();
- virtual_register++) {
- RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
- if (reg.is_valid()) {
- CHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
- CHECK(allocated_registers_bits_.Contains(
- reg, VirtualRegisterDataFor(virtual_register).rep()));
- }
- }
-
- for (RegisterIndex reg : *register_state()) {
+ int virtual_register = -1;
+ for (RegisterIndex reg : virtual_register_to_reg_) {
+ ++virtual_register;
+ if (!reg.is_valid()) continue;
+ DCHECK_NOT_NULL(register_state_);
+ // The register must be set to allocated.
+ DCHECK(register_state_->IsAllocated(reg));
+ // reg <-> vreg linking is consistent.
+ DCHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
+ }
+ DCHECK_EQ(data_->code()->VirtualRegisterCount() - 1, virtual_register);
+
+ RegisterBitVector used_registers;
+ for (RegisterIndex reg : *register_state_) {
+ if (!register_state_->IsAllocated(reg)) continue;
int virtual_register = VirtualRegisterForRegister(reg);
- if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
- CHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
- CHECK(allocated_registers_bits_.Contains(
- reg, VirtualRegisterDataFor(virtual_register).rep()));
- }
- }
+ // reg <-> vreg linking is consistent.
+ DCHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
+ MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep();
+ // Allocated registers do not overlap.
+ DCHECK(!used_registers.Contains(reg, rep));
+ used_registers.Add(reg, rep);
+ }
+ // The {allocated_registers_bits_} bitvector is accurate.
+ DCHECK_EQ(used_registers, allocated_registers_bits_);
#endif
}
RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int reg_code, MachineRepresentation rep) const {
- if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
+ if (kFPAliasing == AliasingKind::kCombine &&
+ kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) {
return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
} else if (rep == MachineRepresentation::kSimd128) {
@@ -1893,7 +2007,8 @@ RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
MachineRepresentation rep) const {
- if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
+ if (kFPAliasing == AliasingKind::kCombine &&
+ kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) {
DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
return index_to_float32_reg_code_->at(reg.ToInt());
@@ -1924,18 +2039,18 @@ void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
int instr_index) {
DCHECK(from.IsAllocated());
DCHECK(to.IsAllocated());
- const InstructionBlock* block = current_block();
- DCHECK_EQ(data()->GetBlock(instr_index), block);
+ const InstructionBlock* block = current_block_;
+ DCHECK_EQ(data_->GetBlock(instr_index), block);
if (instr_index == block->last_instruction_index()) {
// Add gap move to the first instruction of every successor block.
for (const RpoNumber succ : block->successors()) {
- const InstructionBlock* successor = data()->GetBlock(succ);
+ const InstructionBlock* successor = data_->GetBlock(succ);
DCHECK_EQ(1, successor->PredecessorCount());
- data()->AddGapMove(successor->first_instruction_index(),
- Instruction::START, from, to);
+ data_->AddGapMove(successor->first_instruction_index(),
+ Instruction::START, from, to);
}
} else {
- data()->AddGapMove(instr_index + 1, Instruction::START, from, to);
+ data_->AddGapMove(instr_index + 1, Instruction::START, from, to);
}
}
@@ -1983,18 +2098,21 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
// If we don't need a register, only try to allocate one if the virtual
// register hasn't yet been spilled, to try to avoid spilling it.
if (!reg.is_valid() && (must_use_register ||
- !virtual_register.IsSpilledAt(instr_index, data()))) {
+ !virtual_register.IsSpilledAt(instr_index, data_))) {
reg = ChooseRegisterFor(rep, pos, must_use_register);
} else if (reg.is_valid() &&
same_input_output_registers_bits_.Contains(reg, rep) &&
pos != UsePosition::kStart) {
// If we are trying to allocate a register that was used as a
// same_input_output operand, then we can't use it for an input that expands
- // past UsePosition::kStart. This should only happen for REGISTER_OR_SLOT
- // operands that are used for the deopt state, so we can just use a spill
- // slot.
- CHECK(!must_use_register);
- return RegisterIndex::Invalid();
+ // past UsePosition::kStart.
+ if (must_use_register) {
+ // Use a new register instead.
+ reg = ChooseRegisterFor(rep, pos, must_use_register);
+ } else {
+ // Use a spill slot.
+ reg = RegisterIndex::Invalid();
+ }
}
return reg;
}
@@ -2005,7 +2123,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
RegisterIndex reg = ChooseFreeRegister(rep, pos);
if (!reg.is_valid() && must_use_register) {
reg = ChooseRegisterToSpill(rep, pos);
- SpillRegister(reg);
+ SpillRegisterAndPotentialSimdSibling(reg, rep);
}
return reg;
}
@@ -2025,7 +2143,8 @@ RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
MachineRepresentation rep) {
- if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
+ if (kFPAliasing != AliasingKind::kCombine ||
+ kind() == RegisterKind::kGeneral) {
return true;
} else {
switch (rep) {
@@ -2053,12 +2172,13 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
RegisterIndex chosen_reg = RegisterIndex::Invalid();
- if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
- chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers());
+ if (kFPAliasing != AliasingKind::kCombine ||
+ kind() == RegisterKind::kGeneral) {
+ chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_);
} else {
// If we don't have simple fp aliasing, we need to check each register
// individually to get one with the required representation.
- for (RegisterIndex reg : *register_state()) {
+ for (RegisterIndex reg : *register_state_) {
if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) {
chosen_reg = reg;
break;
@@ -2086,18 +2206,25 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
int earliest_definition = kMaxInt;
bool pending_only_use = false;
bool already_spilled = false;
- for (RegisterIndex reg : *register_state()) {
+ for (RegisterIndex reg : *register_state_) {
// Skip if register is in use, or not valid for representation.
if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
+ // With non-simple FP aliasing, a SIMD register might block more than one FP
+ // register.
+ DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine,
+ register_state_->IsAllocated(reg));
+ if (kFPAliasing == AliasingKind::kCombine &&
+ !register_state_->IsAllocated(reg))
+ continue;
VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
- if ((!pending_only_use && register_state()->HasPendingUsesOnly(reg)) ||
+ if ((!pending_only_use && register_state_->HasPendingUsesOnly(reg)) ||
(!already_spilled && vreg_data.HasSpillOperand()) ||
vreg_data.output_instr_index() < earliest_definition) {
chosen_reg = reg;
earliest_definition = vreg_data.output_instr_index();
- pending_only_use = register_state()->HasPendingUsesOnly(reg);
+ pending_only_use = register_state_->HasPendingUsesOnly(reg);
already_spilled = vreg_data.HasSpillOperand();
}
}
@@ -2116,27 +2243,37 @@ void SinglePassRegisterAllocator::CommitRegister(RegisterIndex reg,
// Committing the output operation, and mark the register use in this
// instruction, then mark it as free going forward.
AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
- register_state()->Commit(reg, allocated, operand, data());
+ register_state_->Commit(reg, allocated, operand, data_);
MarkRegisterUse(reg, rep, pos);
FreeRegister(reg, virtual_register, rep);
CheckConsistency();
}
void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) {
- if (!register_state()->IsAllocated(reg)) return;
+ if (!register_state_->IsAllocated(reg)) return;
// Spill the register and free register.
int virtual_register = VirtualRegisterForRegister(reg);
MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep();
AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
- register_state()->Spill(reg, allocated, current_block(), data());
+ register_state_->Spill(reg, allocated, current_block_, data_);
FreeRegister(reg, virtual_register, rep);
}
+void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling(
+ RegisterIndex reg, MachineRepresentation rep) {
+ SpillRegister(reg);
+
+ if (kFPAliasing == AliasingKind::kCombine &&
+ rep == MachineRepresentation::kSimd128) {
+ SpillRegister(simdSibling(reg));
+ }
+}
+
void SinglePassRegisterAllocator::SpillAllRegisters() {
- if (!HasRegisterState()) return;
+ if (!register_state_) return;
- for (RegisterIndex reg : *register_state()) {
+ for (RegisterIndex reg : *register_state_) {
SpillRegister(reg);
}
}
@@ -2154,12 +2291,12 @@ void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
int instr_index) {
// Committing the output operation, and mark the register use in this
// instruction, then mark it as free going forward.
- if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
+ if (register_state_->IsAllocated(reg) && register_state_->IsShared(reg)) {
VirtualRegisterData& virtual_register =
- data()->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
+ data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
AllocatedOperand allocated =
AllocatedOperandForReg(reg, virtual_register.rep());
- register_state()->SpillForDeferred(reg, allocated, instr_index, data());
+ register_state_->SpillForDeferred(reg, allocated, instr_index, data_);
FreeRegister(reg, virtual_register.vreg(), virtual_register.rep());
}
CheckConsistency();
@@ -2168,7 +2305,7 @@ void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
int instr_index, RpoNumber deferred_block,
VirtualRegisterData& virtual_register) {
- DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
+ DCHECK(data_->GetBlock(deferred_block)->IsDeferred());
DCHECK(virtual_register.HasSpillRange());
if (!virtual_register.NeedsSpillAtOutput() &&
!DefinedAfter(virtual_register.vreg(), instr_index, UsePosition::kEnd)) {
@@ -2181,9 +2318,9 @@ void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
if (reg.is_valid()) {
int deferred_block_start =
- data()->GetBlock(deferred_block)->first_instruction_index();
- register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register.vreg(),
- deferred_block_start, data());
+ data_->GetBlock(deferred_block)->first_instruction_index();
+ register_state_->MoveToSpillSlotOnDeferred(reg, virtual_register.vreg(),
+ deferred_block_start, data_);
return;
} else {
virtual_register.MarkAsNeedsSpillAtOutput();
@@ -2203,9 +2340,9 @@ void SinglePassRegisterAllocator::AllocateUse(
AllocatedOperand allocated =
AllocatedOperandForReg(reg, virtual_register.rep());
- register_state()->Commit(reg, allocated, operand, data());
- register_state()->AllocateUse(reg, virtual_register.vreg(), operand,
- instr_index, data());
+ register_state_->Commit(reg, allocated, operand, data_);
+ register_state_->AllocateUse(reg, virtual_register.vreg(), operand,
+ instr_index, data_);
AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(), pos);
CheckConsistency();
}
@@ -2215,8 +2352,8 @@ void SinglePassRegisterAllocator::AllocatePendingUse(
InstructionOperand* operand, bool can_be_constant, int instr_index) {
DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg()));
- register_state()->AllocatePendingUse(reg, virtual_register.vreg(), operand,
- can_be_constant, instr_index);
+ register_state_->AllocatePendingUse(reg, virtual_register.vreg(), operand,
+ can_be_constant, instr_index);
// Since this is a pending use and the operand doesn't need to use a register,
// allocate with UsePosition::kNone to avoid blocking it's use by other
// operands in this instruction.
@@ -2232,7 +2369,7 @@ void SinglePassRegisterAllocator::AllocateUseWithMove(
UnallocatedOperand from =
UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
virtual_register.vreg());
- data()->AddGapMove(instr_index, Instruction::END, from, to);
+ data_->AddGapMove(instr_index, Instruction::END, from, to);
InstructionOperand::ReplaceWith(operand, &to);
MarkRegisterUse(reg, virtual_register.rep(), pos);
CheckConsistency();
@@ -2261,12 +2398,11 @@ void SinglePassRegisterAllocator::AllocateInput(
operand->fixed_slot_index());
InstructionOperand::ReplaceWith(operand, &allocated);
MoveOperands* move_op =
- data()->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
- virtual_register.SpillOperand(&move_op->source(), instr_index, true,
- data());
+ data_->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
+ virtual_register.SpillOperand(&move_op->source(), instr_index, true, data_);
return;
} else if (operand->HasSlotPolicy()) {
- virtual_register.SpillOperand(operand, instr_index, false, data());
+ virtual_register.SpillOperand(operand, instr_index, false, data_);
return;
}
@@ -2291,18 +2427,27 @@ void SinglePassRegisterAllocator::AllocateInput(
RegisterIndex reg = ChooseRegisterFor(virtual_register, instr_index, pos,
must_use_register);
- if (reg.is_valid()) {
- if (must_use_register) {
- AllocateUse(reg, virtual_register, operand, instr_index, pos);
- } else {
- AllocatePendingUse(reg, virtual_register, operand,
- operand->HasRegisterOrSlotOrConstantPolicy(),
- instr_index);
- }
- } else {
+ if (!reg.is_valid()) {
+ // The register will have been spilled at this use.
virtual_register.SpillOperand(
operand, instr_index, operand->HasRegisterOrSlotOrConstantPolicy(),
- data());
+ data_);
+ } else if (!must_use_register) {
+ // We might later dedice to spill this register; allocate a pending use.
+ AllocatePendingUse(reg, virtual_register, operand,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ instr_index);
+ } else if (VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(),
+ reg)) {
+ // The register is directly usable.
+ AllocateUse(reg, virtual_register, operand, instr_index, pos);
+ } else {
+ // We assigned another register to the vreg before. {ChooseRegisterFor}
+ // chose a different one (e.g. to fulfill a "unique register" constraint
+ // for a vreg that was previously used for the input corresponding to the
+ // "same as input" output), so add a gap move to copy the input value to
+ // that new register.
+ AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos);
}
}
}
@@ -2318,7 +2463,7 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
if (reg.is_valid()) {
AllocatePendingUse(reg, vreg_data, operand, true, instr_index);
} else {
- vreg_data.SpillOperand(operand, instr_index, true, data());
+ vreg_data.SpillOperand(operand, instr_index, true, data_);
}
}
@@ -2329,8 +2474,8 @@ void SinglePassRegisterAllocator::AllocateConstantOutput(
// necessary gap moves from the constant operand to the register.
SpillRegisterForVirtualRegister(vreg_data.vreg());
if (vreg_data.NeedsSpillAtOutput()) {
- vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block(),
- instr_index, data());
+ vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block_,
+ instr_index, data_);
}
}
@@ -2361,7 +2506,7 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
// TODO(rmcilroy): support secondary storage.
if (!reg.is_valid()) {
- vreg_data.SpillOperand(operand, instr_index, false, data());
+ vreg_data.SpillOperand(operand, instr_index, false, data_);
} else {
InstructionOperand move_output_to;
if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
@@ -2382,10 +2527,9 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
}
if (vreg_data.NeedsSpillAtOutput()) {
vreg_data.EmitGapMoveFromOutputToSpillSlot(
- *AllocatedOperand::cast(operand), current_block(), instr_index,
- data());
+ *AllocatedOperand::cast(operand), current_block_, instr_index, data_);
} else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
- vreg_data.EmitDeferredSpillOutputs(data());
+ vreg_data.EmitDeferredSpillOutputs(data_);
}
}
@@ -2424,15 +2568,15 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
// register's spill slot. As such, spill this input operand using the output
// virtual register's spill slot, then add a gap-move to move the input
// value into this spill slot.
- output_vreg_data.SpillOperand(input, instr_index, false, data());
+ output_vreg_data.SpillOperand(input, instr_index, false, data_);
// Add an unconstrained gap move for the input virtual register.
UnallocatedOperand unconstrained_input(
UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, input_vreg);
- MoveOperands* move_ops = data()->AddGapMove(
+ MoveOperands* move_ops = data_->AddGapMove(
instr_index, Instruction::END, unconstrained_input, PendingOperand());
output_vreg_data.SpillOperand(&move_ops->destination(), instr_index, true,
- data());
+ data_);
}
}
@@ -2460,8 +2604,7 @@ void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
} else {
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
vreg_data.SpillOperand(operand, instr_index,
- operand->HasRegisterOrSlotOrConstantPolicy(),
- data());
+ operand->HasRegisterOrSlotOrConstantPolicy(), data_);
}
}
@@ -2502,7 +2645,8 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
const UnallocatedOperand* operand, int virtual_register,
MachineRepresentation rep, int instr_index, UsePosition pos) {
EnsureRegisterState();
- RegisterIndex reg = FromRegCode(operand->fixed_register_index(), rep);
+ int reg_code = operand->fixed_register_index();
+ RegisterIndex reg = FromRegCode(reg_code, rep);
if (!IsFreeOrSameVirtualRegister(reg, virtual_register) &&
!DefinedAfter(virtual_register, instr_index, pos)) {
// If register is in-use by a different virtual register, spill it now.
@@ -2510,6 +2654,40 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
// spilling.
SpillRegister(reg);
}
+ // Also potentially spill the "sibling SIMD register" on architectures where a
+ // SIMD register aliases two FP registers.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ rep == MachineRepresentation::kSimd128) {
+ if (register_state_->IsAllocated(simdSibling(reg)) &&
+ !DefinedAfter(virtual_register, instr_index, pos)) {
+ SpillRegister(simdSibling(reg));
+ }
+ }
+ // Similarly (but the other way around), spill a SIMD register that (partly)
+ // overlaps with a fixed FP register.
+ if (kFPAliasing == AliasingKind::kCombine &&
+ (rep == MachineRepresentation::kFloat64 ||
+ rep == MachineRepresentation::kFloat32)) {
+ int simd_reg_code;
+ CHECK_EQ(
+ 1, data_->config()->GetAliases(
+ rep, reg_code, MachineRepresentation::kSimd128, &simd_reg_code));
+ // Sanity check: The SIMD register code should be the shifted {reg_code}.
+ DCHECK_EQ(simd_reg_code,
+ reg_code >> (rep == MachineRepresentation::kFloat64 ? 1 : 2));
+ RegisterIndex simd_reg =
+ FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
+ DCHECK(simd_reg == reg || simdSibling(simd_reg) == reg);
+ int allocated_vreg = VirtualRegisterForRegister(simd_reg);
+ if (simd_reg != reg &&
+ allocated_vreg != InstructionOperand::kInvalidVirtualRegister &&
+ VirtualRegisterDataFor(allocated_vreg).rep() ==
+ MachineRepresentation::kSimd128 &&
+ !DefinedAfter(virtual_register, instr_index, pos)) {
+ SpillRegister(simd_reg);
+ }
+ }
+
MarkRegisterUse(reg, rep, pos);
}
@@ -2521,7 +2699,7 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(
RegisterIndex to_register = RegisterForVirtualRegister(to_vreg.vreg());
// If to_register isn't marked as a phi gap move, we can't use it as such.
- if (to_register.is_valid() && !register_state()->IsPhiGapMove(to_register)) {
+ if (to_register.is_valid() && !register_state_->IsPhiGapMove(to_register)) {
to_register = RegisterIndex::Invalid();
}
@@ -2537,7 +2715,7 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(
} else {
// Otherwise add a gap move.
MoveOperands* move =
- data()->AddPendingOperandGapMove(instr_index, Instruction::END);
+ data_->AddPendingOperandGapMove(instr_index, Instruction::END);
PendingOperand* to_operand = PendingOperand::cast(&move->destination());
PendingOperand* from_operand = PendingOperand::cast(&move->source());
@@ -2546,7 +2724,7 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(
CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), to_operand,
UsePosition::kAll);
} else {
- to_vreg.SpillOperand(to_operand, instr_index, true, data());
+ to_vreg.SpillOperand(to_operand, instr_index, true, data_);
}
// The from side is unconstrained.
@@ -2568,15 +2746,15 @@ void SinglePassRegisterAllocator::AllocatePhi(
// If the register is valid, assign it as a phi gap move to be processed
// at the successor blocks. If no register or spill slot was used then
// the virtual register was never used.
- register_state()->UseForPhiGapMove(reg);
+ register_state_->UseForPhiGapMove(reg);
}
}
}
void SinglePassRegisterAllocator::EnsureRegisterState() {
- if (!HasRegisterState()) {
+ if (V8_UNLIKELY(!register_state_)) {
register_state_ = RegisterState::New(kind(), num_allocatable_registers_,
- data()->allocation_zone());
+ data_->allocation_zone());
}
}
@@ -2591,7 +2769,7 @@ class MidTierOutputProcessor final {
void PopulateDeferredBlockRegion(RpoNumber initial_block);
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
- return data()->VirtualRegisterDataFor(virtual_register);
+ return data_->VirtualRegisterDataFor(virtual_register);
}
MachineRepresentation RepresentationFor(int virtual_register) const {
DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
@@ -2600,12 +2778,11 @@ class MidTierOutputProcessor final {
}
bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
- return blocks.size() == 1 && !data()->GetBlock(blocks[0])->IsDeferred();
+ return blocks.size() == 1 && !data_->GetBlock(blocks[0])->IsDeferred();
}
- MidTierRegisterAllocationData* data() const { return data_; }
- InstructionSequence* code() const { return data()->code(); }
- Zone* zone() const { return data()->allocation_zone(); }
+ InstructionSequence* code() const { return data_->code(); }
+ Zone* zone() const { return data_->allocation_zone(); }
MidTierRegisterAllocationData* const data_;
ZoneQueue<RpoNumber> deferred_blocks_worklist_;
@@ -2629,14 +2806,13 @@ void MidTierOutputProcessor::PopulateDeferredBlockRegion(
while (!deferred_blocks_worklist_.empty()) {
RpoNumber current = deferred_blocks_worklist_.front();
deferred_blocks_worklist_.pop();
- deferred_blocks_region->AddBlock(current, data());
+ deferred_blocks_region->AddBlock(current, data_);
- const InstructionBlock* curr_block = data()->GetBlock(current);
+ const InstructionBlock* curr_block = data_->GetBlock(current);
// Check for whether the predecessor blocks are still deferred.
if (IsDeferredBlockBoundary(curr_block->predecessors())) {
// If not, mark the predecessor as having a deferred successor.
- data()
- ->block_state(curr_block->predecessors()[0])
+ data_->block_state(curr_block->predecessors()[0])
.MarkAsDeferredBlockBoundary();
} else {
// Otherwise process predecessors.
@@ -2652,7 +2828,7 @@ void MidTierOutputProcessor::PopulateDeferredBlockRegion(
// Process any unprocessed successors if we aren't at a boundary.
if (IsDeferredBlockBoundary(curr_block->successors())) {
// If not, mark the predecessor as having a deferred successor.
- data()->block_state(current).MarkAsDeferredBlockBoundary();
+ data_->block_state(current).MarkAsDeferredBlockBoundary();
} else {
// Otherwise process successors.
for (RpoNumber succ : curr_block->successors()) {
@@ -2671,11 +2847,11 @@ void MidTierOutputProcessor::InitializeBlockState(
// phis.
if (block->phis().size()) {
for (int i = 0; i < static_cast<int>(block->PredecessorCount()); ++i) {
- data()->block_state(block->predecessors()[i]).set_successors_phi_index(i);
+ data_->block_state(block->predecessors()[i]).set_successors_phi_index(i);
}
}
- BlockState& block_state = data()->block_state(block->rpo_number());
+ BlockState& block_state = data_->block_state(block->rpo_number());
if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
PopulateDeferredBlockRegion(block->rpo_number());
@@ -2686,7 +2862,7 @@ void MidTierOutputProcessor::InitializeBlockState(
if (block->dominator().IsValid()) {
// Add all the blocks this block dominates to its dominator.
- BlockState& dominator_block_state = data()->block_state(block->dominator());
+ BlockState& dominator_block_state = data_->block_state(block->dominator());
dominator_block_state.dominated_blocks()->Union(
*block_state.dominated_blocks());
} else {
@@ -2744,7 +2920,7 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
// Mark any instructions that require reference maps for later reference map
// processing.
if (instr->HasReferenceMap()) {
- data()->reference_map_instructions().push_back(index);
+ data_->reference_map_instructions().push_back(index);
}
}
@@ -2796,11 +2972,10 @@ class MidTierRegisterAllocator final {
SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
- return data()->VirtualRegisterDataFor(virtual_register);
+ return data_->VirtualRegisterDataFor(virtual_register);
}
- MidTierRegisterAllocationData* data() const { return data_; }
- InstructionSequence* code() const { return data()->code(); }
- Zone* allocation_zone() const { return data()->allocation_zone(); }
+ InstructionSequence* code() const { return data_->code(); }
+ Zone* allocation_zone() const { return data_->allocation_zone(); }
MidTierRegisterAllocationData* const data_;
SinglePassRegisterAllocator general_reg_allocator_;
@@ -2817,10 +2992,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
const InstructionBlock* block) {
RpoNumber block_rpo = block->rpo_number();
bool is_deferred_block_boundary =
- data()->block_state(block_rpo).is_deferred_block_boundary();
+ data_->block_state(block_rpo).is_deferred_block_boundary();
- general_reg_allocator().StartBlock(block);
- double_reg_allocator().StartBlock(block);
+ general_reg_allocator_.StartBlock(block);
+ double_reg_allocator_.StartBlock(block);
// If the block is not deferred but has deferred successors, then try to
// output spill slots for virtual_registers that are only spilled in the
@@ -2828,10 +3003,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
// them at their output in non-deferred blocks.
if (is_deferred_block_boundary && !block->IsDeferred()) {
for (RpoNumber successor : block->successors()) {
- if (!data()->GetBlock(successor)->IsDeferred()) continue;
+ if (!data_->GetBlock(successor)->IsDeferred()) continue;
DCHECK_GT(successor, block_rpo);
DeferredBlocksRegion* deferred_region =
- data()->block_state(successor).deferred_blocks_region();
+ data_->block_state(successor).deferred_blocks_region();
// Freeze the deferred spills on the region to ensure no more are added to
// this region after the spills for this entry point have already been
// emitted.
@@ -2894,10 +3069,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
}
if (instr->ClobbersRegisters()) {
- general_reg_allocator().SpillAllRegisters();
+ general_reg_allocator_.SpillAllRegisters();
}
if (instr->ClobbersDoubleRegisters()) {
- double_reg_allocator().SpillAllRegisters();
+ double_reg_allocator_.SpillAllRegisters();
}
// Allocate temporaries.
@@ -2941,8 +3116,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
// If this block is deferred but it's successor isn't, update the state to
// limit spills to the deferred blocks where possible.
if (is_deferred_block_boundary && block->IsDeferred()) {
- general_reg_allocator().UpdateForDeferredBlock(instr_index);
- double_reg_allocator().UpdateForDeferredBlock(instr_index);
+ general_reg_allocator_.UpdateForDeferredBlock(instr_index);
+ double_reg_allocator_.UpdateForDeferredBlock(instr_index);
}
}
@@ -2962,30 +3137,26 @@ void MidTierRegisterAllocator::AllocateRegisters(
}
}
- general_reg_allocator().EndInstruction();
- double_reg_allocator().EndInstruction();
+ general_reg_allocator_.EndInstruction();
+ double_reg_allocator_.EndInstruction();
}
// For now we spill all registers at a loop header.
// TODO(rmcilroy): Add support for register allocations across loops.
if (block->IsLoopHeader()) {
- general_reg_allocator().SpillAllRegisters();
- double_reg_allocator().SpillAllRegisters();
+ general_reg_allocator_.SpillAllRegisters();
+ double_reg_allocator_.SpillAllRegisters();
}
AllocatePhis(block);
- general_reg_allocator().EndBlock(block);
- double_reg_allocator().EndBlock(block);
+ general_reg_allocator_.EndBlock(block);
+ double_reg_allocator_.EndBlock(block);
}
SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
MachineRepresentation rep) {
- if (IsFloatingPoint(rep)) {
- return double_reg_allocator();
- } else {
- return general_reg_allocator();
- }
+ return IsFloatingPoint(rep) ? double_reg_allocator_ : general_reg_allocator_;
}
bool MidTierRegisterAllocator::IsFixedRegisterPolicy(
@@ -3046,7 +3217,7 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
void MidTierRegisterAllocator::AllocatePhiGapMoves(
const InstructionBlock* block) {
int successors_phi_index =
- data()->block_state(block->rpo_number()).successors_phi_index();
+ data_->block_state(block->rpo_number()).successors_phi_index();
// If successors_phi_index is -1 there are no phi's in the successor.
if (successors_phi_index == -1) return;
@@ -3059,7 +3230,7 @@ void MidTierRegisterAllocator::AllocatePhiGapMoves(
// If there are phis, we only have a single successor due to edge-split form.
DCHECK_EQ(block->SuccessorCount(), 1);
- const InstructionBlock* successor = data()->GetBlock(block->successors()[0]);
+ const InstructionBlock* successor = data_->GetBlock(block->successors()[0]);
for (PhiInstruction* phi : successor->phis()) {
VirtualRegisterData& to_vreg =
@@ -3088,13 +3259,11 @@ void MidTierRegisterAllocator::UpdateSpillRangesForLoops() {
RpoNumber last_loop_block =
RpoNumber::FromInt(block->loop_end().ToInt() - 1);
int last_loop_instr =
- data()->GetBlock(last_loop_block)->last_instruction_index();
+ data_->GetBlock(last_loop_block)->last_instruction_index();
// Extend spill range for all spilled values that are live on entry to the
// loop header.
- BitVector::Iterator iterator(&data()->spilled_virtual_registers());
- for (; !iterator.Done(); iterator.Advance()) {
- const VirtualRegisterData& vreg_data =
- VirtualRegisterDataFor(iterator.Current());
+ for (int vreg : data_->spilled_virtual_registers()) {
+ const VirtualRegisterData& vreg_data = VirtualRegisterDataFor(vreg);
if (vreg_data.HasSpillRange() &&
vreg_data.spill_range()->IsLiveAt(block->first_instruction_index(),
block)) {
@@ -3137,10 +3306,9 @@ class MidTierSpillSlotAllocator final {
void AdvanceTo(int instr_index);
SpillSlot* GetFreeSpillSlot(int byte_width);
- MidTierRegisterAllocationData* data() const { return data_; }
- InstructionSequence* code() const { return data()->code(); }
- Frame* frame() const { return data()->frame(); }
- Zone* zone() const { return data()->allocation_zone(); }
+ InstructionSequence* code() const { return data_->code(); }
+ Frame* frame() const { return data_->frame(); }
+ Zone* zone() const { return data_->allocation_zone(); }
struct OrderByLastUse {
bool operator()(const SpillSlot* a, const SpillSlot* b) const;
@@ -3237,10 +3405,8 @@ void MidTierSpillSlotAllocator::Allocate(
void AllocateSpillSlots(MidTierRegisterAllocationData* data) {
ZoneVector<VirtualRegisterData*> spilled(data->allocation_zone());
- BitVector::Iterator iterator(&data->spilled_virtual_registers());
- for (; !iterator.Done(); iterator.Advance()) {
- VirtualRegisterData& vreg_data =
- data->VirtualRegisterDataFor(iterator.Current());
+ for (int vreg : data->spilled_virtual_registers()) {
+ VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(vreg);
if (vreg_data.HasPendingSpillOperand()) {
spilled.push_back(&vreg_data);
}
@@ -3272,8 +3438,7 @@ class MidTierReferenceMapPopulator final {
void RecordReferences(const VirtualRegisterData& virtual_register);
private:
- MidTierRegisterAllocationData* data() const { return data_; }
- InstructionSequence* code() const { return data()->code(); }
+ InstructionSequence* code() const { return data_->code(); }
MidTierRegisterAllocationData* const data_;
};
@@ -3291,10 +3456,10 @@ void MidTierReferenceMapPopulator::RecordReferences(
Range& live_range = spill_range->live_range();
AllocatedOperand allocated =
*AllocatedOperand::cast(virtual_register.spill_operand());
- for (int instr_index : data()->reference_map_instructions()) {
+ for (int instr_index : data_->reference_map_instructions()) {
if (instr_index > live_range.end() || instr_index < live_range.start())
continue;
- Instruction* instr = data()->code()->InstructionAt(instr_index);
+ Instruction* instr = data_->code()->InstructionAt(instr_index);
DCHECK(instr->HasReferenceMap());
if (spill_range->IsLiveAt(instr_index, instr->block())) {
@@ -3305,10 +3470,8 @@ void MidTierReferenceMapPopulator::RecordReferences(
void PopulateReferenceMaps(MidTierRegisterAllocationData* data) {
MidTierReferenceMapPopulator populator(data);
- BitVector::Iterator iterator(&data->spilled_virtual_registers());
- for (; !iterator.Done(); iterator.Advance()) {
- populator.RecordReferences(
- data->VirtualRegisterDataFor(iterator.Current()));
+ for (int vreg : data->spilled_virtual_registers()) {
+ populator.RecordReferences(data->VirtualRegisterDataFor(vreg));
}
}
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index fd4358a4fa..4dd0d5cd32 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -167,8 +167,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
exit());
__ Addu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -3689,8 +3691,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ Branch(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3931,21 +3934,21 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
frame->AlignSavedCalleeRegisterSlots();
}
- if (saves_fpu != 0) {
- int count = base::bits::CountPopulation(saves_fpu);
+ if (!saves_fpu.is_empty()) {
+ int count = saves_fpu.Count();
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- int count = base::bits::CountPopulation(saves);
+ if (!saves.is_empty()) {
+ int count = saves.Count();
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -4002,7 +4005,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -4043,19 +4046,19 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
- required_slots -= 2 * base::bits::CountPopulation(saves_fpu);
+ required_slots -= saves.Count();
+ required_slots -= 2 * saves_fpu.Count();
required_slots -= returns;
if (required_slots > 0) {
__ Subu(sp, sp, Operand(required_slots * kSystemPointerSize));
}
// Save callee-saved FPU registers.
- if (saves_fpu != 0) {
+ if (!saves_fpu.is_empty()) {
__ MultiPushFPU(saves_fpu);
}
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
__ MultiPush(saves);
}
@@ -4076,13 +4079,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore GP registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
__ MultiPopFPU(saves_fpu);
}
@@ -4133,9 +4136,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_slots).
- if (!kJSArgcIncludesReceiver) {
- __ Addu(t0, t0, Operand(1)); // Also pop the receiver.
- }
if (parameter_slots > 1) {
__ li(kScratchReg, parameter_slots);
__ slt(kScratchReg2, t0, kScratchReg);
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 1278b3e2f7..67a28630a3 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -370,7 +370,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -451,7 +451,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1428,7 +1428,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1483,7 +1483,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index d68850b592..295470491f 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -169,8 +169,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
exit());
__ Daddu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -455,9 +457,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size, \
sign_extend); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
+ __ BranchShort(&exit, ne, i.TempRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
size); \
@@ -3879,8 +3881,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ Branch(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -4136,17 +4139,17 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- int count = base::bits::CountPopulation(saves_fpu);
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
+ int count = saves_fpu.Count();
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- int count = base::bits::CountPopulation(saves);
+ if (!saves.is_empty()) {
+ int count = saves.Count();
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -4204,7 +4207,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -4245,20 +4248,20 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
- required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= saves.Count();
+ required_slots -= saves_fpu.Count();
required_slots -= returns;
if (required_slots > 0) {
__ Dsubu(sp, sp, Operand(required_slots * kSystemPointerSize));
}
- if (saves_fpu != 0) {
+ if (!saves_fpu.is_empty()) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
}
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
__ MultiPush(saves);
}
@@ -4279,13 +4282,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore GP registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
__ MultiPopFPU(saves_fpu);
}
@@ -4337,9 +4340,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_slots).
- if (!kJSArgcIncludesReceiver) {
- __ Daddu(t0, t0, Operand(1)); // Also pop the receiver.
- }
if (parameter_slots > 1) {
__ li(kScratchReg, parameter_slots);
__ slt(kScratchReg2, t0, kScratchReg);
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 6b62a7c694..4f5738ddad 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -503,7 +503,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kMips64MsaLd;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -577,7 +577,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -1861,7 +1861,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -1916,7 +1916,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
diff --git a/deps/v8/src/compiler/backend/move-optimizer.cc b/deps/v8/src/compiler/backend/move-optimizer.cc
index 88a34c836f..8544259027 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.cc
+++ b/deps/v8/src/compiler/backend/move-optimizer.cc
@@ -38,7 +38,7 @@ class OperandSet {
void InsertOp(const InstructionOperand& op) {
set_->push_back(op);
- if (!kSimpleFPAliasing && op.IsFPRegister())
+ if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister())
fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation());
}
@@ -52,7 +52,7 @@ class OperandSet {
bool ContainsOpOrAlias(const InstructionOperand& op) const {
if (Contains(op)) return true;
- if (!kSimpleFPAliasing && op.IsFPRegister()) {
+ if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister()) {
// Platforms where FP registers have complex aliasing need extra checks.
const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation();
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 46d8b248e9..1f9a4a70c9 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -187,8 +187,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -551,47 +553,97 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define CleanUInt32(x)
#endif
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label exchange; \
- __ lwsync(); \
- __ bind(&exchange); \
- __ load_instr(i.OutputRegister(0), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ store_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ bne(&exchange, cr0); \
- __ sync(); \
- } while (0)
+static inline bool is_wasm_on_be(bool IsWasm) {
+#if V8_TARGET_BIG_ENDIAN
+ return IsWasm;
+#else
+ return false;
+#endif
+}
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type) \
+#define MAYBE_REVERSE_IF_WASM(dst, src, op, reset) \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ op(dst, src, kScratchReg); \
+ if (reset) src = dst; \
+ }
+
+#define ASSEMBLE_ATOMIC_EXCHANGE(_type, reverse_op) \
+ do { \
+ Register val = i.InputRegister(2); \
+ Register dst = i.OutputRegister(); \
+ MAYBE_REVERSE_IF_WASM(ip, val, reverse_op, true); \
+ __ AtomicExchange<_type>( \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)), val, dst); \
+ MAYBE_REVERSE_IF_WASM(dst, dst, reverse_op, false); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(_type, reverse_op) \
+ do { \
+ Register expected_val = i.InputRegister(2); \
+ Register new_val = i.InputRegister(3); \
+ Register dst = i.OutputRegister(); \
+ MAYBE_REVERSE_IF_WASM(ip, expected_val, reverse_op, true); \
+ MAYBE_REVERSE_IF_WASM(r0, new_val, reverse_op, true); \
+ __ AtomicCompareExchange<_type>( \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)), expected_val, \
+ new_val, dst, kScratchReg); \
+ MAYBE_REVERSE_IF_WASM(dst, dst, reverse_op, false); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, _type) \
do { \
auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
if (std::is_signed<_type>::value) { \
+ __ extsb(dst, lhs); \
+ __ bin_inst(dst, dst, rhs); \
+ } else { \
+ __ bin_inst(dst, lhs, rhs); \
+ } \
+ }; \
+ MemOperand dst_operand = \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
+ kScratchReg, bin_op); \
+ break; \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type, reverse_op, scratch) \
+ do { \
+ auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
+ Register _lhs = lhs; \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ reverse_op(dst, lhs, scratch); \
+ _lhs = dst; \
+ } \
+ if (std::is_signed<_type>::value) { \
switch (sizeof(_type)) { \
case 1: \
- __ extsb(dst, lhs); \
+ UNREACHABLE(); \
break; \
case 2: \
- __ extsh(dst, lhs); \
+ __ extsh(dst, _lhs); \
break; \
case 4: \
- __ extsw(dst, lhs); \
+ __ extsw(dst, _lhs); \
break; \
case 8: \
break; \
default: \
UNREACHABLE(); \
} \
- __ bin_inst(dst, dst, rhs); \
- } else { \
- __ bin_inst(dst, lhs, rhs); \
+ } \
+ __ bin_inst(dst, _lhs, rhs); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ reverse_op(dst, dst, scratch); \
} \
}; \
MemOperand dst_operand = \
MemOperand(i.InputRegister(0), i.InputRegister(1)); \
__ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
kScratchReg, bin_op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ reverse_op(i.OutputRegister(), i.OutputRegister(), scratch); \
+ } \
break; \
} while (false)
@@ -1982,26 +2034,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand(i.InputRegister(0), i.InputRegister(1)),
i.InputRegister(2), i.OutputRegister());
break;
- case kAtomicExchangeInt16:
- __ AtomicExchange<int16_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.OutputRegister());
+ case kAtomicExchangeInt16: {
+ ASSEMBLE_ATOMIC_EXCHANGE(int16_t, ByteReverseU16);
+ __ extsh(i.OutputRegister(), i.OutputRegister());
break;
- case kPPC_AtomicExchangeUint16:
- __ AtomicExchange<uint16_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.OutputRegister());
+ }
+ case kPPC_AtomicExchangeUint16: {
+ ASSEMBLE_ATOMIC_EXCHANGE(uint16_t, ByteReverseU16);
break;
- case kPPC_AtomicExchangeWord32:
- __ AtomicExchange<uint32_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.OutputRegister());
+ }
+ case kPPC_AtomicExchangeWord32: {
+ ASSEMBLE_ATOMIC_EXCHANGE(uint32_t, ByteReverseU32);
break;
- case kPPC_AtomicExchangeWord64:
- __ AtomicExchange<uint64_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.OutputRegister());
+ }
+ case kPPC_AtomicExchangeWord64: {
+ ASSEMBLE_ATOMIC_EXCHANGE(uint64_t, ByteReverseU64);
break;
+ }
case kAtomicCompareExchangeInt8:
__ AtomicCompareExchange<int8_t>(
MemOperand(i.InputRegister(0), i.InputRegister(1)),
@@ -2014,53 +2063,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
kScratchReg);
break;
- case kAtomicCompareExchangeInt16:
- __ AtomicCompareExchange<int16_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
- kScratchReg);
+ case kAtomicCompareExchangeInt16: {
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(int16_t, ByteReverseU16);
+ __ extsh(i.OutputRegister(), i.OutputRegister());
break;
- case kPPC_AtomicCompareExchangeUint16:
- __ AtomicCompareExchange<uint16_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
- kScratchReg);
- break;
- case kPPC_AtomicCompareExchangeWord32:
- __ AtomicCompareExchange<uint32_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
- kScratchReg);
+ }
+ case kPPC_AtomicCompareExchangeUint16: {
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(uint16_t, ByteReverseU16);
break;
- case kPPC_AtomicCompareExchangeWord64:
- __ AtomicCompareExchange<uint64_t>(
- MemOperand(i.InputRegister(0), i.InputRegister(1)),
- i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
- kScratchReg);
+ }
+ case kPPC_AtomicCompareExchangeWord32: {
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(uint32_t, ByteReverseU32);
break;
+ }
+ case kPPC_AtomicCompareExchangeWord64: {
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(uint64_t, ByteReverseU64);
+ } break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kPPC_Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(inst, int8_t); \
- break; \
- case kPPC_Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, uint8_t); \
- break; \
- case kPPC_Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(inst, int16_t); \
- break; \
- case kPPC_Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, uint16_t); \
- break; \
- case kPPC_Atomic##op##Int32: \
- ASSEMBLE_ATOMIC_BINOP(inst, int32_t); \
- break; \
- case kPPC_Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP(inst, uint32_t); \
- break; \
- case kPPC_Atomic##op##Int64: \
- case kPPC_Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(inst, uint64_t); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kPPC_Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_BYTE(inst, int8_t); \
+ __ extsb(i.OutputRegister(), i.OutputRegister()); \
+ break; \
+ case kPPC_Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_BYTE(inst, uint8_t); \
+ break; \
+ case kPPC_Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int16_t, ByteReverseU16, r0); \
+ __ extsh(i.OutputRegister(), i.OutputRegister()); \
+ break; \
+ case kPPC_Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint16_t, ByteReverseU16, r0); \
+ break; \
+ case kPPC_Atomic##op##Int32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int32_t, ByteReverseU32, r0); \
+ __ extsw(i.OutputRegister(), i.OutputRegister()); \
+ break; \
+ case kPPC_Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint32_t, ByteReverseU32, r0); \
+ break; \
+ case kPPC_Atomic##op##Int64: \
+ case kPPC_Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint64_t, ByteReverseU64, r0); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -3316,6 +3360,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode); \
DCHECK_EQ(mode, kMode_MRR); \
__ load_instr(scratch, operand);
+#if V8_TARGET_BIG_ENDIAN
+#define MAYBE_REVERSE_BYTES(reg, instr) __ instr(reg, reg);
+#else
+#define MAYBE_REVERSE_BYTES(reg, instr)
+#endif
case kPPC_S128Load8Splat: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsibzx)
@@ -3325,12 +3374,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_S128Load16Splat: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsihzx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
__ vsplth(dst, kScratchSimd128Reg, Operand(3));
break;
}
case kPPC_S128Load32Splat: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsiwzx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
__ vspltw(dst, kScratchSimd128Reg, Operand(1));
break;
}
@@ -3338,18 +3389,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(dst, lxsdx)
+ MAYBE_REVERSE_BYTES(dst, xxbrd)
__ vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
break;
}
case kPPC_S128Load8x8S: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsb(dst, kScratchSimd128Reg);
break;
}
case kPPC_S128Load8x8U: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsb(dst, kScratchSimd128Reg);
// Zero extend.
__ li(ip, Operand(0xFF));
@@ -3361,12 +3415,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_S128Load16x4S: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsh(dst, kScratchSimd128Reg);
break;
}
case kPPC_S128Load16x4U: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsh(dst, kScratchSimd128Reg);
// Zero extend.
__ mov(ip, Operand(0xFFFF));
@@ -3379,6 +3435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_S128Load32x2S: {
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsw(dst, kScratchSimd128Reg);
break;
}
@@ -3386,6 +3443,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vupkhsw(dst, kScratchSimd128Reg);
// Zero extend.
__ mov(ip, Operand(0xFFFFFFFF));
@@ -3399,6 +3457,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsiwzx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
__ vxor(dst, dst, dst);
__ vinsertw(dst, kScratchSimd128Reg, Operand(3 * lane_width_in_bytes));
break;
@@ -3407,6 +3466,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register();
ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vxor(dst, dst, dst);
__ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
break;
@@ -3432,6 +3492,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ lxsihzx(kScratchSimd128Reg, operand);
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
__ vinserth(dst, kScratchSimd128Reg,
Operand((7 - i.InputUint8(3)) * lane_width_in_bytes));
break;
@@ -3445,6 +3506,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ lxsiwzx(kScratchSimd128Reg, operand);
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
__ vinsertw(dst, kScratchSimd128Reg,
Operand((3 - i.InputUint8(3)) * lane_width_in_bytes));
break;
@@ -3458,6 +3520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ lxsdx(kScratchSimd128Reg, operand);
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ vinsertd(dst, kScratchSimd128Reg,
Operand((1 - i.InputUint8(3)) * lane_width_in_bytes));
break;
@@ -3480,6 +3543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(mode, kMode_MRR);
__ vextractuh(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((7 - i.InputUint8(3)) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
__ stxsihx(kScratchSimd128Reg, operand);
break;
}
@@ -3491,6 +3555,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(mode, kMode_MRR);
__ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((3 - i.InputUint8(3)) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
__ stxsiwx(kScratchSimd128Reg, operand);
break;
}
@@ -3502,9 +3567,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(mode, kMode_MRR);
__ vextractd(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((1 - i.InputUint8(3)) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
__ stxsdx(kScratchSimd128Reg, operand);
break;
}
+#undef MAYBE_REVERSE_BYTES
#define EXT_ADD_PAIRWISE(mul_even, mul_odd, add) \
__ mul_even(tempFPReg1, src, kScratchSimd128Reg); \
__ mul_odd(kScratchSimd128Reg, src, kScratchSimd128Reg); \
@@ -3750,8 +3817,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ b(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3915,22 +3983,21 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
- if (double_saves != 0) {
+ if (!double_saves.is_empty()) {
frame->AlignSavedCalleeRegisterSlots();
- DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles, double_saves.Count());
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kSystemPointerSize));
}
// Save callee-saved registers.
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? call_descriptor->CalleeSavedRegisters() &
- ~kConstantPoolRegister.bit()
- : call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() - kConstantPoolRegister
+ : call_descriptor->CalleeSavedRegisters();
+ if (!saves.is_empty()) {
// register save area does not include the fp or constant pool pointer.
const int num_saves =
kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
@@ -4000,11 +4067,11 @@ void CodeGenerator::AssembleConstructFrame() {
required_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? call_descriptor->CalleeSavedRegisters() &
- ~kConstantPoolRegister.bit()
- : call_descriptor->CalleeSavedRegisters();
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() - kConstantPoolRegister
+ : call_descriptor->CalleeSavedRegisters();
if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
@@ -4044,21 +4111,20 @@ void CodeGenerator::AssembleConstructFrame() {
#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= saves.Count();
required_slots -= frame()->GetReturnSlotCount();
- required_slots -= (kDoubleSize / kSystemPointerSize) *
- base::bits::CountPopulation(saves_fp);
+ required_slots -= (kDoubleSize / kSystemPointerSize) * saves_fp.Count();
__ AddS64(sp, sp, Operand(-required_slots * kSystemPointerSize), r0);
}
// Save callee-saved Double registers.
- if (saves_fp != 0) {
+ if (!saves_fp.is_empty()) {
__ MultiPushDoubles(saves_fp);
- DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
+ DCHECK_EQ(kNumCalleeSavedDoubles, saves_fp.Count());
}
// Save callee-saved registers.
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
}
@@ -4078,24 +4144,22 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
// Restore registers.
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? call_descriptor->CalleeSavedRegisters() &
- ~kConstantPoolRegister.bit()
- : call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() - kConstantPoolRegister
+ : call_descriptor->CalleeSavedRegisters();
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
+ const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ if (!double_saves.is_empty()) {
__ MultiPopDoubles(double_saves);
}
unwinding_info_writer_.MarkBlockWillExit();
- // We might need r6 for scratch.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r6.bit());
PPCOperandConverter g(this, nullptr);
const int parameter_slots =
static_cast<int>(call_descriptor->ParameterSlotCount());
@@ -4136,7 +4200,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// Get the actual argument count.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
__ LoadU64(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
}
AssembleDeconstructFrame();
@@ -4148,26 +4212,16 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
if (parameter_slots > 1) {
- if (kJSArgcIncludesReceiver) {
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots), r0);
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots));
- __ bind(&skip);
- } else {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver), r0);
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots_without_receiver));
- __ bind(&skip);
- }
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots), r0);
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots));
+ __ bind(&skip);
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index e658a8f2d7..dedd268dde 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -193,7 +193,7 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_LoadWordS32;
mode = kInt16Imm_4ByteAligned;
@@ -339,7 +339,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_StoreCompressTagged;
break;
diff --git a/deps/v8/src/compiler/backend/register-allocation.h b/deps/v8/src/compiler/backend/register-allocation.h
index 11a4a5b964..4c0bfe1baa 100644
--- a/deps/v8/src/compiler/backend/register-allocation.h
+++ b/deps/v8/src/compiler/backend/register-allocation.h
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-enum class RegisterKind { kGeneral, kDouble };
+enum class RegisterKind { kGeneral, kDouble, kSimd128 };
inline int GetRegisterCount(const RegisterConfiguration* config,
RegisterKind kind) {
@@ -21,6 +21,8 @@ inline int GetRegisterCount(const RegisterConfiguration* config,
return config->num_general_registers();
case RegisterKind::kDouble:
return config->num_double_registers();
+ case RegisterKind::kSimd128:
+ return config->num_simd128_registers();
}
}
@@ -31,6 +33,8 @@ inline int GetAllocatableRegisterCount(const RegisterConfiguration* config,
return config->num_allocatable_general_registers();
case RegisterKind::kDouble:
return config->num_allocatable_double_registers();
+ case RegisterKind::kSimd128:
+ return config->num_allocatable_simd128_registers();
}
}
@@ -41,6 +45,8 @@ inline const int* GetAllocatableRegisterCodes(
return config->allocatable_general_codes();
case RegisterKind::kDouble:
return config->allocatable_double_codes();
+ case RegisterKind::kSimd128:
+ return config->allocatable_simd128_codes();
}
}
@@ -51,7 +57,7 @@ inline int ByteWidthForStackSlot(MachineRepresentation rep) {
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
case MachineRepresentation::kFloat32:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
return kSystemPointerSize;
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 2d6646f586..f0bf4e2186 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -379,8 +379,13 @@ void LiveRange::Spill() {
}
RegisterKind LiveRange::kind() const {
- return IsFloatingPoint(representation()) ? RegisterKind::kDouble
- : RegisterKind::kGeneral;
+ if (kFPAliasing == AliasingKind::kIndependent &&
+ IsSimd128(representation())) {
+ return RegisterKind::kSimd128;
+ } else {
+ return IsFloatingPoint(representation()) ? RegisterKind::kDouble
+ : RegisterKind::kGeneral;
+ }
}
UsePosition* LiveRange::FirstHintPosition(int* register_index) {
@@ -1321,7 +1326,7 @@ TopTierRegisterAllocationData::TopTierRegisterAllocationData(
flags_(flags),
tick_counter_(tick_counter),
slot_for_const_range_(zone) {
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
nullptr);
@@ -1329,6 +1334,11 @@ TopTierRegisterAllocationData::TopTierRegisterAllocationData(
kNumberOfFixedRangesPerRegister *
this->config()->num_simd128_registers(),
nullptr);
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ fixed_simd128_live_ranges_.resize(
+ kNumberOfFixedRangesPerRegister *
+ this->config()->num_simd128_registers(),
+ nullptr);
}
assigned_registers_ = code_zone()->New<BitVector>(
@@ -1339,6 +1349,12 @@ TopTierRegisterAllocationData::TopTierRegisterAllocationData(
this->config()->num_general_registers(), code_zone());
fixed_fp_register_use_ = code_zone()->New<BitVector>(
this->config()->num_double_registers(), code_zone());
+ if (kFPAliasing == AliasingKind::kIndependent) {
+ assigned_simd128_registers_ = code_zone()->New<BitVector>(
+ this->config()->num_simd128_registers(), code_zone());
+ fixed_simd128_register_use_ = code_zone()->New<BitVector>(
+ this->config()->num_simd128_registers(), code_zone());
+ }
this->frame()->SetAllocatedRegisters(assigned_registers_);
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
@@ -1404,10 +1420,8 @@ TopTierRegisterAllocationData::GetPhiMapValueFor(TopLevelLiveRange* top_range) {
bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
bool found = false;
- BitVector::Iterator iterator(live_in_sets()[0]);
- while (!iterator.Done()) {
+ for (int operand_index : *live_in_sets()[0]) {
found = true;
- int operand_index = iterator.Current();
PrintF("Register allocator error: live v%d reached first block.\n",
operand_index);
LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
@@ -1417,7 +1431,6 @@ bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
} else {
PrintF(" (function: %s)\n", debug_name());
}
- iterator.Advance();
}
return found;
}
@@ -1480,8 +1493,14 @@ void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
- if (kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kOverlap) {
fixed_fp_register_use_->Add(index);
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ if (rep == MachineRepresentation::kFloat32) {
+ fixed_fp_register_use_->Add(index);
+ } else {
+ fixed_simd128_register_use_->Add(index);
+ }
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
@@ -1508,19 +1527,26 @@ bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep,
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128: {
- if (kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kOverlap) {
return fixed_fp_register_use_->Contains(index);
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ if (rep == MachineRepresentation::kFloat32) {
+ return fixed_fp_register_use_->Contains(index);
+ } else {
+ return fixed_simd128_register_use_->Contains(index);
+ }
+ } else {
+ int alias_base_index = -1;
+ int aliases = config()->GetAliases(
+ rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ bool result = false;
+ while (aliases-- && !result) {
+ int aliased_reg = alias_base_index + aliases;
+ result |= fixed_fp_register_use_->Contains(aliased_reg);
+ }
+ return result;
}
- int alias_base_index = -1;
- int aliases = config()->GetAliases(
- rep, index, MachineRepresentation::kFloat64, &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- bool result = false;
- while (aliases-- && !result) {
- int aliased_reg = alias_base_index + aliases;
- result |= fixed_fp_register_use_->Contains(aliased_reg);
- }
- return result;
}
case MachineRepresentation::kFloat64:
return fixed_fp_register_use_->Contains(index);
@@ -1535,8 +1561,14 @@ void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
- if (kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kOverlap) {
assigned_double_registers_->Add(index);
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ if (rep == MachineRepresentation::kFloat32) {
+ assigned_double_registers_->Add(index);
+ } else {
+ assigned_simd128_registers_->Add(index);
+ }
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
@@ -1895,13 +1927,10 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
block->last_instruction_index())
.NextStart();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
+ for (int operand_index : *live_out) {
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->AddUseInterval(start, end, allocation_zone(),
data()->is_trace_alloc());
- iterator.Advance();
}
}
@@ -1952,7 +1981,7 @@ TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
int num_regs = config()->num_double_registers();
ZoneVector<TopLevelLiveRange*>* live_ranges =
&data()->fixed_double_live_ranges();
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
switch (rep) {
case MachineRepresentation::kFloat32:
num_regs = config()->num_float_registers();
@@ -1985,6 +2014,32 @@ TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
return result;
}
+TopLevelLiveRange* LiveRangeBuilder::FixedSIMD128LiveRangeFor(
+ int index, SpillMode spill_mode) {
+ DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
+ int num_regs = config()->num_simd128_registers();
+ ZoneVector<TopLevelLiveRange*>* live_ranges =
+ &data()->fixed_simd128_live_ranges();
+ int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
+
+ DCHECK(index < num_regs);
+ USE(num_regs);
+ TopLevelLiveRange* result = (*live_ranges)[offset + index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(
+ FixedFPLiveRangeID(offset + index, MachineRepresentation::kSimd128),
+ MachineRepresentation::kSimd128);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(MachineRepresentation::kSimd128, index);
+ if (spill_mode == SpillMode::kSpillDeferred) {
+ result->set_deferred_fixed();
+ }
+ (*live_ranges)[offset + index] = result;
+ }
+ return result;
+}
+
TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
SpillMode spill_mode) {
if (operand->IsUnallocated()) {
@@ -1998,6 +2053,10 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
} else if (operand->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(operand);
+ if (kFPAliasing == AliasingKind::kIndependent &&
+ op->representation() == MachineRepresentation::kSimd128) {
+ return FixedSIMD128LiveRangeFor(op->register_code(), spill_mode);
+ }
return FixedFPLiveRangeFor(op->register_code(), op->representation(),
spill_mode);
} else {
@@ -2061,10 +2120,13 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
LifetimePosition::GapFromInstructionIndex(block_start);
bool fixed_float_live_ranges = false;
bool fixed_simd128_live_ranges = false;
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
int mask = data()->code()->representation_mask();
fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ int mask = data()->code()->representation_mask();
+ fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
}
SpillMode spill_mode = SpillModeForBlock(block);
@@ -2126,7 +2188,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
allocation_zone(), data()->is_trace_alloc());
}
// Clobber fixed float registers on archs with non-simple aliasing.
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
if (fixed_float_live_ranges) {
for (int i = 0; i < config()->num_allocatable_float_registers();
++i) {
@@ -2149,6 +2211,17 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
allocation_zone(), data()->is_trace_alloc());
}
}
+ } else if (kFPAliasing == AliasingKind::kIndependent) {
+ if (fixed_simd128_live_ranges) {
+ for (int i = 0; i < config()->num_allocatable_simd128_registers();
+ ++i) {
+ int code = config()->GetAllocatableSimd128Code(i);
+ TopLevelLiveRange* range =
+ FixedSIMD128LiveRangeFor(code, spill_mode);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone(), data()->is_trace_alloc());
+ }
+ }
}
}
@@ -2401,18 +2474,15 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
DCHECK(block->IsLoopHeader());
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
- BitVector::Iterator iterator(live);
LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
code()->LastLoopInstructionIndex(block))
.NextFullStart();
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
+ for (int operand_index : *live) {
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->EnsureInterval(start, end, allocation_zone(),
data()->is_trace_alloc());
- iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
@@ -2724,7 +2794,7 @@ RegisterAllocator::RegisterAllocator(TopTierRegisterAllocationData* data,
allocatable_register_codes_(
GetAllocatableRegisterCodes(data->config(), kind)),
check_fp_aliasing_(false) {
- if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
+ if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
check_fp_aliasing_ = (data->code()->representation_mask() &
(kFloat32Bit | kSimd128Bit)) != 0;
}
@@ -2932,9 +3002,14 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
const char* RegisterAllocator::RegisterName(int register_code) const {
if (register_code == kUnassignedRegister) return "unassigned";
- return mode() == RegisterKind::kGeneral
- ? i::RegisterName(Register::from_code(register_code))
- : i::RegisterName(DoubleRegister::from_code(register_code));
+ switch (mode()) {
+ case RegisterKind::kGeneral:
+ return i::RegisterName(Register::from_code(register_code));
+ case RegisterKind::kDouble:
+ return i::RegisterName(DoubleRegister::from_code(register_code));
+ case RegisterKind::kSimd128:
+ return i::RegisterName(Simd128Register::from_code(register_code));
+ }
}
LinearScanAllocator::LinearScanAllocator(TopTierRegisterAllocationData* data,
@@ -3086,11 +3161,12 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
// intersection for the entire future.
LifetimePosition new_end = range->End();
for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
- if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
+ if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
+ cur_reg != reg) {
continue;
}
for (const LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
- if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
!data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
range->representation(), reg)) {
continue;
@@ -3302,7 +3378,8 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
std::function<bool(TopLevelLiveRange*)> filter,
RangeWithRegisterSet* to_be_live,
bool* taken_registers) {
- bool check_aliasing = !kSimpleFPAliasing && check_fp_aliasing();
+ bool check_aliasing =
+ kFPAliasing == AliasingKind::kCombine && check_fp_aliasing();
for (const auto& val : counts) {
if (!filter(val.first)) continue;
if (val.second.count >= majority) {
@@ -3374,7 +3451,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
update_caches) {
if (other->TopLevel()->IsFixed()) return;
int reg = range->assigned_register();
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
if (other->assigned_register() != reg) {
return;
}
@@ -3420,7 +3497,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
});
}
for (int reg = 0; reg < num_registers(); ++reg) {
- if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
reg != range->assigned_register()) {
continue;
}
@@ -3441,7 +3518,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
}
}
- } else {
+ } else if (mode() == RegisterKind::kDouble) {
for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) {
@@ -3449,7 +3526,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
}
}
- if (!kSimpleFPAliasing && check_fp_aliasing()) {
+ if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) {
@@ -3465,6 +3542,15 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
}
}
+ } else {
+ DCHECK_EQ(mode(), RegisterKind::kSimd128);
+ for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+ if (current != nullptr) {
+ if (current->IsDeferredFixed()) {
+ add_to_inactive(current);
+ }
+ }
+ }
}
} else {
// Remove all ranges.
@@ -3537,14 +3623,14 @@ void LinearScanAllocator::AllocateRegisters() {
AddToInactive(current);
}
}
- } else {
+ } else if (mode() == RegisterKind::kDouble) {
for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) continue;
AddToInactive(current);
}
}
- if (!kSimpleFPAliasing && check_fp_aliasing()) {
+ if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) continue;
@@ -3558,6 +3644,14 @@ void LinearScanAllocator::AllocateRegisters() {
}
}
}
+ } else {
+ DCHECK(mode() == RegisterKind::kSimd128);
+ for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+ if (current != nullptr) {
+ if (current->IsDeferredFixed()) continue;
+ AddToInactive(current);
+ }
+ }
}
RpoNumber last_block = RpoNumber::FromInt(0);
@@ -3902,7 +3996,7 @@ int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
int* num_regs, int* num_codes,
const int** codes) const {
- DCHECK(!kSimpleFPAliasing);
+ DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
if (rep == MachineRepresentation::kFloat32) {
*num_regs = data()->config()->num_float_registers();
*num_codes = data()->config()->num_allocatable_float_registers();
@@ -3916,15 +4010,29 @@ void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
}
}
+void LinearScanAllocator::GetSIMD128RegisterSet(int* num_regs, int* num_codes,
+ const int** codes) const {
+ DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
+
+ *num_regs = data()->config()->num_simd128_registers();
+ *num_codes = data()->config()->num_allocatable_simd128_registers();
+ *codes = data()->config()->allocatable_simd128_codes();
+}
+
void LinearScanAllocator::FindFreeRegistersForRange(
LiveRange* range, base::Vector<LifetimePosition> positions) {
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
MachineRepresentation rep = range->representation();
- if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kSimd128))
+ if (kFPAliasing == AliasingKind::kCombine &&
+ (rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kSimd128)) {
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+ } else if (kFPAliasing == AliasingKind::kIndependent &&
+ (rep == MachineRepresentation::kSimd128)) {
+ GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
+ }
DCHECK_GE(positions.length(), num_regs);
for (int i = 0; i < num_regs; ++i) {
@@ -3933,7 +4041,7 @@ void LinearScanAllocator::FindFreeRegistersForRange(
for (LiveRange* cur_active : active_live_ranges()) {
int cur_reg = cur_active->assigned_register();
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1) due to %d\n",
RegisterName(cur_reg),
@@ -3958,7 +4066,7 @@ void LinearScanAllocator::FindFreeRegistersForRange(
// No need to carry out intersections, when this register won't be
// interesting to this range anyway.
// TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
(positions[cur_reg] <= cur_inactive->NextStart() ||
range->End() <= cur_inactive->NextStart())) {
break;
@@ -3966,7 +4074,7 @@ void LinearScanAllocator::FindFreeRegistersForRange(
LifetimePosition next_intersection =
cur_inactive->FirstIntersection(range);
if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
positions[cur_reg].value());
@@ -4038,9 +4146,13 @@ int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
MachineRepresentation rep = current->representation();
- if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kSimd128)) {
+ if (kFPAliasing == AliasingKind::kCombine &&
+ (rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kSimd128)) {
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+ } else if (kFPAliasing == AliasingKind::kIndependent &&
+ (rep == MachineRepresentation::kSimd128)) {
+ GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
}
DCHECK_GE(free_until_pos.length(), num_codes);
@@ -4146,7 +4258,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
int cur_reg = range->assigned_register();
bool is_fixed_or_cant_spill =
range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
if (is_fixed_or_cant_spill) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
@@ -4185,7 +4297,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
// Don't perform costly intersections if they are guaranteed to not update
// block_pos or use_pos.
// TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+ if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing())) {
DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
if (block_pos[cur_reg] <= range->NextStart()) break;
if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
@@ -4194,7 +4306,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
if (is_fixed) {
block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
@@ -4293,7 +4405,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
for (auto it = active_live_ranges().begin();
it != active_live_ranges().end();) {
LiveRange* range = *it;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
if (range->assigned_register() != reg) {
++it;
continue;
@@ -4332,13 +4444,13 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
}
for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
if (cur_reg != reg) continue;
}
for (auto it = inactive_live_ranges(cur_reg).begin();
it != inactive_live_ranges(cur_reg).end();) {
LiveRange* range = *it;
- if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
!data()->config()->AreAliases(current->representation(), reg,
range->representation(), cur_reg)) {
++it;
@@ -4769,10 +4881,11 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
for (const InstructionBlock* block : code()->instruction_blocks()) {
if (CanEagerlyResolveControlFlow(block)) continue;
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
- BitVector::Iterator iterator(live);
- while (!iterator.Done()) {
+ auto it = live->begin();
+ auto end = live->end();
+ while (it != end) {
data()->tick_counter()->TickAndMaybeEnterSafepoint();
- int vreg = iterator.Current();
+ int vreg = *it;
LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
@@ -4835,7 +4948,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
move_loc != -1,
code()->GetInstructionBlock(move_loc)->IsDeferred());
}
- iterator.Advance();
+ ++it;
}
}
@@ -5013,10 +5126,8 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
ZoneQueue<int> worklist(temp_zone);
- for (BitVector::Iterator iterator(
- range->GetListOfBlocksRequiringSpillOperands(data()));
- !iterator.Done(); iterator.Advance()) {
- worklist.push(iterator.Current());
+ for (int block_id : *range->GetListOfBlocksRequiringSpillOperands(data())) {
+ worklist.push(block_id);
}
ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 2a9e6ddd3e..adb0dbdacb 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -372,8 +372,10 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
+ BitVector* assigned_simd128_registers_;
BitVector* fixed_register_use_;
BitVector* fixed_fp_register_use_;
+ BitVector* fixed_simd128_register_use_;
int virtual_register_count_;
RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
@@ -1244,6 +1246,7 @@ class LiveRangeBuilder final : public ZoneObject {
TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode);
TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep,
SpillMode spill_mode);
+ TopLevelLiveRange* FixedSIMD128LiveRangeFor(int index, SpillMode spill_mode);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -1484,6 +1487,8 @@ class LinearScanAllocator final : public RegisterAllocator {
LiveRange* range, const base::Vector<LifetimePosition>& free_until_pos);
void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
int* num_codes, const int** codes) const;
+ void GetSIMD128RegisterSet(int* num_regs, int* num_codes,
+ const int** codes) const;
void FindFreeRegistersForRange(LiveRange* range,
base::Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 94bcbb6244..56bb8c6879 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -126,6 +126,8 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_Root:
+ return MemOperand(kRootRegister, InputInt32(index));
case kMode_MRR:
// TODO(plind): r6 address mode, to be implemented ...
UNREACHABLE();
@@ -1721,7 +1723,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvStoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
if (instr->InputAt(0)->IsSimd128Register()) {
- UNREACHABLE();
+ Register dst = sp;
+ if (i.InputInt32(1) != 0) {
+ dst = kScratchReg2;
+ __ Add64(kScratchReg2, sp, Operand(i.InputInt32(1)));
+ }
+ __ VU.set(kScratchReg, E8, m1);
+ __ vs(i.InputSimd128Register(0), dst, 0, E8);
} else {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, i.InputInt32(1)));
@@ -2749,13 +2757,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
__ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
- __ li(kScratchReg, 1);
- __ vmv_vx(v0, kScratchReg);
- __ li(kScratchReg, imm1);
- __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
__ li(kScratchReg, imm2);
- __ vsll_vi(v0, v0, 1);
- __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ __ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ __ vslideup_vi(kSimd128ScratchReg, kSimd128ScratchReg2, 1);
+ __ li(kScratchReg, imm1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
__ VU.set(kScratchReg, E8, m1);
if (dst == src0) {
@@ -2771,6 +2777,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vor_vv(dst, dst, kSimd128ScratchReg3);
break;
}
+ case kRiscvI8x16Popcnt: {
+ VRegister dst = i.OutputSimd128Register(),
+ src = i.InputSimd128Register(0);
+ Label t;
+
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vv(kSimd128ScratchReg, src);
+ __ vmv_vv(dst, kSimd128RegZero);
+
+ __ bind(&t);
+ __ vmsne_vv(v0, kSimd128ScratchReg, kSimd128RegZero);
+ __ vadd_vi(dst, dst, 1, Mask);
+ __ vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, Mask);
+ __ vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2);
+ // kScratchReg = -1 if kSimd128ScratchReg == 0 i.e. no active element
+ __ vfirst_m(kScratchReg, kSimd128ScratchReg);
+ __ bgez(kScratchReg, &t);
+ break;
+ }
case kRiscvF64x2NearestInt: {
__ Round_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
@@ -2962,6 +2987,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kRiscvF64x2Qfma: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
+ i.InputSimd128Register(0));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Qfms: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
+ i.InputSimd128Register(0));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kRiscvF32x4ExtractLane: {
__ VU.set(kScratchReg, E32, m1);
__ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
@@ -3136,6 +3175,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
+ case kRiscvF32x4RecipApprox: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfrec7_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4RecipSqrtApprox: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfrsqrt7_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Qfma: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
+ i.InputSimd128Register(0));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Qfms: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
+ i.InputSimd128Register(0));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kRiscvI64x2SConvertI32x4Low: {
__ VU.set(kScratchReg, E64, m1);
__ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
@@ -3488,8 +3551,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ Branch(GetLabel(target));
}
void CodeGenerator::AssembleArchTrap(Instruction* instr,
@@ -3786,17 +3850,17 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- int count = base::bits::CountPopulation(saves_fpu);
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
+ int count = saves_fpu.Count();
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- int count = base::bits::CountPopulation(saves);
+ if (!saves.is_empty()) {
+ int count = saves.Count();
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -3847,7 +3911,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -3887,20 +3951,20 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
- required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= saves.Count();
+ required_slots -= saves_fpu.Count();
required_slots -= returns;
if (required_slots > 0) {
__ Sub64(sp, sp, Operand(required_slots * kSystemPointerSize));
}
- if (saves_fpu != 0) {
+ if (!saves_fpu.is_empty()) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
}
- if (saves != 0) {
+ if (!saves.is_empty()) {
// Save callee-saved registers.
__ MultiPush(saves);
}
@@ -3921,13 +3985,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore GP registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
+ const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fpu.is_empty()) {
__ MultiPopFPU(saves_fpu);
}
@@ -3979,9 +4043,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_slots).
- if (!kJSArgcIncludesReceiver) {
- __ Add64(t0, t0, Operand(1)); // Also pop the receiver.
- }
if (parameter_slots > 1) {
Label done;
__ li(kScratchReg, parameter_slots);
@@ -4134,17 +4195,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
VRegister src = g.ToSimd128Register(source);
if (destination->IsSimd128Register()) {
VRegister dst = g.ToSimd128Register(destination);
+ __ VU.set(kScratchReg, E8, m1);
__ vmv_vv(dst, src);
} else {
DCHECK(destination->IsSimd128StackSlot());
- Register dst = g.ToMemOperand(destination).offset() == 0
- ? g.ToMemOperand(destination).rm()
- : kScratchReg;
- if (g.ToMemOperand(destination).offset() != 0) {
- __ Add64(dst, g.ToMemOperand(destination).rm(),
- g.ToMemOperand(destination).offset());
+ __ VU.set(kScratchReg, E8, m1);
+ MemOperand dst = g.ToMemOperand(destination);
+ Register dst_r = dst.rm();
+ if (dst.offset() != 0) {
+ dst_r = kScratchReg;
+ __ Add64(dst_r, dst.rm(), dst.offset());
}
- __ vs(src, dst, 0, E8);
+ __ vs(src, dst_r, 0, E8);
}
} else {
FPURegister src = g.ToDoubleRegister(source);
@@ -4166,24 +4228,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ __ VU.set(kScratchReg, E8, m1);
+ Register src_r = src.rm();
if (src.offset() != 0) {
- __ Add64(src_reg, src.rm(), src.offset());
+ src_r = kScratchReg;
+ __ Add64(src_r, src.rm(), src.offset());
}
if (destination->IsSimd128Register()) {
- __ vl(g.ToSimd128Register(destination), src_reg, 0, E8);
+ __ vl(g.ToSimd128Register(destination), src_r, 0, E8);
} else {
DCHECK(destination->IsSimd128StackSlot());
VRegister temp = kSimd128ScratchReg;
- Register dst = g.ToMemOperand(destination).offset() == 0
- ? g.ToMemOperand(destination).rm()
- : kScratchReg;
- if (g.ToMemOperand(destination).offset() != 0) {
- __ Add64(dst, g.ToMemOperand(destination).rm(),
- g.ToMemOperand(destination).offset());
+ MemOperand dst = g.ToMemOperand(destination);
+ Register dst_r = dst.rm();
+ if (dst.offset() != 0) {
+ dst_r = kScratchReg2;
+ __ Add64(dst_r, dst.rm(), dst.offset());
}
- __ vl(temp, src_reg, 0, E8);
- __ vs(temp, dst, 0, E8);
+ __ vl(temp, src_r, 0, E8);
+ __ vs(temp, dst_r, 0, E8);
}
} else {
if (destination->IsFPRegister()) {
@@ -4214,91 +4277,106 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
RiscvOperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ Ld(src, dst);
- __ Sd(temp, dst);
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
- Register temp_0 = kScratchReg;
- Register temp_1 = kScratchReg2;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ Ld(temp_0, src);
- __ Ld(temp_1, dst);
- __ Sd(temp_0, dst);
- __ Sd(temp_1, src);
- } else if (source->IsFPRegister()) {
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
- } else {
- FPURegister temp = kScratchDoubleReg;
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsFPStackSlot());
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ VRegister src = g.ToDoubleRegister(source).toV();
+ VRegister dst = g.ToDoubleRegister(destination).toV();
+ VRegister temp = kSimd128ScratchReg;
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vv(temp, src);
+ __ vmv_vv(src, dst);
+ __ vmv_vv(dst, temp);
+ }
+ }
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsRegister()) {
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ __ mv(temp, src);
+ __ Ld(src, dst);
+ __ Sd(temp, dst);
+ } else {
MemOperand dst = g.ToMemOperand(destination);
- if (rep == MachineRepresentation::kFloat32) {
- __ MoveFloat(temp, src);
+ if (source->IsFloatRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ DoubleRegister temp = kScratchDoubleReg;
+ __ fmv_s(temp, src);
__ LoadFloat(src, dst);
__ StoreFloat(temp, dst);
- } else {
- DCHECK_EQ(rep, MachineRepresentation::kFloat64);
- __ MoveDouble(temp, src);
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ DoubleRegister temp = kScratchDoubleReg;
+ __ fmv_d(temp, src);
__ LoadDouble(src, dst);
__ StoreDouble(temp, dst);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ VRegister src = g.ToDoubleRegister(source).toV();
+ VRegister temp = kSimd128ScratchReg;
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vv(temp, src);
+ Register dst_v = dst.rm();
+ if (dst.offset() != 0) {
+ dst_v = kScratchReg2;
+ __ Add64(dst_v, dst.rm(), Operand(dst.offset()));
+ }
+ __ vl(src, dst_v, 0, E8);
+ __ vs(temp, dst_v, 0, E8);
}
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- Register temp_0 = kScratchReg;
- MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kIntSize);
- MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
- } else {
- FPURegister temp_1 = kScratchDoubleReg;
- if (rep == MachineRepresentation::kFloat32) {
- __ LoadFloat(temp_1, dst0); // Save destination in temp_1.
- __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ Sw(temp_0, dst0);
- __ StoreFloat(temp_1, src0);
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsSimd128StackSlot()) {
+ __ VU.set(kScratchReg, E8, m1);
+ Register src_v = src.rm();
+ Register dst_v = dst.rm();
+ if (src.offset() != 0) {
+ src_v = kScratchReg;
+ __ Add64(src_v, src.rm(), Operand(src.offset()));
+ }
+ if (dst.offset() != 0) {
+ dst_v = kScratchReg2;
+ __ Add64(dst_v, dst.rm(), Operand(dst.offset()));
+ }
+ __ vl(kSimd128ScratchReg, src_v, 0, E8);
+ __ vl(kSimd128ScratchReg2, dst_v, 0, E8);
+ __ vs(kSimd128ScratchReg, dst_v, 0, E8);
+ __ vs(kSimd128ScratchReg2, src_v, 0, E8);
} else {
- DCHECK_EQ(rep, MachineRepresentation::kFloat64);
- __ LoadDouble(temp_1, dst0); // Save destination in temp_1.
- __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ Sw(temp_0, dst0);
- __ Lw(temp_0, src1);
- __ Sw(temp_0, dst1);
- __ StoreDouble(temp_1, src0);
+ UseScratchRegisterScope scope(tasm());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = kScratchReg2;
+ __ Ld(temp_0, src);
+ __ Ld(temp_1, dst);
+ __ Sd(temp_0, dst);
+ __ Sd(temp_1, src);
}
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 307379be32..aa0d446d22 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -237,6 +237,10 @@ namespace compiler {
V(RiscvF32x4Sqrt) \
V(RiscvF32x4RecipApprox) \
V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Qfma) \
+ V(RiscvF32x4Qfms) \
+ V(RiscvF64x2Qfma) \
+ V(RiscvF64x2Qfms) \
V(RiscvF32x4Add) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
@@ -416,10 +420,12 @@ namespace compiler {
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
+// Root = [kRootregister + immediate]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [root + k] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 7d4e31ce92..23e06507d9 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -121,6 +121,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvF32x4Sqrt:
case kRiscvF32x4RecipApprox:
case kRiscvF32x4RecipSqrtApprox:
+ case kRiscvF64x2Qfma:
+ case kRiscvF64x2Qfms:
+ case kRiscvF32x4Qfma:
+ case kRiscvF32x4Qfms:
case kRiscvF32x4ReplaceLane:
case kRiscvF32x4SConvertI32x4:
case kRiscvF32x4Splat:
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 6ec4df95c2..24593d8cd9 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -374,6 +374,24 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode,
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(output == nullptr ? node : output),
@@ -542,7 +560,7 @@ void InstructionSelector::VisitLoad(Node* node) {
#else
// Fall through.
#endif
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -622,7 +640,7 @@ void InstructionSelector::VisitStore(Node* node) {
#else
UNREACHABLE();
#endif
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -1731,7 +1749,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -1786,7 +1804,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -2785,63 +2803,67 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kRiscvF64x2Abs) \
- V(F64x2Neg, kRiscvF64x2Neg) \
- V(F64x2Sqrt, kRiscvF64x2Sqrt) \
- V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S) \
- V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U) \
- V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4) \
- V(F64x2Ceil, kRiscvF64x2Ceil) \
- V(F64x2Floor, kRiscvF64x2Floor) \
- V(F64x2Trunc, kRiscvF64x2Trunc) \
- V(F64x2NearestInt, kRiscvF64x2NearestInt) \
- V(I64x2Neg, kRiscvI64x2Neg) \
- V(I64x2Abs, kRiscvI64x2Abs) \
- V(I64x2BitMask, kRiscvI64x2BitMask) \
- V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
- V(F32x4Abs, kRiscvF32x4Abs) \
- V(F32x4Neg, kRiscvF32x4Neg) \
- V(F32x4Sqrt, kRiscvF32x4Sqrt) \
- V(F32x4RecipApprox, kRiscvF32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox) \
- V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero) \
- V(F32x4Ceil, kRiscvF32x4Ceil) \
- V(F32x4Floor, kRiscvF32x4Floor) \
- V(F32x4Trunc, kRiscvF32x4Trunc) \
- V(F32x4NearestInt, kRiscvF32x4NearestInt) \
- V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low) \
- V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High) \
- V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low) \
- V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High) \
- V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4) \
- V(I32x4Neg, kRiscvI32x4Neg) \
- V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High) \
- V(I32x4Abs, kRiscvI32x4Abs) \
- V(I32x4BitMask, kRiscvI32x4BitMask) \
- V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
- V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
- V(I16x8Neg, kRiscvI16x8Neg) \
- V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High) \
- V(I16x8Abs, kRiscvI16x8Abs) \
- V(I16x8BitMask, kRiscvI16x8BitMask) \
- V(I8x16Neg, kRiscvI8x16Neg) \
- V(I8x16Abs, kRiscvI8x16Abs) \
- V(I8x16BitMask, kRiscvI8x16BitMask) \
- V(I8x16Popcnt, kRiscvI8x16Popcnt) \
- V(S128Not, kRiscvS128Not) \
- V(V128AnyTrue, kRiscvV128AnyTrue) \
- V(I32x4AllTrue, kRiscvI32x4AllTrue) \
- V(I16x8AllTrue, kRiscvI16x8AllTrue) \
- V(I8x16AllTrue, kRiscvI8x16AllTrue) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kRiscvF64x2Abs) \
+ V(F64x2Neg, kRiscvF64x2Neg) \
+ V(F64x2Sqrt, kRiscvF64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4) \
+ V(F64x2Ceil, kRiscvF64x2Ceil) \
+ V(F64x2Floor, kRiscvF64x2Floor) \
+ V(F64x2Trunc, kRiscvF64x2Trunc) \
+ V(F64x2NearestInt, kRiscvF64x2NearestInt) \
+ V(I64x2Neg, kRiscvI64x2Neg) \
+ V(I64x2Abs, kRiscvI64x2Abs) \
+ V(I64x2BitMask, kRiscvI64x2BitMask) \
+ V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
+ V(F32x4Abs, kRiscvF32x4Abs) \
+ V(F32x4Neg, kRiscvF32x4Neg) \
+ V(F32x4Sqrt, kRiscvF32x4Sqrt) \
+ V(F32x4RecipApprox, kRiscvF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero) \
+ V(F32x4Ceil, kRiscvF32x4Ceil) \
+ V(F32x4Floor, kRiscvF32x4Floor) \
+ V(F32x4Trunc, kRiscvF32x4Trunc) \
+ V(F32x4NearestInt, kRiscvF32x4NearestInt) \
+ V(I32x4RelaxedTruncF32x4S, kRiscvI32x4SConvertF32x4) \
+ V(I32x4RelaxedTruncF32x4U, kRiscvI32x4UConvertF32x4) \
+ V(I32x4RelaxedTruncF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
+ V(I32x4RelaxedTruncF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
+ V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4) \
+ V(I32x4Neg, kRiscvI32x4Neg) \
+ V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High) \
+ V(I32x4Abs, kRiscvI32x4Abs) \
+ V(I32x4BitMask, kRiscvI32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kRiscvI16x8Neg) \
+ V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High) \
+ V(I16x8Abs, kRiscvI16x8Abs) \
+ V(I16x8BitMask, kRiscvI16x8BitMask) \
+ V(I8x16Neg, kRiscvI8x16Neg) \
+ V(I8x16Abs, kRiscvI8x16Abs) \
+ V(I8x16BitMask, kRiscvI8x16BitMask) \
+ V(I8x16Popcnt, kRiscvI8x16Popcnt) \
+ V(S128Not, kRiscvS128Not) \
+ V(V128AnyTrue, kRiscvV128AnyTrue) \
+ V(I32x4AllTrue, kRiscvI32x4AllTrue) \
+ V(I16x8AllTrue, kRiscvI16x8AllTrue) \
+ V(I8x16AllTrue, kRiscvI8x16AllTrue) \
V(I64x2AllTrue, kRiscvI64x2AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -2886,6 +2908,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Ne, kRiscvF32x4Ne) \
V(F32x4Lt, kRiscvF32x4Lt) \
V(F32x4Le, kRiscvF32x4Le) \
+ V(F32x4RelaxedMin, kRiscvF32x4Min) \
+ V(F32x4RelaxedMax, kRiscvF32x4Max) \
+ V(F64x2RelaxedMin, kRiscvF64x2Min) \
+ V(F64x2RelaxedMax, kRiscvF64x2Max) \
V(I32x4Add, kRiscvI32x4Add) \
V(I32x4Sub, kRiscvI32x4Sub) \
V(I32x4Mul, kRiscvI32x4Mul) \
@@ -3024,11 +3050,31 @@ void InstructionSelector::VisitS128Select(Node* node) {
VisitRRRR(this, kRiscvS128Select, node);
}
+#define SIMD_VISIT_SELECT_LANE(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRRR(this, kRiscvS128Select, node); \
+ }
+SIMD_VISIT_SELECT_LANE(I8x16RelaxedLaneSelect)
+SIMD_VISIT_SELECT_LANE(I16x8RelaxedLaneSelect)
+SIMD_VISIT_SELECT_LANE(I32x4RelaxedLaneSelect)
+SIMD_VISIT_SELECT_LANE(I64x2RelaxedLaneSelect)
+#undef SIMD_VISIT_SELECT_LANE
+
+#define VISIT_SIMD_QFMOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRRR(this, instruction, node); \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma, kRiscvF64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms, kRiscvF64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma, kRiscvF32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
RiscvOperandGenerator g(this);
- InstructionOperand temp = g.TempFpRegister(v14);
- InstructionOperand temp1 = g.TempFpRegister(v10);
- InstructionOperand temp2 = g.TempFpRegister(v18);
+ InstructionOperand temp = g.TempFpRegister(v16);
+ InstructionOperand temp1 = g.TempFpRegister(v14);
+ InstructionOperand temp2 = g.TempFpRegister(v30);
InstructionOperand dst = g.DefineAsRegister(node);
this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), g.UseImmediate(E16),
@@ -3223,11 +3269,11 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##S( \
Node* node) { \
RiscvOperandGenerator g(this); \
- InstructionOperand t1 = g.TempFpRegister(v10); \
+ InstructionOperand t1 = g.TempFpRegister(v16); \
Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)), \
g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
g.UseImmediate(m1)); \
- InstructionOperand t2 = g.TempFpRegister(v9); \
+ InstructionOperand t2 = g.TempFpRegister(v17); \
Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)), \
g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
g.UseImmediate(m1)); \
@@ -3245,11 +3291,11 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##U( \
Node* node) { \
RiscvOperandGenerator g(this); \
- InstructionOperand t1 = g.TempFpRegister(v10); \
+ InstructionOperand t1 = g.TempFpRegister(v16); \
Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)), \
g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
g.UseImmediate(m1)); \
- InstructionOperand t2 = g.TempFpRegister(v9); \
+ InstructionOperand t2 = g.TempFpRegister(v17); \
Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)), \
g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
g.UseImmediate(m1)); \
@@ -3288,8 +3334,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
+#ifdef RISCV_HAS_NO_UNALIGNED
return MachineOperatorBuilder::AlignmentRequirements::
NoUnalignedAccessSupport();
+#else
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+#endif
}
#undef SIMD_BINOP_LIST
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 7f478fa120..3128a2303e 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -224,8 +224,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ AddS64(scratch1_, object_, offset_);
}
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -2404,15 +2406,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ShiftRightU32(value_, value_, Operand(16));
}
__ AtomicExchangeU16(r1, value_, output, r0);
+ if (reverse_bytes) {
+ __ lrvr(output, output);
+ __ ShiftRightU32(output, output, Operand(16));
+ }
if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
- if (reverse_bytes) {
- __ lrvr(output, output);
- __ ShiftRightU32(output, output, Operand(16));
- }
break;
}
case kAtomicExchangeWord32: {
@@ -2473,11 +2475,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS16(result, result); \
if (is_wasm_on_be(info()->IsWasm())) { \
__ lrvr(result, result); \
__ ShiftRightS32(result, result, Operand(16)); \
} \
+ __ LoadS16(result, result); \
}); \
break; \
case kAtomic##op##Uint16: \
@@ -2554,107 +2556,160 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, Simd128Register) \
- V(F64x2Sub, Simd128Register) \
- V(F64x2Mul, Simd128Register) \
- V(F64x2Div, Simd128Register) \
- V(F64x2Min, Simd128Register) \
- V(F64x2Max, Simd128Register) \
- V(F64x2Eq, Simd128Register) \
- V(F64x2Ne, Simd128Register) \
- V(F64x2Lt, Simd128Register) \
- V(F64x2Le, Simd128Register) \
- V(F32x4Add, Simd128Register) \
- V(F32x4Sub, Simd128Register) \
- V(F32x4Mul, Simd128Register) \
- V(F32x4Div, Simd128Register) \
- V(F32x4Min, Simd128Register) \
- V(F32x4Max, Simd128Register) \
- V(F32x4Eq, Simd128Register) \
- V(F32x4Ne, Simd128Register) \
- V(F32x4Lt, Simd128Register) \
- V(F32x4Le, Simd128Register) \
- V(I64x2Add, Simd128Register) \
- V(I64x2Sub, Simd128Register) \
- V(I64x2Mul, Simd128Register) \
- V(I64x2Eq, Simd128Register) \
- V(I64x2Ne, Simd128Register) \
- V(I64x2GtS, Simd128Register) \
- V(I64x2GeS, Simd128Register) \
- V(I64x2Shl, Register) \
- V(I64x2ShrS, Register) \
- V(I64x2ShrU, Register) \
- V(I32x4Add, Simd128Register) \
- V(I32x4Sub, Simd128Register) \
- V(I32x4Mul, Simd128Register) \
- V(I32x4Eq, Simd128Register) \
- V(I32x4Ne, Simd128Register) \
- V(I32x4GtS, Simd128Register) \
- V(I32x4GeS, Simd128Register) \
- V(I32x4GtU, Simd128Register) \
- V(I32x4GeU, Simd128Register) \
- V(I32x4MinS, Simd128Register) \
- V(I32x4MinU, Simd128Register) \
- V(I32x4MaxS, Simd128Register) \
- V(I32x4MaxU, Simd128Register) \
- V(I32x4Shl, Register) \
- V(I32x4ShrS, Register) \
- V(I32x4ShrU, Register) \
- V(I16x8Add, Simd128Register) \
- V(I16x8Sub, Simd128Register) \
- V(I16x8Mul, Simd128Register) \
- V(I16x8Eq, Simd128Register) \
- V(I16x8Ne, Simd128Register) \
- V(I16x8GtS, Simd128Register) \
- V(I16x8GeS, Simd128Register) \
- V(I16x8GtU, Simd128Register) \
- V(I16x8GeU, Simd128Register) \
- V(I16x8MinS, Simd128Register) \
- V(I16x8MinU, Simd128Register) \
- V(I16x8MaxS, Simd128Register) \
- V(I16x8MaxU, Simd128Register) \
- V(I16x8Shl, Register) \
- V(I16x8ShrS, Register) \
- V(I16x8ShrU, Register) \
- V(I8x16Add, Simd128Register) \
- V(I8x16Sub, Simd128Register) \
- V(I8x16Eq, Simd128Register) \
- V(I8x16Ne, Simd128Register) \
- V(I8x16GtS, Simd128Register) \
- V(I8x16GeS, Simd128Register) \
- V(I8x16GtU, Simd128Register) \
- V(I8x16GeU, Simd128Register) \
- V(I8x16MinS, Simd128Register) \
- V(I8x16MinU, Simd128Register) \
- V(I8x16MaxS, Simd128Register) \
- V(I8x16MaxU, Simd128Register) \
- V(I8x16Shl, Register) \
- V(I8x16ShrS, Register) \
- V(I8x16ShrU, Register)
-
-#define EMIT_SIMD_BINOP(name, stype) \
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define EMIT_SIMD_SHIFT(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputRegister(1), kScratchDoubleReg); \
+ break; \
+ }
+ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I16x8RoundingAverageU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor) \
+ V(S128AndNot)
+
+#define EMIT_SIMD_BINOP(name) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.Input##stype(1)); \
+ i.InputSimd128Register(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister) \
- V(F32x4Splat, F32x4Splat, Simd128Register, DoubleRegister) \
- V(I64x2Splat, I64x2Splat, Simd128Register, Register) \
- V(I32x4Splat, I32x4Splat, Simd128Register, Register) \
- V(I16x8Splat, I16x8Splat, Simd128Register, Register) \
- V(I8x16Splat, I8x16Splat, Simd128Register, Register)
-
-#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
- case kS390_##name: { \
- __ op(i.Output##dtype(), i.Input##stype(0)); \
- break; \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Splat, Simd128Register, DoubleRegister) \
+ V(F64x2Abs, Simd128Register, Simd128Register) \
+ V(F64x2Neg, Simd128Register, Simd128Register) \
+ V(F64x2Sqrt, Simd128Register, Simd128Register) \
+ V(F64x2Ceil, Simd128Register, Simd128Register) \
+ V(F64x2Floor, Simd128Register, Simd128Register) \
+ V(F64x2Trunc, Simd128Register, Simd128Register) \
+ V(F64x2NearestInt, Simd128Register, Simd128Register) \
+ V(F32x4Splat, Simd128Register, DoubleRegister) \
+ V(F32x4Abs, Simd128Register, Simd128Register) \
+ V(F32x4Neg, Simd128Register, Simd128Register) \
+ V(F32x4Sqrt, Simd128Register, Simd128Register) \
+ V(F32x4Ceil, Simd128Register, Simd128Register) \
+ V(F32x4Floor, Simd128Register, Simd128Register) \
+ V(F32x4Trunc, Simd128Register, Simd128Register) \
+ V(F32x4NearestInt, Simd128Register, Simd128Register) \
+ V(I64x2Splat, Simd128Register, Register) \
+ V(I64x2Abs, Simd128Register, Simd128Register) \
+ V(I64x2Neg, Simd128Register, Simd128Register) \
+ V(I64x2SConvertI32x4Low, Simd128Register, Simd128Register) \
+ V(I64x2SConvertI32x4High, Simd128Register, Simd128Register) \
+ V(I64x2UConvertI32x4Low, Simd128Register, Simd128Register) \
+ V(I64x2UConvertI32x4High, Simd128Register, Simd128Register) \
+ V(I32x4Splat, Simd128Register, Register) \
+ V(I32x4Abs, Simd128Register, Simd128Register) \
+ V(I32x4Neg, Simd128Register, Simd128Register) \
+ V(I32x4SConvertI16x8Low, Simd128Register, Simd128Register) \
+ V(I32x4SConvertI16x8High, Simd128Register, Simd128Register) \
+ V(I32x4UConvertI16x8Low, Simd128Register, Simd128Register) \
+ V(I32x4UConvertI16x8High, Simd128Register, Simd128Register) \
+ V(I16x8Splat, Simd128Register, Register) \
+ V(I16x8Abs, Simd128Register, Simd128Register) \
+ V(I16x8Neg, Simd128Register, Simd128Register) \
+ V(I16x8SConvertI8x16Low, Simd128Register, Simd128Register) \
+ V(I16x8SConvertI8x16High, Simd128Register, Simd128Register) \
+ V(I16x8UConvertI8x16Low, Simd128Register, Simd128Register) \
+ V(I16x8UConvertI8x16High, Simd128Register, Simd128Register) \
+ V(I8x16Splat, Simd128Register, Register) \
+ V(I8x16Abs, Simd128Register, Simd128Register) \
+ V(I8x16Neg, Simd128Register, Simd128Register) \
+ V(S128Not, Simd128Register, Simd128Register)
+
+#define EMIT_SIMD_UNOP(name, dtype, stype) \
+ case kS390_##name: { \
+ __ name(i.Output##dtype(), i.Input##stype(0)); \
+ break; \
}
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
@@ -2670,10 +2725,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I8x16ExtractLaneU, Register) \
V(I8x16ExtractLaneS, Register)
-#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
- case kS390_##name: { \
- __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1)); \
- break; \
+#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
+ case kS390_##name: { \
+ __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1), \
+ kScratchReg); \
+ break; \
}
SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
#undef EMIT_SIMD_EXTRACT_LANE
@@ -2690,103 +2746,127 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#define EMIT_SIMD_REPLACE_LANE(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.Input##stype(2), i.InputInt8(1)); \
+ i.Input##stype(2), i.InputInt8(1), kScratchReg); \
break; \
}
SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
#undef EMIT_SIMD_REPLACE_LANE
#undef SIMD_REPLACE_LANE_LIST
- // vector binops
- case kS390_F64x2Qfma: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vfma(dst, src1, src2, src0, Condition(3), Condition(0));
- break;
- }
- case kS390_F64x2Qfms: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vfnms(dst, src1, src2, src0, Condition(3), Condition(0));
- break;
- }
- case kS390_F32x4Qfma: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vfma(dst, src1, src2, src0, Condition(2), Condition(0));
- break;
- }
- case kS390_F32x4Qfms: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vfnms(dst, src1, src2, src0, Condition(2), Condition(0));
- break;
- }
- case kS390_I16x8RoundingAverageU: {
- __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I8x16RoundingAverageU: {
- __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- // vector unary ops
- case kS390_F64x2Abs: {
- __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(2), Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2Neg: {
- __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(0), Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2Sqrt: {
- __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(0), Condition(0), Condition(3));
- break;
- }
- case kS390_F32x4Abs: {
- __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(2), Condition(0), Condition(2));
- break;
- }
- case kS390_F32x4Neg: {
- __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(0), Condition(0), Condition(2));
- break;
- }
- case kS390_I64x2Neg: {
- __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(3));
+
+#define SIMD_EXT_MUL_LIST(V) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U)
+
+#define EMIT_SIMD_EXT_MUL(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1), kScratchDoubleReg); \
+ break; \
+ }
+ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
+#undef EMIT_SIMD_EXT_MUL
+#undef SIMD_EXT_MUL_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue)
+
+#define EMIT_SIMD_ALL_TRUE(name) \
+ case kS390_##name: { \
+ __ name(i.OutputRegister(), i.InputSimd128Register(0), kScratchReg, \
+ kScratchDoubleReg); \
+ break; \
+ }
+ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
+#undef EMIT_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms)
+
+#define EMIT_SIMD_QFM(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1), i.InputSimd128Register(2)); \
+ break; \
+ }
+ SIMD_QFM_LIST(EMIT_SIMD_QFM)
+#undef EMIT_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+#define SIMD_ADD_SUB_SAT_LIST(V) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU)
+
+#define EMIT_SIMD_ADD_SUB_SAT(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1), kScratchDoubleReg, \
+ i.ToSimd128Register(instr->TempAt(0))); \
+ break; \
+ }
+ SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
+#undef EMIT_SIMD_ADD_SUB_SAT
+#undef SIMD_ADD_SUB_SAT_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U)
+
+#define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ kScratchDoubleReg, i.ToSimd128Register(instr->TempAt(0))); \
+ break; \
+ }
+ SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
+#undef EMIT_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
+ case kS390_I64x2Mul: {
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), r0, r1, ip);
break;
}
- case kS390_I32x4Neg: {
- __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(2));
+ case kS390_I32x4GeU: {
+ __ I32x4GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kS390_I16x8Neg: {
- __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(1));
+ case kS390_I16x8GeU: {
+ __ I16x8GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kS390_I8x16Neg: {
- __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(0));
+ case kS390_I8x16GeU: {
+ __ I8x16GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
+ // vector unary ops
case kS390_F32x4RecipApprox: {
__ mov(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
@@ -2807,114 +2887,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
- case kS390_F32x4Sqrt: {
- __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(0), Condition(0), Condition(2));
- break;
- }
- case kS390_S128Not: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vno(dst, src, src, Condition(0), Condition(0), Condition(0));
- break;
- }
- case kS390_I8x16Abs: {
- __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(0));
- break;
- }
- case kS390_I16x8Abs: {
- __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(1));
- break;
- }
- case kS390_I32x4Abs: {
- __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(2));
- break;
- }
- case kS390_I64x2Abs: {
- __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(3));
- break;
- }
// vector boolean unops
case kS390_V128AnyTrue: {
- Simd128Register src = i.InputSimd128Register(0);
- Register dst = i.OutputRegister();
- __ mov(dst, Operand(1));
- __ xgr(kScratchReg, kScratchReg);
- __ vtm(src, src, Condition(0), Condition(0), Condition(0));
- __ locgr(Condition(8), dst, kScratchReg);
- break;
- }
-#define SIMD_ALL_TRUE(mode) \
- Simd128Register src = i.InputSimd128Register(0); \
- Register dst = i.OutputRegister(); \
- __ mov(kScratchReg, Operand(1)); \
- __ xgr(dst, dst); \
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
- Condition(0), Condition(2)); \
- __ vceq(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0), \
- Condition(mode)); \
- __ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
- Condition(0)); \
- __ locgr(Condition(8), dst, kScratchReg);
- case kS390_I64x2AllTrue: {
- SIMD_ALL_TRUE(3)
- break;
- }
- case kS390_I32x4AllTrue: {
- SIMD_ALL_TRUE(2)
- break;
- }
- case kS390_I16x8AllTrue: {
- SIMD_ALL_TRUE(1)
- break;
- }
- case kS390_I8x16AllTrue: {
- SIMD_ALL_TRUE(0)
+ __ V128AnyTrue(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg);
break;
}
-#undef SIMD_ALL_TRUE
// vector bitwise ops
- case kS390_S128And: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(1);
- __ vn(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_S128Or: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(1);
- __ vo(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_S128Xor: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(1);
- __ vx(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
- Condition(0));
- break;
- }
case kS390_S128Const: {
uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
- __ mov(r0, Operand(low));
- __ mov(ip, Operand(high));
- __ vlvgp(i.OutputSimd128Register(), ip, r0);
+ __ S128Const(i.OutputSimd128Register(), high, low, r0, ip);
break;
}
case kS390_S128Zero: {
Simd128Register dst = i.OutputSimd128Register();
- __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ __ S128Zero(dst, dst);
break;
}
case kS390_S128AllOnes: {
Simd128Register dst = i.OutputSimd128Register();
- __ vceq(dst, dst, dst, Condition(0), Condition(3));
+ __ S128AllOnes(dst, dst);
break;
}
case kS390_S128Select: {
@@ -2922,604 +2915,145 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register mask = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register src2 = i.InputSimd128Register(2);
- __ vsel(dst, src1, src2, mask, Condition(0), Condition(0));
- break;
- }
- case kS390_S128AndNot: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(1);
- __ vnc(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
- Condition(0));
+ __ S128Select(dst, src1, src2, mask);
break;
}
// vector conversions
-#define CONVERT_FLOAT_TO_INT32(convert) \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index), \
- Condition(2)); \
- __ MovIntToFloat(tempFPReg1, kScratchReg); \
- __ convert(kScratchReg, tempFPReg1, kRoundToZero); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
case kS390_I32x4SConvertF32x4: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToDoubleRegister(instr->TempAt(0));
- DCHECK_NE(dst, tempFPReg1);
- // NaN to 0
- __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
- __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(2));
- __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(0));
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- __ vcgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
- Condition(0), Condition(2));
- } else {
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
- }
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchReg);
break;
}
case kS390_I32x4UConvertF32x4: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToDoubleRegister(instr->TempAt(0));
- DCHECK_NE(dst, tempFPReg1);
- // NaN to 0, negative to 0
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
- Condition(0), Condition(2));
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- __ vclgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
- Condition(0), Condition(2));
- } else {
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
- }
+ __ I32x4UConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchReg);
break;
}
-#undef CONVERT_FLOAT_TO_INT32
-#define CONVERT_INT32_TO_FLOAT(convert, double_index) \
- Simd128Register src = i.InputSimd128Register(0); \
- Simd128Register dst = i.OutputSimd128Register(); \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
- __ convert(kScratchDoubleReg, kScratchReg); \
- __ MovFloatToInt(kScratchReg, kScratchDoubleReg); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
case kS390_F32x4SConvertI32x4: {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- __ vcdg(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(4), Condition(0), Condition(2));
- } else {
- CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
- }
+ __ F32x4SConvertI32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchReg);
break;
}
case kS390_F32x4UConvertI32x4: {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- __ vcdlg(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(4), Condition(0), Condition(2));
- } else {
- CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
- }
- break;
- }
-#undef CONVERT_INT32_TO_FLOAT
-#define VECTOR_UNPACK(op, mode) \
- __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
- Condition(0), Condition(mode));
- case kS390_I64x2SConvertI32x4Low: {
- VECTOR_UNPACK(vupl, 2)
- break;
- }
- case kS390_I64x2SConvertI32x4High: {
- VECTOR_UNPACK(vuph, 2)
- break;
- }
- case kS390_I64x2UConvertI32x4Low: {
- VECTOR_UNPACK(vupll, 2)
- break;
- }
- case kS390_I64x2UConvertI32x4High: {
- VECTOR_UNPACK(vuplh, 2)
- break;
- }
- case kS390_I32x4SConvertI16x8Low: {
- VECTOR_UNPACK(vupl, 1)
- break;
- }
- case kS390_I32x4SConvertI16x8High: {
- VECTOR_UNPACK(vuph, 1)
- break;
- }
- case kS390_I32x4UConvertI16x8Low: {
- VECTOR_UNPACK(vupll, 1)
- break;
- }
- case kS390_I32x4UConvertI16x8High: {
- VECTOR_UNPACK(vuplh, 1)
- break;
- }
- case kS390_I16x8SConvertI8x16Low: {
- VECTOR_UNPACK(vupl, 0)
+ __ F32x4UConvertI32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchReg);
break;
}
- case kS390_I16x8SConvertI8x16High: {
- VECTOR_UNPACK(vuph, 0)
+ case kS390_I16x8SConvertI32x4: {
+ __ I16x8SConvertI32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kS390_I16x8UConvertI8x16Low: {
- VECTOR_UNPACK(vupll, 0)
+ case kS390_I8x16SConvertI16x8: {
+ __ I8x16SConvertI16x8(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kS390_I16x8UConvertI8x16High: {
- VECTOR_UNPACK(vuplh, 0)
- break;
- }
-#undef VECTOR_UNPACK
- case kS390_I16x8SConvertI32x4:
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(2));
- break;
- case kS390_I8x16SConvertI16x8:
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(1));
- break;
-#define VECTOR_PACK_UNSIGNED(mode) \
- Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
- Condition(0), Condition(mode)); \
- __ vmx(tempFPReg, i.InputSimd128Register(0), kScratchDoubleReg, \
- Condition(0), Condition(0), Condition(mode)); \
- __ vmx(kScratchDoubleReg, i.InputSimd128Register(1), kScratchDoubleReg, \
- Condition(0), Condition(0), Condition(mode));
case kS390_I16x8UConvertI32x4: {
- // treat inputs as signed, and saturate to unsigned (negative to 0)
- VECTOR_PACK_UNSIGNED(2)
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
- Condition(0), Condition(2));
+ __ I16x8UConvertI32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kS390_I8x16UConvertI16x8: {
- // treat inputs as signed, and saturate to unsigned (negative to 0)
- VECTOR_PACK_UNSIGNED(1)
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
- Condition(0), Condition(1));
- break;
- }
-#undef VECTOR_PACK_UNSIGNED
-#define BINOP_EXTRACT(op, extract_high, extract_low, mode) \
- Simd128Register src1 = i.InputSimd128Register(0); \
- Simd128Register src2 = i.InputSimd128Register(1); \
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)); \
- Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1)); \
- DCHECK_NE(src1, tempFPReg1); \
- DCHECK_NE(src2, tempFPReg1); \
- __ extract_high(kScratchDoubleReg, src1, Condition(0), Condition(0), \
- Condition(mode)); \
- __ extract_high(tempFPReg1, src2, Condition(0), Condition(0), \
- Condition(mode)); \
- __ op(kScratchDoubleReg, kScratchDoubleReg, tempFPReg1, Condition(0), \
- Condition(0), Condition(mode + 1)); \
- __ extract_low(tempFPReg1, src1, Condition(0), Condition(0), \
- Condition(mode)); \
- __ extract_low(tempFPReg2, src2, Condition(0), Condition(0), \
- Condition(mode)); \
- __ op(tempFPReg1, tempFPReg1, tempFPReg2, Condition(0), Condition(0), \
- Condition(mode + 1));
- case kS390_I16x8AddSatS: {
- BINOP_EXTRACT(va, vuph, vupl, 1)
- __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(2));
- break;
- }
- case kS390_I16x8SubSatS: {
- BINOP_EXTRACT(vs, vuph, vupl, 1)
- __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(2));
- break;
- }
- case kS390_I16x8AddSatU: {
- BINOP_EXTRACT(va, vuplh, vupll, 1)
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(2));
- break;
- }
- case kS390_I16x8SubSatU: {
- BINOP_EXTRACT(vs, vuplh, vupll, 1)
- // negative to 0
- __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
- Condition(0));
- __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(2));
- __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
- Condition(2));
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(2));
- break;
- }
- case kS390_I8x16AddSatS: {
- BINOP_EXTRACT(va, vuph, vupl, 0)
- __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(1));
- break;
- }
- case kS390_I8x16SubSatS: {
- BINOP_EXTRACT(vs, vuph, vupl, 0)
- __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(1));
- break;
- }
- case kS390_I8x16AddSatU: {
- BINOP_EXTRACT(va, vuplh, vupll, 0)
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(1));
- break;
- }
- case kS390_I8x16SubSatU: {
- BINOP_EXTRACT(vs, vuplh, vupll, 0)
- // negative to 0
- __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
- Condition(0));
- __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(1));
- __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
- Condition(1));
- __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(1));
+ __ I8x16UConvertI16x8(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
-#undef BINOP_EXTRACT
case kS390_I8x16Shuffle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
- __ mov(r0, Operand(low));
- __ mov(ip, Operand(high));
- __ vlvgp(kScratchDoubleReg, ip, r0);
- __ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
+ __ I8x16Shuffle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), high, low, r0, ip,
+ kScratchDoubleReg);
break;
}
case kS390_I8x16Swizzle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- DCHECK_NE(src0, tempFPReg1);
- // Saturate the indices to 5 bits. Input indices more than 31 should
- // return 0.
- __ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
- __ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(0));
- // input needs to be reversed
- __ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
- __ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
- __ lrvgr(r0, r0);
- __ lrvgr(r1, r1);
- __ vlvgp(dst, r1, r0);
- // clear scratch
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
- Condition(0));
+ __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), r0, r1, kScratchDoubleReg,
+ i.ToSimd128Register(instr->TempAt(0)));
break;
}
case kS390_I64x2BitMask: {
- __ mov(kScratchReg, Operand(0x80800040));
- __ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
- __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
- Condition(0));
+ __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchDoubleReg);
break;
}
case kS390_I32x4BitMask: {
- __ mov(kScratchReg, Operand(0x204060));
- __ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
- __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
- Condition(0));
+ __ I32x4BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchDoubleReg);
break;
}
case kS390_I16x8BitMask: {
- __ mov(kScratchReg, Operand(0x40506070));
- __ iihf(kScratchReg, Operand(0x102030));
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
- __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
- Condition(0));
+ __ I16x8BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchDoubleReg);
break;
}
case kS390_I8x16BitMask: {
- __ mov(r0, Operand(0x60687078));
- __ iihf(r0, Operand(0x40485058));
- __ mov(ip, Operand(0x20283038));
- __ iihf(ip, Operand(0x81018));
- __ vlvgp(kScratchDoubleReg, ip, r0);
- __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 3),
- Condition(1));
- break;
- }
- case kS390_F32x4Pmin: {
- __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(3), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Pmax: {
- __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(3), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F64x2Pmin: {
- __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(3), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Pmax: {
- __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(3), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Ceil: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
- Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2Floor: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
- Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2Trunc: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
- Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2NearestInt: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
- Condition(0), Condition(3));
- break;
- }
- case kS390_F32x4Ceil: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
- Condition(0), Condition(2));
- break;
- }
- case kS390_F32x4Floor: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
- Condition(0), Condition(2));
- break;
- }
- case kS390_F32x4Trunc: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
- Condition(0), Condition(2));
- break;
- }
- case kS390_F32x4NearestInt: {
- __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
- Condition(0), Condition(2));
+ __ I8x16BitMask(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
+ kScratchDoubleReg);
break;
}
case kS390_I32x4DotI16x8S: {
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- __ vme(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- __ vmo(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(1),
- Condition(0), Condition(0), Condition(1));
- __ va(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- Condition(0), Condition(0), Condition(2));
- break;
- }
-#define EXT_MUL(mul_even, mul_odd, merge, mode) \
- Simd128Register dst = i.OutputSimd128Register(), \
- src0 = i.InputSimd128Register(0), \
- src1 = i.InputSimd128Register(1); \
- __ mul_even(kScratchDoubleReg, src0, src1, Condition(0), Condition(0), \
- Condition(mode)); \
- __ mul_odd(dst, src0, src1, Condition(0), Condition(0), Condition(mode)); \
- __ merge(dst, kScratchDoubleReg, dst, Condition(0), Condition(0), \
- Condition(mode + 1));
- case kS390_I64x2ExtMulLowI32x4S: {
- EXT_MUL(vme, vmo, vmrl, 2)
- break;
- }
- case kS390_I64x2ExtMulHighI32x4S: {
- EXT_MUL(vme, vmo, vmrh, 2)
- break;
- }
- case kS390_I64x2ExtMulLowI32x4U: {
- EXT_MUL(vmle, vmlo, vmrl, 2)
- break;
- }
- case kS390_I64x2ExtMulHighI32x4U: {
- EXT_MUL(vmle, vmlo, vmrh, 2)
- break;
- }
- case kS390_I32x4ExtMulLowI16x8S: {
- EXT_MUL(vme, vmo, vmrl, 1)
- break;
- }
- case kS390_I32x4ExtMulHighI16x8S: {
- EXT_MUL(vme, vmo, vmrh, 1)
- break;
- }
- case kS390_I32x4ExtMulLowI16x8U: {
- EXT_MUL(vmle, vmlo, vmrl, 1)
- break;
- }
- case kS390_I32x4ExtMulHighI16x8U: {
- EXT_MUL(vmle, vmlo, vmrh, 1)
- break;
- }
-
- case kS390_I16x8ExtMulLowI8x16S: {
- EXT_MUL(vme, vmo, vmrl, 0)
+ __ I32x4DotI16x8S(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kS390_I16x8ExtMulHighI8x16S: {
- EXT_MUL(vme, vmo, vmrh, 0)
- break;
- }
- case kS390_I16x8ExtMulLowI8x16U: {
- EXT_MUL(vmle, vmlo, vmrl, 0)
- break;
- }
- case kS390_I16x8ExtMulHighI8x16U: {
- EXT_MUL(vmle, vmlo, vmrh, 0)
- break;
- }
-#undef EXT_MUL
-#define EXT_ADD_PAIRWISE(lane_size, mul_even, mul_odd) \
- Simd128Register src = i.InputSimd128Register(0); \
- Simd128Register dst = i.OutputSimd128Register(); \
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)); \
- DCHECK_NE(src, tempFPReg1); \
- __ vrepi(tempFPReg1, Operand(1), Condition(lane_size)); \
- __ mul_even(kScratchDoubleReg, src, tempFPReg1, Condition(0), Condition(0), \
- Condition(lane_size)); \
- __ mul_odd(tempFPReg1, src, tempFPReg1, Condition(0), Condition(0), \
- Condition(lane_size)); \
- __ va(dst, kScratchDoubleReg, tempFPReg1, Condition(0), Condition(0), \
- Condition(lane_size + 1));
- case kS390_I32x4ExtAddPairwiseI16x8S: {
- EXT_ADD_PAIRWISE(1, vme, vmo)
- break;
- }
- case kS390_I32x4ExtAddPairwiseI16x8U: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(3));
- __ vsum(dst, src0, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I16x8ExtAddPairwiseI8x16S: {
- EXT_ADD_PAIRWISE(0, vme, vmo)
- break;
- }
- case kS390_I16x8ExtAddPairwiseI8x16U: {
- EXT_ADD_PAIRWISE(0, vmle, vmlo)
+ case kS390_I16x8Q15MulRSatS: {
+ __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ i.ToSimd128Register(instr->TempAt(0)),
+ i.ToSimd128Register(instr->TempAt(1)));
break;
}
-#undef EXT_ADD_PAIRWISE
-#define Q15_MUL_ROAUND(accumulator, unpack) \
- __ unpack(tempFPReg1, src0, Condition(0), Condition(0), Condition(1)); \
- __ unpack(accumulator, src1, Condition(0), Condition(0), Condition(1)); \
- __ vml(accumulator, tempFPReg1, accumulator, Condition(0), Condition(0), \
- Condition(2)); \
- __ va(accumulator, accumulator, tempFPReg2, Condition(0), Condition(0), \
- Condition(2)); \
- __ vrepi(tempFPReg1, Operand(15), Condition(2)); \
- __ vesrav(accumulator, accumulator, tempFPReg1, Condition(0), Condition(0), \
- Condition(2));
- case kS390_I16x8Q15MulRSatS: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
- DCHECK_NE(src1, tempFPReg1);
- DCHECK_NE(src0, tempFPReg2);
- DCHECK_NE(src1, tempFPReg2);
- __ vrepi(tempFPReg2, Operand(0x4000), Condition(2));
- Q15_MUL_ROAUND(kScratchDoubleReg, vupl)
- Q15_MUL_ROAUND(dst, vuph)
- __ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(2));
- break;
- }
-#undef Q15_MUL_ROAUND
case kS390_I8x16Popcnt: {
- __ vpopct(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(0), Condition(0), Condition(0));
+ __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kS390_F64x2ConvertLowI32x4S: {
- __ vupl(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(2));
- __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4),
- Condition(0), Condition(3));
+ __ F64x2ConvertLowI32x4S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kS390_F64x2ConvertLowI32x4U: {
- __ vupll(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
- Condition(0), Condition(2));
- __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4),
- Condition(0), Condition(3));
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kS390_F64x2PromoteLowF32x4: {
- Register holder = r1;
- for (int index = 0; index < 2; ++index) {
- __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index + 2),
- Condition(2));
- __ MovIntToFloat(kScratchDoubleReg, r0);
- __ ldebr(kScratchDoubleReg, kScratchDoubleReg);
- __ MovDoubleToInt64(holder, kScratchDoubleReg);
- holder = ip;
- }
- __ vlvgp(i.OutputSimd128Register(), r1, ip);
+ __ F64x2PromoteLowF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg, r0,
+ r1, ip);
break;
}
case kS390_F32x4DemoteF64x2Zero: {
- Simd128Register dst = i.OutputSimd128Register();
- Register holder = r1;
- for (int index = 0; index < 2; ++index) {
- __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index),
- Condition(3));
- __ MovInt64ToDouble(kScratchDoubleReg, r0);
- __ ledbr(kScratchDoubleReg, kScratchDoubleReg);
- __ MovFloatToInt(holder, kScratchDoubleReg);
- holder = ip;
- }
- __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
- __ vlvg(dst, r1, MemOperand(r0, 2), Condition(2));
- __ vlvg(dst, ip, MemOperand(r0, 3), Condition(2));
+ __ F32x4DemoteF64x2Zero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg, r0,
+ r1, ip);
break;
}
case kS390_I32x4TruncSatF64x2SZero: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- // NaN to 0
- __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
- __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(3));
- __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(0));
- __ vcgd(kScratchDoubleReg, kScratchDoubleReg, Condition(5), Condition(0),
- Condition(3));
- __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
- __ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kS390_I32x4TruncSatF64x2UZero: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vclgd(kScratchDoubleReg, i.InputSimd128Register(0), Condition(5),
- Condition(0), Condition(3));
- __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
- __ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
#define LOAD_SPLAT(type) \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
Simd128Register dst = i.OutputSimd128Register(); \
- __ LoadAndSplat##type##LE(dst, operand);
+ __ LoadAndSplat##type##LE(dst, operand, kScratchReg);
case kS390_S128Load64Splat: {
LOAD_SPLAT(64x2);
break;
@@ -3541,7 +3075,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
Simd128Register dst = i.OutputSimd128Register(); \
- __ LoadAndExtend##type##LE(dst, operand);
+ __ LoadAndExtend##type##LE(dst, operand, kScratchReg);
case kS390_S128Load32x2U: {
LOAD_EXTEND(32x2U);
break;
@@ -3571,7 +3105,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
Simd128Register dst = i.OutputSimd128Register(); \
- __ LoadV##type##ZeroLE(dst, operand);
+ __ LoadV##type##ZeroLE(dst, operand, kScratchReg);
case kS390_S128Load32Zero: {
LOAD_AND_ZERO(32);
break;
@@ -3588,7 +3122,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index); \
Simd128Register dst = i.OutputSimd128Register(); \
DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- __ LoadLane##type##LE(dst, operand, lane);
+ __ LoadLane##type##LE(dst, operand, lane, kScratchReg);
case kS390_S128Load8Lane: {
LOAD_LANE(8, 15 - i.InputUint8(1));
break;
@@ -3611,7 +3145,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 2; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Simd128Register src = i.InputSimd128Register(0); \
- __ StoreLane##type##LE(src, operand, lane);
+ __ StoreLane##type##LE(src, operand, lane, kScratchReg);
case kS390_S128Store8Lane: {
STORE_LANE(8, 15 - i.InputUint8(1));
break;
@@ -3686,8 +3220,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
AssembleArchBranch(instr, branch);
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ b(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3824,19 +3359,18 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
- if (double_saves != 0) {
+ if (!double_saves.is_empty()) {
frame->AlignSavedCalleeRegisterSlots();
- DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles, double_saves.Count());
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kSystemPointerSize));
}
// Save callee-saved registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
// register save area does not include the fp or constant pool pointer.
const int num_saves = kNumCalleeSaved - 1;
frame->AllocateSavedCalleeRegisterSlots(num_saves);
@@ -3898,7 +3432,7 @@ void CodeGenerator::AssembleConstructFrame() {
required_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (required_slots > 0) {
@@ -3938,21 +3472,20 @@ void CodeGenerator::AssembleConstructFrame() {
#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
- required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= saves.Count();
required_slots -= frame()->GetReturnSlotCount();
- required_slots -= (kDoubleSize / kSystemPointerSize) *
- base::bits::CountPopulation(saves_fp);
+ required_slots -= (kDoubleSize / kSystemPointerSize) * saves_fp.Count();
__ lay(sp, MemOperand(sp, -required_slots * kSystemPointerSize));
}
// Save callee-saved Double registers.
- if (saves_fp != 0) {
+ if (!saves_fp.is_empty()) {
__ MultiPushDoubles(saves_fp);
- DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
+ DCHECK_EQ(kNumCalleeSavedDoubles, saves_fp.Count());
}
// Save callee-saved registers.
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
}
@@ -3973,20 +3506,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
+ const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ if (!double_saves.is_empty()) {
__ MultiPopDoubles(double_saves);
}
unwinding_info_writer_.MarkBlockWillExit();
- // We might need r3 for scratch.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r5.bit());
S390OperandConverter g(this, nullptr);
const int parameter_slots =
static_cast<int>(call_descriptor->ParameterSlotCount());
@@ -4027,7 +3558,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// Get the actual argument count.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
__ LoadU64(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
}
AssembleDeconstructFrame();
@@ -4038,26 +3569,17 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
if (parameter_slots > 1) {
- if (kJSArgcIncludesReceiver) {
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots));
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots));
- __ bind(&skip);
- } else {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver));
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots_without_receiver));
- __ bind(&skip);
- }
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots));
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots));
+ __ bind(&skip);
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+
+ TurboAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 8fc0830fad..3f6b89a7b5 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -292,7 +292,7 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
#ifdef V8_COMPRESS_POINTERS
opcode = kS390_LoadWordS32;
break;
@@ -775,7 +775,7 @@ static void VisitGeneralStore(
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer: // Fall through.
+ case MachineRepresentation::kSandboxedPointer: // Fall through.
#ifdef V8_COMPRESS_POINTERS
opcode = kS390_StoreCompressTagged;
break;
@@ -2470,6 +2470,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
+ V(I32x4DotI16x8S) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
@@ -2484,6 +2485,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
V(I16x8RoundingAverageU) \
V(I16x8ExtMulLowI8x16S) \
V(I16x8ExtMulHighI8x16S) \
@@ -2505,6 +2507,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
V(I8x16RoundingAverageU) \
V(I8x16Shl) \
V(I8x16ShrS) \
@@ -2515,19 +2518,16 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(S128AndNot)
#define SIMD_BINOP_UNIQUE_REGISTER_LIST(V) \
- V(I32x4DotI16x8S) \
V(I16x8AddSatS) \
V(I16x8SubSatS) \
V(I16x8AddSatU) \
V(I16x8SubSatU) \
V(I16x8Q15MulRSatS) \
- V(I16x8UConvertI32x4) \
V(I8x16AddSatS) \
V(I8x16SubSatS) \
V(I8x16AddSatU) \
V(I8x16SubSatU) \
- V(I8x16Swizzle) \
- V(I8x16UConvertI16x8)
+ V(I8x16Swizzle)
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs) \
@@ -2565,6 +2565,8 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2AllTrue) \
V(I32x4Neg) \
V(I32x4Abs) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4UConvertI16x8Low) \
@@ -2593,8 +2595,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(V128AnyTrue)
#define SIMD_UNOP_UNIQUE_REGISTER_LIST(V) \
- V(I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4) \
V(I32x4ExtAddPairwiseI16x8S) \
V(I32x4ExtAddPairwiseI16x8U) \
V(I16x8ExtAddPairwiseI8x16S) \
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 5022c188ab..949fc1ad43 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -288,8 +288,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
- : RememberedSetAction::kOmit;
+ mode_ > RecordWriteMode::kValueIsMap ||
+ FLAG_use_full_record_write_builtin
+ ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -344,8 +346,8 @@ void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value);
break;
- case MachineRepresentation::kCagedPointer:
- tasm->StoreCagedPointerField(operand, value);
+ case MachineRepresentation::kSandboxedPointer:
+ tasm->StoreSandboxedPointerField(operand, value);
break;
default:
UNREACHABLE();
@@ -514,11 +516,11 @@ template <std::memory_order order>
Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
X64OperandConverter& i,
MachineRepresentation rep) {
- if (rep == MachineRepresentation::kCagedPointer) {
- // CagedPointers need to be encoded.
+ if (rep == MachineRepresentation::kSandboxedPointer) {
+ // SandboxedPointers need to be encoded.
Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value);
- tasm->EncodeCagedPointer(value_reg);
+ tasm->EncodeSandboxedPointer(value_reg);
return value_reg;
}
return value;
@@ -535,9 +537,9 @@ Register GetTSANValueRegister<std::memory_order_relaxed>(
MachineRepresentation rep) {
Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value);
- if (rep == MachineRepresentation::kCagedPointer) {
- // CagedPointers need to be encoded.
- tasm->EncodeCagedPointer(value_reg);
+ if (rep == MachineRepresentation::kSandboxedPointer) {
+ // SandboxedPointers need to be encoded.
+ tasm->EncodeSandboxedPointer(value_reg);
}
return value_reg;
}
@@ -1074,8 +1076,9 @@ void AdjustStackPointerForTailCall(Instruction* instr,
// value before frame construction.
// See also: AssembleConstructFrame.
DCHECK(!info->is_osr());
- DCHECK_EQ(linkage->GetIncomingDescriptor()->CalleeSavedRegisters(), 0);
- DCHECK_EQ(linkage->GetIncomingDescriptor()->CalleeSavedFPRegisters(), 0);
+ DCHECK(linkage->GetIncomingDescriptor()->CalleeSavedRegisters().is_empty());
+ DCHECK(
+ linkage->GetIncomingDescriptor()->CalleeSavedFPRegisters().is_empty());
DCHECK_EQ(state->frame()->GetReturnSlotCount(), 0);
stack_slot_delta = (state->frame()->GetTotalFrameSlotCount() -
kReturnAddressStackSlotCount) *
@@ -1173,16 +1176,41 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
+bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g,
+ Instruction* instr) {
+ X64OperandConverter i(g, instr);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode == kFlags_set) {
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ if (condition != kUnorderedEqual && condition != kUnorderedNotEqual) {
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ // Do not clear output register when it is also input register.
+ for (size_t index = 0; index < instr->InputCount(); ++index) {
+ if (HasRegisterInput(instr, index) && reg == i.InputRegister(index))
+ return false;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
X64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ if (ShouldClearOutputRegisterBeforeInstruction(this, instr)) {
+ // Transform setcc + movzxbl into xorl + setcc to avoid register stall and
+ // encode one byte shorter.
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ __ xorl(reg, reg);
+ }
switch (arch_opcode) {
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = i.InputCode(0);
+ Handle<CodeT> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1242,7 +1270,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = i.InputCode(0);
+ Handle<CodeT> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1384,7 +1412,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
+ __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -2386,18 +2414,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64MovqDecodeCagedPointer: {
+ case kX64MovqDecodeSandboxedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
Register dst = i.OutputRegister();
__ movq(dst, address);
- __ DecodeCagedPointer(dst);
+ __ DecodeSandboxedPointer(dst);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(),
kSystemPointerSize);
break;
}
- case kX64MovqEncodeCagedPointer: {
+ case kX64MovqEncodeSandboxedPointer: {
CHECK(!instr->HasOutput());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -2405,7 +2433,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
- MachineRepresentation::kCagedPointer);
+ MachineRepresentation::kSandboxedPointer);
break;
}
case kX64Movq:
@@ -4428,8 +4456,9 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
}
}
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+ RpoNumber target) {
+ __ jmp(GetLabel(target));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -4470,7 +4499,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
__ bind(&check);
__ setcc(FlagsConditionToCondition(condition), reg);
- __ movzxbl(reg, reg);
+ if (!ShouldClearOutputRegisterBeforeInstruction(this, instr)) {
+ __ movzxbl(reg, reg);
+ }
__ bind(&done);
}
@@ -4550,22 +4581,16 @@ static const int kQuadWordSize = 16;
void CodeGenerator::FinishFrame(Frame* frame) {
CallDescriptor* call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fp.is_empty()) { // Save callee-saved XMM registers.
frame->AlignSavedCalleeRegisterSlots();
- const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
+ const uint32_t saves_fp_count = saves_fp.Count();
frame->AllocateSavedCalleeRegisterSlots(
saves_fp_count * (kQuadWordSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int count = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (((1 << i) & saves)) {
- ++count;
- }
- }
- frame->AllocateSavedCalleeRegisterSlots(count);
+ if (!saves.is_empty()) { // Save callee-saved registers.
+ frame->AllocateSavedCalleeRegisterSlots(saves.Count());
}
}
@@ -4624,7 +4649,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
@@ -4662,34 +4687,30 @@ void CodeGenerator::AssembleConstructFrame() {
#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are created below.
- required_slots -= base::bits::CountPopulation(saves);
- required_slots -= base::bits::CountPopulation(saves_fp) *
- (kQuadWordSize / kSystemPointerSize);
+ required_slots -= saves.Count();
+ required_slots -= saves_fp.Count() * (kQuadWordSize / kSystemPointerSize);
required_slots -= frame()->GetReturnSlotCount();
if (required_slots > 0) {
__ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
- if (saves_fp != 0) { // Save callee-saved XMM registers.
- const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
+ if (!saves_fp.is_empty()) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = saves_fp.Count();
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
__ AllocateStackSpace(stack_size);
// Store the registers on the stack.
int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- if (!((1 << i) & saves_fp)) continue;
- __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx),
- XMMRegister::from_code(i));
+ for (XMMRegister reg : saves_fp) {
+ __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx), reg);
slot_idx++;
}
}
- if (saves != 0) { // Save callee-saved registers.
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ pushq(Register::from_code(i));
+ if (!saves.is_empty()) { // Save callee-saved registers.
+ for (Register reg : base::Reversed(saves)) {
+ __ pushq(reg);
}
}
@@ -4704,26 +4725,23 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (saves != 0) {
+ if (!saves.is_empty()) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
__ addq(rsp, Immediate(returns * kSystemPointerSize));
}
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ popq(Register::from_code(i));
+ for (Register reg : saves) {
+ __ popq(reg);
}
}
- const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
+ const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ if (!saves_fp.is_empty()) {
+ const uint32_t saves_fp_count = saves_fp.Count();
const int stack_size = saves_fp_count * kQuadWordSize;
// Load the registers from the stack.
int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- if (!((1 << i) & saves_fp)) continue;
- __ Movdqu(XMMRegister::from_code(i),
- Operand(rsp, kQuadWordSize * slot_idx));
+ for (XMMRegister reg : saves_fp) {
+ __ Movdqu(reg, Operand(rsp, kQuadWordSize * slot_idx));
slot_idx++;
}
// Adjust the stack pointer.
@@ -4769,7 +4787,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// Get the actual argument count.
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
__ movq(argc_reg, Operand(rbp, StandardFrameConstants::kArgCOffset));
}
AssembleDeconstructFrame();
@@ -4783,26 +4801,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- if (kJSArgcIncludesReceiver) {
- __ cmpq(argc_reg, Immediate(parameter_slots));
- } else {
- int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
- }
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
+ __ cmpq(argc_reg, Immediate(parameter_slots));
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- kJSArgcIncludesReceiver
- ? TurboAssembler::kCountIncludesReceiver
- : TurboAssembler::kCountExcludesReceiver);
+ TurboAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = r10;
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
@@ -4810,8 +4821,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == r10 ? rcx : r10;
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & pop_reg.bit());
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
+ DCHECK(!call_descriptor->CalleeSavedRegisters().has(pop_reg));
int pop_size = static_cast<int>(parameter_slots * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index bf2c9b00c7..c2c6ca946e 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -163,8 +163,8 @@ namespace compiler {
V(X64MovqDecompressTaggedPointer) \
V(X64MovqDecompressAnyTagged) \
V(X64MovqCompressTagged) \
- V(X64MovqEncodeCagedPointer) \
- V(X64MovqDecodeCagedPointer) \
+ V(X64MovqEncodeSandboxedPointer) \
+ V(X64MovqDecodeSandboxedPointer) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index a0a972b4e5..4cc187ae06 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -396,8 +396,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64MovqDecompressTaggedPointer:
case kX64MovqDecompressAnyTagged:
case kX64MovqCompressTagged:
- case kX64MovqDecodeCagedPointer:
- case kX64MovqEncodeCagedPointer:
+ case kX64MovqDecodeSandboxedPointer:
+ case kX64MovqEncodeSandboxedPointer:
case kX64Movq:
case kX64Movsd:
case kX64Movss:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 2c6e4ad671..a30b50183c 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -297,8 +297,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
- case MachineRepresentation::kCagedPointer:
- opcode = kX64MovqDecodeCagedPointer;
+ case MachineRepresentation::kSandboxedPointer:
+ opcode = kX64MovqDecodeSandboxedPointer;
break;
case MachineRepresentation::kSimd128:
opcode = kX64Movdqu;
@@ -336,8 +336,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
return kX64MovqCompressTagged;
case MachineRepresentation::kWord64:
return kX64Movq;
- case MachineRepresentation::kCagedPointer:
- return kX64MovqEncodeCagedPointer;
+ case MachineRepresentation::kSandboxedPointer:
+ return kX64MovqEncodeSandboxedPointer;
case MachineRepresentation::kSimd128:
return kX64Movdqu;
case MachineRepresentation::kNone: // Fall through.
@@ -2092,9 +2092,14 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
return hint;
}
} else if (hint == MachineType::Int32()) {
- return hint;
+ if (constant >= std::numeric_limits<int32_t>::min() &&
+ constant <= std::numeric_limits<int32_t>::max()) {
+ return hint;
+ }
} else if (hint == MachineType::Uint32()) {
- if (constant >= 0) return hint;
+ if (constant >= std::numeric_limits<uint32_t>::min() &&
+ constant <= std::numeric_limits<uint32_t>::max())
+ return hint;
}
}
}
@@ -2104,21 +2109,81 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
: MachineType::None();
}
+bool IsIntConstant(Node* node) {
+ return node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant;
+}
+
+bool IsWordAnd(Node* node) {
+ return node->opcode() == IrOpcode::kWord32And ||
+ node->opcode() == IrOpcode::kWord64And;
+}
+
+// The result of WordAnd with a positive interger constant in X64 is known to
+// be sign(zero)-extended. Comparing this result with another positive interger
+// constant can have narrowed operand.
+MachineType MachineTypeForNarrowWordAnd(Node* and_node, Node* constant_node) {
+ Node* and_left = and_node->InputAt(0);
+ Node* and_right = and_node->InputAt(1);
+ Node* and_constant_node = IsIntConstant(and_right)
+ ? and_right
+ : IsIntConstant(and_left) ? and_left : nullptr;
+
+ if (and_constant_node != nullptr) {
+ int64_t and_constant =
+ and_constant_node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(and_constant_node->op())
+ : OpParameter<int64_t>(and_constant_node->op());
+ int64_t cmp_constant = constant_node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(constant_node->op())
+ : OpParameter<int64_t>(constant_node->op());
+ if (and_constant >= 0 && cmp_constant >= 0) {
+ int64_t constant =
+ and_constant > cmp_constant ? and_constant : cmp_constant;
+ if (constant <= std::numeric_limits<int8_t>::max()) {
+ return MachineType::Int8();
+ } else if (constant <= std::numeric_limits<uint8_t>::max()) {
+ return MachineType::Uint8();
+ } else if (constant <= std::numeric_limits<int16_t>::max()) {
+ return MachineType::Int16();
+ } else if (constant <= std::numeric_limits<uint16_t>::max()) {
+ return MachineType::Uint16();
+ } else if (constant <= std::numeric_limits<int32_t>::max()) {
+ return MachineType::Int32();
+ } else if (constant <= std::numeric_limits<uint32_t>::max()) {
+ return MachineType::Uint32();
+ }
+ }
+ }
+
+ return MachineType::None();
+}
+
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // TODO(epertoso): we can probably get some size information out phi nodes.
- // If the load representations don't match, both operands will be
- // zero/sign-extended to 32bit.
- MachineType left_type = MachineTypeForNarrow(left, right);
- MachineType right_type = MachineTypeForNarrow(right, left);
+ MachineType left_type = MachineType::None();
+ MachineType right_type = MachineType::None();
+ if (IsWordAnd(left) && IsIntConstant(right)) {
+ left_type = MachineTypeForNarrowWordAnd(left, right);
+ right_type = left_type;
+ } else if (IsWordAnd(right) && IsIntConstant(left)) {
+ right_type = MachineTypeForNarrowWordAnd(right, left);
+ left_type = right_type;
+ } else {
+ // TODO(epertoso): we can probably get some size information out phi nodes.
+ // If the load representations don't match, both operands will be
+ // zero/sign-extended to 32bit.
+ left_type = MachineTypeForNarrow(left, right);
+ right_type = MachineTypeForNarrow(right, left);
+ }
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8: {
- if (opcode == kX64Test32) return kX64Test8;
- if (opcode == kX64Cmp32) {
+ if (opcode == kX64Test || opcode == kX64Test32) return kX64Test8;
+ if (opcode == kX64Cmp || opcode == kX64Cmp32) {
if (left_type.semantic() == MachineSemantic::kUint32) {
cont->OverwriteUnsignedIfSigned();
} else {
@@ -2129,8 +2194,8 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
break;
}
case MachineRepresentation::kWord16:
- if (opcode == kX64Test32) return kX64Test16;
- if (opcode == kX64Cmp32) {
+ if (opcode == kX64Test || opcode == kX64Test32) return kX64Test16;
+ if (opcode == kX64Cmp || opcode == kX64Cmp32) {
if (left_type.semantic() == MachineSemantic::kUint32) {
cont->OverwriteUnsignedIfSigned();
} else {
@@ -2139,6 +2204,17 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
return kX64Cmp16;
}
break;
+ case MachineRepresentation::kWord32:
+ if (opcode == kX64Test) return kX64Test32;
+ if (opcode == kX64Cmp) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX64Cmp32;
+ }
+ break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index a127d54572..e331d4960e 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -265,8 +265,10 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) {
// | |
// <subgraph1> <subgraph1>
// (and symmetrically for TrapUnless.)
- if ((control_input->opcode() == IrOpcode::kIfTrue ||
- control_input->opcode() == IrOpcode::kIfFalse) &&
+ if (((trapping_condition &&
+ control_input->opcode() == IrOpcode::kIfTrue) ||
+ (!trapping_condition &&
+ control_input->opcode() == IrOpcode::kIfFalse)) &&
control_input->UseCount() == 1) {
Node* branch = NodeProperties::GetControlInput(control_input);
DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index b5ac40b3c4..1a46dade26 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -4,6 +4,9 @@
#include "src/compiler/bytecode-analysis.h"
+#include <utility>
+
+#include "src/compiler/bytecode-liveness-map.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects/objects-inl.h"
@@ -14,8 +17,11 @@ namespace internal {
namespace compiler {
using interpreter::Bytecode;
+using interpreter::BytecodeOperands;
using interpreter::Bytecodes;
+using interpreter::ImplicitRegisterUse;
using interpreter::OperandType;
+using interpreter::Register;
BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
int register_count, Zone* zone)
@@ -25,7 +31,7 @@ BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
void BytecodeLoopAssignments::Add(interpreter::Register r) {
if (r.is_parameter()) {
- bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ bit_vector_->Add(r.ToParameterIndex());
} else {
bit_vector_->Add(parameter_count_ + r.index());
}
@@ -35,7 +41,7 @@ void BytecodeLoopAssignments::AddList(interpreter::Register r, uint32_t count) {
if (r.is_parameter()) {
for (uint32_t i = 0; i < count; i++) {
DCHECK(interpreter::Register(r.index() + i).is_parameter());
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + i);
+ bit_vector_->Add(r.ToParameterIndex() + i);
}
} else {
for (uint32_t i = 0; i < count; i++) {
@@ -91,17 +97,103 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1) {
- if (analyze_liveness) liveness_map_.emplace(bytecode_array->length(), zone);
Analyze();
}
namespace {
-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
- const interpreter::BytecodeArrayIterator& iterator) {
- int num_operands = Bytecodes::NumberOfOperands(bytecode);
- const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+template <Bytecode bytecode, OperandType operand_type, size_t i>
+void UpdateInLivenessForOutOperand(
+ BytecodeLivenessState* in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ switch (operand_type) {
+ case OperandType::kRegOut: {
+ Register r = iterator.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness->MarkRegisterDead(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegOutList: {
+ Register r = iterator.GetRegisterOperand(i);
+ uint32_t reg_count = iterator.GetRegisterCountOperand(i + 1);
+ if (!r.is_parameter()) {
+ for (uint32_t j = 0; j < reg_count; ++j) {
+ DCHECK(!Register(r.index() + j).is_parameter());
+ in_liveness->MarkRegisterDead(r.index() + j);
+ }
+ }
+ break;
+ }
+ case OperandType::kRegOutPair: {
+ Register r = iterator.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!Register(r.index() + 1).is_parameter());
+ in_liveness->MarkRegisterDead(r.index());
+ in_liveness->MarkRegisterDead(r.index() + 1);
+ }
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ Register r = iterator.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!Register(r.index() + 1).is_parameter());
+ DCHECK(!Register(r.index() + 2).is_parameter());
+ in_liveness->MarkRegisterDead(r.index());
+ in_liveness->MarkRegisterDead(r.index() + 1);
+ in_liveness->MarkRegisterDead(r.index() + 2);
+ }
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
+ break;
+ }
+}
+
+template <Bytecode bytecode, OperandType operand_type, size_t i>
+void UpdateInLivenessForInOperand(
+ BytecodeLivenessState* in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ switch (operand_type) {
+ case OperandType::kReg: {
+ Register r = iterator.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness->MarkRegisterLive(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegPair: {
+ Register r = iterator.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!Register(r.index() + 1).is_parameter());
+ in_liveness->MarkRegisterLive(r.index());
+ in_liveness->MarkRegisterLive(r.index() + 1);
+ }
+ break;
+ }
+ case OperandType::kRegList: {
+ Register r = iterator.GetRegisterOperand(i);
+ uint32_t reg_count = iterator.GetRegisterCountOperand(i + 1);
+ if (!r.is_parameter()) {
+ for (uint32_t j = 0; j < reg_count; ++j) {
+ DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+ in_liveness->MarkRegisterLive(r.index() + j);
+ }
+ }
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_type));
+ break;
+ }
+}
+template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
+ OperandType... operand_types, size_t... operand_index>
+void UpdateInLiveness(BytecodeLivenessState* in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator,
+ std::index_sequence<operand_index...>) {
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator) {
// The generator object has to be live.
@@ -117,150 +209,147 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
return;
}
- if (Bytecodes::WritesAccumulator(bytecode)) {
+ if (BytecodeOperands::WritesAccumulator(implicit_register_use)) {
in_liveness->MarkAccumulatorDead();
}
- for (int i = 0; i < num_operands; ++i) {
- switch (operand_types[i]) {
- case OperandType::kRegOut: {
- interpreter::Register r = iterator.GetRegisterOperand(i);
- if (!r.is_parameter()) {
- in_liveness->MarkRegisterDead(r.index());
- }
- break;
- }
- case OperandType::kRegOutList: {
- interpreter::Register r = iterator.GetRegisterOperand(i++);
- uint32_t reg_count = iterator.GetRegisterCountOperand(i);
- if (!r.is_parameter()) {
- for (uint32_t j = 0; j < reg_count; ++j) {
- DCHECK(!interpreter::Register(r.index() + j).is_parameter());
- in_liveness->MarkRegisterDead(r.index() + j);
- }
- }
- break;
- }
- case OperandType::kRegOutPair: {
- interpreter::Register r = iterator.GetRegisterOperand(i);
- if (!r.is_parameter()) {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- in_liveness->MarkRegisterDead(r.index());
- in_liveness->MarkRegisterDead(r.index() + 1);
- }
- break;
- }
- case OperandType::kRegOutTriple: {
- interpreter::Register r = iterator.GetRegisterOperand(i);
- if (!r.is_parameter()) {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
- in_liveness->MarkRegisterDead(r.index());
- in_liveness->MarkRegisterDead(r.index() + 1);
- in_liveness->MarkRegisterDead(r.index() + 2);
- }
- break;
- }
- default:
- DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
- break;
- }
- }
+ ITERATE_PACK(
+ UpdateInLivenessForOutOperand<bytecode, operand_types, operand_index>(
+ in_liveness, iterator));
if (Bytecodes::WritesImplicitRegister(bytecode)) {
- in_liveness->MarkRegisterDead(
- interpreter::Register::FromShortStar(bytecode).index());
+ in_liveness->MarkRegisterDead(Register::FromShortStar(bytecode).index());
}
- if (Bytecodes::ReadsAccumulator(bytecode)) {
+ if (BytecodeOperands::ReadsAccumulator(implicit_register_use)) {
in_liveness->MarkAccumulatorLive();
}
- for (int i = 0; i < num_operands; ++i) {
- switch (operand_types[i]) {
- case OperandType::kReg: {
- interpreter::Register r = iterator.GetRegisterOperand(i);
- if (!r.is_parameter()) {
- in_liveness->MarkRegisterLive(r.index());
- }
- break;
- }
- case OperandType::kRegPair: {
- interpreter::Register r = iterator.GetRegisterOperand(i);
- if (!r.is_parameter()) {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- in_liveness->MarkRegisterLive(r.index());
- in_liveness->MarkRegisterLive(r.index() + 1);
- }
- break;
- }
- case OperandType::kRegList: {
- interpreter::Register r = iterator.GetRegisterOperand(i++);
- uint32_t reg_count = iterator.GetRegisterCountOperand(i);
- if (!r.is_parameter()) {
- for (uint32_t j = 0; j < reg_count; ++j) {
- DCHECK(!interpreter::Register(r.index() + j).is_parameter());
- in_liveness->MarkRegisterLive(r.index() + j);
- }
- }
- break;
- }
- default:
- DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
- break;
- }
+ ITERATE_PACK(
+ UpdateInLivenessForInOperand<bytecode, operand_types, operand_index>(
+ in_liveness, iterator));
+}
+
+template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
+ OperandType... operand_types>
+void UpdateInLiveness(BytecodeLivenessState* in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UpdateInLiveness<bytecode, implicit_register_use, operand_types...>(
+ in_liveness, iterator,
+ std::make_index_sequence<sizeof...(operand_types)>());
+}
+
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ switch (bytecode) {
+#define BYTECODE_UPDATE_IN_LIVENESS(Name, ...) \
+ case Bytecode::k##Name: \
+ return UpdateInLiveness<Bytecode::k##Name, __VA_ARGS__>(in_liveness, \
+ iterator);
+ BYTECODE_LIST(BYTECODE_UPDATE_IN_LIVENESS)
+#undef BYTECODE_UPDATE_IN_LIVENESS
}
}
-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
+template <bool IsFirstUpdate = false>
+void EnsureOutLivenessIsNotAlias(
+ BytecodeLiveness& liveness,
+ BytecodeLivenessState* next_bytecode_in_liveness, Zone* zone) {
+ if (!IsFirstUpdate) {
+ // We should have copied the next bytecode's in liveness already in the
+ // first pass, so on subsequent passes this should already not be an alias.
+ DCHECK_NE(liveness.out, next_bytecode_in_liveness);
+ return;
+ }
+ if (liveness.out == next_bytecode_in_liveness) {
+ // If the out-liveness is aliasing the next bytecode's in-liveness,
+ // reallocate it and copy the data to the newly allocated state.
+ liveness.out =
+ zone->New<BytecodeLivenessState>(*next_bytecode_in_liveness, zone);
+ }
+}
+
+template <bool IsFirstUpdate, Bytecode bytecode>
+void UpdateOutLiveness(BytecodeLiveness& liveness,
BytecodeLivenessState* next_bytecode_in_liveness,
const interpreter::BytecodeArrayIterator& iterator,
Handle<BytecodeArray> bytecode_array,
- const BytecodeLivenessMap& liveness_map) {
- int current_offset = iterator.current_offset();
+ const BytecodeLivenessMap& liveness_map, Zone* zone) {
+ // On subsequent updates, only update out-liveness manually if it isn't
+ // already aliasing the next bytecode's in-liveness.
+ if (!IsFirstUpdate && liveness.out == next_bytecode_in_liveness) return;
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator ||
bytecode == Bytecode::kResumeGenerator) {
- out_liveness->Union(*next_bytecode_in_liveness);
+ DCHECK_NOT_NULL(next_bytecode_in_liveness);
+ if (IsFirstUpdate) {
+ liveness.out = next_bytecode_in_liveness;
+ } else {
+ liveness.out->Union(*next_bytecode_in_liveness);
+ }
return;
}
+ // Update from next bytecode (unless there isn't one or this is an
+ // unconditional jump).
+ if (next_bytecode_in_liveness != nullptr &&
+ !Bytecodes::IsUnconditionalJump(bytecode) &&
+ !Bytecodes::Returns(bytecode) &&
+ !Bytecodes::UnconditionallyThrows(bytecode)) {
+ if (IsFirstUpdate) {
+ // On first update, we can assume that this out-liveness is the same as
+ // the next liveness, and can directly alias it -- we'll allocate a new
+ // one using EnsureOutLivenessIsNotAlias if it needs to be mutated.
+ DCHECK_NULL(liveness.out);
+ liveness.out = next_bytecode_in_liveness;
+ } else {
+ liveness.out->Union(*next_bytecode_in_liveness);
+ }
+ } else if (IsFirstUpdate) {
+ // Otherwise, on the first allocation we need to make sure that there is an
+ // allocated out liveness.
+ DCHECK_NULL(liveness.out);
+ liveness.out = zone->New<BytecodeLivenessState>(
+ bytecode_array->register_count(), zone);
+ }
+
+ DCHECK_NOT_NULL(liveness.out);
+
// Update from jump target (if any). Skip loops, we update these manually in
// the liveness iterations.
if (Bytecodes::IsForwardJump(bytecode)) {
int target_offset = iterator.GetJumpTargetOffset();
- out_liveness->Union(*liveness_map.GetInLiveness(target_offset));
+ EnsureOutLivenessIsNotAlias<IsFirstUpdate>(liveness,
+ next_bytecode_in_liveness, zone);
+ liveness.out->Union(*liveness_map.GetInLiveness(target_offset));
} else if (Bytecodes::IsSwitch(bytecode)) {
+ EnsureOutLivenessIsNotAlias<IsFirstUpdate>(liveness,
+ next_bytecode_in_liveness, zone);
for (interpreter::JumpTableTargetOffset entry :
iterator.GetJumpTableTargetOffsets()) {
- out_liveness->Union(*liveness_map.GetInLiveness(entry.target_offset));
+ liveness.out->Union(*liveness_map.GetInLiveness(entry.target_offset));
}
}
- // Update from next bytecode (unless there isn't one or this is an
- // unconditional jump).
- if (next_bytecode_in_liveness != nullptr &&
- !Bytecodes::IsUnconditionalJump(bytecode)) {
- out_liveness->Union(*next_bytecode_in_liveness);
- }
-
// Update from exception handler (if any).
if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
int handler_context;
// TODO(leszeks): We should look up this range only once per entry.
HandlerTable table(*bytecode_array);
int handler_offset =
- table.LookupRange(current_offset, &handler_context, nullptr);
+ table.LookupRange(iterator.current_offset(), &handler_context, nullptr);
if (handler_offset != -1) {
- bool was_accumulator_live = out_liveness->AccumulatorIsLive();
- out_liveness->Union(*liveness_map.GetInLiveness(handler_offset));
- out_liveness->MarkRegisterLive(handler_context);
+ EnsureOutLivenessIsNotAlias<IsFirstUpdate>(
+ liveness, next_bytecode_in_liveness, zone);
+ bool was_accumulator_live = liveness.out->AccumulatorIsLive();
+ liveness.out->Union(*liveness_map.GetInLiveness(handler_offset));
+ liveness.out->MarkRegisterLive(handler_context);
if (!was_accumulator_live) {
// The accumulator is reset to the exception on entry into a handler,
// and so shouldn't be considered live coming out of this bytecode just
// because it's live coming into the handler. So, kill the accumulator
// if the handler is the only thing that made it live.
- out_liveness->MarkAccumulatorDead();
+ liveness.out->MarkAccumulatorDead();
// TODO(leszeks): Ideally the accumulator wouldn't be considered live at
// the start of the handler, but looking up if the current bytecode is
@@ -271,19 +360,68 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
}
}
-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness const& liveness,
+template <bool IsFirstUpdate = false>
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator,
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeLivenessMap& liveness_map, Zone* zone) {
+ switch (bytecode) {
+#define BYTECODE_UPDATE_OUT_LIVENESS(Name, ...) \
+ case Bytecode::k##Name: \
+ return UpdateOutLiveness<IsFirstUpdate, Bytecode::k##Name>( \
+ liveness, next_bytecode_in_liveness, iterator, bytecode_array, \
+ liveness_map, zone);
+ BYTECODE_LIST(BYTECODE_UPDATE_OUT_LIVENESS)
+#undef BYTECODE_UPDATE_OUT_LIVENESS
+ }
+}
+
+template <bool IsFirstUpdate, Bytecode bytecode,
+ ImplicitRegisterUse implicit_register_use,
+ OperandType... operand_types>
+void UpdateLiveness(BytecodeLiveness& liveness,
BytecodeLivenessState** next_bytecode_in_liveness,
const interpreter::BytecodeArrayIterator& iterator,
Handle<BytecodeArray> bytecode_array,
- const BytecodeLivenessMap& liveness_map) {
- UpdateOutLiveness(bytecode, liveness.out, *next_bytecode_in_liveness,
- iterator, bytecode_array, liveness_map);
- liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, liveness.in, iterator);
+ const BytecodeLivenessMap& liveness_map, Zone* zone) {
+ UpdateOutLiveness<IsFirstUpdate, bytecode>(
+ liveness, *next_bytecode_in_liveness, iterator, bytecode_array,
+ liveness_map, zone);
+ if (IsFirstUpdate) {
+ // On the first update, allocate the in-liveness as a copy of the
+ // out-liveness.
+ DCHECK_NULL(liveness.in);
+ liveness.in = zone->New<BytecodeLivenessState>(*liveness.out, zone);
+ } else {
+ // On subsequent updates, copy liveness from the out vector.
+ // TODO(leszeks): If this copy doesn't change liveness, we could
+ // opportunistically terminate early.
+ liveness.in->CopyFrom(*liveness.out);
+ }
+ UpdateInLiveness<bytecode, implicit_register_use, operand_types...>(
+ liveness.in, iterator);
*next_bytecode_in_liveness = liveness.in;
}
+template <bool IsFirstUpdate = false>
+void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+ BytecodeLivenessState** next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayIterator& iterator,
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeLivenessMap& liveness_map, Zone* zone) {
+ switch (bytecode) {
+#define BYTECODE_UPDATE_LIVENESS(Name, ...) \
+ case Bytecode::k##Name: \
+ return UpdateLiveness<IsFirstUpdate, Bytecode::k##Name, __VA_ARGS__>( \
+ liveness, next_bytecode_in_liveness, iterator, bytecode_array, \
+ liveness_map, zone);
+ BYTECODE_LIST(BYTECODE_UPDATE_LIVENESS)
+#undef BYTECODE_UPDATE_LIVENESS
+ }
+}
+
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
const interpreter::BytecodeArrayIterator& iterator) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
@@ -331,6 +469,12 @@ void BytecodeAnalysis::Analyze() {
DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+
+ bytecode_count_ = iterator.size();
+ if (analyze_liveness_) {
+ liveness_map_.emplace(bytecode_array()->length(), zone());
+ }
+
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
@@ -443,10 +587,10 @@ void BytecodeAnalysis::Analyze() {
}
if (analyze_liveness_) {
- BytecodeLiveness const& liveness = liveness_map().InitializeLiveness(
- current_offset, bytecode_array()->register_count(), zone());
- UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- bytecode_array(), liveness_map());
+ BytecodeLiveness& liveness =
+ liveness_map().InsertNewLiveness(current_offset);
+ UpdateLiveness<true>(bytecode, liveness, &next_bytecode_in_liveness,
+ iterator, bytecode_array(), liveness_map(), zone());
}
}
@@ -506,16 +650,15 @@ void BytecodeAnalysis::Analyze() {
for (; iterator.current_offset() > header_offset; --iterator) {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
- BytecodeLiveness const& liveness =
- liveness_map().GetLiveness(current_offset);
+ BytecodeLiveness& liveness = liveness_map().GetLiveness(current_offset);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- bytecode_array(), liveness_map());
+ bytecode_array(), liveness_map(), zone());
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
- UpdateOutLiveness(iterator.current_bytecode(), header_liveness.out,
+ UpdateOutLiveness(iterator.current_bytecode(), header_liveness,
next_bytecode_in_liveness, iterator, bytecode_array(),
- liveness_map());
+ liveness_map(), zone());
}
// Process the generator switch statement separately, once the loops are done.
@@ -548,14 +691,14 @@ void BytecodeAnalysis::Analyze() {
next_bytecode_in_liveness = switch_liveness.in;
for (--iterator; iterator.IsValid(); --iterator) {
Bytecode bytecode = iterator.current_bytecode();
- BytecodeLiveness const& liveness =
+ BytecodeLiveness& liveness =
liveness_map().GetLiveness(iterator.current_offset());
// There shouldn't be any more loops.
DCHECK_NE(bytecode, Bytecode::kJumpLoop);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- bytecode_array(), liveness_map());
+ bytecode_array(), liveness_map(), zone());
}
}
}
@@ -630,6 +773,12 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
return header_to_info_.find(header_offset)->second;
}
+const LoopInfo* BytecodeAnalysis::TryGetLoopInfoFor(int header_offset) const {
+ auto it = header_to_info_.find(header_offset);
+ if (it == header_to_info_.end()) return nullptr;
+ return &it->second;
+}
+
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
if (!analyze_liveness_) return nullptr;
@@ -650,21 +799,12 @@ std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
for (; !iterator.done(); iterator.Advance()) {
int current_offset = iterator.current_offset();
- const BitVector& in_liveness =
- GetInLivenessFor(current_offset)->bit_vector();
- const BitVector& out_liveness =
- GetOutLivenessFor(current_offset)->bit_vector();
-
- for (int i = 0; i < in_liveness.length(); ++i) {
- os << (in_liveness.Contains(i) ? "L" : ".");
- }
- os << " -> ";
-
- for (int i = 0; i < out_liveness.length(); ++i) {
- os << (out_liveness.Contains(i) ? "L" : ".");
- }
+ const BytecodeLivenessState* in_liveness = GetInLivenessFor(current_offset);
+ const BytecodeLivenessState* out_liveness =
+ GetOutLivenessFor(current_offset);
- os << " | " << current_offset << ": ";
+ os << ToString(*in_liveness) << " -> " << ToString(*out_liveness) << " | "
+ << current_offset << ": ";
iterator.PrintTo(os) << std::endl;
}
@@ -827,6 +967,8 @@ bool BytecodeAnalysis::LivenessIsValid() {
int invalid_offset = -1;
int which_invalid = -1;
+ BytecodeLivenessState invalid_liveness(bytecode_array()->register_count(),
+ zone());
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
@@ -840,8 +982,8 @@ bool BytecodeAnalysis::LivenessIsValid() {
previous_liveness.CopyFrom(*liveness.out);
- UpdateOutLiveness(bytecode, liveness.out, next_bytecode_in_liveness,
- iterator, bytecode_array(), liveness_map());
+ UpdateOutLiveness(bytecode, liveness, next_bytecode_in_liveness, iterator,
+ bytecode_array(), liveness_map(), zone());
// UpdateOutLiveness skips kJumpLoop, so we update it manually.
if (bytecode == Bytecode::kJumpLoop) {
int target_offset = iterator.GetJumpTargetOffset();
@@ -849,6 +991,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
}
if (!liveness.out->Equals(previous_liveness)) {
+ invalid_liveness.CopyFrom(*liveness.out);
// Reset the invalid liveness.
liveness.out->CopyFrom(previous_liveness);
invalid_offset = current_offset;
@@ -862,6 +1005,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
UpdateInLiveness(bytecode, liveness.in, iterator);
if (!liveness.in->Equals(previous_liveness)) {
+ invalid_liveness.CopyFrom(*liveness.in);
// Reset the invalid liveness.
liveness.in->CopyFrom(previous_liveness);
invalid_offset = current_offset;
@@ -913,22 +1057,16 @@ bool BytecodeAnalysis::LivenessIsValid() {
interpreter::BytecodeArrayIterator forward_iterator(bytecode_array());
for (; !forward_iterator.done(); forward_iterator.Advance()) {
int current_offset = forward_iterator.current_offset();
- const BitVector& in_liveness =
- GetInLivenessFor(current_offset)->bit_vector();
- const BitVector& out_liveness =
- GetOutLivenessFor(current_offset)->bit_vector();
-
- for (int i = 0; i < in_liveness.length(); ++i) {
- of << (in_liveness.Contains(i) ? 'L' : '.');
- }
+ const BytecodeLivenessState* in_liveness =
+ GetInLivenessFor(current_offset);
+ const BytecodeLivenessState* out_liveness =
+ GetOutLivenessFor(current_offset);
- of << " | ";
-
- for (int i = 0; i < out_liveness.length(); ++i) {
- of << (out_liveness.Contains(i) ? 'L' : '.');
- }
+ std::string in_liveness_str = ToString(*in_liveness);
+ std::string out_liveness_str = ToString(*out_liveness);
- of << " : " << current_offset << " : ";
+ of << in_liveness_str << " | " << out_liveness_str << " : "
+ << current_offset << " : ";
// Draw loop back edges by indentin everything between loop headers and
// jump loop instructions.
@@ -952,20 +1090,26 @@ bool BytecodeAnalysis::LivenessIsValid() {
if (current_offset == invalid_offset) {
// Underline the invalid liveness.
+ char in_underline = which_invalid == 0 ? '^' : ' ';
+ char out_underline = which_invalid == 0 ? ' ' : '^';
+ of << std::string(in_liveness_str.size(), in_underline) << " "
+ << std::string(out_liveness_str.size(), out_underline);
+
+ // Make sure to draw the loop indentation marks on this additional line.
+ of << " : " << current_offset << " : ";
+ for (int i = 0; i < loop_indent; ++i) {
+ of << "| ";
+ }
+
+ of << std::endl;
+
+ // Print the invalid liveness.
if (which_invalid == 0) {
- for (int i = 0; i < in_liveness.length(); ++i) {
- of << '^';
- }
- for (int i = 0; i < out_liveness.length() + 3; ++i) {
- of << ' ';
- }
+ of << ToString(invalid_liveness) << " "
+ << std::string(out_liveness_str.size(), ' ');
} else {
- for (int i = 0; i < in_liveness.length() + 3; ++i) {
- of << ' ';
- }
- for (int i = 0; i < out_liveness.length(); ++i) {
- of << '^';
- }
+ of << std::string(in_liveness_str.size(), ' ') << " "
+ << ToString(invalid_liveness);
}
// Make sure to draw the loop indentation marks on this additional line.
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 0e9043a16a..13c53da80c 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -110,6 +110,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
int GetLoopOffsetFor(int offset) const;
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
+ // Try to get the loop info of the loop header at {header_offset}, returning
+ // null if there isn't any.
+ const LoopInfo* TryGetLoopInfoFor(int header_offset) const;
+
+ const ZoneMap<int, LoopInfo>& GetLoopInfos() const { return header_to_info_; }
// Get the top-level resume jump targets.
const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
@@ -133,6 +138,10 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
// Return whether liveness analysis was performed (for verification purposes).
bool liveness_analyzed() const { return analyze_liveness_; }
+ // Return the number of bytecodes (i.e. the number of bytecode operations, as
+ // opposed to the number of bytes in the bytecode).
+ int bytecode_count() const { return bytecode_count_; }
+
private:
struct LoopStackEntry {
int header_offset;
@@ -176,6 +185,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
base::Optional<BytecodeLivenessMap> liveness_map_;
+ int bytecode_count_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index c510d7a7e7..7fe1b626b1 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -90,13 +90,6 @@ class BytecodeGraphBuilder {
return feedback_vector_node_;
}
- void CreateFeedbackCellNode();
- Node* feedback_cell_node() const {
- DCHECK(CodeKindCanTierUp(code_kind()));
- DCHECK_NOT_NULL(feedback_cell_node_);
- return feedback_cell_node_;
- }
-
// Same as above for the feedback vector node.
void CreateNativeContextNode();
Node* native_context_node() const {
@@ -106,12 +99,7 @@ class BytecodeGraphBuilder {
Node* BuildLoadFeedbackCell(int index);
- // Checks the optimization marker and potentially triggers compilation or
- // installs the finished code object.
- // Only relevant for specific code kinds (see CodeKindCanTierUp).
- void MaybeBuildTierUpCheck();
-
- // Builder for loading the a native context field.
+ // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
// Helper function for creating a feedback source containing type feedback
@@ -235,13 +223,13 @@ class BytecodeGraphBuilder {
Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index,
TypeofMode typeof_mode);
- enum class StoreMode {
+ enum class NamedStoreMode {
// Check the prototype chain before storing.
- kNormal,
- // Store value to the receiver without checking the prototype chain.
- kOwn,
+ kSet,
+ // Define value to the receiver without checking the prototype chain.
+ kDefineOwn,
};
- void BuildNamedStore(StoreMode store_mode);
+ void BuildNamedStore(NamedStoreMode store_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
@@ -342,8 +330,6 @@ class BytecodeGraphBuilder {
void BuildJumpIfNotHole();
void BuildJumpIfJSReceiver();
- void BuildUpdateInterruptBudget(int delta);
-
void BuildSwitchOnSmi(Node* condition);
void BuildSwitchOnGeneratorState(
const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
@@ -505,7 +491,6 @@ class BytecodeGraphBuilder {
Node** input_buffer_;
const CodeKind code_kind_;
- Node* feedback_cell_node_;
Node* feedback_vector_node_;
Node* native_context_node_;
@@ -610,7 +595,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
void UpdateStateValues(Node** state_values, Node** values, int count);
Node* GetStateValuesFromCache(Node** values, int count,
- const BitVector* liveness, int liveness_offset);
+ const BytecodeLivenessState* liveness);
int RegisterToValuesIndex(interpreter::Register the_register) const;
@@ -724,7 +709,7 @@ BytecodeGraphBuilder::Environment::Environment(
int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
interpreter::Register the_register) const {
if (the_register.is_parameter()) {
- return the_register.ToParameterIndex(parameter_count());
+ return the_register.ToParameterIndex();
} else {
return the_register.index() + register_base();
}
@@ -995,9 +980,9 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
- Node** values, int count, const BitVector* liveness, int liveness_offset) {
+ Node** values, int count, const BytecodeLivenessState* liveness) {
return builder_->state_values_cache_.GetNodeForValues(
- values, static_cast<size_t>(count), liveness, liveness_offset);
+ values, static_cast<size_t>(count), liveness);
}
Node* BytecodeGraphBuilder::Environment::Checkpoint(
@@ -1006,16 +991,15 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
// to match the parameter count.
- parameters_state_values_ = GetStateValuesFromCache(
- &values()->at(0), parameter_count(), nullptr, 0);
+ parameters_state_values_ =
+ GetStateValuesFromCache(&values()->at(0), parameter_count(), nullptr);
} else {
UpdateStateValues(&parameters_state_values_, &values()->at(0),
parameter_count());
}
- Node* registers_state_values =
- GetStateValuesFromCache(&values()->at(register_base()), register_count(),
- liveness ? &liveness->bit_vector() : nullptr, 0);
+ Node* registers_state_values = GetStateValuesFromCache(
+ &values()->at(register_base()), register_count(), liveness);
bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
Node* accumulator_state_value =
@@ -1082,7 +1066,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
input_buffer_size_(0),
input_buffer_(nullptr),
code_kind_(code_kind),
- feedback_cell_node_(nullptr),
feedback_vector_node_(nullptr),
native_context_node_(nullptr),
needs_eager_checkpoint_(true),
@@ -1122,14 +1105,6 @@ Node* BytecodeGraphBuilder::GetParameter(int parameter_index,
return cached_parameters_[index];
}
-void BytecodeGraphBuilder::CreateFeedbackCellNode() {
- DCHECK_NULL(feedback_cell_node_);
- // Only used by tier-up logic; for code that doesn't tier-up, we can skip
- // this.
- if (!CodeKindCanTierUp(code_kind())) return;
- feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
-}
-
void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
DCHECK_NULL(feedback_vector_node_);
feedback_vector_node_ = jsgraph()->Constant(feedback_vector());
@@ -1144,25 +1119,6 @@ void BytecodeGraphBuilder::CreateNativeContextNode() {
native_context_node_ = jsgraph()->Constant(native_context());
}
-void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
- // For OSR we don't tier up, so we don't need to build this check. Also
- // tiering up currently tail calls to IET which tail calls aren't supported
- // with OSR. See AdjustStackPointerForTailCall.
- if (!CodeKindCanTierUp(code_kind()) || skip_tierup_check()) return;
-
- int parameter_count = bytecode_array().parameter_count();
- Node* target = GetFunctionClosure();
- Node* new_target = GetParameter(
- Linkage::GetJSCallNewTargetParamIndex(parameter_count), "%new.target");
- Node* argc = GetParameter(
- Linkage::GetJSCallArgCountParamIndex(parameter_count), "%argc");
- DCHECK_EQ(environment()->Context()->opcode(), IrOpcode::kParameter);
- Node* context = environment()->Context();
-
- NewNode(simplified()->TierUpCheck(), feedback_vector_node(), target,
- new_target, argc, context);
-}
-
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
NodeProperties::ReplaceContextInput(result, native_context_node());
@@ -1193,9 +1149,7 @@ void BytecodeGraphBuilder::CreateGraph() {
graph()->start());
set_environment(&env);
- CreateFeedbackCellNode();
CreateFeedbackVectorNode();
- MaybeBuildTierUpCheck();
CreateNativeContextNode();
VisitBytecodes();
@@ -1646,7 +1600,7 @@ void BytecodeGraphBuilder::VisitStaInArrayLiteral() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
+void BytecodeGraphBuilder::VisitDefineKeyedOwnPropertyInLiteral() {
PrepareEagerCheckpoint();
Node* object =
@@ -1657,7 +1611,7 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
int flags = bytecode_iterator().GetFlagOperand(2);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(3));
- const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
+ const Operator* op = javascript()->DefineKeyedOwnPropertyInLiteral(feedback);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedStoreKeyed(op, object, name, value, feedback.slot);
@@ -1997,7 +1951,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() {
environment()->BindAccumulator(store, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+void BytecodeGraphBuilder::VisitGetNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -2021,7 +1975,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
+void BytecodeGraphBuilder::VisitGetNamedPropertyFromSuper() {
PrepareEagerCheckpoint();
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -2047,7 +2001,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+void BytecodeGraphBuilder::VisitGetKeyedProperty() {
PrepareEagerCheckpoint();
Node* key = environment()->LookupAccumulator();
Node* object =
@@ -2074,7 +2028,7 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
+void BytecodeGraphBuilder::BuildNamedStore(NamedStoreMode store_mode) {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -2084,16 +2038,16 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
const Operator* op;
- if (store_mode == StoreMode::kOwn) {
- DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ if (store_mode == NamedStoreMode::kDefineOwn) {
+ DCHECK_EQ(FeedbackSlotKind::kDefineNamedOwn,
broker()->GetFeedbackSlotKind(feedback));
- op = javascript()->StoreNamedOwn(name, feedback);
+ op = javascript()->DefineNamedOwnProperty(name, feedback);
} else {
- DCHECK_EQ(StoreMode::kNormal, store_mode);
+ DCHECK_EQ(NamedStoreMode::kSet, store_mode);
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
- op = javascript()->StoreNamed(language_mode, name, feedback);
+ op = javascript()->SetNamedProperty(language_mode, name, feedback);
}
JSTypeHintLowering::LoweringResult lowering =
@@ -2111,15 +2065,15 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaNamedProperty() {
- BuildNamedStore(StoreMode::kNormal);
+void BytecodeGraphBuilder::VisitSetNamedProperty() {
+ BuildNamedStore(NamedStoreMode::kSet);
}
-void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
- BuildNamedStore(StoreMode::kOwn);
+void BytecodeGraphBuilder::VisitDefineNamedOwnProperty() {
+ BuildNamedStore(NamedStoreMode::kDefineOwn);
}
-void BytecodeGraphBuilder::VisitStaKeyedProperty() {
+void BytecodeGraphBuilder::VisitSetKeyedProperty() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -2130,7 +2084,7 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() {
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(source));
- const Operator* op = javascript()->StoreProperty(language_mode, source);
+ const Operator* op = javascript()->SetKeyedProperty(language_mode, source);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedStoreKeyed(op, object, key, value, source.slot);
@@ -2141,10 +2095,10 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- STATIC_ASSERT(JSStorePropertyNode::ObjectIndex() == 0);
- STATIC_ASSERT(JSStorePropertyNode::KeyIndex() == 1);
- STATIC_ASSERT(JSStorePropertyNode::ValueIndex() == 2);
- STATIC_ASSERT(JSStorePropertyNode::FeedbackVectorIndex() == 3);
+ STATIC_ASSERT(JSSetKeyedPropertyNode::ObjectIndex() == 0);
+ STATIC_ASSERT(JSSetKeyedPropertyNode::KeyIndex() == 1);
+ STATIC_ASSERT(JSSetKeyedPropertyNode::ValueIndex() == 2);
+ STATIC_ASSERT(JSSetKeyedPropertyNode::FeedbackVectorIndex() == 3);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, key, value, feedback_vector_node());
}
@@ -2152,7 +2106,7 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaKeyedPropertyAsDefine() {
+void BytecodeGraphBuilder::VisitDefineKeyedOwnProperty() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -2164,7 +2118,8 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyAsDefine() {
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(source));
- const Operator* op = javascript()->DefineProperty(language_mode, source);
+ const Operator* op =
+ javascript()->DefineKeyedOwnProperty(language_mode, source);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedStoreKeyed(op, object, key, value, source.slot);
@@ -2175,10 +2130,10 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyAsDefine() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- STATIC_ASSERT(JSDefinePropertyNode::ObjectIndex() == 0);
- STATIC_ASSERT(JSDefinePropertyNode::KeyIndex() == 1);
- STATIC_ASSERT(JSDefinePropertyNode::ValueIndex() == 2);
- STATIC_ASSERT(JSDefinePropertyNode::FeedbackVectorIndex() == 3);
+ STATIC_ASSERT(JSDefineKeyedOwnPropertyNode::ObjectIndex() == 0);
+ STATIC_ASSERT(JSDefineKeyedOwnPropertyNode::KeyIndex() == 1);
+ STATIC_ASSERT(JSDefineKeyedOwnPropertyNode::ValueIndex() == 2);
+ STATIC_ASSERT(JSDefineKeyedOwnPropertyNode::FeedbackVectorIndex() == 3);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, key, value, feedback_vector_node());
}
@@ -2225,8 +2180,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
bytecode_iterator().GetFlagOperand(2))
? AllocationType::kOld
: AllocationType::kYoung;
- CodeTRef compile_lazy = MakeRef(
- broker(), ToCodeT(*BUILTIN_CODE(jsgraph()->isolate(), CompileLazy)));
+ CodeTRef compile_lazy =
+ MakeRef(broker(), *BUILTIN_CODE(jsgraph()->isolate(), CompileLazy));
const Operator* op =
javascript()->CreateClosure(shared_info, compile_lazy, allocation);
Node* closure = NewNode(
@@ -3569,9 +3524,6 @@ void BytecodeGraphBuilder::VisitSetPendingMessage() {
void BytecodeGraphBuilder::BuildReturn(const BytecodeLivenessState* liveness) {
BuildLoopExitsForFunctionExit(liveness);
- // Note: Negated offset since a return acts like a backwards jump, and should
- // decrement the budget.
- BuildUpdateInterruptBudget(-bytecode_iterator().current_offset());
Node* pop_node = jsgraph()->ZeroConstant();
Node* control =
NewNode(common()->Return(), pop_node, environment()->LookupAccumulator());
@@ -3987,7 +3939,6 @@ void BytecodeGraphBuilder::BuildLoopExitsForFunctionExit(
}
void BytecodeGraphBuilder::BuildJump() {
- BuildUpdateInterruptBudget(bytecode_iterator().GetRelativeJumpTargetOffset());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
@@ -3996,8 +3947,6 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
{
SubEnvironment sub_environment(this);
NewIfTrue();
- BuildUpdateInterruptBudget(
- bytecode_iterator().GetRelativeJumpTargetOffset());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
NewIfFalse();
@@ -4008,8 +3957,6 @@ void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
{
SubEnvironment sub_environment(this);
NewIfFalse();
- BuildUpdateInterruptBudget(
- bytecode_iterator().GetRelativeJumpTargetOffset());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
NewIfTrue();
@@ -4034,8 +3981,6 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
{
SubEnvironment sub_environment(this);
NewIfFalse();
- BuildUpdateInterruptBudget(
- bytecode_iterator().GetRelativeJumpTargetOffset());
environment()->BindAccumulator(jsgraph()->FalseConstant());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
@@ -4049,8 +3994,6 @@ void BytecodeGraphBuilder::BuildJumpIfTrue() {
SubEnvironment sub_environment(this);
NewIfTrue();
environment()->BindAccumulator(jsgraph()->TrueConstant());
- BuildUpdateInterruptBudget(
- bytecode_iterator().GetRelativeJumpTargetOffset());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
NewIfFalse();
@@ -4082,16 +4025,6 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
BuildJumpIf(condition);
}
-void BytecodeGraphBuilder::BuildUpdateInterruptBudget(int delta) {
- if (!CodeKindCanTierUp(code_kind())) return;
-
- // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
- int delta_with_current_bytecode =
- delta - bytecode_iterator().current_bytecode_size();
- NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
- feedback_cell_node());
-}
-
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedUnaryOp(const Operator* op,
Node* operand,
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.cc b/deps/v8/src/compiler/bytecode-liveness-map.cc
index 7050ec385e..a24dbecafa 100644
--- a/deps/v8/src/compiler/bytecode-liveness-map.cc
+++ b/deps/v8/src/compiler/bytecode-liveness-map.cc
@@ -8,32 +8,22 @@ namespace v8 {
namespace internal {
namespace compiler {
-BytecodeLiveness::BytecodeLiveness(int register_count, Zone* zone)
- : in(zone->New<BytecodeLivenessState>(register_count, zone)),
- out(zone->New<BytecodeLivenessState>(register_count, zone)) {}
-
-BytecodeLivenessMap::BytecodeLivenessMap(int bytecode_size, Zone* zone)
- : liveness_map_(base::bits::RoundUpToPowerOfTwo32(bytecode_size / 4 + 1),
- base::KeyEqualityMatcher<int>(),
- ZoneAllocationPolicy(zone)) {}
-
-uint32_t OffsetHash(int offset) { return offset; }
-
-BytecodeLiveness& BytecodeLivenessMap::InitializeLiveness(int offset,
- int register_count,
- Zone* zone) {
- return liveness_map_
- .LookupOrInsert(offset, OffsetHash(offset),
- [&]() { return BytecodeLiveness(register_count, zone); })
- ->value;
-}
-
-BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) {
- return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
-}
-
-const BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) const {
- return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+std::string ToString(const BytecodeLivenessState& liveness) {
+ std::string out;
+ out.resize(liveness.register_count() + 1);
+ for (int i = 0; i < liveness.register_count(); ++i) {
+ if (liveness.RegisterIsLive(i)) {
+ out[i] = 'L';
+ } else {
+ out[i] = '.';
+ }
+ }
+ if (liveness.AccumulatorIsLive()) {
+ out[liveness.register_count()] = 'L';
+ } else {
+ out[liveness.register_count()] = '.';
+ }
+ return out;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
index c68492d8bf..fcc391a8f1 100644
--- a/deps/v8/src/compiler/bytecode-liveness-map.h
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -5,7 +5,8 @@
#ifndef V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
-#include "src/base/hashmap.h"
+#include <string>
+
#include "src/utils/bit-vector.h"
#include "src/zone/zone.h"
@@ -18,24 +19,53 @@ namespace compiler {
class BytecodeLivenessState : public ZoneObject {
public:
+ class Iterator {
+ public:
+ int operator*() const {
+ // Subtract one to compensate for the accumulator at the start of the
+ // bit vector.
+ return *it_ - 1;
+ }
+
+ void operator++() { return ++it_; }
+
+ bool operator!=(const Iterator& other) const { return it_ != other.it_; }
+
+ private:
+ static constexpr struct StartTag {
+ } kStartTag = {};
+ static constexpr struct EndTag {
+ } kEndTag = {};
+ explicit Iterator(const BytecodeLivenessState& liveness, StartTag)
+ : it_(liveness.bit_vector_.begin()) {
+ // If we're not at the end, and the current value is the accumulator, skip
+ // over it.
+ if (it_ != liveness.bit_vector_.end() && *it_ == 0) {
+ ++it_;
+ }
+ }
+ explicit Iterator(const BytecodeLivenessState& liveness, EndTag)
+ : it_(liveness.bit_vector_.end()) {}
+
+ BitVector::Iterator it_;
+ friend class BytecodeLivenessState;
+ };
+
BytecodeLivenessState(int register_count, Zone* zone)
: bit_vector_(register_count + 1, zone) {}
BytecodeLivenessState(const BytecodeLivenessState&) = delete;
BytecodeLivenessState& operator=(const BytecodeLivenessState&) = delete;
- const BitVector& bit_vector() const { return bit_vector_; }
-
- BitVector& bit_vector() { return bit_vector_; }
+ BytecodeLivenessState(const BytecodeLivenessState& other, Zone* zone)
+ : bit_vector_(other.bit_vector_, zone) {}
bool RegisterIsLive(int index) const {
DCHECK_GE(index, 0);
DCHECK_LT(index, bit_vector_.length() - 1);
- return bit_vector_.Contains(index);
+ return bit_vector_.Contains(index + 1);
}
- bool AccumulatorIsLive() const {
- return bit_vector_.Contains(bit_vector_.length() - 1);
- }
+ bool AccumulatorIsLive() const { return bit_vector_.Contains(0); }
bool Equals(const BytecodeLivenessState& other) const {
return bit_vector_.Equals(other.bit_vector_);
@@ -44,18 +74,18 @@ class BytecodeLivenessState : public ZoneObject {
void MarkRegisterLive(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, bit_vector_.length() - 1);
- bit_vector_.Add(index);
+ bit_vector_.Add(index + 1);
}
void MarkRegisterDead(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, bit_vector_.length() - 1);
- bit_vector_.Remove(index);
+ bit_vector_.Remove(index + 1);
}
- void MarkAccumulatorLive() { bit_vector_.Add(bit_vector_.length() - 1); }
+ void MarkAccumulatorLive() { bit_vector_.Add(0); }
- void MarkAccumulatorDead() { bit_vector_.Remove(bit_vector_.length() - 1); }
+ void MarkAccumulatorDead() { bit_vector_.Remove(0); }
void MarkAllLive() { bit_vector_.AddAll(); }
@@ -71,6 +101,15 @@ class BytecodeLivenessState : public ZoneObject {
bit_vector_.CopyFrom(other.bit_vector_);
}
+ int register_count() const { return bit_vector_.length() - 1; }
+
+ // Number of live values, including the accumulator.
+ int live_value_count() const { return bit_vector_.Count(); }
+
+ Iterator begin() const { return Iterator(*this, Iterator::kStartTag); }
+
+ Iterator end() const { return Iterator(*this, Iterator::kEndTag); }
+
private:
BitVector bit_vector_;
};
@@ -78,19 +117,42 @@ class BytecodeLivenessState : public ZoneObject {
struct BytecodeLiveness {
BytecodeLivenessState* in;
BytecodeLivenessState* out;
-
- BytecodeLiveness(int register_count, Zone* zone);
};
class V8_EXPORT_PRIVATE BytecodeLivenessMap {
public:
- BytecodeLivenessMap(int size, Zone* zone);
+ BytecodeLivenessMap(int bytecode_size, Zone* zone)
+ : liveness_(zone->NewArray<BytecodeLiveness>(bytecode_size))
+#ifdef DEBUG
+ ,
+ size_(bytecode_size)
+#endif
+ {
+ }
- BytecodeLiveness& InitializeLiveness(int offset, int register_count,
- Zone* zone);
+ BytecodeLiveness& InsertNewLiveness(int offset) {
+ DCHECK_GE(offset, 0);
+ DCHECK_LT(offset, size_);
+#ifdef DEBUG
+ // Null out the in/out liveness, so that later DCHECKs know whether these
+ // have been correctly initialised or not. That code does initialise them
+ // unconditionally though, so we can skip the nulling out in release.
+ liveness_[offset].in = nullptr;
+ liveness_[offset].out = nullptr;
+#endif
+ return liveness_[offset];
+ }
- BytecodeLiveness& GetLiveness(int offset);
- const BytecodeLiveness& GetLiveness(int offset) const;
+ BytecodeLiveness& GetLiveness(int offset) {
+ DCHECK_GE(offset, 0);
+ DCHECK_LT(offset, size_);
+ return liveness_[offset];
+ }
+ const BytecodeLiveness& GetLiveness(int offset) const {
+ DCHECK_GE(offset, 0);
+ DCHECK_LT(offset, size_);
+ return liveness_[offset];
+ }
BytecodeLivenessState* GetInLiveness(int offset) {
return GetLiveness(offset).in;
@@ -107,11 +169,14 @@ class V8_EXPORT_PRIVATE BytecodeLivenessMap {
}
private:
- base::TemplateHashMapImpl<int, BytecodeLiveness,
- base::KeyEqualityMatcher<int>, ZoneAllocationPolicy>
- liveness_map_;
+ BytecodeLiveness* liveness_;
+#ifdef DEBUG
+ size_t size_;
+#endif
};
+V8_EXPORT_PRIVATE std::string ToString(const BytecodeLivenessState& liveness);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 95a84ceeab..951550c4a5 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -19,7 +19,8 @@ namespace {
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
+#define CALLEE_SAVE_REGISTERS esi, edi, ebx
+#define CALLEE_SAVE_FP_REGISTERS
#elif V8_TARGET_ARCH_X64
// ===========================================================================
@@ -32,21 +33,17 @@ namespace {
#define PARAM_REGISTERS rcx, rdx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3
#define FP_RETURN_REGISTER xmm0
-#define CALLEE_SAVE_REGISTERS \
- rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
- r15.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- (1 << xmm6.code()) | (1 << xmm7.code()) | (1 << xmm8.code()) | \
- (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \
- (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \
- (1 << xmm15.code())
+#define CALLEE_SAVE_REGISTERS rbx, rdi, rsi, r12, r13, r14, r15
+#define CALLEE_SAVE_FP_REGISTERS \
+ xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
+
#else // V8_TARGET_OS_WIN
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
#define FP_RETURN_REGISTER xmm0
-#define CALLEE_SAVE_REGISTERS \
- rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
+#define CALLEE_SAVE_REGISTERS rbx, r12, r13, r14, r15
+#define CALLEE_SAVE_FP_REGISTERS
#endif // V8_TARGET_OS_WIN
#elif V8_TARGET_ARCH_ARM
@@ -54,12 +51,8 @@ namespace {
// == arm ====================================================================
// ===========================================================================
#define PARAM_REGISTERS r0, r1, r2, r3
-#define CALLEE_SAVE_REGISTERS \
- r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
- (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
- (1 << d14.code()) | (1 << d15.code())
+#define CALLEE_SAVE_REGISTERS r4, r5, r6, r7, r8, r9, r10
+#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15
#elif V8_TARGET_ARCH_ARM64
// ===========================================================================
@@ -68,16 +61,9 @@ namespace {
#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTER d0
-#define CALLEE_SAVE_REGISTERS \
- (1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
- (1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
- (1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) | \
- (1 << x28.code())
+#define CALLEE_SAVE_REGISTERS x19, x20, x21, x22, x23, x24, x25, x26, x27, x28
-#define CALLEE_SAVE_FP_REGISTERS \
- (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
- (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
- (1 << d14.code()) | (1 << d15.code())
+#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15
#elif V8_TARGET_ARCH_MIPS
// ===========================================================================
@@ -85,34 +71,24 @@ namespace {
// ===========================================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS a0, a1, a2, a3
-#define CALLEE_SAVE_REGISTERS \
- s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
- s7.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7
+#define CALLEE_SAVE_FP_REGISTERS f20, f22, f24, f26, f28, f30
#elif V8_TARGET_ARCH_MIPS64
// ===========================================================================
// == mips64 =================================================================
// ===========================================================================
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define CALLEE_SAVE_REGISTERS \
- s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
- s7.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7
+#define CALLEE_SAVE_FP_REGISTERS f20, f22, f24, f26, f28, f30
#elif V8_TARGET_ARCH_LOONG64
// ===========================================================================
// == loong64 ================================================================
// ===========================================================================
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define CALLEE_SAVE_REGISTERS \
- s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
- s7.bit() | s8.bit() | fp.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
- f30.bit() | f31.bit()
+#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7, s8, fp
+#define CALLEE_SAVE_FP_REGISTERS f24, f25, f26, f27, f28, f29, f30, f31
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
@@ -124,14 +100,13 @@ namespace {
#define STACK_SHADOW_WORDS 14
#endif
#define PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
-#define CALLEE_SAVE_REGISTERS \
- r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() | r19.bit() | \
- r20.bit() | r21.bit() | r22.bit() | r23.bit() | r24.bit() | r25.bit() | \
- r26.bit() | r27.bit() | r28.bit() | r29.bit() | r30.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- d14.bit() | d15.bit() | d16.bit() | d17.bit() | d18.bit() | d19.bit() | \
- d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
- d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
+#define CALLEE_SAVE_REGISTERS \
+ r14, r15, r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, \
+ r29, r30
+
+#define CALLEE_SAVE_FP_REGISTERS \
+ d14, d15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, \
+ d29, d30, d31
#elif V8_TARGET_ARCH_S390X
// ===========================================================================
@@ -139,11 +114,8 @@ namespace {
// ===========================================================================
#define STACK_SHADOW_WORDS 20
#define PARAM_REGISTERS r2, r3, r4, r5, r6
-#define CALLEE_SAVE_REGISTERS \
- r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
- d14.bit() | d15.bit()
+#define CALLEE_SAVE_REGISTERS r6, r7, r8, r9, r10, ip, r13
+#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15
#elif V8_TARGET_ARCH_RISCV64
// ===========================================================================
@@ -152,12 +124,9 @@ namespace {
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
// fp is not part of CALLEE_SAVE_REGISTERS (similar to how MIPS64 or PPC defines
// it)
-#define CALLEE_SAVE_REGISTERS \
- s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | \
- s8.bit() | s9.bit() | s10.bit() | s11.bit()
-#define CALLEE_SAVE_FP_REGISTERS \
- fs0.bit() | fs1.bit() | fs2.bit() | fs3.bit() | fs4.bit() | fs5.bit() | \
- fs6.bit() | fs7.bit() | fs8.bit() | fs9.bit() | fs10.bit() | fs11.bit()
+#define CALLEE_SAVE_REGISTERS s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11
+#define CALLEE_SAVE_FP_REGISTERS \
+ fs0, fs1, fs2, fs3, fs4, fs5, fs6, fs7, fs8, fs9, fs10, fs11
#else
// ===========================================================================
// == unknown ================================================================
@@ -318,17 +287,8 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
BuildParameterLocations(msig, kFPParamRegisterCount, kParamRegisterCount,
kFPParamRegisters, kParamRegisters, &locations);
-#ifdef CALLEE_SAVE_REGISTERS
- const RegList kCalleeSaveRegisters = CALLEE_SAVE_REGISTERS;
-#else
- const RegList kCalleeSaveRegisters = 0;
-#endif
-
-#ifdef CALLEE_SAVE_FP_REGISTERS
- const RegList kCalleeSaveFPRegisters = CALLEE_SAVE_FP_REGISTERS;
-#else
- const RegList kCalleeSaveFPRegisters = 0;
-#endif
+ const RegList kCalleeSaveRegisters = {CALLEE_SAVE_REGISTERS};
+ const DoubleRegList kCalleeSaveFPRegisters = {CALLEE_SAVE_FP_REGISTERS};
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index a5f946943f..bf5215d6d2 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -1015,7 +1015,7 @@ Node* CodeAssembler::CallRuntimeImpl(
Runtime::FunctionId function, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
- TNode<Code> centry =
+ TNode<CodeT> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1048,7 +1048,7 @@ void CodeAssembler::TailCallRuntimeImpl(
Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
- TNode<Code> centry =
+ TNode<CodeT> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1104,7 +1104,7 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
}
void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- TNode<Code> target, TNode<Object> context,
+ TNode<CodeT> target, TNode<Object> context,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 11;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1209,7 +1209,7 @@ template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
TNode<ExternalReference>);
-void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context,
+void CodeAssembler::TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
TNode<JSFunction> function,
TNode<Object> new_target,
TNode<Int32T> arg_count) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 506b82c78b..b97f0a342f 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -1160,13 +1160,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class T = Object, class... TArgs>
TNode<T> CallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
- TNode<Code> target = HeapConstant(callable.code());
+ TNode<CodeT> target = HeapConstant(callable.code());
return CallStub<T>(callable.descriptor(), target, context, args...);
}
template <class T = Object, class... TArgs>
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
- TNode<Code> target, TNode<Object> context, TArgs... args) {
+ TNode<CodeT> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
target, context, args...));
}
@@ -1182,13 +1182,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
void TailCallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
- TNode<Code> target = HeapConstant(callable.code());
+ TNode<CodeT> target = HeapConstant(callable.code());
TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
- TNode<Code> target, TNode<Object> context, TArgs... args) {
+ TNode<CodeT> target, TNode<Object> context, TArgs... args) {
TailCallStubImpl(descriptor, target, context, {args...});
}
@@ -1211,7 +1211,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Note that no arguments adaption is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
- void TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ void TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
TNode<JSFunction> function, TNode<Object> new_target,
TNode<Int32T> arg_count);
@@ -1220,7 +1220,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* receiver, TArgs... args) {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
- TNode<Code> target = HeapConstant(callable.code());
+ TNode<CodeT> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), {}, arity, {receiver, args...}));
}
@@ -1231,7 +1231,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
- TNode<Code> target = HeapConstant(callable.code());
+ TNode<CodeT> target = HeapConstant(callable.code());
return CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), CAST(new_target), arity,
{receiver, args...});
@@ -1330,7 +1330,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
std::initializer_list<TNode<Object>> args);
void TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- TNode<Code> target, TNode<Object> context,
+ TNode<CodeT> target, TNode<Object> context,
std::initializer_list<Node*> args);
void TailCallStubThenBytecodeDispatchImpl(
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index ddd2ad807d..6f5514289b 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -1112,9 +1112,6 @@ void CompilationDependencies::DependOnElementsKind(
void CompilationDependencies::DependOnOwnConstantElement(
const JSObjectRef& holder, uint32_t index, const ObjectRef& element) {
- // Only valid if the holder can use direct reads, since validation uses
- // GetOwnConstantElementFromHeap.
- DCHECK(holder.should_access_heap() || broker_->is_concurrent_inlining());
RecordDependency(
zone_->New<OwnConstantElementDependency>(holder, index, element));
}
@@ -1235,7 +1232,7 @@ namespace {
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
while (true) {
- HeapObjectRef proto = map.prototype().value();
+ HeapObjectRef proto = map.prototype();
if (!proto.IsJSObject()) {
CHECK_EQ(proto.map().oddball_type(), OddballType::kNull);
break;
@@ -1286,7 +1283,6 @@ void CompilationDependencies::DependOnElementsKinds(
void CompilationDependencies::DependOnConsistentJSFunctionView(
const JSFunctionRef& function) {
- DCHECK(broker_->is_concurrent_inlining());
RecordDependency(zone_->New<ConsistentJSFunctionViewDependency>(function));
}
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index c8bc3a064b..17cec4167e 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -32,7 +32,8 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
if (AbstractState const* const state = node_states_.Get(effect)) {
PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
effect->op()->mnemonic());
- state->Print();
+ state->mutable_state.Print();
+ state->immutable_state.Print();
} else {
PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
effect->op()->mnemonic());
@@ -42,8 +43,10 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
}
switch (node->opcode()) {
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject:
return ReduceLoadFromObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kStoreToObject:
+ case IrOpcode::kInitializeImmutableInObject:
return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kDebugBreak:
case IrOpcode::kAbortCSADcheck:
@@ -92,7 +95,7 @@ namespace Helpers = CsaLoadEliminationHelpers;
// static
template <typename OuterKey>
-void CsaLoadElimination::AbstractState::IntersectWith(
+void CsaLoadElimination::HalfState::IntersectWith(
OuterMap<OuterKey>& to, const OuterMap<OuterKey>& from) {
FieldInfo empty_info;
for (const std::pair<OuterKey, InnerMap>& to_map : to) {
@@ -108,8 +111,7 @@ void CsaLoadElimination::AbstractState::IntersectWith(
}
}
-void CsaLoadElimination::AbstractState::IntersectWith(
- AbstractState const* that) {
+void CsaLoadElimination::HalfState::IntersectWith(HalfState const* that) {
IntersectWith(fresh_entries_, that->fresh_entries_);
IntersectWith(constant_entries_, that->constant_entries_);
IntersectWith(arbitrary_entries_, that->arbitrary_entries_);
@@ -118,10 +120,9 @@ void CsaLoadElimination::AbstractState::IntersectWith(
IntersectWith(arbitrary_unknown_entries_, that->arbitrary_unknown_entries_);
}
-CsaLoadElimination::AbstractState const*
-CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset,
- MachineRepresentation repr) const {
- AbstractState* result = zone_->New<AbstractState>(*this);
+CsaLoadElimination::HalfState const* CsaLoadElimination::HalfState::KillField(
+ Node* object, Node* offset, MachineRepresentation repr) const {
+ HalfState* result = zone_->New<HalfState>(*this);
UnknownOffsetInfos empty_unknown(zone_, InnerMap(zone_));
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
@@ -179,18 +180,16 @@ CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset,
result->arbitrary_unknown_entries_ = empty_unknown;
} else {
// May alias with anything. Clear the state.
- return zone_->New<AbstractState>(zone_);
+ return zone_->New<HalfState>(zone_);
}
}
return result;
}
-CsaLoadElimination::AbstractState const*
-CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
- Node* value,
- MachineRepresentation repr) const {
- AbstractState* new_state = zone_->New<AbstractState>(*this);
+CsaLoadElimination::HalfState const* CsaLoadElimination::HalfState::AddField(
+ Node* object, Node* offset, Node* value, MachineRepresentation repr) const {
+ HalfState* new_state = zone_->New<HalfState>(*this);
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
uint32_t offset_num = static_cast<uint32_t>(m.ResolvedValue());
@@ -212,7 +211,7 @@ CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
return new_state;
}
-CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
+CsaLoadElimination::FieldInfo CsaLoadElimination::HalfState::Lookup(
Node* object, Node* offset) const {
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
@@ -236,10 +235,10 @@ CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
// static
// Kill all elements in {infos} that overlap with an element with {offset} and
// size {ElementSizeInBytes(repr)}.
-void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos,
- uint32_t offset,
- MachineRepresentation repr,
- Zone* zone) {
+void CsaLoadElimination::HalfState::KillOffset(ConstantOffsetInfos& infos,
+ uint32_t offset,
+ MachineRepresentation repr,
+ Zone* zone) {
// All elements in the range [{offset}, {offset + ElementSizeInBytes(repr)})
// are in the killed range. We do not need to traverse the inner maps, we can
// just clear them.
@@ -270,7 +269,7 @@ void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos,
}
}
-void CsaLoadElimination::AbstractState::KillOffsetInFresh(
+void CsaLoadElimination::HalfState::KillOffsetInFresh(
Node* const object, uint32_t offset, MachineRepresentation repr) {
for (int i = 0; i < ElementSizeInBytes(repr); i++) {
Update(fresh_entries_, offset + i, object, {});
@@ -289,15 +288,15 @@ void CsaLoadElimination::AbstractState::KillOffsetInFresh(
}
// static
-void CsaLoadElimination::AbstractState::Print(
- const CsaLoadElimination::AbstractState::ConstantOffsetInfos& infos) {
+void CsaLoadElimination::HalfState::Print(
+ const CsaLoadElimination::HalfState::ConstantOffsetInfos& infos) {
for (const auto outer_entry : infos) {
for (const auto inner_entry : outer_entry.second) {
Node* object = inner_entry.first;
uint32_t offset = outer_entry.first;
FieldInfo info = inner_entry.second;
- PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset,
- object->op()->mnemonic(), info.value->id(),
+ PrintF(" #%d:%s+(%d) -> #%d:%s [repr=%s]\n", object->id(),
+ object->op()->mnemonic(), offset, info.value->id(),
info.value->op()->mnemonic(),
MachineReprToString(info.representation));
}
@@ -305,22 +304,22 @@ void CsaLoadElimination::AbstractState::Print(
}
// static
-void CsaLoadElimination::AbstractState::Print(
- const CsaLoadElimination::AbstractState::UnknownOffsetInfos& infos) {
+void CsaLoadElimination::HalfState::Print(
+ const CsaLoadElimination::HalfState::UnknownOffsetInfos& infos) {
for (const auto outer_entry : infos) {
for (const auto inner_entry : outer_entry.second) {
Node* object = outer_entry.first;
Node* offset = inner_entry.first;
FieldInfo info = inner_entry.second;
- PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
- object->op()->mnemonic(), info.value->id(),
- info.value->op()->mnemonic(),
+ PrintF(" #%d:%s+#%d:%s -> #%d:%s [repr=%s]\n", object->id(),
+ object->op()->mnemonic(), offset->id(), offset->op()->mnemonic(),
+ info.value->id(), info.value->op()->mnemonic(),
MachineReprToString(info.representation));
}
}
}
-void CsaLoadElimination::AbstractState::Print() const {
+void CsaLoadElimination::HalfState::Print() const {
Print(fresh_entries_);
Print(constant_entries_);
Print(arbitrary_entries_);
@@ -331,14 +330,23 @@ void CsaLoadElimination::AbstractState::Print() const {
Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
ObjectAccess const& access) {
+ DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
+ node->opcode() == IrOpcode::kLoadImmutableFromObject);
Node* object = NodeProperties::GetValueInput(node, 0);
Node* offset = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
+ bool is_mutable = node->opcode() == IrOpcode::kLoadFromObject;
+ // We should never find a field in the wrong half-state.
+ DCHECK((is_mutable ? &state->immutable_state : &state->mutable_state)
+ ->Lookup(object, offset)
+ .IsEmpty());
+ HalfState const* half_state =
+ is_mutable ? &state->mutable_state : &state->immutable_state;
MachineRepresentation representation = access.machine_type.representation();
- FieldInfo lookup_result = state->Lookup(object, offset);
+ FieldInfo lookup_result = half_state->Lookup(object, offset);
if (!lookup_result.IsEmpty()) {
// Make sure we don't reuse values that were recorded with a different
// representation or resurrect dead {replacement} nodes.
@@ -354,25 +362,47 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
return Replace(replacement);
}
}
- state = state->AddField(object, offset, node, representation);
+ half_state = half_state->AddField(object, offset, node, representation);
- return UpdateState(node, state);
+ AbstractState const* new_state =
+ is_mutable
+ ? zone()->New<AbstractState>(*half_state, state->immutable_state)
+ : zone()->New<AbstractState>(state->mutable_state, *half_state);
+
+ return UpdateState(node, new_state);
}
Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
ObjectAccess const& access) {
+ DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
+ node->opcode() == IrOpcode::kInitializeImmutableInObject);
Node* object = NodeProperties::GetValueInput(node, 0);
Node* offset = NodeProperties::GetValueInput(node, 1);
Node* value = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
-
MachineRepresentation repr = access.machine_type.representation();
- state = state->KillField(object, offset, repr);
- state = state->AddField(object, offset, value, repr);
-
- return UpdateState(node, state);
+ if (node->opcode() == IrOpcode::kStoreToObject) {
+ // We should not find the field in the wrong half-state.
+ DCHECK(state->immutable_state.Lookup(object, offset).IsEmpty());
+ HalfState const* mutable_state =
+ state->mutable_state.KillField(object, offset, repr);
+ mutable_state = mutable_state->AddField(object, offset, value, repr);
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(*mutable_state, state->immutable_state);
+ return UpdateState(node, new_state);
+ } else {
+ // We should not find the field in the wrong half-state.
+ DCHECK(state->mutable_state.Lookup(object, offset).IsEmpty());
+ // We should not initialize the same immutable field twice.
+ DCHECK(state->immutable_state.Lookup(object, offset).IsEmpty());
+ HalfState const* immutable_state =
+ state->immutable_state.AddField(object, offset, value, repr);
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(state->mutable_state, *immutable_state);
+ return UpdateState(node, new_state);
+ }
}
Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
@@ -431,10 +461,13 @@ Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
// predecessor.
if (state == nullptr) return NoChange();
// If this {node} has some uncontrolled side effects, set its state to
- // {empty_state()}, otherwise to its input state.
- return UpdateState(node, node->op()->HasProperty(Operator::kNoWrite)
- ? state
- : empty_state());
+ // the immutable half-state of its input state, otherwise to its input
+ // state.
+ return UpdateState(
+ node, node->op()->HasProperty(Operator::kNoWrite)
+ ? state
+ : zone()->New<AbstractState>(HalfState(zone()),
+ state->immutable_state));
}
DCHECK_EQ(0, node->op()->EffectOutputCount());
return NoChange();
@@ -464,8 +497,8 @@ Reduction CsaLoadElimination::PropagateInputState(Node* node) {
CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
Node* node, AbstractState const* state) const {
DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
- ZoneQueue<Node*> queue(zone());
- ZoneSet<Node*> visited(zone());
+ std::queue<Node*> queue;
+ std::unordered_set<Node*> visited;
visited.insert(node);
for (int i = 1; i < node->InputCount() - 1; ++i) {
queue.push(node->InputAt(i));
@@ -474,8 +507,25 @@ CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
Node* const current = queue.front();
queue.pop();
if (visited.insert(current).second) {
- if (!current->op()->HasProperty(Operator::kNoWrite)) {
- return empty_state();
+ if (current->opcode() == IrOpcode::kStoreToObject) {
+ Node* object = NodeProperties::GetValueInput(current, 0);
+ Node* offset = NodeProperties::GetValueInput(current, 1);
+ MachineRepresentation repr =
+ ObjectAccessOf(current->op()).machine_type.representation();
+ const HalfState* new_mutable_state =
+ state->mutable_state.KillField(object, offset, repr);
+ state = zone()->New<AbstractState>(*new_mutable_state,
+ state->immutable_state);
+ } else if (current->opcode() == IrOpcode::kInitializeImmutableInObject) {
+#if DEBUG
+ // We are not allowed to reset an immutable (object, offset) pair.
+ Node* object = NodeProperties::GetValueInput(current, 0);
+ Node* offset = NodeProperties::GetValueInput(current, 1);
+ CHECK(state->immutable_state.Lookup(object, offset).IsEmpty());
+#endif
+ } else if (!current->op()->HasProperty(Operator::kNoWrite)) {
+ return zone()->New<AbstractState>(HalfState(zone()),
+ state->immutable_state);
}
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
queue.push(NodeProperties::GetEffectInput(current, i));
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
index 82ca580329..e6b37589ed 100644
--- a/deps/v8/src/compiler/csa-load-elimination.h
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -62,9 +62,9 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
};
// Design doc: https://bit.ly/36MfD6Y
- class AbstractState final : public ZoneObject {
+ class HalfState final : public ZoneObject {
public:
- explicit AbstractState(Zone* zone)
+ explicit HalfState(Zone* zone)
: zone_(zone),
fresh_entries_(zone, InnerMap(zone)),
constant_entries_(zone, InnerMap(zone)),
@@ -73,7 +73,7 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
constant_unknown_entries_(zone, InnerMap(zone)),
arbitrary_unknown_entries_(zone, InnerMap(zone)) {}
- bool Equals(AbstractState const* that) const {
+ bool Equals(HalfState const* that) const {
return fresh_entries_ == that->fresh_entries_ &&
constant_entries_ == that->constant_entries_ &&
arbitrary_entries_ == that->arbitrary_entries_ &&
@@ -81,33 +81,22 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
constant_unknown_entries_ == that->constant_unknown_entries_ &&
arbitrary_unknown_entries_ == that->arbitrary_unknown_entries_;
}
- void IntersectWith(AbstractState const* that);
-
- AbstractState const* KillField(Node* object, Node* offset,
- MachineRepresentation repr) const;
- AbstractState const* AddField(Node* object, Node* offset, Node* value,
- MachineRepresentation repr) const;
+ void IntersectWith(HalfState const* that);
+ HalfState const* KillField(Node* object, Node* offset,
+ MachineRepresentation repr) const;
+ HalfState const* AddField(Node* object, Node* offset, Node* value,
+ MachineRepresentation repr) const;
FieldInfo Lookup(Node* object, Node* offset) const;
-
void Print() const;
private:
- Zone* zone_;
using InnerMap = PersistentMap<Node*, FieldInfo>;
template <typename OuterKey>
using OuterMap = PersistentMap<OuterKey, InnerMap>;
-
// offset -> object -> info
using ConstantOffsetInfos = OuterMap<uint32_t>;
- ConstantOffsetInfos fresh_entries_;
- ConstantOffsetInfos constant_entries_;
- ConstantOffsetInfos arbitrary_entries_;
-
// object -> offset -> info
using UnknownOffsetInfos = OuterMap<Node*>;
- UnknownOffsetInfos fresh_unknown_entries_;
- UnknownOffsetInfos constant_unknown_entries_;
- UnknownOffsetInfos arbitrary_unknown_entries_;
// Update {map} so that {map.Get(outer_key).Get(inner_key)} returns {info}.
template <typename OuterKey>
@@ -123,12 +112,43 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
MachineRepresentation repr, Zone* zone);
void KillOffsetInFresh(Node* object, uint32_t offset,
MachineRepresentation repr);
-
template <typename OuterKey>
static void IntersectWith(OuterMap<OuterKey>& to,
const OuterMap<OuterKey>& from);
static void Print(const ConstantOffsetInfos& infos);
static void Print(const UnknownOffsetInfos& infos);
+
+ Zone* zone_;
+ ConstantOffsetInfos fresh_entries_;
+ ConstantOffsetInfos constant_entries_;
+ ConstantOffsetInfos arbitrary_entries_;
+ UnknownOffsetInfos fresh_unknown_entries_;
+ UnknownOffsetInfos constant_unknown_entries_;
+ UnknownOffsetInfos arbitrary_unknown_entries_;
+ };
+
+ // An {AbstractState} consists of two {HalfState}s, representing the mutable
+ // and immutable sets of known fields, respectively. These sets correspond to
+ // LoadFromObject/StoreToObject and LoadImmutableFromObject/
+ // InitializeImmutableInObject respectively. The two half-states should not
+ // overlap.
+ struct AbstractState : public ZoneObject {
+ explicit AbstractState(Zone* zone)
+ : mutable_state(zone), immutable_state(zone) {}
+ explicit AbstractState(HalfState mutable_state, HalfState immutable_state)
+ : mutable_state(mutable_state), immutable_state(immutable_state) {}
+
+ bool Equals(AbstractState const* that) const {
+ return this->immutable_state.Equals(&that->immutable_state) &&
+ this->mutable_state.Equals(&that->mutable_state);
+ }
+ void IntersectWith(AbstractState const* that) {
+ mutable_state.IntersectWith(&that->mutable_state);
+ immutable_state.IntersectWith(&that->immutable_state);
+ }
+
+ HalfState mutable_state;
+ HalfState immutable_state;
};
Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 51e23d89d3..21696969ec 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -184,8 +184,6 @@ class EffectControlLinearizer {
void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
Node* LowerTypeOf(Node* node);
- void LowerTierUpCheck(Node* node);
- void LowerUpdateInterruptBudget(Node* node);
Node* LowerToBoolean(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -342,6 +340,7 @@ class EffectControlLinearizer {
Zone* temp_zone_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
+ bool inside_region_ = false;
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
JSHeapBroker* broker_;
@@ -625,7 +624,7 @@ void EffectControlLinearizer::Run() {
continue;
}
- gasm()->Reset(block);
+ gasm()->Reset();
BasicBlock::iterator instr = block->begin();
BasicBlock::iterator end_instr = block->end();
@@ -764,8 +763,6 @@ void EffectControlLinearizer::Run() {
ProcessNode(node, &frame_state);
}
- block = gasm()->FinalizeCurrentBlock(block);
-
switch (block->control()) {
case BasicBlock::kGoto:
case BasicBlock::kNone:
@@ -864,6 +861,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
if (node->opcode() == IrOpcode::kFinishRegion) {
// Reset the current region observability.
region_observability_ = RegionObservability::kObservable;
+ inside_region_ = false;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
return RemoveRenameNode(node);
@@ -874,6 +872,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
// StoreField and other operators).
DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
region_observability_ = RegionObservabilityOf(node->op());
+ inside_region_ = true;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
return RemoveRenameNode(node);
@@ -891,6 +890,14 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
return;
}
+ if (node->opcode() == IrOpcode::kStoreField) {
+ // Mark stores outside a region as non-initializing and non-transitioning.
+ if (!inside_region_) {
+ const FieldAccess access = FieldAccessOf(node->op());
+ NodeProperties::ChangeOp(node, simplified()->StoreField(access, false));
+ }
+ }
+
// The IfSuccess nodes should always start a basic block (and basic block
// start nodes are not handled in the ProcessNode method).
DCHECK_NE(IrOpcode::kIfSuccess, node->opcode());
@@ -1160,12 +1167,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTypeOf:
result = LowerTypeOf(node);
break;
- case IrOpcode::kTierUpCheck:
- LowerTierUpCheck(node);
- break;
- case IrOpcode::kUpdateInterruptBudget:
- LowerUpdateInterruptBudget(node);
- break;
case IrOpcode::kNewDoubleElements:
result = LowerNewDoubleElements(node);
break;
@@ -3588,85 +3589,6 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
__ NoContextConstant());
}
-void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
- TierUpCheckNode n(node);
- TNode<FeedbackVector> vector = n.feedback_vector();
-
- Node* optimization_state =
- __ LoadField(AccessBuilder::ForFeedbackVectorFlags(), vector);
-
- // TODO(jgruber): The branch introduces a sequence of spills before the
- // branch (and restores at `fallthrough`) that are completely unnecessary
- // since the IfFalse continuation ends in a tail call. Investigate how to
- // avoid these and fix it.
-
- auto fallthrough = __ MakeLabel();
- auto has_optimized_code_or_marker = __ MakeDeferredLabel();
- __ BranchWithHint(
- __ Word32Equal(
- __ Word32And(optimization_state,
- __ Uint32Constant(
- FeedbackVector::
- kHasNoTopTierCodeOrCompileOptimizedMarkerMask)),
- __ Int32Constant(0)),
- &fallthrough, &has_optimized_code_or_marker, BranchHint::kTrue);
-
- __ Bind(&has_optimized_code_or_marker);
-
- // The optimization marker field contains a non-trivial value, and some
- // action has to be taken. For example, perhaps tier-up has been requested
- // and we need to kick off a compilation job; or optimized code is available
- // and should be tail-called.
- //
- // Currently we delegate these tasks to the InterpreterEntryTrampoline.
- // TODO(jgruber,v8:8888): Consider a dedicated builtin instead.
-
- TNode<HeapObject> code =
- __ HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
-
- JSTrampolineDescriptor descriptor;
- CallDescriptor::Flags flags = CallDescriptor::kFixedTargetRegister |
- CallDescriptor::kIsTailCallForTierUp;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), descriptor, descriptor.GetStackParameterCount(), flags,
- Operator::kNoProperties);
- Node* nodes[] = {code, n.target(), n.new_target(), n.input_count(),
- n.context(), __ effect(), __ control()};
-
-#ifdef DEBUG
- static constexpr int kCodeContextEffectControl = 4;
- DCHECK_EQ(arraysize(nodes),
- descriptor.GetParameterCount() + kCodeContextEffectControl);
-#endif // DEBUG
-
- __ TailCall(call_descriptor, arraysize(nodes), nodes);
-
- __ Bind(&fallthrough);
-}
-
-void EffectControlLinearizer::LowerUpdateInterruptBudget(Node* node) {
- UpdateInterruptBudgetNode n(node);
- TNode<FeedbackCell> feedback_cell = n.feedback_cell();
- TNode<Int32T> budget = __ LoadField<Int32T>(
- AccessBuilder::ForFeedbackCellInterruptBudget(), feedback_cell);
- Node* new_budget = __ Int32Add(budget, __ Int32Constant(n.delta()));
- __ StoreField(AccessBuilder::ForFeedbackCellInterruptBudget(), feedback_cell,
- new_budget);
- if (n.delta() < 0) {
- auto next = __ MakeLabel();
- auto if_budget_exhausted = __ MakeDeferredLabel();
- __ Branch(__ Int32LessThan(new_budget, __ Int32Constant(0)),
- &if_budget_exhausted, &next);
-
- __ Bind(&if_budget_exhausted);
- CallBuiltin(Builtin::kBytecodeBudgetInterruptFromCode,
- node->op()->properties(), feedback_cell);
- __ Goto(&next);
-
- __ Bind(&next);
- }
-}
-
Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
Node* obj = node->InputAt(0);
Callable const callable =
@@ -3683,10 +3605,8 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
- if (kJSArgcIncludesReceiver) {
- arguments_length =
- __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
- }
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
return arguments_length;
}
@@ -3700,10 +3620,8 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
- if (kJSArgcIncludesReceiver) {
- arguments_length =
- __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
- }
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -4972,36 +4890,6 @@ void EffectControlLinearizer::LowerStoreMessage(Node* node) {
__ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
}
-namespace {
-MachineType MachineTypeFor(CTypeInfo::Type type) {
- switch (type) {
- case CTypeInfo::Type::kVoid:
- return MachineType::AnyTagged();
- case CTypeInfo::Type::kBool:
- return MachineType::Bool();
- case CTypeInfo::Type::kInt32:
- return MachineType::Int32();
- case CTypeInfo::Type::kUint32:
- return MachineType::Uint32();
- case CTypeInfo::Type::kInt64:
- return MachineType::Int64();
- case CTypeInfo::Type::kAny:
- static_assert(sizeof(AnyCType) == 8,
- "CTypeInfo::Type::kAny is assumed to be of size 64 bits.");
- return MachineType::Int64();
- case CTypeInfo::Type::kUint64:
- return MachineType::Uint64();
- case CTypeInfo::Type::kFloat32:
- return MachineType::Float32();
- case CTypeInfo::Type::kFloat64:
- return MachineType::Float64();
- case CTypeInfo::Type::kV8Value:
- case CTypeInfo::Type::kApiObject:
- return MachineType::AnyTagged();
- }
-}
-} // namespace
-
Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
Node* node, ElementsKind expected_elements_kind,
GraphAssemblerLabel<0>* bailout) {
@@ -5320,15 +5208,14 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
value_input_count);
Node* stack_slot = nullptr;
+ int kAlign = alignof(v8::FastApiCallbackOptions);
+ int kSize = sizeof(v8::FastApiCallbackOptions);
+ // If this check fails, you've probably added new fields to
+ // v8::FastApiCallbackOptions, which means you'll need to write code
+ // that initializes and reads from them too.
+ CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
+ stack_slot = __ StackSlot(kSize, kAlign);
if (c_signature->HasOptions()) {
- int kAlign = alignof(v8::FastApiCallbackOptions);
- int kSize = sizeof(v8::FastApiCallbackOptions);
- // If this check fails, you've probably added new fields to
- // v8::FastApiCallbackOptions, which means you'll need to write code
- // that initializes and reads from them too.
- CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
- stack_slot = __ StackSlot(kSize, kAlign);
-
__ Store(
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
stack_slot,
@@ -5339,17 +5226,29 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
stack_slot,
static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
+ } else {
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ stack_slot,
+ 0, // fallback = false
+ __ Int32Constant(0));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot,
+ 0, // no data
+ n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
}
MachineSignature::Builder builder(
graph()->zone(), 1, c_arg_count + (c_signature->HasOptions() ? 1 : 0));
- MachineType return_type = MachineTypeFor(c_signature->ReturnInfo().GetType());
+ MachineType return_type =
+ MachineType::TypeForCType(c_signature->ReturnInfo());
builder.AddReturn(return_type);
for (int i = 0; i < c_arg_count; ++i) {
CTypeInfo type = c_signature->ArgumentInfo(i);
MachineType machine_type =
type.GetSequenceType() == CTypeInfo::SequenceType::kScalar
- ? MachineTypeFor(type.GetType())
+ ? MachineType::TypeForCType(type)
: MachineType::AnyTagged();
builder.AddParam(machine_type);
}
@@ -5487,10 +5386,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
__ Branch(is_zero, &if_success, &if_error);
} else {
- // If c_call_result is nullptr, we didn't execute the fast path, so
- // we need to follow the slow path.
- Node* is_zero = __ WordEqual(c_call_result, __ IntPtrConstant(0));
- __ Branch(is_zero, &if_error, &if_success);
+ Node* true_constant = __ TrueConstant();
+ __ Branch(true_constant, &if_success, &if_error);
}
__ Bind(&if_success);
@@ -6818,7 +6715,7 @@ Node* EffectControlLinearizer::BuildAllocateBigInt(Node* bitfield,
DCHECK(machine()->Is64());
DCHECK_EQ(bitfield == nullptr, digit == nullptr);
static constexpr auto zero_bitfield =
- BigInt::SignBits::update(BigInt::LengthBits::encode(0), 0);
+ BigInt::SignBits::update(BigInt::LengthBits::encode(0), false);
Node* map = __ HeapConstant(factory()->bigint_map());
@@ -6846,30 +6743,13 @@ void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
JSHeapBroker* broker) {
- JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
+ JSGraphAssembler graph_assembler_(graph, temp_zone);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
-void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
- Zone* temp_zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- JSHeapBroker* broker) {
- JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
- schedule);
- EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
- temp_zone, source_positions, node_origins,
- MaintainSchedule::kMaintain, broker);
- MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
- SelectLowering select_lowering(&graph_assembler, js_graph->graph());
- graph_assembler.AddInlineReducer(&memory_lowering);
- graph_assembler.AddInlineReducer(&select_lowering);
- linearizer.Run();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 97467391e2..909a8cd682 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -5,14 +5,11 @@
#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
-#include <vector>
-
-#include "src/handles/handles.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
-class Map;
class Zone;
namespace compiler {
@@ -28,14 +25,6 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
JSHeapBroker* broker);
-// Performs effect control linearization lowering in addition to machine
-// lowering, producing a scheduled graph that is ready for instruction
-// selection.
-V8_EXPORT_PRIVATE void LowerToMachineSchedule(
- JSGraph* graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- JSHeapBroker* broker);
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 97b22d8875..9af7345418 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -6,6 +6,7 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/operation-typer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/execution/frame-constants.h"
@@ -24,10 +25,11 @@ namespace compiler {
#endif // DEBUG
EscapeAnalysisReducer::EscapeAnalysisReducer(
- Editor* editor, JSGraph* jsgraph, EscapeAnalysisResult analysis_result,
- Zone* zone)
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+ EscapeAnalysisResult analysis_result, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
+ broker_(broker),
analysis_result_(analysis_result),
object_id_cache_(zone),
node_cache_(jsgraph->graph(), zone),
@@ -221,6 +223,7 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
}
void EscapeAnalysisReducer::Finalize() {
+ OperationTyper op_typer(broker_, jsgraph()->graph()->zone());
for (Node* node : arguments_elements_) {
const NewArgumentsElementsParameters& params =
NewArgumentsElementsParametersOf(node->op());
@@ -318,17 +321,21 @@ void EscapeAnalysisReducer::Finalize() {
Node* offset = jsgraph()->graph()->NewNode(
jsgraph()->simplified()->NumberAdd(), index,
offset_to_first_elem);
+ Type offset_type = op_typer.NumberAdd(
+ NodeProperties::GetType(index),
+ NodeProperties::GetType(offset_to_first_elem));
+ NodeProperties::SetType(offset, offset_type);
if (type == CreateArgumentsType::kRestParameter) {
// In the case of rest parameters we should skip the formal
// parameters.
- NodeProperties::SetType(offset,
- TypeCache::Get()->kArgumentsLengthType);
offset = jsgraph()->graph()->NewNode(
jsgraph()->simplified()->NumberAdd(), offset,
formal_parameter_count);
+ NodeProperties::SetType(
+ offset, op_typer.NumberAdd(
+ offset_type,
+ NodeProperties::GetType(formal_parameter_count)));
}
- NodeProperties::SetType(offset,
- TypeCache::Get()->kArgumentsLengthType);
Node* frame = jsgraph()->graph()->NewNode(
jsgraph()->machine()->LoadFramePointer());
NodeProperties::SetType(frame, Type::ExternalPointer());
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 49b672a26b..55c56442b8 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -83,7 +83,7 @@ class NodeHashCache {
class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
EscapeAnalysisResult analysis_result, Zone* zone);
EscapeAnalysisReducer(const EscapeAnalysisReducer&) = delete;
EscapeAnalysisReducer& operator=(const EscapeAnalysisReducer&) = delete;
@@ -108,6 +108,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
+ JSHeapBroker* const broker_;
EscapeAnalysisResult analysis_result_;
ZoneVector<Node*> object_id_cache_;
NodeHashCache node_cache_;
diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc
index 9317d1ad1f..54dc22e99c 100644
--- a/deps/v8/src/compiler/fast-api-calls.cc
+++ b/deps/v8/src/compiler/fast-api-calls.cc
@@ -4,6 +4,8 @@
#include "src/compiler/fast-api-calls.h"
+#include "src/compiler/globals.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -78,6 +80,44 @@ OverloadsResolutionResult ResolveOverloads(
return OverloadsResolutionResult::Invalid();
}
+bool CanOptimizeFastSignature(const CFunctionInfo* c_signature) {
+ USE(c_signature);
+
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat64) {
+ return false;
+ }
+#endif
+
+#ifndef V8_TARGET_ARCH_64_BIT
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kUint64) {
+ return false;
+ }
+#endif
+
+ for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+ USE(i);
+
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
+ return false;
+ }
+#endif
+
+#ifndef V8_TARGET_ARCH_64_BIT
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
+ return false;
+ }
+#endif
+ }
+
+ return true;
+}
+
} // namespace fast_api_call
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/fast-api-calls.h b/deps/v8/src/compiler/fast-api-calls.h
index fd6832e089..814c01dbdf 100644
--- a/deps/v8/src/compiler/fast-api-calls.h
+++ b/deps/v8/src/compiler/fast-api-calls.h
@@ -42,6 +42,8 @@ OverloadsResolutionResult ResolveOverloads(
Zone* zone, const FastApiCallFunctionVector& candidates,
unsigned int arg_count);
+bool CanOptimizeFastSignature(const CFunctionInfo* c_signature);
+
} // namespace fast_api_call
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index c5199f1e64..113ec96102 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -214,11 +214,8 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
ContinuationFrameStateMode mode) {
// Depending on {mode}, final parameters are added by the deoptimizer
// and aren't explicitly passed in the frame state.
- DCHECK_EQ(
- Builtins::GetStackParameterCount(name) +
- (kJSArgcIncludesReceiver ? 0
- : 1), // Add receiver if it is not included.
- stack_parameter_count + DeoptimizerParameterCountFor(mode));
+ DCHECK_EQ(Builtins::GetStackParameterCount(name),
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 93a6ee4a6b..b2ece7e3b6 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -19,318 +19,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-class GraphAssembler::BasicBlockUpdater {
- public:
- BasicBlockUpdater(Schedule* schedule, Graph* graph,
- CommonOperatorBuilder* common, Zone* temp_zone);
-
- Node* AddNode(Node* node);
- Node* AddNode(Node* node, BasicBlock* to);
- Node* AddClonedNode(Node* node);
-
- BasicBlock* NewBasicBlock(bool deferred);
- BasicBlock* SplitBasicBlock();
- void AddBind(BasicBlock* block);
- void AddBranch(Node* branch, BasicBlock* tblock, BasicBlock* fblock);
- void AddGoto(BasicBlock* to);
- void AddGoto(BasicBlock* from, BasicBlock* to);
- void AddTailCall(Node* node);
-
- void StartBlock(BasicBlock* block);
- BasicBlock* Finalize(BasicBlock* original);
-
- BasicBlock* original_block() { return original_block_; }
- BasicBlock::Control original_control() { return original_control_; }
- Node* original_control_input() { return original_control_input_; }
-
- private:
- enum State { kUnchanged, kChanged };
-
- Zone* temp_zone() { return temp_zone_; }
-
- bool IsOriginalNode(Node* node);
- void UpdateSuccessors(BasicBlock* block);
- void SetBlockDeferredFromPredecessors();
- void RemoveSuccessorsFromSchedule();
- void CopyForChange();
-
- Zone* temp_zone_;
-
- // Current basic block we are scheduling.
- BasicBlock* current_block_;
-
- // The original block that we are lowering.
- BasicBlock* original_block_;
-
- // Position in the current block, only applicable in the 'unchanged' state.
- BasicBlock::iterator node_it_;
- BasicBlock::iterator end_it_;
-
- Schedule* schedule_;
- Graph* graph_;
- CommonOperatorBuilder* common_;
-
- // The nodes in the original block if we are in 'changed' state. Retained to
- // avoid invalidating iterators that are iterating over the original nodes of
- // the block.
- NodeVector saved_nodes_;
-
- // The original control, control input and successors, to enable recovery of
- // them when we finalize the block.
- struct SuccessorInfo {
- BasicBlock* block;
- size_t index;
- };
- ZoneVector<SuccessorInfo> saved_successors_;
- BasicBlock::Control original_control_;
- Node* original_control_input_;
- bool original_deferred_;
- size_t original_node_count_;
-
- State state_;
-};
-
-GraphAssembler::BasicBlockUpdater::BasicBlockUpdater(
- Schedule* schedule, Graph* graph, CommonOperatorBuilder* common,
- Zone* temp_zone)
- : temp_zone_(temp_zone),
- current_block_(nullptr),
- original_block_(nullptr),
- schedule_(schedule),
- graph_(graph),
- common_(common),
- saved_nodes_(schedule->zone()),
- saved_successors_(schedule->zone()),
- original_control_(BasicBlock::kNone),
- original_control_input_(nullptr),
- original_deferred_(false),
- original_node_count_(graph->NodeCount()),
- state_(kUnchanged) {}
-
-Node* GraphAssembler::BasicBlockUpdater::AddNode(Node* node) {
- return AddNode(node, current_block_);
-}
-
-Node* GraphAssembler::BasicBlockUpdater::AddNode(Node* node, BasicBlock* to) {
- if (state_ == kUnchanged) {
- DCHECK_EQ(to, original_block());
-
- if (node_it_ != end_it_ && *node_it_ == node) {
- node_it_++;
- return node;
- }
-
- CopyForChange();
- }
-
- // Add the node to the basic block.
- DCHECK(!schedule_->IsScheduled(node));
- schedule_->AddNode(to, node);
- return node;
-}
-
-Node* GraphAssembler::BasicBlockUpdater::AddClonedNode(Node* node) {
- DCHECK(node->op()->HasProperty(Operator::kPure));
- if (state_ == kUnchanged) {
- CopyForChange();
- }
-
- if (schedule_->IsScheduled(node) &&
- schedule_->block(node) == current_block_) {
- // Node is already scheduled for the current block, don't add it again.
- return node;
- } else if (!schedule_->IsScheduled(node) && !IsOriginalNode(node)) {
- // Node is not scheduled yet, so we can add it directly.
- return AddNode(node);
- } else {
- // TODO(9684): Potentially add some per-block caching so we can avoid
- // cloning if we've already cloned for this block.
- return AddNode(graph_->CloneNode(node));
- }
-}
-
-bool GraphAssembler::BasicBlockUpdater::IsOriginalNode(Node* node) {
- // Return true if node was part of the original schedule and might currently
- // be re-added to the schedule after a CopyForChange.
- return node->id() < original_node_count_;
-}
-
-void GraphAssembler::BasicBlockUpdater::CopyForChange() {
- DCHECK_EQ(kUnchanged, state_);
-
- // Save successor.
- DCHECK(saved_successors_.empty());
- for (BasicBlock* successor : original_block()->successors()) {
- for (size_t i = 0; i < successor->PredecessorCount(); i++) {
- if (successor->PredecessorAt(i) == original_block()) {
- saved_successors_.push_back({successor, i});
- break;
- }
- }
- }
- DCHECK_EQ(saved_successors_.size(), original_block()->SuccessorCount());
-
- // Save control.
- original_control_ = original_block()->control();
- original_control_input_ = original_block()->control_input();
-
- // Save original nodes (to allow them to continue to be iterated by the user
- // of graph assembler).
- original_block()->nodes()->swap(saved_nodes_);
- DCHECK(original_block()->nodes()->empty());
-
- // Re-insert the nodes from the front of the block.
- original_block()->InsertNodes(original_block()->begin(), saved_nodes_.begin(),
- node_it_);
-
- // Remove the tail from the schedule.
- for (; node_it_ != end_it_; node_it_++) {
- schedule_->SetBlockForNode(nullptr, *node_it_);
- }
-
- // Reset the control.
- if (original_block()->control() != BasicBlock::kGoto) {
- schedule_->SetBlockForNode(nullptr, original_block()->control_input());
- }
- original_block()->set_control_input(nullptr);
- original_block()->set_control(BasicBlock::kNone);
- original_block()->ClearSuccessors();
-
- state_ = kChanged;
- end_it_ = {};
- node_it_ = {};
-}
-
-BasicBlock* GraphAssembler::BasicBlockUpdater::NewBasicBlock(bool deferred) {
- BasicBlock* block = schedule_->NewBasicBlock();
- block->set_deferred(deferred || original_deferred_);
- return block;
-}
-
-BasicBlock* GraphAssembler::BasicBlockUpdater::SplitBasicBlock() {
- return NewBasicBlock(current_block_->deferred());
-}
-
-void GraphAssembler::BasicBlockUpdater::AddBind(BasicBlock* to) {
- DCHECK_NOT_NULL(to);
- current_block_ = to;
- // Basic block should only have the control node, if any.
- DCHECK_LE(current_block_->NodeCount(), 1);
- SetBlockDeferredFromPredecessors();
-}
-
-void GraphAssembler::BasicBlockUpdater::SetBlockDeferredFromPredecessors() {
- if (!current_block_->deferred()) {
- bool deferred = true;
- for (BasicBlock* pred : current_block_->predecessors()) {
- if (!pred->deferred()) {
- deferred = false;
- break;
- }
- }
- current_block_->set_deferred(deferred);
- }
-}
-
-void GraphAssembler::BasicBlockUpdater::AddBranch(Node* node,
- BasicBlock* tblock,
- BasicBlock* fblock) {
- if (state_ == kUnchanged) {
- DCHECK_EQ(current_block_, original_block());
- CopyForChange();
- }
-
- DCHECK_EQ(state_, kChanged);
- schedule_->AddBranch(current_block_, node, tblock, fblock);
- current_block_ = nullptr;
-}
-
-void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* to) {
- DCHECK_NOT_NULL(current_block_);
- AddGoto(current_block_, to);
-}
-
-void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* from,
- BasicBlock* to) {
- if (state_ == kUnchanged) {
- CopyForChange();
- }
-
- if (to->deferred() && !from->deferred()) {
- // Add a new block with the correct deferred hint to avoid merges into the
- // target block with different deferred hints.
- // TODO(9684): Only split the current basic block if the label's target
- // block has multiple merges.
- BasicBlock* new_block = NewBasicBlock(to->deferred());
- schedule_->AddGoto(from, new_block);
- from = new_block;
- }
-
- schedule_->AddGoto(from, to);
- current_block_ = nullptr;
-}
-
-void GraphAssembler::BasicBlockUpdater::AddTailCall(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kTailCall);
- DCHECK_NOT_NULL(current_block_);
-
- if (state_ == kUnchanged) {
- CopyForChange();
- }
-
- schedule_->AddTailCall(current_block_, node);
- current_block_ = nullptr;
-}
-
-void GraphAssembler::BasicBlockUpdater::UpdateSuccessors(BasicBlock* block) {
- for (SuccessorInfo succ : saved_successors_) {
- (succ.block->predecessors())[succ.index] = block;
- block->AddSuccessor(succ.block);
- }
- saved_successors_.clear();
- block->set_control(original_control_);
- block->set_control_input(original_control_input_);
- if (original_control_input_ != nullptr) {
- schedule_->SetBlockForNode(block, original_control_input_);
- } else {
- DCHECK_EQ(BasicBlock::kGoto, original_control_);
- }
-}
-
-void GraphAssembler::BasicBlockUpdater::StartBlock(BasicBlock* block) {
- DCHECK_NULL(current_block_);
- DCHECK_NULL(original_block_);
- DCHECK(saved_nodes_.empty());
- block->ResetRPOInfo();
- current_block_ = block;
- original_block_ = block;
- original_deferred_ = block->deferred();
- node_it_ = block->begin();
- end_it_ = block->end();
- state_ = kUnchanged;
-}
-
-BasicBlock* GraphAssembler::BasicBlockUpdater::Finalize(BasicBlock* original) {
- DCHECK_EQ(original, original_block());
- BasicBlock* block = current_block_;
- if (state_ == kChanged) {
- UpdateSuccessors(block);
- } else {
- DCHECK_EQ(block, original_block());
- if (node_it_ != end_it_) {
- // We have not got to the end of the node list, we need to trim.
- block->TrimNodes(node_it_);
- }
- }
- original_control_ = BasicBlock::kNone;
- saved_nodes_.clear();
- original_deferred_ = false;
- original_control_input_ = nullptr;
- original_block_ = nullptr;
- current_block_ = nullptr;
- return block;
-}
-
class V8_NODISCARD GraphAssembler::BlockInlineReduction {
public:
explicit BlockInlineReduction(GraphAssembler* gasm) : gasm_(gasm) {
@@ -349,16 +37,12 @@ class V8_NODISCARD GraphAssembler::BlockInlineReduction {
GraphAssembler::GraphAssembler(
MachineGraph* mcgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback,
- Schedule* schedule, bool mark_loop_exits)
+ bool mark_loop_exits)
: temp_zone_(zone),
mcgraph_(mcgraph),
effect_(nullptr),
control_(nullptr),
node_changed_callback_(node_changed_callback),
- block_updater_(schedule != nullptr
- ? new BasicBlockUpdater(schedule, mcgraph->graph(),
- mcgraph->common(), zone)
- : nullptr),
inline_reducers_(zone),
inline_reductions_blocked_(false),
loop_headers_(zone),
@@ -711,13 +395,8 @@ Node* GraphAssembler::DebugBreak() {
Node* GraphAssembler::Unreachable(
GraphAssemblerLabel<0u>* block_updater_successor) {
Node* result = UnreachableWithoutConnectToEnd();
- if (block_updater_ == nullptr) {
- ConnectUnreachableToEnd();
- InitializeEffectControl(nullptr, nullptr);
- } else {
- DCHECK_NOT_NULL(block_updater_successor);
- Goto(block_updater_successor);
- }
+ ConnectUnreachableToEnd();
+ InitializeEffectControl(nullptr, nullptr);
return result;
}
@@ -890,8 +569,6 @@ void GraphAssembler::TailCall(const CallDescriptor* call_descriptor,
Node* node = AddNode(graph()->NewNode(common()->TailCall(call_descriptor),
inputs_size, inputs));
- if (block_updater_) block_updater_->AddTailCall(node);
-
// Unlike ConnectUnreachableToEnd, the TailCall node terminates a block; to
// keep it live, it *must* be connected to End (also in Turboprop schedules).
NodeProperties::MergeControlToEnd(graph(), common(), node);
@@ -912,103 +589,18 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
BranchImpl(condition, if_true, if_false, hint);
}
-void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,
- Node* if_true_control,
- Node* if_false_control,
- BasicBlock* if_true_block,
- BasicBlock* if_false_block) {
- DCHECK_NOT_NULL(block_updater_);
- // TODO(9684): Only split the current basic block if the label's target
- // block has multiple merges.
- BasicBlock* if_true_target = block_updater_->SplitBasicBlock();
- BasicBlock* if_false_target = block_updater_->SplitBasicBlock();
-
- block_updater_->AddBranch(branch, if_true_target, if_false_target);
-
- block_updater_->AddNode(if_true_control, if_true_target);
- block_updater_->AddGoto(if_true_target, if_true_block);
-
- block_updater_->AddNode(if_false_control, if_false_target);
- block_updater_->AddGoto(if_false_target, if_false_block);
-}
-
-void GraphAssembler::BindBasicBlock(BasicBlock* block) {
- if (block_updater_) {
- block_updater_->AddBind(block);
- }
-}
-
-BasicBlock* GraphAssembler::NewBasicBlock(bool deferred) {
- if (!block_updater_) return nullptr;
- return block_updater_->NewBasicBlock(deferred);
-}
-
-void GraphAssembler::GotoBasicBlock(BasicBlock* block) {
- if (block_updater_) {
- block_updater_->AddGoto(block);
- }
-}
-
-void GraphAssembler::GotoIfBasicBlock(BasicBlock* block, Node* branch,
- IrOpcode::Value goto_if) {
- if (block_updater_) {
- // TODO(9684): Only split the current basic block for the goto_target
- // if block has multiple merges.
- BasicBlock* goto_target = block_updater_->SplitBasicBlock();
- BasicBlock* fallthrough_target = block_updater_->SplitBasicBlock();
-
- if (goto_if == IrOpcode::kIfTrue) {
- block_updater_->AddBranch(branch, goto_target, fallthrough_target);
- } else {
- DCHECK_EQ(goto_if, IrOpcode::kIfFalse);
- block_updater_->AddBranch(branch, fallthrough_target, goto_target);
- }
-
- block_updater_->AddNode(control(), goto_target);
- block_updater_->AddGoto(goto_target, block);
-
- block_updater_->AddBind(fallthrough_target);
- }
-}
-
-BasicBlock* GraphAssembler::FinalizeCurrentBlock(BasicBlock* block) {
- if (block_updater_) {
- block = block_updater_->Finalize(block);
- if (control() == mcgraph()->Dead()) {
- // If the block's end is unreachable, then reset current effect and
- // control to that of the block's throw control node.
- DCHECK(block->control() == BasicBlock::kThrow);
- Node* throw_node = block->control_input();
- control_ = NodeProperties::GetControlInput(throw_node);
- effect_ = NodeProperties::GetEffectInput(throw_node);
- }
- }
- return block;
-}
-
void GraphAssembler::ConnectUnreachableToEnd() {
DCHECK_EQ(effect()->opcode(), IrOpcode::kUnreachable);
- // When maintaining the schedule we can't easily rewire the successor blocks
- // to disconnect them from the graph, so we just leave the unreachable nodes
- // in the schedule.
- // TODO(9684): Add a scheduled dead-code elimination phase to remove all the
- // subsequent unreachable code from the schedule.
- if (!block_updater_) {
- Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- if (node_changed_callback_.has_value()) {
- (*node_changed_callback_)(graph()->end());
- }
- effect_ = control_ = mcgraph()->Dead();
+ Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ if (node_changed_callback_.has_value()) {
+ (*node_changed_callback_)(graph()->end());
}
+ effect_ = control_ = mcgraph()->Dead();
}
Node* GraphAssembler::AddClonedNode(Node* node) {
DCHECK(node->op()->HasProperty(Operator::kPure));
- if (block_updater_) {
- node = block_updater_->AddClonedNode(node);
- }
-
UpdateEffectControlWith(node);
return node;
}
@@ -1036,10 +628,6 @@ Node* GraphAssembler::AddNode(Node* node) {
}
}
- if (block_updater_) {
- block_updater_->AddNode(node);
- }
-
if (node->opcode() == IrOpcode::kTerminate) {
return node;
}
@@ -1048,12 +636,9 @@ Node* GraphAssembler::AddNode(Node* node) {
return node;
}
-void GraphAssembler::Reset(BasicBlock* block) {
+void GraphAssembler::Reset() {
effect_ = nullptr;
control_ = nullptr;
- if (block_updater_) {
- block_updater_->StartBlock(block);
- }
}
void GraphAssembler::InitializeEffectControl(Node* effect, Node* control) {
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index c7da66acfc..cabae8699d 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -25,8 +25,6 @@ using Boolean = Oddball;
namespace compiler {
-class Schedule;
-class BasicBlock;
class Reducer;
#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
@@ -161,11 +159,9 @@ class GraphAssemblerLabel {
return TNode<T>::UncheckedCast(PhiAt(index));
}
- GraphAssemblerLabel(GraphAssemblerLabelType type, BasicBlock* basic_block,
- int loop_nesting_level,
+ GraphAssemblerLabel(GraphAssemblerLabelType type, int loop_nesting_level,
const std::array<MachineRepresentation, VarCount>& reps)
: type_(type),
- basic_block_(basic_block),
loop_nesting_level_(loop_nesting_level),
representations_(reps) {}
@@ -183,11 +179,9 @@ class GraphAssemblerLabel {
return type_ == GraphAssemblerLabelType::kDeferred;
}
bool IsLoop() const { return type_ == GraphAssemblerLabelType::kLoop; }
- BasicBlock* basic_block() { return basic_block_; }
bool is_bound_ = false;
const GraphAssemblerLabelType type_;
- BasicBlock* const basic_block_;
const int loop_nesting_level_;
size_t merged_count_ = 0;
Node* effect_;
@@ -204,10 +198,10 @@ class V8_EXPORT_PRIVATE GraphAssembler {
GraphAssembler(
MachineGraph* jsgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
- Schedule* schedule = nullptr, bool mark_loop_exits = false);
+ bool mark_loop_exits = false);
virtual ~GraphAssembler();
- void Reset(BasicBlock* block);
+ void Reset();
void InitializeEffectControl(Node* effect, Node* control);
// Create label.
@@ -223,9 +217,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
GraphAssemblerLabel<VarCount> MakeLabel(
std::array<MachineRepresentation, VarCount> reps_array,
GraphAssemblerLabelType type) {
- return GraphAssemblerLabel<VarCount>(
- type, NewBasicBlock(type == GraphAssemblerLabelType::kDeferred),
- loop_nesting_level_, reps_array);
+ return GraphAssemblerLabel<VarCount>(type, loop_nesting_level_, reps_array);
}
// Convenience wrapper for creating non-deferred labels.
@@ -432,10 +424,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
return TNode<T>::UncheckedCast(AddNode(node));
}
- // Finalizes the {block} being processed by the assembler, returning the
- // finalized block (which may be different from the original block).
- BasicBlock* FinalizeCurrentBlock(BasicBlock* block);
-
void ConnectUnreachableToEnd();
// Add an inline reducers such that nodes added to the graph will be run
@@ -452,15 +440,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Effect effect() const { return Effect(effect_); }
protected:
- class BasicBlockUpdater;
-
template <typename... Vars>
void MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label, Vars... vars);
- BasicBlock* NewBasicBlock(bool deferred);
- void BindBasicBlock(BasicBlock* block);
- void GotoBasicBlock(BasicBlock* block);
- void GotoIfBasicBlock(BasicBlock* block, Node* branch,
- IrOpcode::Value goto_if);
V8_INLINE Node* AddClonedNode(Node* node);
@@ -551,10 +532,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, Vars...);
- void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
- Node* if_false_control,
- BasicBlock* if_true_block,
- BasicBlock* if_false_block);
Zone* temp_zone_;
MachineGraph* mcgraph_;
@@ -563,7 +540,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
// {node_changed_callback_} should be called when a node outside the
// subgraph created by the graph assembler changes.
base::Optional<NodeChangedCallback> node_changed_callback_;
- std::unique_ptr<BasicBlockUpdater> block_updater_;
// Inline reducers enable reductions to be performed to nodes as they are
// added to the graph with the graph assembler.
@@ -708,7 +684,6 @@ void GraphAssembler::Bind(GraphAssemblerLabel<VarCount>* label) {
control_ = label->control_;
effect_ = label->effect_;
- BindBasicBlock(label->basic_block());
label->SetBound();
@@ -755,19 +730,12 @@ void GraphAssembler::BranchImpl(Node* condition,
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
- Node* if_true_control = control_ =
- graph()->NewNode(common()->IfTrue(), branch);
+ control_ = graph()->NewNode(common()->IfTrue(), branch);
MergeState(if_true, vars...);
- Node* if_false_control = control_ =
- graph()->NewNode(common()->IfFalse(), branch);
+ control_ = graph()->NewNode(common()->IfFalse(), branch);
MergeState(if_false, vars...);
- if (block_updater_) {
- RecordBranchInBlockUpdater(branch, if_true_control, if_false_control,
- if_true->basic_block(), if_false->basic_block());
- }
-
control_ = nullptr;
effect_ = nullptr;
}
@@ -778,7 +746,6 @@ void GraphAssembler::Goto(GraphAssemblerLabel<sizeof...(Vars)>* label,
DCHECK_NOT_NULL(control());
DCHECK_NOT_NULL(effect());
MergeState(label, vars...);
- GotoBasicBlock(label->basic_block());
control_ = nullptr;
effect_ = nullptr;
@@ -793,7 +760,6 @@ void GraphAssembler::GotoIf(Node* condition,
control_ = graph()->NewNode(common()->IfTrue(), branch);
MergeState(label, vars...);
- GotoIfBasicBlock(label->basic_block(), branch, IrOpcode::kIfTrue);
control_ = AddNode(graph()->NewNode(common()->IfFalse(), branch));
}
@@ -806,7 +772,6 @@ void GraphAssembler::GotoIfNot(Node* condition,
control_ = graph()->NewNode(common()->IfFalse(), branch);
MergeState(label, vars...);
- GotoIfBasicBlock(label->basic_block(), branch, IrOpcode::kIfFalse);
control_ = AddNode(graph()->NewNode(common()->IfTrue(), branch));
}
@@ -850,9 +815,8 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
JSGraphAssembler(
JSGraph* jsgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
- Schedule* schedule = nullptr, bool mark_loop_exits = false)
- : GraphAssembler(jsgraph, zone, node_changed_callback, schedule,
- mark_loop_exits),
+ bool mark_loop_exits = false)
+ : GraphAssembler(jsgraph, zone, node_changed_callback, mark_loop_exits),
jsgraph_(jsgraph) {}
Node* SmiConstant(int32_t value);
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index d06bf7ef47..fe9072eb10 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -50,8 +50,7 @@ namespace compiler {
// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
// mutable) HeapObject and the data is an instance of ObjectData. Its handle
// must be persistent so that the GC can update it at a safepoint. Via this
-// handle, the object can be accessed concurrently to the main thread. To be
-// used the flag --concurrent-inlining must be on.
+// handle, the object can be accessed concurrently to the main thread.
//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
@@ -67,22 +66,18 @@ enum ObjectDataKind {
namespace {
-bool IsReadOnlyHeapObjectForCompiler(HeapObject object) {
+bool IsReadOnlyHeapObjectForCompiler(PtrComprCageBase cage_base,
+ HeapObject object) {
DisallowGarbageCollection no_gc;
// TODO(jgruber): Remove this compiler-specific predicate and use the plain
// heap predicate instead. This would involve removing the special cases for
// builtins.
- return (object.IsCode() && Code::cast(object).is_builtin()) ||
- (object.IsHeapObject() &&
- ReadOnlyHeap::Contains(HeapObject::cast(object)));
+ return (object.IsCode(cage_base) && Code::cast(object).is_builtin()) ||
+ ReadOnlyHeap::Contains(object);
}
} // namespace
-NotConcurrentInliningTag::NotConcurrentInliningTag(JSHeapBroker* broker) {
- CHECK(!broker->is_concurrent_inlining());
-}
-
class ObjectData : public ZoneObject {
public:
ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
@@ -108,17 +103,18 @@ class ObjectData : public ZoneObject {
// handles from read only root table or builtins table which is what
// canonical scope uses as well. For all other objects we should have
// created ObjectData in canonical handle scope on the main thread.
- CHECK_IMPLIES(
- broker->mode() == JSHeapBroker::kDisabled ||
- broker->mode() == JSHeapBroker::kSerializing,
- broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
+ Isolate* isolate = broker->isolate();
+ CHECK_IMPLIES(broker->mode() == JSHeapBroker::kDisabled ||
+ broker->mode() == JSHeapBroker::kSerializing,
+ isolate->handle_scope_data()->canonical_scope != nullptr);
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
kind == kNeverSerializedHeapObject ||
kind == kBackgroundSerializedHeapObject);
- CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
- object->IsHeapObject() && IsReadOnlyHeapObjectForCompiler(
- HeapObject::cast(*object)));
+ CHECK_IMPLIES(
+ kind == kUnserializedReadOnlyHeapObject,
+ object->IsHeapObject() && IsReadOnlyHeapObjectForCompiler(
+ isolate, HeapObject::cast(*object)));
}
#define DECLARE_IS(Name) bool Is##Name() const;
@@ -286,77 +282,10 @@ class JSReceiverData : public HeapObjectData {
class JSObjectData : public JSReceiverData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object, ObjectDataKind kind);
-
- // Recursive serialization of all reachable JSObjects.
- bool SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
- NotConcurrentInliningTag,
- int max_depth = kMaxFastLiteralDepth);
- ObjectData* GetInobjectField(int property_index) const;
-
- // Shallow serialization of {elements}.
- void SerializeElements(JSHeapBroker* broker, NotConcurrentInliningTag);
- bool serialized_elements() const { return serialized_elements_; }
- ObjectData* elements() const;
-
- ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
-
- void SerializeObjectCreateMap(JSHeapBroker* broker, NotConcurrentInliningTag);
-
- // Can be nullptr.
- ObjectData* object_create_map(JSHeapBroker* broker) const {
- if (!serialized_object_create_map_) {
- DCHECK_NULL(object_create_map_);
- TRACE_MISSING(broker, "object_create_map on " << this);
- }
- return object_create_map_;
- }
-
- // This method is only used to assert our invariants.
- bool cow_or_empty_elements_tenured() const;
-
- bool has_extra_serialized_data() const {
- return serialized_as_boilerplate_ || serialized_elements_ ||
- serialized_object_create_map_;
- }
-
- private:
- ObjectData* elements_ = nullptr;
- ObjectData* raw_properties_or_hash_ = nullptr;
- bool cow_or_empty_elements_tenured_ = false;
- // The {serialized_as_boilerplate} flag is set when all recursively
- // reachable JSObjects are serialized.
- bool serialized_as_boilerplate_ = false;
- bool serialized_elements_ = false;
-
- ZoneVector<ObjectData*> inobject_fields_;
-
- bool serialized_object_create_map_ = false;
- ObjectData* object_create_map_ = nullptr;
+ Handle<JSObject> object, ObjectDataKind kind)
+ : JSReceiverData(broker, storage, object, kind) {}
};
-void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
- NotConcurrentInliningTag) {
- if (serialized_object_create_map_) return;
- serialized_object_create_map_ = true;
-
- TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
- Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
-
- if (jsobject->map().is_prototype_map()) {
- Handle<Object> maybe_proto_info(jsobject->map().prototype_info(),
- broker->isolate());
- if (maybe_proto_info->IsPrototypeInfo()) {
- auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
- if (proto_info->HasObjectCreateMap()) {
- DCHECK_NULL(object_create_map_);
- object_create_map_ =
- broker->GetOrCreateData(proto_info->ObjectCreateMap());
- }
- }
- }
-}
-
namespace {
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
@@ -460,59 +389,13 @@ class JSTypedArrayData : public JSObjectData {
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSTypedArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- bool serialized() const { return serialized_; }
-
- bool is_on_heap() const { return is_on_heap_; }
- size_t length() const { return length_; }
- void* data_ptr() const { return data_ptr_; }
-
- ObjectData* buffer() const { return buffer_; }
-
- private:
- bool serialized_ = false;
- bool is_on_heap_ = false;
- size_t length_ = 0;
- void* data_ptr_ = nullptr;
- ObjectData* buffer_ = nullptr;
};
-void JSTypedArrayData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
-
- is_on_heap_ = typed_array->is_on_heap();
- length_ = typed_array->length();
- data_ptr_ = typed_array->DataPtr();
-
- if (!is_on_heap()) {
- DCHECK_NULL(buffer_);
- buffer_ = broker->GetOrCreateData(typed_array->buffer());
- }
-}
-
class JSDataViewData : public JSObjectData {
public:
JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSDataView> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind) {
- DCHECK_EQ(kind, kBackgroundSerializedHeapObject);
- if (!broker->is_concurrent_inlining()) {
- byte_length_ = object->byte_length();
- }
- }
-
- size_t byte_length() const {
- return byte_length_;
- }
-
- private:
- size_t byte_length_ = 0; // Only valid if not concurrent inlining.
+ : JSObjectData(broker, storage, object, kind) {}
};
class JSBoundFunctionData : public JSObjectData {
@@ -641,100 +524,27 @@ class MapData : public HeapObjectData {
InstanceType instance_type() const { return instance_type_; }
int instance_size() const { return instance_size_; }
- byte bit_field() const { return bit_field_; }
- byte bit_field2() const { return bit_field2_; }
uint32_t bit_field3() const { return bit_field3_; }
- bool can_be_deprecated() const { return can_be_deprecated_; }
- bool can_transition() const { return can_transition_; }
- int in_object_properties_start_in_words() const {
- CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
- return in_object_properties_start_in_words_;
- }
int in_object_properties() const {
CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
return in_object_properties_;
}
- int constructor_function_index() const { return constructor_function_index_; }
- int NextFreePropertyIndex() const { return next_free_property_index_; }
int UnusedPropertyFields() const { return unused_property_fields_; }
- bool supports_fast_array_iteration() const {
- return supports_fast_array_iteration_;
- }
- bool supports_fast_array_resize() const {
- return supports_fast_array_resize_;
- }
bool is_abandoned_prototype_map() const {
return is_abandoned_prototype_map_;
}
- void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* GetConstructor() const {
- CHECK(serialized_constructor_);
- return constructor_;
- }
-
- void SerializeBackPointer(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* GetBackPointer() const {
- CHECK(serialized_backpointer_);
- return backpointer_;
- }
-
- bool TrySerializePrototype(JSHeapBroker* broker,
- NotConcurrentInliningTag tag);
- void SerializePrototype(JSHeapBroker* broker, NotConcurrentInliningTag tag) {
- CHECK(TrySerializePrototype(broker, tag));
- }
- ObjectData* prototype() const {
- DCHECK_EQ(serialized_prototype_, prototype_ != nullptr);
- return prototype_;
- }
-
- void SerializeForElementStore(JSHeapBroker* broker,
- NotConcurrentInliningTag tag);
-
- bool has_extra_serialized_data() const {
- return serialized_constructor_ || serialized_backpointer_ ||
- serialized_prototype_ || serialized_for_element_store_;
- }
-
private:
// The following fields should be const in principle, but construction
// requires locking the MapUpdater lock. For this reason, it's easier to
// initialize these inside the constructor body, not in the initializer list.
- // This block of fields will always be serialized.
InstanceType instance_type_;
int instance_size_;
uint32_t bit_field3_;
int unused_property_fields_;
bool is_abandoned_prototype_map_;
int in_object_properties_;
-
- // These fields will only serialized if we are not concurrent inlining.
- byte bit_field_;
- byte bit_field2_;
- bool can_be_deprecated_;
- bool can_transition_;
- int in_object_properties_start_in_words_;
- int constructor_function_index_;
- int next_free_property_index_;
- bool supports_fast_array_iteration_;
- bool supports_fast_array_resize_;
-
- // These extra fields still have to be serialized (e.g prototype_), since
- // those classes have fields themselves which are not being directly read.
- // This means that, for example, even though we can get the prototype itself
- // with direct reads, some of its fields require serialization.
- bool serialized_constructor_ = false;
- ObjectData* constructor_ = nullptr;
-
- bool serialized_backpointer_ = false;
- ObjectData* backpointer_ = nullptr;
-
- bool serialized_prototype_ = false;
- ObjectData* prototype_ = nullptr;
-
- bool serialized_for_element_store_ = false;
};
namespace {
@@ -754,8 +564,7 @@ int InstanceSizeWithMinSlack(JSHeapBroker* broker, MapRef map) {
DCHECK(map.object()->GetBackPointer().IsUndefined(broker->isolate()));
static constexpr bool kConcurrentAccess = true;
- TransitionsAccessor(broker->isolate(), *map.object(), &no_gc,
- kConcurrentAccess)
+ TransitionsAccessor(broker->isolate(), *map.object(), kConcurrentAccess)
.TraverseTransitionTree([&](Map m) {
maps.push_back(broker->CanonicalPersistentHandle(m));
});
@@ -813,17 +622,6 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
initial_map_ref.instance_size();
}
CHECK_GT(initial_map_instance_size_with_min_slack_, 0);
-
- if (!initial_map_->should_access_heap() &&
- !broker->is_concurrent_inlining()) {
- // TODO(neis): This is currently only needed for native_context's
- // object_function, as used by GetObjectCreateMap. If no further use
- // sites show up, we should move this into NativeContextData::Serialize.
- initial_map_->SerializePrototype(broker,
- NotConcurrentInliningTag{broker});
- initial_map_->SerializeConstructor(broker,
- NotConcurrentInliningTag{broker});
- }
}
if (has_initial_map_) {
@@ -925,7 +723,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
}
bool JSFunctionRef::IsConsistentWithHeapState() const {
- DCHECK(broker()->is_concurrent_inlining());
DCHECK(broker()->IsMainThread());
return data()->AsJSFunction()->IsConsistentWithHeapState(broker());
}
@@ -1044,26 +841,6 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
is_abandoned_prototype_map_ = object->is_abandoned_prototype_map();
in_object_properties_ =
object->IsJSObjectMap() ? object->GetInObjectProperties() : 0;
-
- // These fields are only needed to be serialized when not concurrent inlining
- // and thus disabling direct reads.
- if (!broker->is_concurrent_inlining()) {
- bit_field_ = object->relaxed_bit_field();
- bit_field2_ = object->bit_field2();
- can_be_deprecated_ = object->NumberOfOwnDescriptors() > 0
- ? object->CanBeDeprecated()
- : false;
- can_transition_ = object->CanTransition();
- in_object_properties_start_in_words_ =
- object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
- : 0;
- next_free_property_index_ = object->NextFreePropertyIndex();
- constructor_function_index_ = object->IsPrimitiveMap()
- ? object->GetConstructorFunctionIndex()
- : Map::kNoConstructorFunctionIndex;
- supports_fast_array_iteration_ = SupportsFastArrayIteration(broker, object);
- supports_fast_array_resize_ = SupportsFastArrayResize(broker, object);
- }
}
class FixedArrayBaseData : public HeapObjectData {
@@ -1094,40 +871,13 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object, ObjectDataKind kind)
- : JSReceiverData(broker, storage, object, kind),
- inobject_fields_(broker->zone()) {}
-
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* length() const {
- CHECK(serialized_);
- return length_;
- }
-
- private:
- bool serialized_ = false;
- ObjectData* length_ = nullptr;
};
-void JSArrayData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSArrayData::Serialize");
- Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
-
- DCHECK_NULL(length_);
- length_ = broker->GetOrCreateData(jsarray->length());
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -1164,169 +914,6 @@ HEAP_BROKER_OBJECT_LIST(DEFINE_IS)
HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
-ObjectData* JSObjectData::GetInobjectField(int property_index) const {
- CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
- return inobject_fields_[property_index];
-}
-
-bool JSObjectData::cow_or_empty_elements_tenured() const {
- return cow_or_empty_elements_tenured_;
-}
-
-ObjectData* JSObjectData::elements() const {
- CHECK(serialized_elements_);
- return elements_;
-}
-
-void JSObjectData::SerializeElements(JSHeapBroker* broker,
- NotConcurrentInliningTag) {
- if (serialized_elements_) return;
- serialized_elements_ = true;
-
- TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
- Handle<FixedArrayBase> elements_object(boilerplate->elements(),
- broker->isolate());
- DCHECK_NULL(elements_);
- elements_ = broker->GetOrCreateData(elements_object);
- DCHECK(elements_->IsFixedArrayBase());
-}
-
-void MapData::SerializeConstructor(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_constructor_) return;
- serialized_constructor_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeConstructor");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK(!map->IsContextMap());
- DCHECK_NULL(constructor_);
- constructor_ = broker->GetOrCreateData(map->GetConstructor());
-}
-
-void MapData::SerializeBackPointer(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_backpointer_) return;
- serialized_backpointer_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeBackPointer");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(backpointer_);
- DCHECK(!map->IsContextMap());
- backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
-}
-
-bool MapData::TrySerializePrototype(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_prototype_) return true;
-
- TraceScope tracer(broker, this, "MapData::SerializePrototype");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(prototype_);
- prototype_ = broker->TryGetOrCreateData(map->prototype());
- if (prototype_ == nullptr) return false;
- serialized_prototype_ = true;
- return true;
-}
-
-bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
- NotConcurrentInliningTag tag,
- int max_depth) {
- if (serialized_as_boilerplate_) return true;
- // If serialization succeeds, we set this to true at the end.
-
- TraceScope tracer(broker, this,
- "JSObjectData::SerializeAsBoilerplateRecursive");
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
-
- DCHECK_GE(max_depth, 0);
- if (max_depth == 0) return false;
-
- // Serialize the elements.
- Isolate* const isolate = broker->isolate();
- Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
-
- // Boilerplate objects should only be reachable from their allocation site,
- // so it is safe to assume that the elements have not been serialized yet.
-
- bool const empty_or_cow =
- elements_object->length() == 0 ||
- elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
- if (empty_or_cow) {
- cow_or_empty_elements_tenured_ = !ObjectInYoungGeneration(*elements_object);
- }
-
- raw_properties_or_hash_ =
- broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
-
- serialized_elements_ = true;
- elements_ = broker->GetOrCreateData(elements_object);
- DCHECK(elements_->IsFixedArrayBase());
-
- if (!boilerplate->HasFastProperties() ||
- boilerplate->property_array().length() != 0) {
- return false;
- }
-
- // Check the in-object properties.
- inobject_fields_.clear();
- Handle<DescriptorArray> descriptors(
- boilerplate->map().instance_descriptors(isolate), isolate);
- for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != PropertyLocation::kField) continue;
- DCHECK_EQ(PropertyKind::kData, details.kind());
-
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- // Make sure {field_index} agrees with {inobject_properties} on the index of
- // this field.
- DCHECK_EQ(field_index.property_index(),
- static_cast<int>(inobject_fields_.size()));
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- ObjectData* value_data = broker->GetOrCreateData(value);
- inobject_fields_.push_back(value_data);
- if (value_data->IsJSObject() && !value_data->should_access_heap()) {
- if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
- broker, tag, max_depth - 1))
- return false;
- }
- }
- TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
-
- if (empty_or_cow || elements_->should_access_heap()) {
- // No need to do anything here. Empty or copy-on-write elements
- // do not need to be serialized because we only need to store the elements
- // reference to the allocated object.
- } else if (boilerplate->HasSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements =
- Handle<FixedArray>::cast(elements_object);
- int length = elements_object->length();
- for (int i = 0; i < length; i++) {
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- ObjectData* value_data = broker->GetOrCreateData(value);
- if (!value_data->should_access_heap()) {
- if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
- broker, tag, max_depth - 1)) {
- return false;
- }
- }
- }
- }
- } else {
- if (!boilerplate->HasDoubleElements()) return false;
- int const size = FixedDoubleArray::SizeFor(elements_object->length());
- if (size > kMaxRegularHeapObjectSize) return false;
- }
-
- if (IsJSArray() && !broker->is_concurrent_inlining()) {
- AsJSArray()->Serialize(broker, NotConcurrentInliningTag{broker});
- }
-
- serialized_as_boilerplate_ = true;
- return true;
-}
-
bool ObjectRef::equals(const ObjectRef& other) const {
return data_ == other.data_;
}
@@ -1368,50 +955,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
CollectArrayAndObjectPrototypes();
SetTargetNativeContextRef(target_native_context().object());
- if (!is_concurrent_inlining()) {
- Factory* const f = isolate()->factory();
- ObjectData* data;
- data = GetOrCreateData(f->array_buffer_detaching_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->array_constructor_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->array_iterator_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->array_species_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->no_elements_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->promise_hook_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->promise_species_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->promise_then_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- data = GetOrCreateData(f->string_length_protector());
- if (!data->should_access_heap()) {
- data->AsPropertyCell()->Cache(this);
- }
- GetOrCreateData(f->many_closures_cell());
- GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, true));
- TRACE(this, "Finished serializing standard objects");
- }
}
namespace {
@@ -1463,7 +1006,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
return nullptr;
}
- if (IsReadOnlyHeapObjectForCompiler(HeapObject::cast(*object))) {
+ if (IsReadOnlyHeapObjectForCompiler(isolate(), HeapObject::cast(*object))) {
entry = refs_->LookupOrInsert(object.address());
return zone()->New<ObjectData>(this, &entry->value, object,
kUnserializedReadOnlyHeapObject);
@@ -1551,56 +1094,27 @@ base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
return MakeRefAssumeMemoryFence(broker(), maybe_result.value());
}
-void MapRef::SerializeForElementStore(NotConcurrentInliningTag tag) {
- if (data()->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeForElementStore(broker(), tag);
-}
-
-void MapData::SerializeForElementStore(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_for_element_store_) return;
- serialized_for_element_store_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
- // TODO(solanes, v8:7790): This should use MapData methods rather than
- // constructing MapRefs, but it involves non-trivial refactoring and this
- // method should go away anyway once the compiler is fully concurrent.
- MapRef map(broker, this);
- do {
- map.SerializePrototype(tag);
- map = map.prototype().value().map();
- } while (map.IsJSObjectMap() && map.is_stable() &&
- IsFastElementsKind(map.elements_kind()));
-}
-
bool MapRef::HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps) {
DCHECK_NOT_NULL(prototype_maps);
- MapRef prototype_map = prototype().value().map();
+ MapRef prototype_map = prototype().map();
while (prototype_map.oddball_type() != OddballType::kNull) {
if (!prototype_map.IsJSObjectMap() || !prototype_map.is_stable() ||
!IsFastElementsKind(prototype_map.elements_kind())) {
return false;
}
prototype_maps->push_back(prototype_map);
- prototype_map = prototype_map.prototype().value().map();
+ prototype_map = prototype_map.prototype().map();
}
return true;
}
bool MapRef::supports_fast_array_iteration() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return SupportsFastArrayIteration(broker(), object());
- }
- return data()->AsMap()->supports_fast_array_iteration();
+ return SupportsFastArrayIteration(broker(), object());
}
bool MapRef::supports_fast_array_resize() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return SupportsFastArrayResize(broker(), object());
- }
- return data()->AsMap()->supports_fast_array_resize();
+ return SupportsFastArrayResize(broker(), object());
}
namespace {
@@ -1608,7 +1122,6 @@ namespace {
void RecordConsistentJSFunctionViewDependencyIfNeeded(
const JSHeapBroker* broker, const JSFunctionRef& ref, JSFunctionData* data,
JSFunctionData::UsedField used_field) {
- if (!broker->is_concurrent_inlining()) return;
if (!data->has_any_used_field()) {
// Deduplicate dependencies.
broker->dependencies()->DependOnConsistentJSFunctionView(ref);
@@ -1667,75 +1180,39 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
}
base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return TryMakeRef(broker(), object()->raw_properties_or_hash());
- }
- return ObjectRef(broker(), data()->AsJSObject()->raw_properties_or_hash());
+ return TryMakeRef(broker(), object()->raw_properties_or_hash());
}
base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
FieldIndex index) const {
CHECK(index.is_inobject());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- Handle<Object> value;
- {
- DisallowGarbageCollection no_gc;
- PtrComprCageBase cage_base = broker()->cage_base();
- Map current_map = object()->map(cage_base, kAcquireLoad);
-
- // If the map changed in some prior GC epoch, our {index} could be
- // outside the valid bounds of the cached map.
- if (*map().object() != current_map) {
- TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
- return {};
- }
+ Handle<Object> value;
+ {
+ DisallowGarbageCollection no_gc;
+ PtrComprCageBase cage_base = broker()->cage_base();
+ Map current_map = object()->map(cage_base, kAcquireLoad);
- base::Optional<Object> maybe_value =
- object()->RawInobjectPropertyAt(cage_base, current_map, index);
- if (!maybe_value.has_value()) {
- TRACE_BROKER_MISSING(broker(),
- "Unable to safely read property in " << *this);
- return {};
- }
- value = broker()->CanonicalPersistentHandle(maybe_value.value());
+ // If the map changed in some prior GC epoch, our {index} could be
+ // outside the valid bounds of the cached map.
+ if (*map().object() != current_map) {
+ TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
+ return {};
}
- return TryMakeRef(broker(), value);
- }
- JSObjectData* object_data = data()->AsJSObject();
- return ObjectRef(broker(),
- object_data->GetInobjectField(index.property_index()));
-}
-void JSObjectRef::SerializeAsBoilerplateRecursive(
- NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker(), tag);
-}
-
-void AllocationSiteRef::SerializeRecursive(NotConcurrentInliningTag tag) {
- DCHECK(data_->should_access_heap());
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- DCHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- if (boilerplate().has_value()) {
- boilerplate()->SerializeAsBoilerplateRecursive(tag);
- }
- if (nested_site().IsAllocationSite()) {
- nested_site().AsAllocationSite().SerializeRecursive(tag);
+ base::Optional<Object> maybe_value =
+ object()->RawInobjectPropertyAt(cage_base, current_map, index);
+ if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(),
+ "Unable to safely read property in " << *this);
+ return {};
+ }
+ value = broker()->CanonicalPersistentHandle(maybe_value.value());
}
-}
-
-void JSObjectRef::SerializeElements(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeElements(broker(), tag);
+ return TryMakeRef(broker(), value);
}
bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return !ObjectInYoungGeneration(*elements.object());
- }
- return data()->AsJSObject()->cow_or_empty_elements_tenured();
+ return !ObjectInYoungGeneration(*elements.object());
}
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
@@ -1746,10 +1223,7 @@ FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
}
int MapRef::GetInObjectPropertyOffset(int i) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return object()->GetInObjectPropertyOffset(i);
- }
- return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
+ return object()->GetInObjectPropertyOffset(i);
}
PropertyDetails MapRef::GetPropertyDetails(
@@ -1785,7 +1259,6 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
uint32_t index) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
String maybe_char;
auto result = ConcurrentLookupIterator::TryGetOwnChar(
&maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
@@ -1802,7 +1275,6 @@ base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
}
bool StringRef::SupportedStringKind() const {
- if (!broker()->is_concurrent_inlining()) return true;
return IsInternalizedString() || object()->IsThinString();
}
@@ -1927,31 +1399,14 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
-// kBackgroundSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
- return object()->name(); \
- }
-
-// Like BIMODAL_ACCESSOR except that we force a direct heap access if
-// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
-// This is because we identified the method to be safe to use direct heap
-// access, but the holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
- result holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
- return ObjectRef::data()->As##holder()->name(); \
- }
-#define BIMODAL_ACCESSOR_WITH_FLAG_B(holder, field, name, BitField) \
- typename BitField::FieldType holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
- return BitField::decode(ObjectRef::data()->As##holder()->field()); \
- }
-
#define HEAP_ACCESSOR_C(holder, result, name) \
result holder##Ref::name() const { return object()->name(); }
+#define HEAP_ACCESSOR_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ return object()->name(); \
+ }
+
ObjectRef AllocationSiteRef::nested_site() const {
return MakeRefAssumeMemoryFence(broker(), object()->nested_site());
}
@@ -1983,52 +1438,44 @@ uint64_t HeapNumberRef::value_as_bits() const {
}
JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
ObjectRef JSBoundFunctionRef::bound_this() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
// Immutable after initialization.
-BIMODAL_ACCESSOR_WITH_FLAG_C(JSDataView, size_t, byte_length)
-
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
- Map::Bits2::ElementsKindBits)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
- Map::Bits3::IsDictionaryMapBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_deprecated,
- Map::Bits3::IsDeprecatedBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, NumberOfOwnDescriptors,
- Map::Bits3::NumberOfOwnDescriptorsBits)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_migration_target,
- Map::Bits3::IsMigrationTargetBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_prototype_slot,
- Map::Bits1::HasPrototypeSlotBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_access_check_needed,
- Map::Bits1::IsAccessCheckNeededBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_callable,
- Map::Bits1::IsCallableBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_indexed_interceptor,
- Map::Bits1::HasIndexedInterceptorBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_constructor,
- Map::Bits1::IsConstructorBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_undetectable,
- Map::Bits1::IsUndetectableBit)
+HEAP_ACCESSOR_C(JSDataView, size_t, byte_length)
+
+HEAP_ACCESSOR_B(Map, bit_field2, elements_kind, Map::Bits2::ElementsKindBits)
+HEAP_ACCESSOR_B(Map, bit_field3, is_dictionary_map,
+ Map::Bits3::IsDictionaryMapBit)
+HEAP_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::Bits3::IsDeprecatedBit)
+HEAP_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
+ Map::Bits3::NumberOfOwnDescriptorsBits)
+HEAP_ACCESSOR_B(Map, bit_field3, is_migration_target,
+ Map::Bits3::IsMigrationTargetBit)
+HEAP_ACCESSOR_B(Map, bit_field, has_prototype_slot,
+ Map::Bits1::HasPrototypeSlotBit)
+HEAP_ACCESSOR_B(Map, bit_field, is_access_check_needed,
+ Map::Bits1::IsAccessCheckNeededBit)
+HEAP_ACCESSOR_B(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
+HEAP_ACCESSOR_B(Map, bit_field, has_indexed_interceptor,
+ Map::Bits1::HasIndexedInterceptorBit)
+HEAP_ACCESSOR_B(Map, bit_field, is_constructor, Map::Bits1::IsConstructorBit)
+HEAP_ACCESSOR_B(Map, bit_field, is_undetectable, Map::Bits1::IsUndetectableBit)
BIMODAL_ACCESSOR_C(Map, int, instance_size)
-BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
+HEAP_ACCESSOR_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
-BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
+HEAP_ACCESSOR_C(Map, InstanceType, instance_type)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
@@ -2083,13 +1530,13 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value() || prototype->IsNull()) return not_found;
- if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
+ HeapObjectRef prototype = receiver_map.prototype();
+ if (prototype.IsNull()) return not_found;
+ if (!expected_receiver_type->IsTemplateFor(prototype.object()->map())) {
return not_found;
}
return HolderLookupResult(CallOptimization::kHolderFound,
- prototype->AsJSObject());
+ prototype.AsJSObject());
}
ObjectRef CallHandlerInfoRef::data() const {
@@ -2125,10 +1572,8 @@ BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
const {
return broker()->IsMainThread()
- ? object()->GetInlineability(broker()->isolate(),
- broker()->is_turboprop())
- : object()->GetInlineability(broker()->local_isolate(),
- broker()->is_turboprop());
+ ? object()->GetInlineability(broker()->isolate())
+ : object()->GetInlineability(broker()->local_isolate());
}
ObjectRef FeedbackCellRef::value() const {
@@ -2148,65 +1593,46 @@ DescriptorArrayRef MapRef::instance_descriptors() const {
object()->instance_descriptors(broker()->isolate(), kAcquireLoad));
}
-base::Optional<HeapObjectRef> MapRef::prototype() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return TryMakeRef(broker(), HeapObject::cast(object()->prototype()),
- kAssumeMemoryFence);
- }
- ObjectData* prototype_data = data()->AsMap()->prototype();
- if (prototype_data == nullptr) {
- TRACE_BROKER_MISSING(broker(), "prototype for map " << *this);
- return {};
- }
- return HeapObjectRef(broker(), prototype_data);
+HeapObjectRef MapRef::prototype() const {
+ return MakeRefAssumeMemoryFence(broker(),
+ HeapObject::cast(object()->prototype()));
}
MapRef MapRef::FindRootMap() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// TODO(solanes, v8:7790): Consider caching the result of the root map.
return MakeRefAssumeMemoryFence(broker(),
object()->FindRootMap(broker()->isolate()));
}
ObjectRef MapRef::GetConstructor() const {
- if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
- }
- return ObjectRef(broker(), data()->AsMap()->GetConstructor());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
HeapObjectRef MapRef::GetBackPointer() const {
- if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(
- broker(), HeapObject::cast(object()->GetBackPointer()));
- }
- return HeapObjectRef(broker(), ObjectRef::data()->AsMap()->GetBackPointer());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(),
+ HeapObject::cast(object()->GetBackPointer()));
}
bool JSTypedArrayRef::is_on_heap() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Underlying field written 1. during initialization or 2. with release-store.
return object()->is_on_heap(kAcquireLoad);
}
size_t JSTypedArrayRef::length() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
// Immutable after initialization.
return object()->length();
}
HeapObjectRef JSTypedArrayRef::buffer() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
// Immutable after initialization.
return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
// Underlying field written 1. during initialization or 2. protected by the
// is_on_heap release/acquire semantics (external_pointer store happens-before
@@ -2217,15 +1643,11 @@ void* JSTypedArrayRef::data_ptr() const {
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(IsInobjectSlackTrackingInProgress);
- return Map::Bits3::ConstructionCounterBits::decode(
- data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
+ return object()->IsInobjectSlackTrackingInProgress();
}
int MapRef::constructor_function_index() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(GetConstructorFunctionIndex);
- CHECK(IsPrimitiveMap());
- return data()->AsMap()->constructor_function_index();
+ return object()->GetConstructorFunctionIndex();
}
bool MapRef::is_stable() const {
@@ -2233,20 +1655,12 @@ bool MapRef::is_stable() const {
return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
}
-bool MapRef::CanBeDeprecated() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(CanBeDeprecated);
- CHECK_GT(NumberOfOwnDescriptors(), 0);
- return data()->AsMap()->can_be_deprecated();
-}
+bool MapRef::CanBeDeprecated() const { return object()->CanBeDeprecated(); }
-bool MapRef::CanTransition() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(CanTransition);
- return data()->AsMap()->can_transition();
-}
+bool MapRef::CanTransition() const { return object()->CanTransition(); }
int MapRef::GetInObjectPropertiesStartInWords() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(GetInObjectPropertiesStartInWords);
- return data()->AsMap()->in_object_properties_start_in_words();
+ return object()->GetInObjectPropertiesStartInWords();
}
int MapRef::GetInObjectProperties() const {
@@ -2384,7 +1798,6 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
CompilationDependencies* dependencies) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
*elements_ref.object(), map().elements_kind(), index);
if (!maybe_element.has_value()) return {};
@@ -2399,7 +1812,6 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
FixedArrayBase elements, ElementsKind elements_kind, uint32_t index) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
DCHECK_LE(index, JSObject::kMaxElementIndex);
Handle<JSObject> holder = object();
@@ -2444,7 +1856,6 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
CompilationDependencies* dependencies) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
broker(), *this, field_representation, index);
if (result.has_value()) {
@@ -2456,7 +1867,6 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
InternalIndex index, CompilationDependencies* dependencies) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
base::Optional<ObjectRef> result =
GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
@@ -2475,17 +1885,12 @@ ObjectRef JSArrayRef::GetBoilerplateLength() const {
}
base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return TryMakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
- } else {
- return ObjectRef{broker(), data()->AsJSArray()->length()};
- }
+ return TryMakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
}
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
FixedArrayBaseRef elements_ref, uint32_t index) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Note: we'd like to check `elements_ref == elements()` here, but due to
// concurrency this may not hold. The code below must be able to deal with
// concurrent `elements` modifications.
@@ -2588,15 +1993,7 @@ base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
base::Optional<FixedArrayBaseRef> JSObjectRef::elements(
RelaxedLoadTag tag) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return TryMakeRef(broker(), object()->elements(tag));
- }
- const JSObjectData* d = data()->AsJSObject();
- if (!d->serialized_elements()) {
- TRACE(broker(), "'elements' on " << this);
- return base::nullopt;
- }
- return FixedArrayBaseRef(broker(), d->elements());
+ return TryMakeRef(broker(), object()->elements(tag));
}
int FixedArrayBaseRef::length() const {
@@ -2653,13 +2050,6 @@ bool NameRef::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-void RegExpBoilerplateDescriptionRef::Serialize(NotConcurrentInliningTag) {
- // TODO(jgruber,v8:7790): Remove once member types are also never serialized.
- // Until then, we have to call these functions once on the main thread to
- // trigger serialization.
- data();
-}
-
Handle<Object> ObjectRef::object() const {
return data_->object();
}
@@ -2775,7 +2165,10 @@ BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C
CodeRef JSFunctionRef::code() const {
- return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
+ CodeT code = object()->code(kAcquireLoad);
+ // Safe to do a relaxed conversion to Code here since CodeT::code field is
+ // modified only by GC and the CodeT was acquire-loaded.
+ return MakeRefAssumeMemoryFence(broker(), FromCodeT(code, kRelaxedLoad));
}
NativeContextRef JSFunctionRef::native_context() const {
@@ -2802,55 +2195,24 @@ ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
return MakeRefAssumeMemoryFence(broker(), object()->scope_info(kAcquireLoad));
}
-void JSObjectRef::SerializeObjectCreateMap(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- data()->AsJSObject()->SerializeObjectCreateMap(broker(), tag);
-}
-
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- Handle<Map> map_handle = Handle<Map>::cast(map().object());
- // Note: implemented as an acquire-load.
- if (!map_handle->is_prototype_map()) return {};
-
- Handle<Object> maybe_proto_info = broker()->CanonicalPersistentHandle(
- map_handle->prototype_info(kAcquireLoad));
- if (!maybe_proto_info->IsPrototypeInfo()) return {};
-
- MaybeObject maybe_object_create_map =
- Handle<PrototypeInfo>::cast(maybe_proto_info)
- ->object_create_map(kAcquireLoad);
- if (!maybe_object_create_map->IsWeak()) return {};
-
- return MapRef(broker(),
- broker()->GetOrCreateData(
- maybe_object_create_map->GetHeapObjectAssumeWeak(),
- kAssumeMemoryFence));
- }
- ObjectData* map_data = data()->AsJSObject()->object_create_map(broker());
- if (map_data == nullptr) return base::Optional<MapRef>();
- if (map_data->should_access_heap()) {
- return TryMakeRef(broker(), Handle<Map>::cast(map_data->object()));
- }
- return MapRef(broker(), map_data->AsMap());
-}
+ Handle<Map> map_handle = Handle<Map>::cast(map().object());
+ // Note: implemented as an acquire-load.
+ if (!map_handle->is_prototype_map()) return {};
-void MapRef::SerializeBackPointer(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeBackPointer(broker(), tag);
-}
+ Handle<Object> maybe_proto_info = broker()->CanonicalPersistentHandle(
+ map_handle->prototype_info(kAcquireLoad));
+ if (!maybe_proto_info->IsPrototypeInfo()) return {};
-bool MapRef::TrySerializePrototype(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return true;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsMap()->TrySerializePrototype(broker(), tag);
-}
+ MaybeObject maybe_object_create_map =
+ Handle<PrototypeInfo>::cast(maybe_proto_info)
+ ->object_create_map(kAcquireLoad);
+ if (!maybe_object_create_map->IsWeak()) return {};
-void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
- CHECK(TrySerializePrototype(tag));
+ return MapRef(broker(),
+ broker()->GetOrCreateData(
+ maybe_object_create_map->GetHeapObjectAssumeWeak(),
+ kAssumeMemoryFence));
}
bool PropertyCellRef::Cache() const {
@@ -2861,14 +2223,12 @@ bool PropertyCellRef::Cache() const {
}
bool NativeContextRef::GlobalIsDetached() const {
- base::Optional<ObjectRef> proxy_proto =
- global_proxy_object().map().prototype();
- return !proxy_proto.has_value() || !proxy_proto->equals(global_object());
+ ObjectRef proxy_proto = global_proxy_object().map().prototype();
+ return !proxy_proto.equals(global_object());
}
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
NameRef const& name) const {
- DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<PropertyCell> maybe_cell =
ConcurrentLookupIterator::TryGetPropertyCell(
broker()->isolate(), broker()->local_isolate_or_isolate(),
@@ -2903,12 +2263,10 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef BIMODAL_ACCESSOR_WITH_FLAG_B
-#undef BIMODAL_ACCESSOR_WITH_FLAG_C
+#undef HEAP_ACCESSOR_B
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
-#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 7945feb2bc..8f62862f9b 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -59,13 +59,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
-// Clarifies in function signatures that a method may only be called when
-// concurrent inlining is disabled.
-class NotConcurrentInliningTag final {
- public:
- explicit NotConcurrentInliningTag(JSHeapBroker* broker);
-};
-
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
@@ -424,13 +417,9 @@ class JSObjectRef : public JSReceiverRef {
// relaxed read. This is to ease the transition to unserialized (or
// background-serialized) elements.
base::Optional<FixedArrayBaseRef> elements(RelaxedLoadTag) const;
- void SerializeElements(NotConcurrentInliningTag tag);
bool IsElementsTenured(const FixedArrayBaseRef& elements);
- void SerializeObjectCreateMap(NotConcurrentInliningTag tag);
base::Optional<MapRef> GetObjectCreateMap() const;
-
- void SerializeAsBoilerplateRecursive(NotConcurrentInliningTag tag);
};
class JSDataViewRef : public JSObjectRef {
@@ -489,8 +478,6 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
Handle<RegExpBoilerplateDescription> object() const;
- void Serialize(NotConcurrentInliningTag tag);
-
FixedArrayRef data() const;
StringRef source() const;
int flags() const;
@@ -577,8 +564,6 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
- void Serialize(NotConcurrentInliningTag tag);
-
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
@@ -662,8 +647,6 @@ class AllocationSiteRef : public HeapObjectRef {
AllocationType GetAllocationType() const;
ObjectRef nested_site() const;
- void SerializeRecursive(NotConcurrentInliningTag tag);
-
base::Optional<JSObjectRef> boilerplate() const;
ElementsKind GetElementsKind() const;
bool CanInlineCall() const;
@@ -725,17 +708,10 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
- void SerializeBackPointer(NotConcurrentInliningTag tag);
HeapObjectRef GetBackPointer() const;
- void SerializePrototype(NotConcurrentInliningTag tag);
- // TODO(neis): We should be able to remove TrySerializePrototype once
- // concurrent-inlining is always on. Then we can also change the return type
- // of prototype() back to HeapObjectRef.
- bool TrySerializePrototype(NotConcurrentInliningTag tag);
- base::Optional<HeapObjectRef> prototype() const;
+ HeapObjectRef prototype() const;
- void SerializeForElementStore(NotConcurrentInliningTag tag);
bool HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps);
@@ -1028,8 +1004,8 @@ class CodeRef : public HeapObjectRef {
unsigned GetInlinedBytecodeSize() const;
};
-// CodeDataContainerRef doesn't appear to be used, but it is used via CodeT when
-// V8_EXTERNAL_CODE_SPACE is defined.
+// CodeDataContainerRef doesn't appear to be used directly, but it is used via
+// CodeTRef when V8_EXTERNAL_CODE_SPACE is enabled.
class CodeDataContainerRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(CodeDataContainer, HeapObjectRef)
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 9a859e4072..2d1d55c506 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -268,6 +268,13 @@ void Int64Lowering::LowerNode(Node* node) {
MachineType::Int32(), access.write_barrier_kind)));
break;
}
+ case IrOpcode::kLoadImmutableFromObject: {
+ ObjectAccess access = ObjectAccessOf(node->op());
+ LowerLoadOperator(node, access.machine_type.representation(),
+ simplified()->LoadImmutableFromObject(ObjectAccess(
+ MachineType::Int32(), access.write_barrier_kind)));
+ break;
+ }
case IrOpcode::kStore: {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
LowerStoreOperator(
@@ -291,6 +298,13 @@ void Int64Lowering::LowerNode(Node* node) {
MachineType::Int32(), access.write_barrier_kind)));
break;
}
+ case IrOpcode::kInitializeImmutableInObject: {
+ ObjectAccess access = ObjectAccessOf(node->op());
+ LowerStoreOperator(node, access.machine_type.representation(),
+ simplified()->InitializeImmutableInObject(ObjectAccess(
+ MachineType::Int32(), access.write_barrier_kind)));
+ break;
+ }
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering(signature());
// Only exchange the node if the parameter count actually changed.
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 0b709ad695..7935a68c62 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -6,7 +6,6 @@
#include <functional>
-#include "include/v8-fast-api-calls.h"
#include "src/api/api-inl.h"
#include "src/base/small-vector.h"
#include "src/builtins/builtins-promise.h"
@@ -19,6 +18,7 @@
#include "src/compiler/allocation-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/fast-api-calls.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
@@ -65,7 +65,7 @@ class JSCallReducerAssembler : public JSGraphAssembler {
reducer->JSGraphForGraphAssembler(),
reducer->ZoneForGraphAssembler(),
[reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
- nullptr, kMarkLoopExits),
+ kMarkLoopExits),
dependencies_(reducer->dependencies()),
node_(node),
outermost_catch_scope_(
@@ -265,6 +265,8 @@ class JSCallReducerAssembler : public JSGraphAssembler {
// Common operators.
TNode<Smi> TypeGuardUnsignedSmall(TNode<Object> value);
+ TNode<Number> TypeGuardNumber(TNode<Object> value);
+ TNode<String> TypeGuardString(TNode<Object> value);
TNode<Object> TypeGuardNonInternal(TNode<Object> value);
TNode<Number> TypeGuardFixedArrayLength(TNode<Object> value);
TNode<Object> Call4(const Callable& callable, TNode<Context> context,
@@ -519,12 +521,15 @@ class JSCallReducerAssembler : public JSGraphAssembler {
};
ForBuilder0 ForZeroUntil(TNode<Number> excluded_limit) {
- TNode<Number> initial_value = ZeroConstant();
+ return ForStartUntil(ZeroConstant(), excluded_limit);
+ }
+
+ ForBuilder0 ForStartUntil(TNode<Number> start, TNode<Number> excluded_limit) {
auto cond = [=](TNode<Number> i) {
return NumberLessThan(i, excluded_limit);
};
auto step = [=](TNode<Number> i) { return NumberAdd(i, OneConstant()); };
- return {this, initial_value, cond, step};
+ return {this, start, cond, step};
}
ForBuilder0 Forever(TNode<Number> initial_value, const StepFunction1& step) {
@@ -857,7 +862,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- CodeTRef code = MakeRef(broker_, ToCodeT(*callable.code()));
+ CodeTRef code = MakeRef(broker_, *callable.code());
return AddNode<JSFunction>(graph()->NewNode(
javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
context, effect(), control()));
@@ -1047,6 +1052,14 @@ TNode<Smi> JSCallReducerAssembler::TypeGuardUnsignedSmall(TNode<Object> value) {
return TNode<Smi>::UncheckedCast(TypeGuard(Type::UnsignedSmall(), value));
}
+TNode<Number> JSCallReducerAssembler::TypeGuardNumber(TNode<Object> value) {
+ return TNode<Smi>::UncheckedCast(TypeGuard(Type::Number(), value));
+}
+
+TNode<String> JSCallReducerAssembler::TypeGuardString(TNode<Object> value) {
+ return TNode<String>::UncheckedCast(TypeGuard(Type::String(), value));
+}
+
TNode<Object> JSCallReducerAssembler::TypeGuardNonInternal(
TNode<Object> value) {
return TNode<Object>::UncheckedCast(TypeGuard(Type::NonInternal(), value));
@@ -1983,38 +1996,33 @@ namespace {
Callable GetCallableForArrayIndexOfIncludes(ArrayIndexOfIncludesVariant variant,
ElementsKind elements_kind,
Isolate* isolate) {
+ DCHECK(IsHoleyElementsKind(elements_kind));
if (variant == ArrayIndexOfIncludesVariant::kIndexOf) {
switch (elements_kind) {
- case PACKED_SMI_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
- case PACKED_ELEMENTS:
case HOLEY_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIndexOfSmiOrObject);
- case PACKED_DOUBLE_ELEMENTS:
- return Builtins::CallableFor(isolate,
- Builtin::kArrayIndexOfPackedDoubles);
- default:
- DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
+ case HOLEY_DOUBLE_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIndexOfHoleyDoubles);
+ default: {
+ UNREACHABLE();
+ }
}
} else {
DCHECK_EQ(variant, ArrayIndexOfIncludesVariant::kIncludes);
switch (elements_kind) {
- case PACKED_SMI_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
- case PACKED_ELEMENTS:
case HOLEY_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIncludesSmiOrObject);
- case PACKED_DOUBLE_ELEMENTS:
- return Builtins::CallableFor(isolate,
- Builtin::kArrayIncludesPackedDoubles);
- default:
- DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
+ case HOLEY_DOUBLE_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIncludesHoleyDoubles);
+ default: {
+ UNREACHABLE();
+ }
}
}
UNREACHABLE();
@@ -2030,13 +2038,7 @@ IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeIndexOfIncludes(
TNode<Object> search_element = ArgumentOrUndefined(0);
TNode<Object> from_index = ArgumentOrZero(1);
- // TODO(jgruber): This currently only reduces to a stub call. Create a full
- // reduction (similar to other higher-order array builtins) instead of
- // lowering to a builtin call. E.g. Array.p.every and Array.p.some have almost
- // identical functionality.
-
- TNode<Number> length = LoadJSArrayLength(receiver, kind);
- TNode<FixedArrayBase> elements = LoadElements(receiver);
+ TNode<Number> original_length = LoadJSArrayLength(receiver, kind);
const bool have_from_index = ArgumentCount() > 1;
if (have_from_index) {
@@ -2046,18 +2048,279 @@ IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeIndexOfIncludes(
// therefore needs to be added to the length. If the result is still
// negative, it needs to be clamped to 0.
TNode<Boolean> cond = NumberLessThan(from_index_smi, ZeroConstant());
- from_index = SelectIf<Number>(cond)
- .Then(_ {
- return NumberMax(NumberAdd(length, from_index_smi),
- ZeroConstant());
- })
- .Else(_ { return from_index_smi; })
- .ExpectFalse()
- .Value();
+ from_index =
+ SelectIf<Number>(cond)
+ .Then(_ {
+ return NumberMax(NumberAdd(original_length, from_index_smi),
+ ZeroConstant());
+ })
+ .Else(_ { return from_index_smi; })
+ .ExpectFalse()
+ .Value();
}
- return Call4(GetCallableForArrayIndexOfIncludes(variant, kind, isolate()),
- context, elements, search_element, length, from_index);
+ if (IsHoleyElementsKind(kind)) {
+ TNode<FixedArrayBase> elements = LoadElements(receiver);
+ return Call4(GetCallableForArrayIndexOfIncludes(variant, kind, isolate()),
+ context, elements, search_element, original_length,
+ from_index);
+ }
+
+ auto out = MakeLabel(MachineRepresentation::kTagged);
+
+ DCHECK(IsFastPackedElementsKind(kind));
+
+ Node* fail_value;
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ fail_value = FalseConstant();
+ } else {
+ fail_value = NumberConstant(-1);
+ }
+ TNode<FixedArrayBase> elements = LoadElements(receiver);
+
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS: {
+ TNode<Boolean> is_finite_number = AddNode<Boolean>(graph()->NewNode(
+ simplified()->ObjectIsFiniteNumber(), search_element));
+ GotoIfNot(is_finite_number, &out, fail_value);
+
+ TNode<Number> search_element_number = TypeGuardNumber(search_element);
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ // if from_index is not smi, it will early bailout, so here
+ // we could LoadElement directly.
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+
+ auto cond = NumberEqual(search_element_number,
+ TNode<Number>::UncheckedCast(element));
+
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ GotoIf(cond, &out, TrueConstant());
+ } else {
+ GotoIf(cond, &out, k);
+ }
+ });
+ Goto(&out, fail_value);
+ break;
+ }
+ case PACKED_DOUBLE_ELEMENTS: {
+ auto nan_loop = MakeLabel();
+ TNode<Boolean> is_number = AddNode<Boolean>(
+ graph()->NewNode(simplified()->ObjectIsNumber(), search_element));
+ GotoIfNot(is_number, &out, fail_value);
+
+ TNode<Number> search_element_number = TypeGuardNumber(search_element);
+
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ // https://tc39.es/ecma262/#sec-array.prototype.includes use
+ // SameValueZero, NaN == NaN, so we need to check.
+ TNode<Boolean> is_nan = AddNode<Boolean>(graph()->NewNode(
+ simplified()->NumberIsNaN(), search_element_number));
+ GotoIf(is_nan, &nan_loop);
+ } else {
+ DCHECK(variant == ArrayIndexOfIncludesVariant::kIndexOf);
+ // https://tc39.es/ecma262/#sec-array.prototype.indexOf use
+ // IsStrictEqual, NaN != NaN, NaN compare will be handled by
+ // NumberEqual.
+ }
+
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+
+ auto cond = NumberEqual(search_element_number,
+ TNode<Number>::UncheckedCast(element));
+
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ GotoIf(cond, &out, TrueConstant());
+ } else {
+ GotoIf(cond, &out, k);
+ }
+ });
+ Goto(&out, fail_value);
+
+ // https://tc39.es/ecma262/#sec-array.prototype.includes use
+ // SameValueZero, NaN == NaN, we need to bind nan_loop to check.
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ Bind(&nan_loop);
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+
+ auto cond = AddNode<Boolean>(
+ graph()->NewNode(simplified()->NumberIsNaN(),
+ TNode<Number>::UncheckedCast(element)));
+ GotoIf(cond, &out, TrueConstant());
+ });
+ Goto(&out, fail_value);
+ }
+ break;
+ }
+ case PACKED_ELEMENTS: {
+ auto number_loop = MakeLabel();
+ auto not_number = MakeLabel();
+ auto string_loop = MakeLabel();
+ auto bigint_loop = MakeLabel();
+ auto ident_loop = MakeLabel();
+
+ auto is_number = AddNode(
+ graph()->NewNode(simplified()->ObjectIsNumber(), search_element));
+ GotoIf(is_number, &number_loop);
+ Goto(&not_number);
+
+ Bind(&not_number);
+ auto is_string = AddNode(
+ graph()->NewNode(simplified()->ObjectIsString(), search_element));
+ GotoIf(is_string, &string_loop);
+ auto is_bigint = AddNode(
+ graph()->NewNode(simplified()->ObjectIsBigInt(), search_element));
+ GotoIf(is_bigint, &bigint_loop);
+
+ Goto(&ident_loop);
+ Bind(&ident_loop);
+
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ // if from_index is not smi, it will early bailout, so here
+ // we could LoadElement directly.
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+ auto cond = AddNode(graph()->NewNode(simplified()->ReferenceEqual(),
+ search_element, element));
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ GotoIf(cond, &out, TrueConstant());
+ } else {
+ GotoIf(cond, &out, k);
+ }
+ });
+
+ Goto(&out, fail_value);
+
+ Bind(&number_loop);
+ TNode<Number> search_element_number = TypeGuardNumber(search_element);
+
+ auto nan_loop = MakeLabel();
+
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ // https://tc39.es/ecma262/#sec-array.prototype.includes use
+ // SameValueZero, NaN == NaN, so we need to check.
+ auto is_nan = AddNode(graph()->NewNode(simplified()->NumberIsNaN(),
+ search_element_number));
+ GotoIf(is_nan, &nan_loop);
+ } else {
+ DCHECK(variant == ArrayIndexOfIncludesVariant::kIndexOf);
+ // https://tc39.es/ecma262/#sec-array.prototype.indexOf use
+ // IsStrictEqual, NaN != NaN, NaN compare will be handled by
+ // NumberEqual.
+ }
+
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ auto continue_label = MakeLabel();
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+
+ auto is_number = AddNode(
+ graph()->NewNode(simplified()->ObjectIsNumber(), element));
+
+ GotoIfNot(is_number, &continue_label);
+
+ TNode<Number> element_number = TypeGuardNumber(element);
+ auto cond = NumberEqual(search_element_number, element_number);
+ GotoIfNot(cond, &continue_label);
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ Goto(&out, TrueConstant());
+ } else {
+ Goto(&out, k);
+ }
+
+ Bind(&continue_label);
+ });
+ Goto(&out, fail_value);
+
+ // https://tc39.es/ecma262/#sec-array.prototype.includes use
+ // SameValueZero, NaN == NaN, we need to bind nan_loop to check.
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ Bind(&nan_loop);
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+
+ auto cond = AddNode<Boolean>(
+ graph()->NewNode(simplified()->ObjectIsNaN(), element));
+ GotoIf(cond, &out, TrueConstant());
+ });
+ Goto(&out, fail_value);
+ }
+
+ Bind(&string_loop);
+ TNode<String> search_element_string = TypeGuardString(search_element);
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ auto continue_label = MakeLabel();
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+ auto is_string = AddNode(
+ graph()->NewNode(simplified()->ObjectIsString(), element));
+
+ GotoIfNot(is_string, &continue_label);
+
+ TNode<String> element_string = TypeGuardString(element);
+ auto cond = AddNode(graph()->NewNode(simplified()->StringEqual(),
+ element_string,
+ search_element_string));
+ GotoIfNot(cond, &continue_label);
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ Goto(&out, TrueConstant());
+ } else {
+ Goto(&out, k);
+ }
+
+ Bind(&continue_label);
+ });
+ Goto(&out, fail_value);
+
+ Bind(&bigint_loop);
+ ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
+ .Do([&](TNode<Number> k) {
+ auto continue_label = MakeLabel();
+ TNode<Object> element = LoadElement<Object>(
+ AccessBuilder::ForFixedArrayElement(kind), elements, k);
+ auto is_bigint = AddNode(
+ graph()->NewNode(simplified()->ObjectIsBigInt(), element));
+
+ GotoIfNot(is_bigint, &continue_label);
+ auto cond = AddNode<Object>(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kBigIntEqualToBigInt, 2),
+ search_element, element, context, FrameStateInput(), effect(),
+ control()));
+
+ GotoIfNot(ToBoolean(cond), &continue_label);
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ Goto(&out, TrueConstant());
+ } else {
+ Goto(&out, k);
+ }
+
+ Bind(&continue_label);
+ });
+ Goto(&out, fail_value);
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ }
+ }
+ Bind(&out);
+ if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
+ return out.PhiAt<Boolean>(0);
+ } else {
+ return out.PhiAt<Number>(0);
+ }
}
namespace {
@@ -2628,18 +2891,15 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
MapRef first_receiver_map = receiver_maps[0];
bool const is_constructor = first_receiver_map.is_constructor();
- base::Optional<HeapObjectRef> const prototype =
- first_receiver_map.prototype();
- if (!prototype.has_value()) return inference.NoChange();
+ HeapObjectRef prototype = first_receiver_map.prototype();
for (const MapRef& receiver_map : receiver_maps) {
- base::Optional<HeapObjectRef> map_prototype = receiver_map.prototype();
- if (!map_prototype.has_value()) return inference.NoChange();
+ HeapObjectRef map_prototype = receiver_map.prototype();
// Check for consistency among the {receiver_maps}.
- if (!map_prototype->equals(*prototype) ||
+ if (!map_prototype.equals(prototype) ||
receiver_map.is_constructor() != is_constructor ||
- !InstanceTypeChecker::IsJSFunctionOrBoundFunction(
+ !InstanceTypeChecker::IsJSFunctionOrBoundFunctionOrWrappedFunction(
receiver_map.instance_type())) {
return inference.NoChange();
}
@@ -2654,16 +2914,18 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// This mirrors the checks done in builtins-function-gen.cc at
// runtime otherwise.
int minimum_nof_descriptors =
- std::max({JSFunctionOrBoundFunction::kLengthDescriptorIndex,
- JSFunctionOrBoundFunction::kNameDescriptorIndex}) +
+ std::max(
+ {JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex,
+ JSFunctionOrBoundFunctionOrWrappedFunction::
+ kNameDescriptorIndex}) +
1;
if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
return inference.NoChange();
}
const InternalIndex kLengthIndex(
- JSFunctionOrBoundFunction::kLengthDescriptorIndex);
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex);
const InternalIndex kNameIndex(
- JSFunctionOrBoundFunction::kNameDescriptorIndex);
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex);
ReadOnlyRoots roots(isolate());
StringRef length_string = MakeRef(broker(), roots.length_string_handle());
StringRef name_string = MakeRef(broker(), roots.name_string_handle());
@@ -2690,7 +2952,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
MapRef map = is_constructor
? native_context().bound_function_with_constructor_map()
: native_context().bound_function_without_constructor_map();
- if (!map.prototype().value().equals(*prototype)) return inference.NoChange();
+ if (!map.prototype().equals(prototype)) return inference.NoChange();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -2813,16 +3075,14 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
ZoneVector<MapRef> const& object_maps = inference.GetMaps();
MapRef candidate_map = object_maps[0];
- base::Optional<HeapObjectRef> candidate_prototype = candidate_map.prototype();
- if (!candidate_prototype.has_value()) return inference.NoChange();
+ HeapObjectRef candidate_prototype = candidate_map.prototype();
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
MapRef object_map = object_maps[i];
- base::Optional<HeapObjectRef> map_prototype = object_map.prototype();
- if (!map_prototype.has_value()) return inference.NoChange();
+ HeapObjectRef map_prototype = object_map.prototype();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- !map_prototype->equals(*candidate_prototype)) {
+ !map_prototype.equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
// with hidden prototypes at this point.
@@ -2835,7 +3095,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
if (!inference.RelyOnMapsViaStability(dependencies())) {
return inference.NoChange();
}
- Node* value = jsgraph()->Constant(*candidate_prototype);
+ Node* value = jsgraph()->Constant(candidate_prototype);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -3523,42 +3783,6 @@ Reduction JSCallReducer::ReduceCallWasmFunction(
}
#endif // V8_ENABLE_WEBASSEMBLY
-#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
-namespace {
-bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
- if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat32 ||
- c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat64) {
- return true;
- }
- for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
- if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
- c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
- return true;
- }
- }
- return false;
-}
-} // namespace
-#endif
-
-#ifndef V8_TARGET_ARCH_64_BIT
-namespace {
-bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
- if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kInt64 ||
- c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kUint64) {
- return true;
- }
- for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
- if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
- c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
- return true;
- }
- }
- return false;
-}
-} // namespace
-#endif
-
// Given a FunctionTemplateInfo, checks whether the fast API call can be
// optimized, applying the initial step of the overload resolution algorithm:
// Given an overload set function_template_info.c_signatures, and a list of
@@ -3603,16 +3827,9 @@ FastApiCallFunctionVector CanOptimizeFastCall(
const size_t len = c_signature->ArgumentCount() - kReceiver;
bool optimize_to_fast_call = (len == arg_count);
-#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
optimize_to_fast_call =
- optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
-#else
- USE(c_signature);
-#endif
-#ifndef V8_TARGET_ARCH_64_BIT
- optimize_to_fast_call =
- optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
-#endif
+ optimize_to_fast_call &&
+ fast_api_call::CanOptimizeFastSignature(c_signature);
if (optimize_to_fast_call) {
result.push_back({functions[i], c_signature});
@@ -6855,9 +7072,8 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
// have the initial Promise.prototype as their [[Prototype]].
for (const MapRef& receiver_map : receiver_maps) {
if (!receiver_map.IsJSPromiseMap()) return false;
- base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value() ||
- !prototype->equals(native_context().promise_prototype())) {
+ HeapObjectRef prototype = receiver_map.prototype();
+ if (!prototype.equals(native_context().promise_prototype())) {
return false;
}
}
@@ -6911,7 +7127,7 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- CodeTRef code = MakeRef(broker(), ToCodeT(*callable.code()));
+ CodeTRef code = MakeRef(broker(), *callable.code());
return graph()->NewNode(javascript()->CreateClosure(shared, code),
jsgraph()->HeapConstant(feedback_cell), context,
effect, control);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 7875ae6be9..fab65507ea 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -939,6 +939,9 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
return NoChange();
}
+ // Don't inline anything for class constructors.
+ if (IsClassConstructor(shared.kind())) return NoChange();
+
MapRef function_map =
native_context().GetFunctionMapFromIndex(shared.function_map_index());
DCHECK(!function_map.IsInobjectSlackTrackingInProgress());
@@ -958,7 +961,8 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Emit code to allocate the JSFunction instance.
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(function_map.instance_size(), allocation, Type::Function());
+ a.Allocate(function_map.instance_size(), allocation,
+ Type::CallableFunction());
a.Store(AccessBuilder::ForMap(), function_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
jsgraph()->EmptyFixedArrayConstant());
@@ -1324,7 +1328,7 @@ base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
MapRef standard_map =
broker->target_native_context().object_function().initial_map(
broker->dependencies());
- if (prototype.equals(standard_map.prototype().value())) {
+ if (prototype.equals(standard_map.prototype())) {
return standard_map;
}
if (prototype.map().oddball_type() == OddballType::kNull) {
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 15f37c65cb..6ae447cad7 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -404,8 +404,8 @@ void JSGenericLowering::LowerJSGetIterator(Node* node) {
ReplaceWithBuiltinCall(node, Builtin::kGetIteratorWithFeedback);
}
-void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- JSStorePropertyNode n(node);
+void JSGenericLowering::LowerJSSetKeyedProperty(Node* node) {
+ JSSetKeyedPropertyNode n(node);
const PropertyAccess& p = n.Parameters();
FrameState frame_state = n.frame_state();
Node* outer_state = frame_state.outer_frame_state();
@@ -414,6 +414,12 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
n->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
+
+ // KeyedStoreIC is currently a base class for multiple keyed property store
+ // operations and contains mixed logic for set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetKeyedIC as a subclass of KeyedStoreIC, which
+ // can be called here.
ReplaceWithBuiltinCall(node, Builtin::kKeyedStoreICTrampoline);
} else {
node->InsertInput(zone(), 3,
@@ -422,8 +428,8 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
}
}
-void JSGenericLowering::LowerJSDefineProperty(Node* node) {
- JSDefinePropertyNode n(node);
+void JSGenericLowering::LowerJSDefineKeyedOwnProperty(Node* node) {
+ JSDefineKeyedOwnPropertyNode n(node);
const PropertyAccess& p = n.Parameters();
FrameState frame_state = n.frame_state();
Node* outer_state = frame_state.outer_frame_state();
@@ -432,16 +438,16 @@ void JSGenericLowering::LowerJSDefineProperty(Node* node) {
n->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(node, Builtin::kKeyedDefineOwnICTrampoline);
+ ReplaceWithBuiltinCall(node, Builtin::kDefineKeyedOwnICTrampoline);
} else {
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(node, Builtin::kKeyedDefineOwnIC);
+ ReplaceWithBuiltinCall(node, Builtin::kDefineKeyedOwnIC);
}
}
-void JSGenericLowering::LowerJSStoreNamed(Node* node) {
- JSStoreNamedNode n(node);
+void JSGenericLowering::LowerJSSetNamedProperty(Node* node) {
+ JSSetNamedPropertyNode n(node);
NamedAccess const& p = n.Parameters();
FrameState frame_state = n.frame_state();
Node* outer_state = frame_state.outer_frame_state();
@@ -455,6 +461,11 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
+ // StoreIC is currently a base class for multiple property store operations
+ // and contains mixed logic for named and keyed, set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can
+ // be called here.
ReplaceWithBuiltinCall(node, Builtin::kStoreICTrampoline);
} else {
node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
@@ -464,10 +475,10 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
}
}
-void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
+void JSGenericLowering::LowerJSDefineNamedOwnProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- JSStoreNamedOwnNode n(node);
- StoreNamedOwnParameters const& p = n.Parameters();
+ JSDefineNamedOwnPropertyNode n(node);
+ DefineNamedOwnPropertyParameters const& p = n.Parameters();
FrameState frame_state = n.frame_state();
Node* outer_state = frame_state.outer_frame_state();
STATIC_ASSERT(n.FeedbackVectorIndex() == 2);
@@ -476,13 +487,13 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- Callable callable = CodeFactory::StoreOwnIC(isolate());
+ Callable callable = CodeFactory::DefineNamedOwnIC(isolate());
ReplaceWithBuiltinCall(node, callable, flags);
} else {
node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+ Callable callable = CodeFactory::DefineNamedOwnICInOptimizedCode(isolate());
ReplaceWithBuiltinCall(node, callable, flags);
}
}
@@ -507,14 +518,14 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
}
}
-void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
- JSStoreDataPropertyInLiteralNode n(node);
+void JSGenericLowering::LowerJSDefineKeyedOwnPropertyInLiteral(Node* node) {
+ JSDefineKeyedOwnPropertyInLiteralNode n(node);
FeedbackParameter const& p = n.Parameters();
STATIC_ASSERT(n.FeedbackVectorIndex() == 4);
RelaxControls(node);
node->InsertInput(zone(), 5,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
+ ReplaceWithRuntimeCall(node, Runtime::kDefineKeyedOwnPropertyInLiteral);
}
void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 4facc0f25f..84029e1e77 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -43,8 +43,7 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
- bool tracing_enabled, bool is_concurrent_inlining,
- CodeKind code_kind)
+ bool tracing_enabled, CodeKind code_kind)
: isolate_(isolate),
#if V8_COMPRESS_POINTERS
cage_base_(isolate),
@@ -55,7 +54,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
root_index_map_(isolate),
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
- is_concurrent_inlining_(is_concurrent_inlining),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
@@ -349,7 +347,7 @@ KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
if (IsKeyedHasICKind(kind)) {
return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
}
- if (IsDefineOwnICKind(kind)) {
+ if (IsDefineKeyedOwnICKind(kind)) {
return KeyedAccessMode(AccessMode::kDefine,
nexus.GetKeyedAccessStoreMode());
}
@@ -357,7 +355,7 @@ KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
}
if (IsStoreInArrayLiteralICKind(kind) ||
- IsStoreDataPropertyInLiteralKind(kind)) {
+ IsDefineKeyedOwnPropertyInLiteralKind(kind)) {
return KeyedAccessMode(AccessMode::kStoreInLiteral,
nexus.GetKeyedAccessStoreMode());
}
@@ -410,10 +408,10 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
keyed_mode_(keyed_mode),
transition_groups_(zone) {
DCHECK(IsKeyedLoadICKind(slot_kind) || IsKeyedHasICKind(slot_kind) ||
- IsStoreDataPropertyInLiteralKind(slot_kind) ||
+ IsDefineKeyedOwnPropertyInLiteralKind(slot_kind) ||
IsKeyedStoreICKind(slot_kind) ||
IsStoreInArrayLiteralICKind(slot_kind) ||
- IsDefineOwnICKind(slot_kind));
+ IsDefineKeyedOwnICKind(slot_kind));
}
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
@@ -428,6 +426,7 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
return true;
}
+// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
ZoneVector<MapRef> const& maps, bool has_migration_target_maps)
@@ -444,11 +443,11 @@ NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind)
: ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
- IsStoreOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) ||
+ IsDefineNamedOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) ||
IsKeyedHasICKind(slot_kind) || IsKeyedStoreICKind(slot_kind) ||
IsStoreInArrayLiteralICKind(slot_kind) ||
- IsStoreDataPropertyInLiteralKind(slot_kind) ||
- IsDefineOwnICKind(slot_kind));
+ IsDefineKeyedOwnPropertyInLiteralKind(slot_kind) ||
+ IsDefineKeyedOwnICKind(slot_kind));
}
void JSHeapBroker::SetFeedback(FeedbackSource const& source,
@@ -484,59 +483,6 @@ bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
.IsUninitialized();
}
-namespace {
-
-using MapRefAndHandler = std::pair<MapRef, MaybeObjectHandle>;
-MaybeObjectHandle TryGetMinimorphicHandler(
- ZoneVector<MapRefAndHandler> const& maps_and_handlers,
- FeedbackSlotKind kind, NativeContextRef const& native_context,
- bool is_turboprop) {
- if (!is_turboprop || !FLAG_turbo_dynamic_map_checks || !IsLoadICKind(kind)) {
- return MaybeObjectHandle();
- }
-
- // Don't use dynamic map checks when loading properties from Array.prototype.
- // Using dynamic map checks prevents constant folding and hence does not
- // inline the array builtins. We only care about monomorphic cases here. For
- // polymorphic loads currently we don't inline the builtins even without
- // dynamic map checks.
- if (maps_and_handlers.size() == 1 &&
- maps_and_handlers[0].first.equals(
- native_context.initial_array_prototype().map())) {
- return MaybeObjectHandle();
- }
-
- MaybeObjectHandle initial_handler;
- for (const MapRefAndHandler& map_and_handler : maps_and_handlers) {
- MapRef map = map_and_handler.first;
- MaybeObjectHandle handler = map_and_handler.second;
- if (handler.is_null()) return MaybeObjectHandle();
- DCHECK(!handler->IsCleared());
- // TODO(mythria): extend this to DataHandlers too
- if (!handler.object()->IsSmi()) return MaybeObjectHandle();
- if (LoadHandler::GetHandlerKind(handler.object()->ToSmi()) !=
- LoadHandler::Kind::kField) {
- return MaybeObjectHandle();
- }
- CHECK(!map.object()->IsJSGlobalProxyMap());
- if (initial_handler.is_null()) {
- initial_handler = handler;
- } else if (!handler.is_identical_to(initial_handler)) {
- return MaybeObjectHandle();
- }
- }
- return initial_handler;
-}
-
-bool HasMigrationTargets(const ZoneVector<MapRef>& maps) {
- for (const MapRef& map : maps) {
- if (map.is_migration_target()) return true;
- }
- return false;
-}
-
-} // namespace
-
const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
FeedbackSlotKind kind) const {
return *zone()->New<InsufficientFeedback>(kind);
@@ -549,7 +495,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
FeedbackSlotKind kind = nexus.kind();
if (nexus.IsUninitialized()) return NewInsufficientFeedback(kind);
- ZoneVector<MapRefAndHandler> maps_and_handlers(zone());
ZoneVector<MapRef> maps(zone());
{
std::vector<MapAndHandler> maps_and_handlers_unfiltered;
@@ -571,20 +516,12 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
}
}
if (map.is_abandoned_prototype_map()) continue;
- maps_and_handlers.push_back({map, map_and_handler.second});
maps.push_back(map);
}
}
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
- MaybeObjectHandle handler = TryGetMinimorphicHandler(
- maps_and_handlers, kind, target_native_context(), is_turboprop());
- if (!handler.is_null()) {
- return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
- *name, kind, CanonicalPersistentHandle(handler.object()), maps,
- HasMigrationTargets(maps));
- }
// If no maps were found for a non-megamorphic access, then our maps died
// and we should soft-deopt.
@@ -710,9 +647,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
AllocationSiteRef site =
MakeRefAssumeMemoryFence(this, AllocationSite::cast(object));
- if (!is_concurrent_inlining() && site.PointsToLiteral()) {
- site.SerializeRecursive(NotConcurrentInliningTag{this});
- }
return *zone()->New<LiteralFeedback>(site, nexus.kind());
}
@@ -728,9 +662,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
RegExpBoilerplateDescriptionRef boilerplate = MakeRefAssumeMemoryFence(
this, RegExpBoilerplateDescription::cast(object));
- if (!is_concurrent_inlining()) {
- boilerplate.Serialize(NotConcurrentInliningTag{this});
- }
return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
@@ -971,15 +902,14 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
AccessInfoFactory factory(this, dependencies, zone());
PropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(map, name, access_mode);
- if (is_concurrent_inlining_) {
- TRACE(this, "Storing PropertyAccessInfo for "
- << access_mode << " of property " << name << " on map "
- << map);
- property_access_infos_.insert({target, access_info});
- }
+ TRACE(this, "Storing PropertyAccessInfo for "
+ << access_mode << " of property " << name << " on map "
+ << map);
+ property_access_infos_.insert({target, access_info});
return access_info;
}
+// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source) {
@@ -989,16 +919,16 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
AccessInfoFactory factory(this, nullptr, zone());
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
- if (is_concurrent_inlining_) {
- // We can assume a memory fence on {source.vector} because in production,
- // the vector has already passed the gc predicate. Unit tests create
- // FeedbackSource objects directly from handles, but they run on
- // the main thread.
- TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
- << source.index() << " "
- << MakeRefAssumeMemoryFence<Object>(this, source.vector));
- minimorphic_property_access_infos_.insert({source, access_info});
- }
+
+ // We can assume a memory fence on {source.vector} because in production,
+ // the vector has already passed the gc predicate. Unit tests create
+ // FeedbackSource objects directly from handles, but they run on
+ // the main thread.
+ TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
+ << source.index() << " "
+ << MakeRefAssumeMemoryFence<Object>(this, source.vector));
+ minimorphic_property_access_infos_.insert({source, access_info});
+
return access_info;
}
@@ -1042,6 +972,7 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
return *static_cast<NamedAccessFeedback const*>(this);
}
+// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessFeedback const&
ProcessedFeedback::AsMinimorphicPropertyAccess() const {
CHECK_EQ(kMinimorphicPropertyAccess, kind());
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 32eac69a5f..0f22411f47 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -32,6 +32,11 @@
namespace v8 {
namespace internal {
+
+namespace maglev {
+class MaglevCompilationInfo;
+}
+
namespace compiler {
class ObjectRef;
@@ -94,12 +99,12 @@ DEFINE_OPERATORS_FOR_FLAGS(GetOrCreateDataFlags)
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
- bool is_concurrent_inlining, CodeKind code_kind);
+ CodeKind code_kind);
// For use only in tests, sets default values for some arguments. Avoids
// churn when new flags are added.
JSHeapBroker(Isolate* isolate, Zone* broker_zone)
- : JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
+ : JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker,
CodeKind::TURBOFAN) {}
~JSHeapBroker();
@@ -127,8 +132,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
- bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
- bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
return IsMainThread() ? NexusConfig::FromMainThread(isolate())
@@ -152,6 +155,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// them.
void DetachLocalIsolate(OptimizedCompilationInfo* info);
+ // TODO(v8:7700): Refactor this once the broker is no longer
+ // Turbofan-specific.
+ void AttachLocalIsolateForMaglev(maglev::MaglevCompilationInfo* info,
+ LocalIsolate* local_isolate);
+ void DetachLocalIsolateForMaglev(maglev::MaglevCompilationInfo* info);
+
bool StackHasOverflowed() const;
#ifdef DEBUG
@@ -436,7 +445,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
- bool const is_concurrent_inlining_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
deleted file mode 100644
index 5692d128a7..0000000000
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-heap-copy-reducer.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/heap/factory-inl.h"
-#include "src/objects/map.h"
-#include "src/objects/scope-info.h"
-#include "src/objects/template-objects.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// In the functions below, we call the ObjectRef (or subclass) constructor in
-// order to trigger serialization if not yet done.
-
-JSHeapCopyReducer::JSHeapCopyReducer(JSHeapBroker* broker) : broker_(broker) {}
-
-JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
-
-Reduction JSHeapCopyReducer::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kHeapConstant: {
- ObjectRef object = MakeRef(broker(), HeapConstantOf(node->op()));
- if (object.IsJSObject()) {
- object.AsJSObject().SerializeObjectCreateMap(
- NotConcurrentInliningTag{broker()});
- }
- break;
- }
- case IrOpcode::kJSCreateArray: {
- CreateArrayParametersOf(node->op()).site(broker());
- break;
- }
- case IrOpcode::kJSCreateArguments: {
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- MakeRef(broker(), state_info.shared_info().ToHandleChecked());
- break;
- }
- case IrOpcode::kJSCreateBlockContext: {
- USE(ScopeInfoOf(broker(), node->op()));
- break;
- }
- case IrOpcode::kJSCreateBoundFunction: {
- CreateBoundFunctionParameters const& p =
- CreateBoundFunctionParametersOf(node->op());
- p.map(broker());
- break;
- }
- case IrOpcode::kJSCreateCatchContext: {
- USE(ScopeInfoOf(broker(), node->op()));
- break;
- }
- case IrOpcode::kJSCreateClosure: {
- CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- p.shared_info(broker());
- p.code(broker());
- break;
- }
- case IrOpcode::kJSCreateEmptyLiteralArray: {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
- }
- break;
- }
- /* Unary ops. */
- case IrOpcode::kJSBitwiseNot:
- case IrOpcode::kJSDecrement:
- case IrOpcode::kJSIncrement:
- case IrOpcode::kJSNegate: {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- if (p.feedback().IsValid()) {
- // Unary ops are treated as binary ops with respect to feedback.
- broker()->GetFeedbackForBinaryOperation(p.feedback());
- }
- break;
- }
- /* Binary ops. */
- case IrOpcode::kJSAdd:
- case IrOpcode::kJSSubtract:
- case IrOpcode::kJSMultiply:
- case IrOpcode::kJSDivide:
- case IrOpcode::kJSModulus:
- case IrOpcode::kJSExponentiate:
- case IrOpcode::kJSBitwiseOr:
- case IrOpcode::kJSBitwiseXor:
- case IrOpcode::kJSBitwiseAnd:
- case IrOpcode::kJSShiftLeft:
- case IrOpcode::kJSShiftRight:
- case IrOpcode::kJSShiftRightLogical: {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForBinaryOperation(p.feedback());
- }
- break;
- }
- /* Compare ops. */
- case IrOpcode::kJSEqual:
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- case IrOpcode::kJSStrictEqual: {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForCompareOperation(p.feedback());
- }
- break;
- }
- case IrOpcode::kJSCreateFunctionContext: {
- CreateFunctionContextParameters const& p =
- CreateFunctionContextParametersOf(node->op());
- p.scope_info(broker());
- break;
- }
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject: {
- CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
- }
- break;
- }
- case IrOpcode::kJSCreateLiteralRegExp: {
- CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForRegExpLiteral(p.feedback());
- }
- break;
- }
- case IrOpcode::kJSGetTemplateObject: {
- GetTemplateObjectParameters const& p =
- GetTemplateObjectParametersOf(node->op());
- p.shared(broker());
- p.description(broker());
- broker()->GetFeedbackForTemplateObject(p.feedback());
- break;
- }
- case IrOpcode::kJSCreateWithContext: {
- USE(ScopeInfoOf(broker(), node->op()));
- break;
- }
- case IrOpcode::kJSLoadNamed: {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name = p.name(broker());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
- name);
- }
- break;
- }
- case IrOpcode::kJSLoadNamedFromSuper: {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name = p.name(broker());
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
- name);
- }
- break;
- }
- case IrOpcode::kJSStoreNamed: {
- NamedAccess const& p = NamedAccessOf(node->op());
- p.name(broker());
- break;
- }
- case IrOpcode::kStoreField:
- case IrOpcode::kLoadField: {
- FieldAccess access = FieldAccessOf(node->op());
- Handle<Map> map_handle;
- if (access.map.ToHandle(&map_handle)) {
- MakeRef(broker(), map_handle);
- }
- Handle<Name> name_handle;
- if (access.name.ToHandle(&name_handle)) {
- MakeRef(broker(), name_handle);
- }
- break;
- }
- case IrOpcode::kMapGuard: {
- ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
- for (Handle<Map> map : maps) {
- MakeRef(broker(), map);
- }
- break;
- }
- case IrOpcode::kCheckMaps: {
- ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
- for (Handle<Map> map : maps) {
- MakeRef(broker(), map);
- }
- break;
- }
- case IrOpcode::kCompareMaps: {
- ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
- for (Handle<Map> map : maps) {
- MakeRef(broker(), map);
- }
- break;
- }
- case IrOpcode::kJSLoadProperty: {
- PropertyAccess const& p = PropertyAccessOf(node->op());
- AccessMode access_mode = AccessMode::kLoad;
- if (p.feedback().IsValid()) {
- broker()->GetFeedbackForPropertyAccess(p.feedback(), access_mode,
- base::nullopt);
- }
- break;
- }
- default:
- break;
- }
- return NoChange();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.h b/deps/v8/src/compiler/js-heap-copy-reducer.h
deleted file mode 100644
index 1041a00fab..0000000000
--- a/deps/v8/src/compiler/js-heap-copy-reducer.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
-#define V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class JSHeapBroker;
-
-// The heap copy reducer makes sure that the relevant heap data referenced
-// by handles embedded in the graph is copied to the heap broker.
-// TODO(jarin) This is just a temporary solution until the graph uses only
-// ObjetRef-derived reference to refer to the heap data.
-class V8_EXPORT_PRIVATE JSHeapCopyReducer : public Reducer {
- public:
- explicit JSHeapCopyReducer(JSHeapBroker* broker);
-
- const char* reducer_name() const override { return "JSHeapCopyReducer"; }
-
- Reduction Reduce(Node* node) override;
-
- private:
- JSHeapBroker* broker();
-
- JSHeapBroker* broker_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index aa5fe632b3..14e5080c30 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -197,16 +197,23 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
? candidate.functions[i].value().shared()
: candidate.shared_info.value();
candidate.can_inline_function[i] = candidate.bytecode[i].has_value();
- CHECK_IMPLIES(candidate.can_inline_function[i], shared.IsInlineable());
+ // Because of concurrent optimization, optimization of the inlining
+ // candidate could have been disabled meanwhile.
+ // JSInliner will check this again and not actually inline the function in
+ // this case.
+ CHECK_IMPLIES(candidate.can_inline_function[i],
+ shared.IsInlineable() ||
+ shared.GetInlineability() ==
+ SharedFunctionInfo::kHasOptimizationDisabled);
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
- // recurion like f() -> g() -> f(). The indirect recursion is helpful in
+ // recursion like f() -> g() -> f(). The indirect recursion is helpful in
// cases where f() is a small dispatch function that calls the appropriate
// function. In the case of direct recursion, we only have some static
// information for the first level of inlining and it may not be that useful
// to just inline one level in recursive calls. In some cases like tail
// recursion we may benefit from recursive inlining, if we have additional
// analysis that converts them to iterative implementations. Though it is
- // not obvious if such an anlysis is needed.
+ // not obvious if such an analysis is needed.
if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
frame_shared_info.equals(shared.object())) {
TRACE("Not considering call site #" << node->id() << ":"
@@ -845,13 +852,6 @@ SimplifiedOperatorBuilder* JSInliningHeuristic::simplified() const {
return jsgraph()->simplified();
}
-int JSInliningHeuristic::ScaleInliningSize(int value, JSHeapBroker* broker) {
- if (broker->is_turboprop()) {
- value = value / FLAG_turboprop_inline_scaling_factor;
- }
- return value;
-}
-
#undef TRACE
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index af8e913a47..ee4d63e8cd 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -27,12 +27,10 @@ class JSInliningHeuristic final : public AdvancedReducer {
jsgraph_(jsgraph),
broker_(broker),
mode_(mode),
- max_inlined_bytecode_size_(
- ScaleInliningSize(FLAG_max_inlined_bytecode_size, broker)),
- max_inlined_bytecode_size_cumulative_(ScaleInliningSize(
- FLAG_max_inlined_bytecode_size_cumulative, broker)),
- max_inlined_bytecode_size_absolute_(ScaleInliningSize(
- FLAG_max_inlined_bytecode_size_absolute, broker)) {}
+ max_inlined_bytecode_size_cumulative_(
+ FLAG_max_inlined_bytecode_size_cumulative),
+ max_inlined_bytecode_size_absolute_(
+ FLAG_max_inlined_bytecode_size_absolute) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -78,8 +76,6 @@ class JSInliningHeuristic final : public AdvancedReducer {
// Candidates are kept in a sorted set of unique candidates.
using Candidates = ZoneSet<Candidate, CandidateCompare>;
- static int ScaleInliningSize(int value, JSHeapBroker* broker);
-
// Dumps candidates to console.
void PrintCandidates();
Reduction InlineCandidate(Candidate const& candidate, bool small_function);
@@ -113,7 +109,6 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSHeapBroker* const broker_;
int total_inlined_bytecode_size_ = 0;
const Mode mode_;
- const int max_inlined_bytecode_size_;
const int max_inlined_bytecode_size_cumulative_;
const int max_inlined_bytecode_size_absolute_;
};
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 08e9f54ff4..5ee61373cd 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -533,24 +533,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// always hold true.
CHECK(shared_info->is_compiled());
- if (info_->source_positions()) {
- if (broker()->is_concurrent_inlining()) {
- if (!shared_info->object()->AreSourcePositionsAvailable(
- broker()->local_isolate_or_isolate())) {
- // This case is expected to be very rare, since we generate source
- // positions for all functions when debugging or profiling are turned
- // on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source
- // positions should only be missing here if there is a race between 1)
- // enabling/disabling the debugger/profiler, and 2) this compile job.
- // In that case, we simply don't inline.
- TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
- << " because source positions are missing.");
- return NoChange();
- }
- } else {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
- shared_info->object());
- }
+ if (info_->source_positions() &&
+ !shared_info->object()->AreSourcePositionsAvailable(
+ broker()->local_isolate_or_isolate())) {
+ // This case is expected to be very rare, since we generate source
+ // positions for all functions when debugging or profiling are turned
+ // on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source
+ // positions should only be missing here if there is a race between 1)
+ // enabling/disabling the debugger/profiler, and 2) this compile job.
+ // In that case, we simply don't inline.
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because source positions are missing.");
+ return NoChange();
}
// Determine the target's feedback vector and its context.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 0abe6ccda9..c4a22ae195 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -44,6 +44,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineCopyDataProperties:
return ReduceCopyDataProperties(node);
+ case Runtime::kInlineCopyDataPropertiesWithExcludedPropertiesOnStack:
+ return ReduceCopyDataPropertiesWithExcludedPropertiesOnStack(node);
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
@@ -89,6 +91,25 @@ Reduction JSIntrinsicLowering::ReduceCopyDataProperties(Node* node) {
node, Builtins::CallableFor(isolate(), Builtin::kCopyDataProperties), 0);
}
+Reduction
+JSIntrinsicLowering::ReduceCopyDataPropertiesWithExcludedPropertiesOnStack(
+ Node* node) {
+ int input_count =
+ static_cast<int>(CallRuntimeParametersOf(node->op()).arity());
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ auto callable = Builtins::CallableFor(
+ isolate(), Builtin::kCopyDataPropertiesWithExcludedProperties);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), input_count - 1, flags,
+ node->op()->properties());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->SmiConstant(input_count - 1));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+}
+
Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 0);
Node* const done = NodeProperties::GetValueInput(node, 1);
@@ -254,12 +275,10 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
vfalse, merge);
}
-
Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
return Change(node, simplified()->ObjectIsReceiver());
}
-
Reduction JSIntrinsicLowering::ReduceTurbofanStaticAssert(Node* node) {
if (FLAG_always_opt) {
// Ignore static asserts, as we most likely won't have enough information
@@ -293,19 +312,16 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
return Changed(node);
}
-
Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToLength());
return Changed(node);
}
-
Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToObject());
return Changed(node);
}
-
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
// ToString is unnecessary if the input is a string.
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
@@ -317,7 +333,6 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
return Changed(node);
}
-
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
int const arity =
static_cast<int>(CallRuntimeParametersOf(node->op()).arity());
@@ -354,7 +369,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
return Changed(node);
}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b, Node* c) {
RelaxControls(node);
@@ -366,7 +380,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
return Changed(node);
}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b, Node* c, Node* d) {
RelaxControls(node);
@@ -396,10 +409,8 @@ Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
-
Isolate* JSIntrinsicLowering::isolate() const { return jsgraph()->isolate(); }
-
CommonOperatorBuilder* JSIntrinsicLowering::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index b215982139..704a4eba48 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -40,6 +40,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
private:
Reduction ReduceCopyDataProperties(Node* node);
+ Reduction ReduceCopyDataPropertiesWithExcludedPropertiesOnStack(Node* node);
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index c260a7ff9f..478647df7b 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -96,20 +96,20 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadNamedFromSuper:
return ReduceJSLoadNamedFromSuper(node);
- case IrOpcode::kJSStoreNamed:
- return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSSetNamedProperty:
+ return ReduceJSSetNamedProperty(node);
case IrOpcode::kJSHasProperty:
return ReduceJSHasProperty(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
- case IrOpcode::kJSStoreProperty:
- return ReduceJSStoreProperty(node);
- case IrOpcode::kJSDefineProperty:
- return ReduceJSDefineProperty(node);
- case IrOpcode::kJSStoreNamedOwn:
- return ReduceJSStoreNamedOwn(node);
- case IrOpcode::kJSStoreDataPropertyInLiteral:
- return ReduceJSStoreDataPropertyInLiteral(node);
+ case IrOpcode::kJSSetKeyedProperty:
+ return ReduceJSSetKeyedProperty(node);
+ case IrOpcode::kJSDefineKeyedOwnProperty:
+ return ReduceJSDefineKeyedOwnProperty(node);
+ case IrOpcode::kJSDefineNamedOwnProperty:
+ return ReduceJSDefineNamedOwnProperty(node);
+ case IrOpcode::kJSDefineKeyedOwnPropertyInLiteral:
+ return ReduceJSDefineKeyedOwnPropertyInLiteral(node);
case IrOpcode::kJSStoreInArrayLiteral:
return ReduceJSStoreInArrayLiteral(node);
case IrOpcode::kJSToObject:
@@ -365,15 +365,14 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
}
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
- base::Optional<HeapObjectRef> function_prototype = function_map.prototype();
- if (!function_prototype.has_value()) return NoChange();
+ HeapObjectRef function_prototype = function_map.prototype();
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
- Node* value = jsgraph()->Constant(*function_prototype);
+ Node* value = jsgraph()->Constant(function_prototype);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -540,13 +539,12 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
all = false;
break;
}
- base::Optional<HeapObjectRef> map_prototype = map.prototype();
- if (!map_prototype.has_value()) return kMayBeInPrototypeChain;
- if (map_prototype->equals(prototype)) {
+ HeapObjectRef map_prototype = map.prototype();
+ if (map_prototype.equals(prototype)) {
none = false;
break;
}
- map = map_prototype->map();
+ map = map_prototype.map();
// TODO(v8:11457) Support dictionary mode protoypes here.
if (!map.is_stable() || map.is_dictionary_map()) {
return kMayBeInPrototypeChain;
@@ -1092,23 +1090,23 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSSetNamedProperty ||
node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn ||
- node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+ node->opcode() == IrOpcode::kJSSetKeyedProperty ||
+ node->opcode() == IrOpcode::kJSDefineNamedOwnProperty ||
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamedFromSuper ||
- node->opcode() == IrOpcode::kJSDefineProperty);
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
- JSStoreNamedNode::ObjectIndex() == 0 &&
+ JSSetNamedPropertyNode::ObjectIndex() == 0 &&
JSLoadPropertyNode::ObjectIndex() == 0 &&
- JSStorePropertyNode::ObjectIndex() == 0 &&
- JSStoreNamedOwnNode::ObjectIndex() == 0 &&
- JSStoreNamedNode::ObjectIndex() == 0 &&
- JSStoreDataPropertyInLiteralNode::ObjectIndex() == 0 &&
+ JSSetKeyedPropertyNode::ObjectIndex() == 0 &&
+ JSDefineNamedOwnPropertyNode::ObjectIndex() == 0 &&
+ JSSetNamedPropertyNode::ObjectIndex() == 0 &&
+ JSDefineKeyedOwnPropertyInLiteralNode::ObjectIndex() == 0 &&
JSHasPropertyNode::ObjectIndex() == 0 &&
- JSDefinePropertyNode::ObjectIndex() == 0);
+ JSDefineKeyedOwnPropertyNode::ObjectIndex() == 0);
STATIC_ASSERT(JSLoadNamedFromSuperNode::ReceiverIndex() == 0);
Node* context = NodeProperties::GetContextInput(node);
@@ -1164,6 +1162,14 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
for (const MapRef& map : inferred_maps) {
if (map.is_deprecated()) continue;
+
+ // TODO(v8:12547): Support writing to shared structs, which needs a write
+ // barrier that calls Object::Share to ensure the RHS is shared.
+ if (InstanceTypeChecker::IsJSSharedStruct(map.instance_type()) &&
+ access_mode == AccessMode::kStore) {
+ return NoChange();
+ }
+
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
map, feedback.name(), access_mode, dependencies());
access_infos_for_feedback.push_back(access_info);
@@ -1571,17 +1577,18 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
return Replace(call_property);
}
-Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
- JSStoreNamedNode n(node);
+Reduction JSNativeContextSpecialization::ReduceJSSetNamedProperty(Node* node) {
+ JSSetNamedPropertyNode n(node);
NamedAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
FeedbackSource(p.feedback()), AccessMode::kStore);
}
-Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
- JSStoreNamedOwnNode n(node);
- StoreNamedOwnParameters const& p = n.Parameters();
+Reduction JSNativeContextSpecialization::ReduceJSDefineNamedOwnProperty(
+ Node* node) {
+ JSDefineNamedOwnPropertyNode n(node);
+ DefineNamedOwnPropertyParameters const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
FeedbackSource(p.feedback()),
@@ -1669,15 +1676,15 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value,
ElementAccessFeedback const& feedback) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSSetKeyedProperty ||
node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
node->opcode() == IrOpcode::kJSHasProperty ||
- node->opcode() == IrOpcode::kJSDefineProperty);
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
STATIC_ASSERT(JSLoadPropertyNode::ObjectIndex() == 0 &&
- JSStorePropertyNode::ObjectIndex() == 0 &&
+ JSSetKeyedPropertyNode::ObjectIndex() == 0 &&
JSStoreInArrayLiteralNode::ArrayIndex() == 0 &&
- JSStoreDataPropertyInLiteralNode::ObjectIndex() == 0 &&
+ JSDefineKeyedOwnPropertyInLiteralNode::ObjectIndex() == 0 &&
JSHasPropertyNode::ObjectIndex() == 0);
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1734,6 +1741,13 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
&prototype_maps)) {
return NoChange();
}
+
+ // TODO(v8:12547): Support writing to shared structs, which needs a
+ // write barrier that calls Object::Share to ensure the RHS is shared.
+ if (InstanceTypeChecker::IsJSSharedStruct(
+ receiver_map.instance_type())) {
+ return NoChange();
+ }
}
}
for (MapRef const& prototype_map : prototype_maps) {
@@ -1978,15 +1992,15 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSSetKeyedProperty ||
node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn ||
+ node->opcode() == IrOpcode::kJSSetNamedProperty ||
+ node->opcode() == IrOpcode::kJSDefineNamedOwnProperty ||
node->opcode() == IrOpcode::kJSLoadNamedFromSuper ||
- node->opcode() == IrOpcode::kJSDefineProperty);
+ node->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
DCHECK_GE(node->op()->ControlOutputCount(), 1);
ProcessedFeedback const& feedback =
@@ -2163,16 +2177,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
FeedbackSource(p.feedback()), AccessMode::kLoad);
}
-Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
- JSStorePropertyNode n(node);
+Reduction JSNativeContextSpecialization::ReduceJSSetKeyedProperty(Node* node) {
+ JSSetKeyedPropertyNode n(node);
PropertyAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, n.key(), base::nullopt, n.value(),
FeedbackSource(p.feedback()), AccessMode::kStore);
}
-Reduction JSNativeContextSpecialization::ReduceJSDefineProperty(Node* node) {
- JSDefinePropertyNode n(node);
+Reduction JSNativeContextSpecialization::ReduceJSDefineKeyedOwnProperty(
+ Node* node) {
+ JSDefineKeyedOwnPropertyNode n(node);
PropertyAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, n.key(), base::nullopt, n.value(),
@@ -2557,7 +2572,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
case MachineRepresentation::kBit:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
@@ -2609,17 +2624,19 @@ JSNativeContextSpecialization::BuildPropertyStore(
return ValueEffectControl(value, effect, control);
}
-Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
+Reduction
+JSNativeContextSpecialization::ReduceJSDefineKeyedOwnPropertyInLiteral(
Node* node) {
- JSStoreDataPropertyInLiteralNode n(node);
+ JSDefineKeyedOwnPropertyInLiteralNode n(node);
FeedbackParameter const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
NumberMatcher mflags(n.flags());
CHECK(mflags.HasResolvedValue());
- DataPropertyInLiteralFlags cflags(mflags.ResolvedValue());
- DCHECK(!(cflags & DataPropertyInLiteralFlag::kDontEnum));
- if (cflags & DataPropertyInLiteralFlag::kSetFunctionName) return NoChange();
+ DefineKeyedOwnPropertyInLiteralFlags cflags(mflags.ResolvedValue());
+ DCHECK(!(cflags & DefineKeyedOwnPropertyInLiteralFlag::kDontEnum));
+ if (cflags & DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName)
+ return NoChange();
return ReducePropertyAccess(node, n.name(), base::nullopt, n.value(),
FeedbackSource(p.feedback()),
@@ -3411,7 +3428,7 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
for (MapRef receiver_map : receiver_maps) {
- ObjectRef receiver_prototype = receiver_map.prototype().value();
+ ObjectRef receiver_prototype = receiver_map.prototype();
if (!receiver_prototype.IsJSObject() ||
!broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
return false;
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 59fb278341..9f788812e1 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -38,8 +38,8 @@ class TypeCache;
// Specializes a given JSGraph to a given native context, potentially constant
// folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
-// nodes. And also specializes {LoadNamed} and {StoreNamed} nodes according
-// to type feedback (if available).
+// nodes. And also specializes {LoadNamed} and {SetNamedProperty} nodes
+// according to type feedback (if available).
class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
: public AdvancedReducer {
public:
@@ -86,13 +86,13 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadNamedFromSuper(Node* node);
Reduction ReduceJSGetIterator(Node* node);
- Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSSetNamedProperty(Node* node);
Reduction ReduceJSHasProperty(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
- Reduction ReduceJSStoreProperty(Node* node);
- Reduction ReduceJSDefineProperty(Node* node);
- Reduction ReduceJSStoreNamedOwn(Node* node);
- Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
+ Reduction ReduceJSSetKeyedProperty(Node* node);
+ Reduction ReduceJSDefineKeyedOwnProperty(Node* node);
+ Reduction ReduceJSDefineNamedOwnProperty(Node* node);
+ Reduction ReduceJSDefineKeyedOwnPropertyInLiteral(Node* node);
Reduction ReduceJSStoreInArrayLiteral(Node* node);
Reduction ReduceJSToObject(Node* node);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a4c6e149bc..306e12f8c1 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -213,29 +213,31 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
return OpParameter<CreateFunctionContextParameters>(op);
}
-bool operator==(StoreNamedOwnParameters const& lhs,
- StoreNamedOwnParameters const& rhs) {
+bool operator==(DefineNamedOwnPropertyParameters const& lhs,
+ DefineNamedOwnPropertyParameters const& rhs) {
return lhs.name_.object().location() == rhs.name_.object().location() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(StoreNamedOwnParameters const& lhs,
- StoreNamedOwnParameters const& rhs) {
+bool operator!=(DefineNamedOwnPropertyParameters const& lhs,
+ DefineNamedOwnPropertyParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(StoreNamedOwnParameters const& p) {
+size_t hash_value(DefineNamedOwnPropertyParameters const& p) {
return base::hash_combine(p.name_.object().location(),
FeedbackSource::Hash()(p.feedback()));
}
-std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
+std::ostream& operator<<(std::ostream& os,
+ DefineNamedOwnPropertyParameters const& p) {
return os << Brief(*p.name_.object());
}
-StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, op->opcode());
- return OpParameter<StoreNamedOwnParameters>(op);
+DefineNamedOwnPropertyParameters const& DefineNamedOwnPropertyParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSDefineNamedOwnProperty, op->opcode());
+ return OpParameter<DefineNamedOwnPropertyParameters>(op);
}
bool operator==(FeedbackParameter const& lhs, FeedbackParameter const& rhs) {
@@ -259,7 +261,7 @@ FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
JSOperator::IsBinaryWithFeedback(op->opcode()) ||
op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
op->opcode() == IrOpcode::kJSInstanceOf ||
- op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+ op->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
return OpParameter<FeedbackParameter>(op);
}
@@ -290,7 +292,7 @@ std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
NamedAccess const& NamedAccessOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
op->opcode() == IrOpcode::kJSLoadNamedFromSuper ||
- op->opcode() == IrOpcode::kJSStoreNamed);
+ op->opcode() == IrOpcode::kJSSetNamedProperty);
return OpParameter<NamedAccess>(op);
}
@@ -314,8 +316,8 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
PropertyAccess const& PropertyAccessOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSHasProperty ||
op->opcode() == IrOpcode::kJSLoadProperty ||
- op->opcode() == IrOpcode::kJSStoreProperty ||
- op->opcode() == IrOpcode::kJSDefineProperty);
+ op->opcode() == IrOpcode::kJSSetKeyedProperty ||
+ op->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
return OpParameter<PropertyAccess>(op);
}
@@ -754,8 +756,8 @@ Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
V(AsyncFunctionEnter, Operator::kNoProperties, 2, 1) \
- V(AsyncFunctionReject, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
- V(AsyncFunctionResolve, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(AsyncFunctionReject, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
+ V(AsyncFunctionResolve, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
@@ -821,7 +823,7 @@ JS_UNOP_WITH_FEEDBACK(UNARY_OP)
JS_BINOP_WITH_FEEDBACK(BINARY_OP)
#undef BINARY_OP
-const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
+const Operator* JSOperatorBuilder::DefineKeyedOwnPropertyInLiteral(
const FeedbackSource& feedback) {
static constexpr int kObject = 1;
static constexpr int kName = 1;
@@ -832,11 +834,11 @@ const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
kObject + kName + kValue + kFlags + kFeedbackVector;
FeedbackParameter parameters(feedback);
return zone()->New<Operator1<FeedbackParameter>>( // --
- IrOpcode::kJSStoreDataPropertyInLiteral,
- Operator::kNoThrow, // opcode
- "JSStoreDataPropertyInLiteral", // name
- kArity, 1, 1, 0, 1, 1, // counts
- parameters); // parameter
+ IrOpcode::kJSDefineKeyedOwnPropertyInLiteral,
+ Operator::kNoThrow, // opcode
+ "JSDefineKeyedOwnPropertyInLiteral", // name
+ kArity, 1, 1, 0, 1, 1, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::StoreInArrayLiteral(
@@ -1099,53 +1101,53 @@ int RestoreRegisterIndexOf(const Operator* op) {
return OpParameter<int>(op);
}
-const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
- const NameRef& name,
- FeedbackSource const& feedback) {
+const Operator* JSOperatorBuilder::SetNamedProperty(
+ LanguageMode language_mode, const NameRef& name,
+ FeedbackSource const& feedback) {
static constexpr int kObject = 1;
static constexpr int kValue = 1;
static constexpr int kFeedbackVector = 1;
static constexpr int kArity = kObject + kValue + kFeedbackVector;
NamedAccess access(language_mode, name, feedback);
- return zone()->New<Operator1<NamedAccess>>( // --
- IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
- "JSStoreNamed", // name
- kArity, 1, 1, 0, 1, 2, // counts
- access); // parameter
+ return zone()->New<Operator1<NamedAccess>>( // --
+ IrOpcode::kJSSetNamedProperty, Operator::kNoProperties, // opcode
+ "JSSetNamedProperty", // name
+ kArity, 1, 1, 0, 1, 2, // counts
+ access); // parameter
}
-const Operator* JSOperatorBuilder::StoreProperty(
+const Operator* JSOperatorBuilder::SetKeyedProperty(
LanguageMode language_mode, FeedbackSource const& feedback) {
PropertyAccess access(language_mode, feedback);
- return zone()->New<Operator1<PropertyAccess>>( // --
- IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
- "JSStoreProperty", // name
- 4, 1, 1, 0, 1, 2, // counts
- access); // parameter
+ return zone()->New<Operator1<PropertyAccess>>( // --
+ IrOpcode::kJSSetKeyedProperty, Operator::kNoProperties, // opcode
+ "JSSetKeyedProperty", // name
+ 4, 1, 1, 0, 1, 2, // counts
+ access); // parameter
}
-const Operator* JSOperatorBuilder::DefineProperty(
+const Operator* JSOperatorBuilder::DefineKeyedOwnProperty(
LanguageMode language_mode, FeedbackSource const& feedback) {
PropertyAccess access(language_mode, feedback);
- return zone()->New<Operator1<PropertyAccess>>( // --
- IrOpcode::kJSDefineProperty, Operator::kNoProperties, // opcode
- "JSDefineProperty", // name
- 4, 1, 1, 0, 1, 2, // counts
- access); // parameter
+ return zone()->New<Operator1<PropertyAccess>>( // --
+ IrOpcode::kJSDefineKeyedOwnProperty, Operator::kNoProperties, // opcode
+ "JSDefineKeyedOwnProperty", // name
+ 4, 1, 1, 0, 1, 2, // counts
+ access); // parameter
}
-const Operator* JSOperatorBuilder::StoreNamedOwn(
+const Operator* JSOperatorBuilder::DefineNamedOwnProperty(
const NameRef& name, FeedbackSource const& feedback) {
static constexpr int kObject = 1;
static constexpr int kValue = 1;
static constexpr int kFeedbackVector = 1;
static constexpr int kArity = kObject + kValue + kFeedbackVector;
- StoreNamedOwnParameters parameters(name, feedback);
- return zone()->New<Operator1<StoreNamedOwnParameters>>( // --
- IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties, // opcode
- "JSStoreNamedOwn", // name
- kArity, 1, 1, 0, 1, 2, // counts
- parameters); // parameter
+ DefineNamedOwnPropertyParameters parameters(name, feedback);
+ return zone()->New<Operator1<DefineNamedOwnPropertyParameters>>( // --
+ IrOpcode::kJSDefineNamedOwnProperty, Operator::kNoProperties, // opcode
+ "JSDefineNamedOwnProperty", // name
+ kArity, 1, 1, 0, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::DeleteProperty() {
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index d0599270df..9fdb3f21f5 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -394,10 +394,11 @@ class CreateFunctionContextParameters final {
CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
Operator const*);
-// Defines parameters for JSStoreNamedOwn operator.
-class StoreNamedOwnParameters final {
+// Defines parameters for JSDefineNamedOwnProperty operator.
+class DefineNamedOwnPropertyParameters final {
public:
- StoreNamedOwnParameters(const NameRef& name, FeedbackSource const& feedback)
+ DefineNamedOwnPropertyParameters(const NameRef& name,
+ FeedbackSource const& feedback)
: name_(name), feedback_(feedback) {}
NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
@@ -407,20 +408,21 @@ class StoreNamedOwnParameters final {
const NameTinyRef name_;
FeedbackSource const feedback_;
- friend bool operator==(StoreNamedOwnParameters const&,
- StoreNamedOwnParameters const&);
- friend bool operator!=(StoreNamedOwnParameters const&,
- StoreNamedOwnParameters const&);
- friend size_t hash_value(StoreNamedOwnParameters const&);
+ friend bool operator==(DefineNamedOwnPropertyParameters const&,
+ DefineNamedOwnPropertyParameters const&);
+ friend bool operator!=(DefineNamedOwnPropertyParameters const&,
+ DefineNamedOwnPropertyParameters const&);
+ friend size_t hash_value(DefineNamedOwnPropertyParameters const&);
friend std::ostream& operator<<(std::ostream&,
- StoreNamedOwnParameters const&);
+ DefineNamedOwnPropertyParameters const&);
};
-const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
+const DefineNamedOwnPropertyParameters& DefineNamedOwnPropertyParametersOf(
+ const Operator* op);
// Defines the feedback, i.e., vector and index, for storing a data property in
// an object literal. This is used as a parameter by JSCreateEmptyLiteralArray
-// and JSStoreDataPropertyInLiteral operators.
+// and JSDefineKeyedOwnPropertyInLiteral operators.
class FeedbackParameter final {
public:
explicit FeedbackParameter(FeedbackSource const& feedback)
@@ -442,7 +444,7 @@ std::ostream& operator<<(std::ostream&, FeedbackParameter const&);
const FeedbackParameter& FeedbackParameterOf(const Operator* op);
// Defines the property of an object for a named access. This is
-// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
+// used as a parameter by the JSLoadNamed and JSSetNamedProperty operators.
class NamedAccess final {
public:
NamedAccess(LanguageMode language_mode, const NameRef& name,
@@ -529,9 +531,9 @@ class StoreGlobalParameters final {
const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
-
// Defines the property of an object for a keyed access. This is used
-// as a parameter by the JSLoadProperty and JSStoreProperty operators.
+// as a parameter by the JSLoadProperty and JSSetKeyedProperty
+// operators.
class PropertyAccess final {
public:
PropertyAccess(LanguageMode language_mode, FeedbackSource const& feedback)
@@ -1020,16 +1022,18 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadNamedFromSuper(const NameRef& name,
FeedbackSource const& feedback);
- const Operator* StoreProperty(LanguageMode language_mode,
- FeedbackSource const& feedback);
- const Operator* DefineProperty(LanguageMode language_mode,
- FeedbackSource const& feedback);
- const Operator* StoreNamed(LanguageMode language_mode, const NameRef& name,
- FeedbackSource const& feedback);
+ const Operator* SetKeyedProperty(LanguageMode language_mode,
+ FeedbackSource const& feedback);
+ const Operator* DefineKeyedOwnProperty(LanguageMode language_mode,
+ FeedbackSource const& feedback);
+ const Operator* SetNamedProperty(LanguageMode language_mode,
+ const NameRef& name,
+ FeedbackSource const& feedback);
- const Operator* StoreNamedOwn(const NameRef& name,
- FeedbackSource const& feedback);
- const Operator* StoreDataPropertyInLiteral(const FeedbackSource& feedback);
+ const Operator* DefineNamedOwnProperty(const NameRef& name,
+ FeedbackSource const& feedback);
+ const Operator* DefineKeyedOwnPropertyInLiteral(
+ const FeedbackSource& feedback);
const Operator* StoreInArrayLiteral(const FeedbackSource& feedback);
const Operator* DeleteProperty();
@@ -1295,10 +1299,11 @@ class JSLoadPropertyNode final : public JSNodeWrapperBase {
#undef INPUTS
};
-class JSStorePropertyNode final : public JSNodeWrapperBase {
+class JSSetKeyedPropertyNode final : public JSNodeWrapperBase {
public:
- explicit constexpr JSStorePropertyNode(Node* node) : JSNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
+ explicit constexpr JSSetKeyedPropertyNode(Node* node)
+ : JSNodeWrapperBase(node) {
+ DCHECK_EQ(IrOpcode::kJSSetKeyedProperty, node->opcode());
}
const PropertyAccess& Parameters() const {
@@ -1314,11 +1319,11 @@ class JSStorePropertyNode final : public JSNodeWrapperBase {
#undef INPUTS
};
-class JSDefinePropertyNode final : public JSNodeWrapperBase {
+class JSDefineKeyedOwnPropertyNode final : public JSNodeWrapperBase {
public:
- explicit constexpr JSDefinePropertyNode(Node* node)
+ explicit constexpr JSDefineKeyedOwnPropertyNode(Node* node)
: JSNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kJSDefineProperty, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSDefineKeyedOwnProperty, node->opcode());
}
const PropertyAccess& Parameters() const {
@@ -1575,10 +1580,11 @@ class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
#undef INPUTS
};
-class JSStoreNamedNode final : public JSNodeWrapperBase {
+class JSSetNamedPropertyNode final : public JSNodeWrapperBase {
public:
- explicit constexpr JSStoreNamedNode(Node* node) : JSNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
+ explicit constexpr JSSetNamedPropertyNode(Node* node)
+ : JSNodeWrapperBase(node) {
+ DCHECK_EQ(IrOpcode::kJSSetNamedProperty, node->opcode());
}
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
@@ -1591,14 +1597,15 @@ class JSStoreNamedNode final : public JSNodeWrapperBase {
#undef INPUTS
};
-class JSStoreNamedOwnNode final : public JSNodeWrapperBase {
+class JSDefineNamedOwnPropertyNode final : public JSNodeWrapperBase {
public:
- explicit constexpr JSStoreNamedOwnNode(Node* node) : JSNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
+ explicit constexpr JSDefineNamedOwnPropertyNode(Node* node)
+ : JSNodeWrapperBase(node) {
+ DCHECK_EQ(IrOpcode::kJSDefineNamedOwnProperty, node->opcode());
}
- const StoreNamedOwnParameters& Parameters() const {
- return StoreNamedOwnParametersOf(node()->op());
+ const DefineNamedOwnPropertyParameters& Parameters() const {
+ return DefineNamedOwnPropertyParametersOf(node()->op());
}
#define INPUTS(V) \
@@ -1657,11 +1664,11 @@ class JSCreateEmptyLiteralArrayNode final : public JSNodeWrapperBase {
#undef INPUTS
};
-class JSStoreDataPropertyInLiteralNode final : public JSNodeWrapperBase {
+class JSDefineKeyedOwnPropertyInLiteralNode final : public JSNodeWrapperBase {
public:
- explicit constexpr JSStoreDataPropertyInLiteralNode(Node* node)
+ explicit constexpr JSDefineKeyedOwnPropertyInLiteralNode(Node* node)
: JSNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSDefineKeyedOwnPropertyInLiteral, node->opcode());
}
const FeedbackParameter& Parameters() const {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index c7a614569e..7b63b0279d 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -535,8 +535,8 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
Node* val, Node* effect,
Node* control,
FeedbackSlot slot) const {
- DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
- op->opcode() == IrOpcode::kJSStoreNamedOwn);
+ DCHECK(op->opcode() == IrOpcode::kJSSetNamedProperty ||
+ op->opcode() == IrOpcode::kJSDefineNamedOwnProperty);
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -550,10 +550,10 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
Node* key, Node* val,
Node* effect, Node* control,
FeedbackSlot slot) const {
- DCHECK(op->opcode() == IrOpcode::kJSStoreProperty ||
+ DCHECK(op->opcode() == IrOpcode::kJSSetKeyedProperty ||
op->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
- op->opcode() == IrOpcode::kJSDefineProperty);
+ op->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
+ op->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index bf3720cc77..013de42bbd 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -1626,7 +1626,7 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
// Patch {node} to an indirect call via the {function}s construct stub.
bool use_builtin_construct_stub = function.shared().construct_as_builtin();
- CodeRef code = MakeRef(
+ CodeTRef code = MakeRef(
broker(), use_builtin_construct_stub
? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
: BUILTIN_CODE(isolate(), JSConstructStubGeneric));
@@ -1657,15 +1657,11 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
- // Check if {target} is a JSFunction.
- if (target_type.Is(Type::Function())) {
+ // Check if {target} is a directly callable JSFunction.
+ if (target_type.Is(Type::CallableFunction())) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
// Patch {node} to an indirect call via CallFunctionForwardVarargs.
- // It is safe to call CallFunction instead of Call, as we already checked
- // that the target is a function that is not a class constructor in
- // JSCallReduer.
- // TODO(pthier): We shouldn't blindly rely on checks made in another pass.
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
@@ -1761,9 +1757,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
int formal_count =
shared->internal_formal_parameter_count_without_receiver();
- // TODO(v8:11112): Once the sentinel is always 0, the check against
- // IsDontAdaptArguments() can be removed.
- if (!shared->IsDontAdaptArguments() && formal_count > arity) {
+ if (formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
// Underapplication. Massage the arguments to match the expected number of
// arguments.
@@ -1814,15 +1808,13 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
return Changed(node);
}
- // Check if {target} is a JSFunction.
- if (target_type.Is(Type::Function())) {
+ // Check if {target} is a directly callable JSFunction.
+ if (target_type.Is(Type::CallableFunction())) {
// The node will change operators, remove the feedback vector.
node->RemoveInput(n.FeedbackVectorIndex());
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
// Patch {node} to an indirect call via the CallFunction builtin.
- // It is safe to call CallFunction instead of Call, as we already checked
- // that the target is a function that is not a class constructor.
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index bd62f24600..f5bf0cbb44 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -372,7 +372,7 @@ CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
js_parameter_count, // stack_parameter_count
properties, // properties
kNoCalleeSaved, // callee-saved
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
flags, // flags
debug_name, // debug name
stack_order); // stack order
@@ -426,7 +426,7 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
js_parameter_count, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
flags, // flags
"js-call"); // debug name
}
@@ -522,7 +522,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
RegList callee_saved_registers = kNoCalleeSaved;
if (descriptor.CalleeSaveRegisters()) {
callee_saved_registers = allocatable_registers;
- DCHECK(callee_saved_registers);
+ DCHECK(!callee_saved_registers.is_empty());
}
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return zone->New<CallDescriptor>( // --
@@ -533,7 +533,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
stack_parameter_count, // stack_parameter_count
properties, // properties
callee_saved_registers, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
CallDescriptor::kCanUseRoots | flags, // flags
descriptor.DebugName(), // debug name
descriptor.GetStackArgumentOrder(), // stack order
@@ -583,7 +583,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
stack_parameter_count, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
kFlags, // flags
descriptor.DebugName());
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 5a58a23134..0a96cc8b73 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -9,7 +9,7 @@
#include "src/base/flags.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/signature.h"
#include "src/common/globals.h"
@@ -36,7 +36,8 @@ class OptimizedCompilationInfo;
namespace compiler {
-const RegList kNoCalleeSaved = 0;
+constexpr RegList kNoCalleeSaved;
+constexpr DoubleRegList kNoCalleeSavedFp;
class OsrHelper;
@@ -253,13 +254,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
LocationSignature* location_sig, size_t param_slot_count,
Operator::Properties properties,
RegList callee_saved_registers,
- RegList callee_saved_fp_registers, Flags flags,
+ DoubleRegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
StackArgumentOrder stack_order = StackArgumentOrder::kDefault,
#if V8_ENABLE_WEBASSEMBLY
const wasm::FunctionSig* wasm_sig = nullptr,
#endif
- const RegList allocatable_registers = 0,
+ const RegList allocatable_registers = {},
size_t return_slot_count = 0)
: kind_(kind),
target_type_(target_type),
@@ -310,6 +311,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
#if V8_ENABLE_WEBASSEMBLY
if (IsWasmFunctionCall()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
+ if (CalleeSavedRegisters() != kNoCalleeSaved) return true;
return false;
}
@@ -413,7 +415,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
// Get the callee-saved FP registers, if any, across this call.
- RegList CalleeSavedFPRegisters() const { return callee_saved_fp_registers_; }
+ DoubleRegList CalleeSavedFPRegisters() const {
+ return callee_saved_fp_registers_;
+ }
const char* debug_name() const { return debug_name_; }
@@ -442,7 +446,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList AllocatableRegisters() const { return allocatable_registers_; }
bool HasRestrictedAllocatableRegisters() const {
- return allocatable_registers_ != 0;
+ return !allocatable_registers_.is_empty();
}
EncodedCSignature ToEncodedCSignature() const;
@@ -460,7 +464,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const size_t return_slot_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
- const RegList callee_saved_fp_registers_;
+ const DoubleRegList callee_saved_fp_registers_;
// Non-zero value means restricting the set of allocatable registers for
// register allocator to use.
const RegList allocatable_registers_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 357b866ca7..7f0dee2125 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -1070,7 +1070,7 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
// TODO(turbofan): Add support for doing the truncations.
break;
case MachineRepresentation::kFloat64:
@@ -1127,7 +1127,7 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
// TODO(turbofan): Add support for doing the truncations.
break;
case MachineRepresentation::kFloat64:
@@ -1432,7 +1432,7 @@ LoadElimination::IndexRange LoadElimination::FieldIndexOf(
case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
break;
}
int representation_size = ElementSizeInBytes(rep);
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index a56e4c2a41..2b9b5faeb7 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -550,49 +550,63 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
#if V8_ENABLE_WEBASSEMBLY
// static
-ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
- Node* loop_header, Zone* zone, size_t max_size) {
+ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
+ Node* loop_header, Zone* zone, size_t max_size, bool calls_are_large) {
auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
std::vector<Node*> queue;
- DCHECK(loop_header->opcode() == IrOpcode::kLoop);
+ DCHECK_EQ(loop_header->opcode(), IrOpcode::kLoop);
queue.push_back(loop_header);
+#define ENQUEUE_USES(use_name, condition) \
+ for (Node * use_name : node->uses()) { \
+ if (condition && visited->count(use_name) == 0) queue.push_back(use_name); \
+ }
+
while (!queue.empty()) {
Node* node = queue.back();
queue.pop_back();
- // Terminate is not part of the loop, and neither are its uses.
- if (node->opcode() == IrOpcode::kTerminate) {
- DCHECK_EQ(node->InputAt(1), loop_header);
+ if (node->opcode() == IrOpcode::kEnd) {
+ // We reached the end of the graph. The end node is not part of the loop.
continue;
}
visited->insert(node);
if (visited->size() > max_size) return nullptr;
switch (node->opcode()) {
+ case IrOpcode::kLoop:
+ // Found nested loop.
+ if (node != loop_header) return nullptr;
+ ENQUEUE_USES(use, true);
+ break;
case IrOpcode::kLoopExit:
- DCHECK_EQ(node->InputAt(1), loop_header);
+ // Found nested loop.
+ if (node->InputAt(1) != loop_header) return nullptr;
// LoopExitValue/Effect uses are inside the loop. The rest are not.
- for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kLoopExitEffect ||
- use->opcode() == IrOpcode::kLoopExitValue) {
- if (visited->count(use) == 0) queue.push_back(use);
- }
- }
+ ENQUEUE_USES(use, (use->opcode() == IrOpcode::kLoopExitEffect ||
+ use->opcode() == IrOpcode::kLoopExitValue))
break;
case IrOpcode::kLoopExitEffect:
case IrOpcode::kLoopExitValue:
- DCHECK_EQ(NodeProperties::GetControlInput(node)->InputAt(1),
- loop_header);
+ if (NodeProperties::GetControlInput(node)->InputAt(1) != loop_header) {
+ // Found nested loop.
+ return nullptr;
+ }
// All uses are outside the loop, do nothing.
break;
+ // If {calls_are_large}, call nodes are considered to have unbounded size,
+ // i.e. >max_size, with the exception of certain wasm builtins.
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
- // Call nodes are considered to have unbounded size, i.e. >max_size,
- // with the exception of certain wasm builtins.
- return nullptr;
+ if (calls_are_large) return nullptr;
+ ENQUEUE_USES(use, true)
+ break;
case IrOpcode::kCall: {
+ if (!calls_are_large) {
+ ENQUEUE_USES(use, true);
+ break;
+ }
Node* callee = node->InputAt(0);
if (callee->opcode() != IrOpcode::kRelocatableInt32Constant &&
callee->opcode() != IrOpcode::kRelocatableInt64Constant) {
@@ -602,27 +616,30 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
OpParameter<RelocatablePtrConstantInfo>(callee->op()).value();
using WasmCode = v8::internal::wasm::WasmCode;
constexpr intptr_t unrollable_builtins[] = {
+ // Exists in every stack check.
WasmCode::kWasmStackGuard,
- WasmCode::kWasmTableGet,
- WasmCode::kWasmTableSet,
+ // Fast table operations.
+ WasmCode::kWasmTableGet, WasmCode::kWasmTableSet,
WasmCode::kWasmTableGrow,
- WasmCode::kWasmThrow,
- WasmCode::kWasmRethrow,
- WasmCode::kWasmRethrowExplicitContext,
- WasmCode::kWasmRefFunc,
- WasmCode::kWasmAllocateRtt,
- WasmCode::kWasmAllocateFreshRtt};
+ // Atomics.
+ WasmCode::kWasmAtomicNotify, WasmCode::kWasmI32AtomicWait32,
+ WasmCode::kWasmI32AtomicWait64, WasmCode::kWasmI64AtomicWait32,
+ WasmCode::kWasmI64AtomicWait64,
+ // Exceptions.
+ WasmCode::kWasmAllocateFixedArray, WasmCode::kWasmThrow,
+ WasmCode::kWasmRethrow, WasmCode::kWasmRethrowExplicitContext,
+ // Fast wasm-gc operations.
+ WasmCode::kWasmRefFunc};
if (std::count(unrollable_builtins,
unrollable_builtins + arraysize(unrollable_builtins),
info) == 0) {
return nullptr;
}
- V8_FALLTHROUGH;
+ ENQUEUE_USES(use, true)
+ break;
}
default:
- for (Node* use : node->uses()) {
- if (visited->count(use) == 0) queue.push_back(use);
- }
+ ENQUEUE_USES(use, true)
break;
}
}
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index e928e5a779..e7c09da105 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -180,17 +180,17 @@ class V8_EXPORT_PRIVATE LoopFinder {
static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
#if V8_ENABLE_WEBASSEMBLY
- // Find all nodes of a loop given headed by {loop_header}. Returns {nullptr}
- // if the loop size in Nodes exceeds {max_size}. In that context, function
- // calls are considered to have unbounded size, so if the loop contains a
- // function call, {nullptr} is always returned.
- // This is a very restricted version of BuildLoopTree and makes the following
- // assumptions:
- // 1) All loop exits of the loop are marked with LoopExit, LoopExitEffect,
- // and LoopExitValue nodes.
- // 2) There are no nested loops within this loop.
- static ZoneUnorderedSet<Node*>* FindSmallUnnestedLoopFromHeader(
- Node* loop_header, Zone* zone, size_t max_size);
+ // Find all nodes in the loop headed by {loop_header} if it contains no nested
+ // loops.
+ // Assumption: *if* this loop has no nested loops, all exits from the loop are
+ // marked with LoopExit, LoopExitEffect, LoopExitValue, or End nodes.
+ // Returns {nullptr} if
+ // 1) the loop size (in graph nodes) exceeds {max_size},
+ // 2) {calls_are_large} and a function call is found in the loop, excluding
+ // calls to a set of wasm builtins,
+ // 3) a nested loop is found in the loop.
+ static ZoneUnorderedSet<Node*>* FindSmallInnermostLoopFromHeader(
+ Node* loop_header, Zone* zone, size_t max_size, bool calls_are_large);
#endif
};
@@ -198,7 +198,7 @@ class V8_EXPORT_PRIVATE LoopFinder {
class NodeCopier {
public:
// {max}: The maximum number of nodes that this copier will track, including
- // The original nodes and all copies.
+ // the original nodes and all copies.
// {p}: A vector that holds the original nodes and all copies.
// {copy_count}: How many times the nodes should be copied.
NodeCopier(Graph* graph, uint32_t max, NodeVector* p, uint32_t copy_count)
diff --git a/deps/v8/src/compiler/loop-unrolling.cc b/deps/v8/src/compiler/loop-unrolling.cc
index 357b17a3ec..88cc82aaa2 100644
--- a/deps/v8/src/compiler/loop-unrolling.cc
+++ b/deps/v8/src/compiler/loop-unrolling.cc
@@ -19,8 +19,7 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins) {
DCHECK_EQ(loop_node->opcode(), IrOpcode::kLoop);
-
- if (loop == nullptr) return;
+ DCHECK_NOT_NULL(loop);
// No back-jump to the loop header means this is not really a loop.
if (loop_node->InputCount() < 2) return;
@@ -41,6 +40,15 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
source_positions, node_origins);
source_positions->RemoveDecorator();
+ // The terminator nodes in the copies need to get connected to the graph's end
+ // node, except Terminate nodes which will be deleted anyway.
+ for (Node* node : copies) {
+ if (IrOpcode::IsGraphTerminator(node->opcode()) &&
+ node->opcode() != IrOpcode::kTerminate && node->UseCount() == 0) {
+ NodeProperties::MergeControlToEnd(graph, common, node);
+ }
+ }
+
#define COPY(node, n) copier.map(node, n)
#define FOREACH_COPY_INDEX(i) for (uint32_t i = 0; i < unrolling_count; i++)
@@ -69,19 +77,14 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
// ( LoadFromObject )
// | |
// {stack_check}
- // | * | *
- // | | | |
- // | ( Call )
- // | | *
- // | | |
+ // | |
+ // | *
+ // |
+ // | * *
+ // | | |
// {use}: EffectPhi (stack check effect that we need to replace)
DCHECK_EQ(use->opcode(), IrOpcode::kEffectPhi);
- DCHECK_EQ(NodeProperties::GetEffectInput(use, 1)->opcode(),
- IrOpcode::kCall);
DCHECK_EQ(NodeProperties::GetEffectInput(use), stack_check);
- DCHECK_EQ(NodeProperties::GetEffectInput(
- NodeProperties::GetEffectInput(use, 1)),
- stack_check);
DCHECK_EQ(NodeProperties::GetEffectInput(stack_check)->opcode(),
IrOpcode::kLoadFromObject);
Node* replacing_effect = NodeProperties::GetEffectInput(
@@ -141,6 +144,13 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
break;
}
+ case IrOpcode::kTerminate: {
+ // We only need to keep the Terminate node for the loop header of the
+ // first iteration.
+ FOREACH_COPY_INDEX(i) { COPY(node, i)->Kill(); }
+ break;
+ }
+
default:
break;
}
diff --git a/deps/v8/src/compiler/loop-unrolling.h b/deps/v8/src/compiler/loop-unrolling.h
index 24a67cd18e..1db6aba08a 100644
--- a/deps/v8/src/compiler/loop-unrolling.h
+++ b/deps/v8/src/compiler/loop-unrolling.h
@@ -8,7 +8,7 @@
// Loop unrolling is an optimization that copies the body of a loop and creates
// a fresh loop, whose iteration corresponds to 2 or more iterations of the
// initial loop. For a high-level description of the algorithm see
-// docs.google.com/document/d/1AsUCqslMUB6fLdnGq0ZoPk2kn50jIJAWAL77lKXXP5g/
+// https://bit.ly/3G0VdWW.
#include "src/compiler/common-operator.h"
#include "src/compiler/loop-analysis.h"
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 5d2ab6990c..56486b9405 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -85,6 +85,10 @@ class MachineRepresentationInferrer {
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
return MachineRepresentation::kWord32;
+ case MachineRepresentation::kSandboxedPointer:
+ // A sandboxed pointer is a Word64 that uses an encoded representation
+ // when stored on the heap.
+ return MachineRepresentation::kWord64;
default:
break;
}
@@ -996,7 +1000,7 @@ class MachineRepresentationChecker {
// happens in dead code.
return IsAnyTagged(actual);
case MachineRepresentation::kCompressedPointer:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 942e7a17f1..5f3afc5a84 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -1348,13 +1348,33 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
return NoChange();
}
+namespace {
+
+// Returns true if "value << shift >> shift == value". This can be interpreted
+// as "left shifting |value| by |shift| doesn't shift away significant bits".
+// Or, equivalently, "left shifting |value| by |shift| doesn't have signed
+// overflow".
+bool CanRevertLeftShiftWithRightShift(int32_t value, int32_t shift) {
+ if (shift < 0 || shift >= 32) {
+ // This shift would be UB in C++
+ return false;
+ }
+ if (static_cast<int32_t>(static_cast<uint32_t>(value) << shift) >> shift !=
+ static_cast<int32_t>(value)) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
node->opcode() == IrOpcode::kUint32LessThan ||
node->opcode() == IrOpcode::kUint32LessThanOrEqual);
Int32BinopMatcher m(node);
- // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
+ // (x >> K) < (y >> K) => x < y if only zeros shifted out
if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
m.right().op() == machine()->Word32SarShiftOutZeros()) {
Int32BinopMatcher mleft(m.left().node());
@@ -1366,6 +1386,38 @@ Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
return Changed(node);
}
}
+ // Simplifying (x >> n) <= k into x <= (k << n), with "k << n" being
+ // computed here at compile time.
+ if (m.right().HasResolvedValue() &&
+ m.left().op() == machine()->Word32SarShiftOutZeros() &&
+ m.left().node()->UseCount() == 1) {
+ uint32_t right = m.right().ResolvedValue();
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ auto shift = mleft.right().ResolvedValue();
+ if (CanRevertLeftShiftWithRightShift(right, shift)) {
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, Int32Constant(right << shift));
+ return Changed(node);
+ }
+ }
+ }
+ // Simplifying k <= (x >> n) into (k << n) <= x, with "k << n" being
+ // computed here at compile time.
+ if (m.left().HasResolvedValue() &&
+ m.right().op() == machine()->Word32SarShiftOutZeros() &&
+ m.right().node()->UseCount() == 1) {
+ uint32_t left = m.left().ResolvedValue();
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue()) {
+ auto shift = mright.right().ResolvedValue();
+ if (CanRevertLeftShiftWithRightShift(left, shift)) {
+ node->ReplaceInput(0, Int32Constant(left << shift));
+ node->ReplaceInput(1, mright.left().node());
+ return Changed(node);
+ }
+ }
+ }
return NoChange();
}
@@ -1405,7 +1457,7 @@ Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
return Changed(node).FollowedBy(Reduce(node));
}
- // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
+ // (x >> K) < (y >> K) => x < y if only zeros shifted out
// This is useful for Smi untagging, which results in such a shift.
if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
m.right().op() == machine()->Word64SarShiftOutZeros()) {
@@ -2202,6 +2254,18 @@ MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
}
}
}
+ // Replaces (x >> n) == k with x == k << n, with "k << n" being computed
+ // here at compile time.
+ if (lhs->op() == machine()->Word32SarShiftOutZeros() &&
+ lhs->UseCount() == 1) {
+ typename WordNAdapter::UintNBinopMatcher mshift(lhs);
+ if (mshift.right().HasResolvedValue()) {
+ int32_t shift = static_cast<int32_t>(mshift.right().ResolvedValue());
+ if (CanRevertLeftShiftWithRightShift(rhs, shift)) {
+ return std::make_pair(mshift.left().node(), rhs << shift);
+ }
+ }
+ }
return {};
}
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 56b298eb55..26513dd05d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -667,7 +667,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(MapInHeader) \
V(AnyTagged) \
V(CompressedPointer) \
- V(CagedPointer) \
+ V(SandboxedPointer) \
V(AnyCompressed)
#define MACHINE_REPRESENTATION_LIST(V) \
@@ -683,7 +683,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(kTaggedPointer) \
V(kTagged) \
V(kCompressedPointer) \
- V(kCagedPointer) \
+ V(kSandboxedPointer) \
V(kCompressed)
#define LOAD_TRANSFORM_LIST(V) \
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 1aa8d2b55c..6b5eb17b33 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -13,7 +13,7 @@
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
@@ -84,12 +84,14 @@ Reduction MemoryLowering::Reduce(Node* node) {
case IrOpcode::kAllocateRaw:
return ReduceAllocateRaw(node);
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject:
return ReduceLoadFromObject(node);
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kStoreToObject:
+ case IrOpcode::kInitializeImmutableInObject:
return ReduceStoreToObject(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
@@ -372,7 +374,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
}
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
- DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
+ node->opcode() == IrOpcode::kLoadImmutableFromObject);
ObjectAccess const& access = ObjectAccessOf(node->op());
MachineType machine_type = access.machine_type;
@@ -405,9 +408,11 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
-#ifdef V8_HEAP_SANDBOX
- DCHECK(V8_HEAP_SANDBOX_BOOL);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ DCHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
DCHECK(node->opcode() == IrOpcode::kLoad);
+ DCHECK_EQ(kExternalPointerSize, kUInt32Size);
+ DCHECK_NE(kExternalPointerNullTag, external_pointer_tag);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@@ -415,29 +420,35 @@ Node* MemoryLowering::DecodeExternalPointer(
// Clone the load node and put it here.
// TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
// cloning nodes from arbitrary locaions in effect/control chains.
- Node* index = __ AddNode(graph()->CloneNode(node));
+ STATIC_ASSERT(kExternalPointerIndexShift > kSystemPointerSizeLog2);
+ Node* shifted_index = __ AddNode(graph()->CloneNode(node));
+ Node* shift_amount =
+ __ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
+ Node* offset = __ Word32Shr(shifted_index, shift_amount);
// Uncomment this to generate a breakpoint for debugging purposes.
// __ DebugBreak();
// Decode loaded external pointer.
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- Node* external_pointer_table_address = __ ExternalConstant(
+ //
+ // Here we access the external pointer table through an ExternalReference.
+ // Alternatively, we could also hardcode the address of the table since it is
+ // never reallocated. However, in that case we must be able to guarantee that
+ // the generated code is never executed under a different Isolate, as that
+ // would allow access to external objects from different Isolates. It also
+ // would break if the code is serialized/deserialized at some point.
+ Node* table_address = __ ExternalConstant(
ExternalReference::external_pointer_table_address(isolate()));
- Node* table = __ Load(MachineType::Pointer(), external_pointer_table_address,
+ Node* table = __ Load(MachineType::Pointer(), table_address,
Internals::kExternalPointerTableBufferOffset);
- // TODO(v8:10391, saelo): bounds check if table is not caged
- Node* offset = __ Int32Mul(index, __ Int32Constant(8));
Node* decoded_ptr =
__ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
- if (external_pointer_tag != 0) {
- Node* tag = __ IntPtrConstant(~external_pointer_tag);
- decoded_ptr = __ WordAnd(decoded_ptr, tag);
- }
+ Node* tag = __ IntPtrConstant(~external_pointer_tag);
+ decoded_ptr = __ WordAnd(decoded_ptr, tag);
return decoded_ptr;
#else
return node;
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
Reduction MemoryLowering::ReduceLoadMap(Node* node) {
@@ -462,37 +473,35 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
Node* offset = __ IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph_zone(), 1, offset);
MachineType type = access.machine_type;
- if (V8_HEAP_SANDBOX_BOOL &&
- access.type.Is(Type::SandboxedExternalPointer())) {
- // External pointer table indices are 32bit numbers
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
+ access.type.Is(Type::ExternalPointer())) {
+ // External pointer table indices are stored as 32-bit numbers
type = MachineType::Uint32();
}
if (type.IsMapWord()) {
- DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
+ DCHECK(!access.type.Is(Type::ExternalPointer()));
return ReduceLoadMap(node);
}
NodeProperties::ChangeOp(node, machine()->Load(type));
- if (V8_HEAP_SANDBOX_BOOL &&
- access.type.Is(Type::SandboxedExternalPointer())) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ if (access.type.Is(Type::ExternalPointer())) {
ExternalPointerTag tag = access.external_pointer_tag;
-#else
- ExternalPointerTag tag = kExternalPointerNullTag;
-#endif
+ DCHECK_NE(kExternalPointerNullTag, tag);
node = DecodeExternalPointer(node, tag);
return Replace(node);
- } else {
- DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
}
+#endif
+
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreToObject(Node* node,
AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
+ node->opcode() == IrOpcode::kInitializeImmutableInObject);
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
@@ -531,11 +540,10 @@ Reduction MemoryLowering::ReduceStoreField(Node* node,
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
// External pointer must never be stored by optimized code.
- DCHECK_IMPLIES(V8_HEAP_SANDBOX_BOOL,
- !access.type.Is(Type::ExternalPointer()) &&
- !access.type.Is(Type::SandboxedExternalPointer()));
- // CagedPointers are not currently stored by optimized code.
- DCHECK(!access.type.Is(Type::CagedPointer()));
+ DCHECK_IMPLIES(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL,
+ !access.type.Is(Type::ExternalPointer()));
+ // SandboxedPointers are not currently stored by optimized code.
+ DCHECK(!access.type.Is(Type::SandboxedPointer()));
MachineType machine_type = access.machine_type;
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index a92dd67c62..4736987744 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -37,6 +37,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject:
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
@@ -53,6 +54,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
+ case IrOpcode::kInitializeImmutableInObject:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@@ -217,12 +219,14 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject:
return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
case IrOpcode::kStoreToObject:
+ case IrOpcode::kInitializeImmutableInObject:
return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
@@ -306,7 +310,8 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
+ node->opcode() == IrOpcode::kLoadImmutableFromObject);
Reduction reduction = memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
if (V8_MAP_PACKING_BOOL && reduction.replacement() != node) {
@@ -316,7 +321,8 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node,
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
+ node->opcode() == IrOpcode::kInitializeImmutableInObject);
memory_lowering()->ReduceStoreToObject(node, state);
EnqueueUses(node, state);
}
@@ -337,11 +343,12 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
EnqueueUses(node, state);
// Node can be replaced under two cases:
- // 1. V8_HEAP_SANDBOX_BOOL is enabled and loading an external pointer value.
+ // 1. V8_SANDBOXED_EXTERNAL_POINTERS_BOOL is enabled and loading an external
+ // pointer value.
// 2. V8_MAP_PACKING_BOOL is enabled.
- DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL && !V8_MAP_PACKING_BOOL,
+ DCHECK_IMPLIES(!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && !V8_MAP_PACKING_BOOL,
reduction.replacement() == node);
- if ((V8_HEAP_SANDBOX_BOOL || V8_MAP_PACKING_BOOL) &&
+ if ((V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || V8_MAP_PACKING_BOOL) &&
reduction.replacement() != node) {
ReplaceUsesAndKillNode(node, reduction.replacement());
}
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 1cd60b023f..5e55cdfdc1 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -620,7 +620,7 @@ bool NodeProperties::IsFreshObject(Node* node) {
Builtin callee = static_cast<Builtin>(matcher.ResolvedValue());
// Note: Make sure to only add builtins which are guaranteed to return a
// fresh object. E.g. kWasmAllocateFixedArray may return the canonical
- // empty array, and kWasmAllocateRtt may return a cached rtt.
+ // empty array.
return callee == Builtin::kWasmAllocateArray_Uninitialized ||
callee == Builtin::kWasmAllocateArray_InitNull ||
callee == Builtin::kWasmAllocateArray_InitZero ||
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index e736e5cab5..8baac472d4 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -168,21 +168,21 @@
V(JSCreateTypedArray) \
V(JSGetTemplateObject)
-#define JS_OBJECT_OP_LIST(V) \
- JS_CREATE_OP_LIST(V) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSLoadNamedFromSuper) \
- V(JSLoadGlobal) \
- V(JSStoreProperty) \
- V(JSDefineProperty) \
- V(JSStoreNamed) \
- V(JSStoreNamedOwn) \
- V(JSStoreGlobal) \
- V(JSStoreDataPropertyInLiteral) \
- V(JSStoreInArrayLiteral) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
+#define JS_OBJECT_OP_LIST(V) \
+ JS_CREATE_OP_LIST(V) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadNamedFromSuper) \
+ V(JSLoadGlobal) \
+ V(JSSetKeyedProperty) \
+ V(JSDefineKeyedOwnProperty) \
+ V(JSSetNamedProperty) \
+ V(JSDefineNamedOwnProperty) \
+ V(JSStoreGlobal) \
+ V(JSDefineKeyedOwnPropertyInLiteral) \
+ V(JSStoreInArrayLiteral) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
@@ -426,11 +426,13 @@
V(FastApiCall) \
V(FindOrderedHashMapEntry) \
V(FindOrderedHashMapEntryForInt32Key) \
+ V(InitializeImmutableInObject) \
V(LoadDataViewElement) \
V(LoadElement) \
V(LoadField) \
V(LoadFieldByIndex) \
V(LoadFromObject) \
+ V(LoadImmutableFromObject) \
V(LoadMessage) \
V(LoadStackArgument) \
V(LoadTypedElement) \
@@ -486,14 +488,12 @@
V(StringToLowerCaseIntl) \
V(StringToNumber) \
V(StringToUpperCaseIntl) \
- V(TierUpCheck) \
V(ToBoolean) \
V(TransitionAndStoreElement) \
V(TransitionAndStoreNonNumberElement) \
V(TransitionAndStoreNumberElement) \
V(TransitionElementsKind) \
V(TypeOf) \
- V(UpdateInterruptBudget) \
V(VerifyType)
#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
@@ -1135,7 +1135,7 @@ class V8_EXPORT_PRIVATE IrOpcode {
case kJSCreateLiteralArray:
case kJSCreateLiteralObject:
case kJSCreateLiteralRegExp:
- case kJSDefineProperty:
+ case kJSDefineKeyedOwnProperty:
case kJSForInNext:
case kJSForInPrepare:
case kJSGetIterator:
@@ -1146,12 +1146,12 @@ class V8_EXPORT_PRIVATE IrOpcode {
case kJSLoadNamed:
case kJSLoadNamedFromSuper:
case kJSLoadProperty:
- case kJSStoreDataPropertyInLiteral:
+ case kJSDefineKeyedOwnPropertyInLiteral:
case kJSStoreGlobal:
case kJSStoreInArrayLiteral:
- case kJSStoreNamed:
- case kJSStoreNamedOwn:
- case kJSStoreProperty:
+ case kJSSetNamedProperty:
+ case kJSDefineNamedOwnProperty:
+ case kJSSetKeyedProperty:
return true;
default:
return false;
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index e7dc51bd2d..95c5a62249 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -1130,16 +1130,24 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
+ DCHECK(lhs.Is(Type::BigInt()));
+ DCHECK(rhs.Is(Type::BigInt()));
+
if (lhs.IsNone() || rhs.IsNone()) return Type::None();
return Type::BigInt();
}
Type OperationTyper::BigIntSubtract(Type lhs, Type rhs) {
+ DCHECK(lhs.Is(Type::BigInt()));
+ DCHECK(rhs.Is(Type::BigInt()));
+
if (lhs.IsNone() || rhs.IsNone()) return Type::None();
return Type::BigInt();
}
Type OperationTyper::BigIntNegate(Type type) {
+ DCHECK(type.Is(Type::BigInt()));
+
if (type.IsNone()) return type;
return Type::BigInt();
}
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index e90fc750fd..0389822629 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -72,7 +72,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
case IrOpcode::kJSDebugger:
- case IrOpcode::kJSDefineProperty:
+ case IrOpcode::kJSDefineKeyedOwnProperty:
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSGeneratorStore:
case IrOpcode::kJSGetImportMeta:
@@ -84,13 +84,13 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSLoadNamedFromSuper:
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSStoreContext:
- case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSDefineKeyedOwnPropertyInLiteral:
case IrOpcode::kJSStoreGlobal:
case IrOpcode::kJSStoreInArrayLiteral:
case IrOpcode::kJSStoreModule:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSStoreNamedOwn:
- case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSSetNamedProperty:
+ case IrOpcode::kJSDefineNamedOwnProperty:
+ case IrOpcode::kJSSetKeyedProperty:
return true;
case IrOpcode::kJSAsyncFunctionEnter:
@@ -199,13 +199,13 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSLoadNamed:
case IrOpcode::kJSLoadNamedFromSuper:
case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSDefineKeyedOwnPropertyInLiteral:
case IrOpcode::kJSStoreInArrayLiteral:
case IrOpcode::kJSStoreGlobal:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSStoreNamedOwn:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSDefineProperty:
+ case IrOpcode::kJSSetNamedProperty:
+ case IrOpcode::kJSDefineNamedOwnProperty:
+ case IrOpcode::kJSSetKeyedProperty:
+ case IrOpcode::kJSDefineKeyedOwnProperty:
// Conversions
case IrOpcode::kJSToLength:
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index 4ccce395d1..e56b6a6fb1 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -78,7 +78,7 @@ class PersistentMap {
bool operator==(const PersistentMap& other) const {
if (tree_ == other.tree_) return true;
if (def_value_ != other.def_value_) return false;
- for (const std::tuple<Key, Value, Value>& triple : Zip(other)) {
+ for (std::tuple<Key, Value, Value> triple : Zip(other)) {
if (std::get<1>(triple) != std::get<2>(triple)) return false;
}
return true;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index ef16b8f304..0f200f2cfe 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -49,7 +49,6 @@
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
@@ -99,6 +98,7 @@
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/wasm-escape-analysis.h"
#include "src/compiler/wasm-inlining.h"
+#include "src/compiler/wasm-loop-peeling.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -119,6 +119,7 @@ static constexpr char kRegisterAllocationZoneName[] =
"register-allocation-zone";
static constexpr char kRegisterAllocatorVerifierZoneName[] =
"register-allocator-verifier-zone";
+
namespace {
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
@@ -156,9 +157,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(
- isolate_, info_->zone(), info_->trace_heap_broker(),
- info_->concurrent_inlining(), info->code_kind())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker(),
+ info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -502,7 +503,7 @@ class PipelineData {
if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
sequence_->instruction_blocks()[0]->mark_needs_frame();
} else {
- DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
+ DCHECK(call_descriptor->CalleeSavedFPRegisters().is_empty());
}
}
@@ -686,10 +687,6 @@ class PipelineImpl final {
// Step B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Alternative step B. Run minimal concurrent optimization passes for
- // mid-tier.
- bool OptimizeGraphForMidTier(Linkage* linkage);
-
// Substep B.1. Produce a scheduled graph.
void ComputeScheduledGraph();
@@ -1108,15 +1105,14 @@ PipelineCompilationJob::PipelineCompilationJob(
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
: OptimizedCompilationJob(&compilation_info_, "TurboFan"),
- zone_(function->GetIsolate()->allocator(),
- kPipelineCompilationJobZoneName),
- zone_stats_(function->GetIsolate()->allocator()),
- compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
- code_kind, osr_offset, osr_frame),
+ zone_(isolate->allocator(), kPipelineCompilationJobZoneName),
+ zone_stats_(isolate->allocator()),
+ compilation_info_(&zone_, isolate, shared_info, function, code_kind,
+ osr_offset, osr_frame),
pipeline_statistics_(CreatePipelineStatistics(
handle(Script::cast(shared_info->script()), isolate),
- compilation_info(), function->GetIsolate(), &zone_stats_)),
- data_(&zone_stats_, function->GetIsolate(), compilation_info(),
+ compilation_info(), isolate, &zone_stats_)),
+ data_(&zone_stats_, isolate, compilation_info(),
pipeline_statistics_.get()),
pipeline_(&data_),
linkage_(nullptr) {}
@@ -1174,8 +1170,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
// allow context specialization for OSR code.
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
- !compilation_info()->is_osr() &&
- !compilation_info()->IsTurboprop()) {
+ !compilation_info()->is_osr()) {
compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
@@ -1199,17 +1194,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
pipeline_.InitializeHeapBroker();
- if (!data_.broker()->is_concurrent_inlining()) {
- if (!pipeline_.CreateGraph()) {
- CHECK(!isolate->has_pending_exception());
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
- }
- }
-
- if (compilation_info()->concurrent_inlining()) {
- // Serialization may have allocated.
- isolate->heap()->PublishPendingAllocations();
- }
+ // Serialization may have allocated.
+ isolate->heap()->PublishPendingAllocations();
return SUCCEEDED;
}
@@ -1222,20 +1208,12 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
local_isolate);
- if (data_.broker()->is_concurrent_inlining()) {
- if (!pipeline_.CreateGraph()) {
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
- }
+ if (!pipeline_.CreateGraph()) {
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
- // We selectively Unpark inside OptimizeGraph*.
- bool success;
- if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
- success = pipeline_.OptimizeGraphForMidTier(linkage_);
- } else {
- success = pipeline_.OptimizeGraph(linkage_);
- }
- if (!success) return FAILED;
+ // We selectively Unpark inside OptimizeGraph.
+ if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
pipeline_.AssembleCode(linkage_);
@@ -1262,7 +1240,9 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
compilation_info()->SetCode(code);
Handle<NativeContext> context(compilation_info()->native_context(), isolate);
- if (CodeKindCanDeoptimize(code->kind())) context->AddOptimizedCode(*code);
+ if (CodeKindCanDeoptimize(code->kind())) {
+ context->AddOptimizedCode(ToCodeT(*code));
+ }
RegisterWeakObjectsInOptimizedCode(isolate, context, code);
return SUCCEEDED;
}
@@ -1392,10 +1372,8 @@ struct InliningPhase {
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
data->broker());
AddReducer(data, &graph_reducer, &dead_code_elimination);
- if (!data->info()->IsTurboprop()) {
- AddReducer(data, &graph_reducer, &checkpoint_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- }
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
@@ -1511,24 +1489,6 @@ struct HeapBrokerInitializationPhase {
}
};
-struct CopyMetadataForConcurrentCompilePhase {
- DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(
- temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead(), data->observe_node_manager());
- JSHeapCopyReducer heap_copy_reducer(data->broker());
- AddReducer(data, &graph_reducer, &heap_copy_reducer);
- graph_reducer.ReduceGraph();
-
- // Some nodes that are no longer in the graph might still be in the cache.
- NodeVector cached_nodes(temp_zone);
- data->jsgraph()->GetCachedNodes(&cached_nodes);
- for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
- }
-};
-
struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
@@ -1556,9 +1516,7 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &create_lowering);
- if (!data->info()->IsTurboprop()) {
- AddReducer(data, &graph_reducer, &constant_folding_reducer);
- }
+ AddReducer(data, &graph_reducer, &constant_folding_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &simple_reducer);
@@ -1585,9 +1543,9 @@ struct EscapeAnalysisPhase {
GraphReducer reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
- escape_analysis.analysis_result(),
- temp_zone);
+ EscapeAnalysisReducer escape_reducer(
+ &reducer, data->jsgraph(), data->broker(),
+ escape_analysis.analysis_result(), temp_zone);
AddReducer(data, &reducer, &escape_reducer);
@@ -1655,38 +1613,89 @@ struct LoopPeelingPhase {
};
#if V8_ENABLE_WEBASSEMBLY
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+
+ void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
+ uint32_t function_index, const wasm::WireBytesStorage* wire_bytes,
+ std::vector<compiler::WasmLoopInfo>* loop_info) {
+ if (WasmInliner::any_inlining_impossible(data->graph()->NodeCount())) {
+ return;
+ }
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ DeadCodeElimination dead(&graph_reducer, data->graph(), data->common(),
+ temp_zone);
+ WasmInliner inliner(&graph_reducer, env, function_index,
+ data->source_positions(), data->node_origins(),
+ data->mcgraph(), wire_bytes, loop_info);
+ AddReducer(data, &graph_reducer, &dead);
+ AddReducer(data, &graph_reducer, &inliner);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+namespace {
+void EliminateLoopExits(std::vector<compiler::WasmLoopInfo>* loop_infos) {
+ for (WasmLoopInfo& loop_info : *loop_infos) {
+ std::unordered_set<Node*> loop_exits;
+ // We collect exits into a set first because we are not allowed to mutate
+ // them while iterating uses().
+ for (Node* use : loop_info.header->uses()) {
+ if (use->opcode() == IrOpcode::kLoopExit) {
+ loop_exits.insert(use);
+ }
+ }
+ for (Node* use : loop_exits) {
+ LoopPeeler::EliminateLoopExit(use);
+ }
+ }
+}
+} // namespace
+
struct WasmLoopUnrollingPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopUnrolling)
void Run(PipelineData* data, Zone* temp_zone,
std::vector<compiler::WasmLoopInfo>* loop_infos) {
for (WasmLoopInfo& loop_info : *loop_infos) {
- if (loop_info.is_innermost) {
+ if (loop_info.can_be_innermost) {
ZoneUnorderedSet<Node*>* loop =
- LoopFinder::FindSmallUnnestedLoopFromHeader(
+ LoopFinder::FindSmallInnermostLoopFromHeader(
loop_info.header, temp_zone,
// Only discover the loop until its size is the maximum unrolled
// size for its depth.
- maximum_unrollable_size(loop_info.nesting_depth));
+ maximum_unrollable_size(loop_info.nesting_depth), true);
+ if (loop == nullptr) continue;
UnrollLoop(loop_info.header, loop, loop_info.nesting_depth,
data->graph(), data->common(), temp_zone,
data->source_positions(), data->node_origins());
}
}
+ EliminateLoopExits(loop_infos);
+ }
+};
+
+struct WasmLoopPeelingPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopPeeling)
+
+ void Run(PipelineData* data, Zone* temp_zone,
+ std::vector<compiler::WasmLoopInfo>* loop_infos) {
for (WasmLoopInfo& loop_info : *loop_infos) {
- std::unordered_set<Node*> loop_exits;
- // We collect exits into a set first because we are not allowed to mutate
- // them while iterating uses().
- for (Node* use : loop_info.header->uses()) {
- if (use->opcode() == IrOpcode::kLoopExit) {
- loop_exits.insert(use);
- }
- }
- for (Node* use : loop_exits) {
- LoopPeeler::EliminateLoopExit(use);
+ if (loop_info.can_be_innermost) {
+ ZoneUnorderedSet<Node*>* loop =
+ LoopFinder::FindSmallInnermostLoopFromHeader(
+ loop_info.header, temp_zone, std::numeric_limits<size_t>::max(),
+ false);
+ if (loop == nullptr) continue;
+ PeelWasmLoop(loop_info.header, loop, data->graph(), data->common(),
+ temp_zone, data->source_positions(), data->node_origins());
}
}
+ // If we are going to unroll later, keep loop exits.
+ if (!FLAG_wasm_loop_unrolling) EliminateLoopExits(loop_infos);
}
};
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1966,35 +1975,11 @@ struct DecompressionOptimizationPhase {
}
};
-struct ScheduledEffectControlLinearizationPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(ScheduledEffectControlLinearization)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- // Post-pass for wiring the control/effects
- // - connect allocating representation changes into the control&effect
- // chains and lower them,
- // - get rid of the region markers,
- // - introduce effect phis and rewire effects to get SSA again,
- // - lower simplified memory and select nodes to machine level nodes.
- LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
- data->source_positions(), data->node_origins(),
- data->broker());
-
- // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
- Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
- Scheduler::GenerateDominatorTree(data->schedule());
- TraceScheduleAndVerify(data->info(), data, data->schedule(),
- "effect linearization schedule");
- }
-};
-
#if V8_ENABLE_WEBASSEMBLY
struct WasmOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
- void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan,
- wasm::CompilationEnv* env, uint32_t function_index,
- const wasm::WireBytesStorage* wire_bytes) {
+ void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
// Run optimizations in two rounds: First one around load elimination and
// then one around branch elimination. This is because those two
// optimizations sometimes display quadratic complexity when run together.
@@ -2014,9 +1999,6 @@ struct WasmOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
- WasmInliner inliner(&graph_reducer, env, function_index,
- data->source_positions(), data->node_origins(),
- data->mcgraph(), wire_bytes);
WasmEscapeAnalysis escape(&graph_reducer, data->mcgraph());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -2026,10 +2008,6 @@ struct WasmOptimizationPhase {
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &escape);
}
- if (FLAG_wasm_inlining &&
- !WasmInliner::any_inlining_impossible(data->graph()->NodeCount())) {
- AddReducer(data, &graph_reducer, &inliner);
- }
graph_reducer.ReduceGraph();
}
{
@@ -2284,6 +2262,17 @@ struct AllocateFPRegistersPhase {
}
};
+template <typename RegAllocator>
+struct AllocateSimd128RegistersPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(AllocateSIMD128Registers)
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ RegAllocator allocator(data->top_tier_register_allocation_data(),
+ RegisterKind::kSimd128, temp_zone);
+ allocator.AllocateRegisters();
+ }
+};
+
struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
@@ -2648,10 +2637,8 @@ void PipelineImpl::InitializeHeapBroker() {
}
data->broker()->SetTargetNativeContextRef(data->native_context());
- if (data->broker()->is_concurrent_inlining()) {
- Run<HeapBrokerInitializationPhase>();
- data->broker()->StopSerializing();
- }
+ Run<HeapBrokerInitializationPhase>();
+ data->broker()->StopSerializing();
data->EndPhaseKind();
}
@@ -2683,15 +2670,6 @@ bool PipelineImpl::CreateGraph() {
}
}
- // Run the type-sensitive lowerings and optimizations on the graph.
- {
- if (!data->broker()->is_concurrent_inlining()) {
- Run<HeapBrokerInitializationPhase>();
- Run<CopyMetadataForConcurrentCompilePhase>();
- data->broker()->StopSerializing();
- }
- }
-
data->EndPhaseKind();
return true;
@@ -2822,85 +2800,6 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
return SelectInstructions(linkage);
}
-bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
- PipelineData* data = this->data_;
-
- data->BeginPhaseKind("V8.TFLowering");
-
- // Type the graph and keep the Typer running such that new nodes get
- // automatically typed when they are created.
- Run<TyperPhase>(data->CreateTyper());
- RunPrintAndVerify(TyperPhase::phase_name());
-
- Run<TypedLoweringPhase>();
- RunPrintAndVerify(TypedLoweringPhase::phase_name());
-
- // TODO(9684): Consider rolling this into the preceeding phase or not creating
- // LoopExit nodes at all.
- Run<LoopExitEliminationPhase>();
- RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
-
- data->DeleteTyper();
-
- if (FLAG_assert_types) {
- Run<TypeAssertionsPhase>();
- RunPrintAndVerify(TypeAssertionsPhase::phase_name());
- }
-
- // Perform simplified lowering. This has to run w/o the Typer decorator,
- // because we cannot compute meaningful types anyways, and the computed types
- // might even conflict with the representation/truncation logic.
- Run<SimplifiedLoweringPhase>(linkage);
- RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
-
-#if V8_ENABLE_WEBASSEMBLY
- if (data->has_js_wasm_calls()) {
- DCHECK(data->info()->inline_js_wasm_calls());
- Run<JSWasmInliningPhase>();
- RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
- }
-#endif // V8_ENABLE_WEBASSEMBLY
-
- // From now on it is invalid to look at types on the nodes, because the types
- // on the nodes might not make sense after representation selection due to the
- // way we handle truncations; if we'd want to look at types afterwards we'd
- // essentially need to re-type (large portions of) the graph.
-
- // In order to catch bugs related to type access after this point, we now
- // remove the types from the nodes (currently only in Debug builds).
-#ifdef DEBUG
- Run<UntyperPhase>();
- RunPrintAndVerify(UntyperPhase::phase_name(), true);
-#endif
-
- // Run generic lowering pass.
- Run<GenericLoweringPhase>();
- RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
-
- data->BeginPhaseKind("V8.TFBlockBuilding");
-
- data->InitializeFrameData(linkage->GetIncomingDescriptor());
-
- Run<EffectControlLinearizationPhase>();
- RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
-
- Run<LateOptimizationPhase>();
- RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
-
- // Optimize memory access and allocation operations.
- Run<MemoryOptimizationPhase>();
- RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
-
- data->source_positions()->RemoveDecorator();
- if (data->info()->trace_turbo_json()) {
- data->node_origins()->RemoveDecorator();
- }
-
- ComputeScheduledGraph();
-
- return SelectInstructions(linkage);
-}
-
namespace {
// Compute a hash of the given graph, in a way that should provide the same
@@ -3150,7 +3049,7 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
wasm::WasmCompilationResult result;
code_generator->tasm()->GetCode(
nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
- static_cast<int>(code_generator->GetHandlerTableOffset()));
+ static_cast<int>(code_generator->handler_table_offset()));
result.instr_buffer = code_generator->tasm()->ReleaseBuffer();
result.source_positions = code_generator->GetSourcePositionTable();
result.protected_instructions_data =
@@ -3227,6 +3126,15 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
data.BeginPhaseKind("V8.WasmOptimization");
+ if (FLAG_wasm_inlining) {
+ pipeline.Run<WasmInliningPhase>(env, function_index, wire_bytes_storage,
+ loop_info);
+ pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
+ if (FLAG_wasm_loop_peeling) {
+ pipeline.Run<WasmLoopPeelingPhase>(loop_info);
+ pipeline.RunPrintAndVerify(WasmLoopPeelingPhase::phase_name(), true);
+ }
if (FLAG_wasm_loop_unrolling) {
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
@@ -3234,8 +3142,7 @@ void Pipeline::GenerateCodeForWasmFunction(
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
- pipeline.Run<WasmOptimizationPhase>(is_asm_js, env, function_index,
- wire_bytes_storage);
+ pipeline.Run<WasmOptimizationPhase>(is_asm_js);
pipeline.RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
} else {
pipeline.Run<WasmBaseOptimizationPhase>();
@@ -3264,7 +3171,7 @@ void Pipeline::GenerateCodeForWasmFunction(
CodeGenerator* code_generator = pipeline.code_generator();
code_generator->tasm()->GetCode(
nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
- static_cast<int>(code_generator->GetHandlerTableOffset()));
+ static_cast<int>(code_generator->handler_table_offset()));
result->instr_buffer = code_generator->tasm()->ReleaseBuffer();
result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
@@ -3310,8 +3217,10 @@ void Pipeline::GenerateCodeForWasmFunction(
<< time.InMilliseconds() << " ms and "
<< zone_stats.GetMaxAllocatedBytes() << " / "
<< zone_stats.GetTotalAllocatedBytes()
- << " max/total bytes, codesize " << codesize << " name "
- << data.info()->GetDebugName().get() << std::endl;
+ << " max/total bytes; bodysize "
+ << function_body.end - function_body.start << " codesize "
+ << codesize << " name " << data.info()->GetDebugName().get()
+ << std::endl;
}
DCHECK(result->succeeded());
@@ -3335,23 +3244,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
{
CompilationHandleScope compilation_scope(isolate, info);
- CanonicalHandleScope canonical(isolate, info);
+ CanonicalHandleScopeForTurbofan canonical(isolate, info);
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.InitializeHeapBroker();
- // Emulating the proper pipeline, we call CreateGraph on different places
- // (i.e before or after creating a LocalIsolateScope) depending on
- // is_concurrent_inlining.
- if (!data.broker()->is_concurrent_inlining()) {
- if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
- }
}
{
LocalIsolateScope local_isolate_scope(data.broker(), info,
isolate->main_thread_local_isolate());
- if (data.broker()->is_concurrent_inlining()) {
- if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
- }
+ if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
// We selectively Unpark inside OptimizeGraph.
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
@@ -3421,8 +3322,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
- Handle<SharedFunctionInfo> shared =
- handle(function->shared(), function->GetIsolate());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
return std::make_unique<PipelineCompilationJob>(
isolate, shared, function, osr_offset, osr_frame, code_kind);
}
@@ -3568,15 +3468,15 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
const RegisterConfiguration* config = RegisterConfiguration::Default();
std::unique_ptr<const RegisterConfiguration> restricted_config;
bool use_mid_tier_register_allocator =
- FLAG_turbo_force_mid_tier_regalloc ||
- (FLAG_turboprop_mid_tier_reg_alloc && data->info()->IsTurboprop()) ||
- (FLAG_turbo_use_mid_tier_regalloc_for_huge_functions &&
- data->sequence()->VirtualRegisterCount() >
- kTopTierVirtualRegistersLimit);
+ !CodeKindIsStaticallyCompiled(data->info()->code_kind()) &&
+ (FLAG_turbo_force_mid_tier_regalloc ||
+ (FLAG_turbo_use_mid_tier_regalloc_for_huge_functions &&
+ data->sequence()->VirtualRegisterCount() >
+ kTopTierVirtualRegistersLimit));
if (call_descriptor->HasRestrictedAllocatableRegisters()) {
RegList registers = call_descriptor->AllocatableRegisters();
- DCHECK_LT(0, NumRegs(registers));
+ DCHECK_LT(0, registers.Count());
restricted_config.reset(
RegisterConfiguration::RestrictGeneralRegisters(registers));
config = restricted_config.get();
@@ -3847,6 +3747,11 @@ void PipelineImpl::AllocateRegistersForTopTier(
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
+ if (data->sequence()->HasSimd128VirtualRegisters() &&
+ (kFPAliasing == AliasingKind::kIndependent)) {
+ Run<AllocateSimd128RegistersPhase<LinearScanAllocator>>();
+ }
+
Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index d86037a578..77ef1bab35 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
#define V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
-#include <vector>
-
#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/js-heap-broker.h"
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f5bddd4510..d2289dbc5e 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -12,6 +12,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-lowering-verifier.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/heap/factory-inl.h"
@@ -139,11 +140,13 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
-RepresentationChanger::RepresentationChanger(JSGraph* jsgraph,
- JSHeapBroker* broker)
+RepresentationChanger::RepresentationChanger(
+ JSGraph* jsgraph, JSHeapBroker* broker,
+ SimplifiedLoweringVerifier* verifier)
: cache_(TypeCache::Get()),
jsgraph_(jsgraph),
broker_(broker),
+ verifier_(verifier),
testing_type_errors_(false),
type_error_(false) {}
@@ -242,7 +245,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return node;
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
@@ -841,7 +844,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
use_info.type_check() == TypeCheckKind::kNumberOrOddball ||
use_info.type_check() == TypeCheckKind::kArrayIndex) &&
IsInt32Double(fv))) {
- return MakeTruncatedInt32Constant(fv);
+ return InsertTypeGuardForVerifier(NodeProperties::GetType(node),
+ MakeTruncatedInt32Constant(fv));
}
break;
}
@@ -1105,7 +1109,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (base::IsValueInRangeForNumericType<int64_t>(fv)) {
int64_t const iv = static_cast<int64_t>(fv);
if (static_cast<double>(iv) == fv) {
- return jsgraph()->Int64Constant(iv);
+ return InsertTypeGuardForVerifier(NodeProperties::GetType(node),
+ jsgraph()->Int64Constant(iv));
}
}
}
@@ -1116,8 +1121,9 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
BigIntRef bigint = m.Ref(broker_).AsBigInt();
- return jsgraph()->Int64Constant(
- static_cast<int64_t>(bigint.AsUint64()));
+ return InsertTypeGuardForVerifier(
+ NodeProperties::GetType(node),
+ jsgraph()->Int64Constant(static_cast<int64_t>(bigint.AsUint64())));
}
break;
}
@@ -1247,8 +1253,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
}
- } else if (output_rep == MachineRepresentation::kCagedPointer) {
- if (output_type.Is(Type::CagedPointer())) {
+ } else if (output_rep == MachineRepresentation::kSandboxedPointer) {
+ if (output_type.Is(Type::SandboxedPointer())) {
return node;
} else {
return TypeError(node, output_rep, output_type,
@@ -1565,6 +1571,18 @@ Node* RepresentationChanger::InsertCheckedFloat64ToInt32(
node, simplified()->CheckedFloat64ToInt32(check, feedback), use_node);
}
+Node* RepresentationChanger::InsertTypeGuardForVerifier(const Type& type,
+ Node* node) {
+ if (verification_enabled()) {
+ DCHECK(!type.IsInvalid());
+ node = jsgraph()->graph()->NewNode(jsgraph()->common()->TypeGuard(type),
+ node, jsgraph()->graph()->start(),
+ jsgraph()->graph()->start());
+ verifier_->RecordTypeGuard(node);
+ }
+ return node;
+}
+
Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); }
} // namespace compiler
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index a4b05cbfe7..5fc368da7e 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -14,6 +14,7 @@ namespace internal {
namespace compiler {
// Foward declarations.
+class SimplifiedLoweringVerifier;
class TypeCache;
enum IdentifyZeros : uint8_t { kIdentifyZeros, kDistinguishZeros };
@@ -78,7 +79,7 @@ class Truncation final {
// Debug utilities.
const char* description() const;
- bool IsLessGeneralThan(Truncation other) {
+ bool IsLessGeneralThan(Truncation other) const {
return LessGeneral(kind(), other.kind()) &&
LessGeneralIdentifyZeros(identify_zeros(), other.identify_zeros());
}
@@ -96,13 +97,11 @@ class Truncation final {
};
explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros)
- : kind_(kind), identify_zeros_(identify_zeros) {
- DCHECK(kind == TruncationKind::kAny ||
- kind == TruncationKind::kOddballAndBigIntToNumber ||
- identify_zeros == kIdentifyZeros);
- }
+ : kind_(kind), identify_zeros_(identify_zeros) {}
+
TruncationKind kind() const { return kind_; }
+ friend class SimplifiedLoweringVerifier;
TruncationKind kind_;
IdentifyZeros identify_zeros_;
@@ -322,7 +321,8 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class V8_EXPORT_PRIVATE RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker);
+ RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker,
+ SimplifiedLoweringVerifier* verifier);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for checking - if the changer cannot figure
@@ -349,10 +349,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
: MachineType::Pointer();
}
+ bool verification_enabled() const { return verifier_ != nullptr; }
+
private:
TypeCache const* cache_;
JSGraph* jsgraph_;
JSHeapBroker* broker_;
+ SimplifiedLoweringVerifier* verifier_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -402,6 +405,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* InsertTruncateInt64ToInt32(Node* node);
Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason,
const FeedbackSource& feedback = {});
+ Node* InsertTypeGuardForVerifier(const Type& type, Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index e346e9171d..6e96e4cef6 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -47,7 +47,7 @@ Reduction SelectLowering::LowerSelect(Node* node) {
__ Bind(&done);
if (reset_gasm) {
- gasm()->Reset(nullptr);
+ gasm()->Reset();
}
return Changed(done.PhiAt(0));
diff --git a/deps/v8/src/compiler/simplified-lowering-verifier.cc b/deps/v8/src/compiler/simplified-lowering-verifier.cc
new file mode 100644
index 0000000000..d113a9f081
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-lowering-verifier.cc
@@ -0,0 +1,251 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering-verifier.h"
+
+#include "src/compiler/operation-typer.h"
+#include "src/compiler/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Truncation LeastGeneralTruncation(const Truncation& t1, const Truncation& t2) {
+ if (t1.IsLessGeneralThan(t2)) return t1;
+ CHECK(t2.IsLessGeneralThan(t1));
+ return t2;
+}
+
+Truncation LeastGeneralTruncation(const Truncation& t1, const Truncation& t2,
+ const Truncation& t3) {
+ return LeastGeneralTruncation(LeastGeneralTruncation(t1, t2), t3);
+}
+
+void SimplifiedLoweringVerifier::CheckAndSet(Node* node, const Type& type,
+ const Truncation& trunc) {
+ DCHECK(!type.IsInvalid());
+
+ if (NodeProperties::IsTyped(node)) {
+ Type node_type = NodeProperties::GetType(node);
+ if (!type.Is(node_type)) {
+ std::ostringstream type_str;
+ type.PrintTo(type_str);
+ std::ostringstream node_type_str;
+ node_type.PrintTo(node_type_str);
+
+ FATAL(
+ "SimplifiedLoweringVerifierError: verified type %s of node #%d:%s "
+ "does not match with type %s assigned during lowering",
+ type_str.str().c_str(), node->id(), node->op()->mnemonic(),
+ node_type_str.str().c_str());
+ }
+ } else {
+ NodeProperties::SetType(node, type);
+ }
+ SetTruncation(node, GeneralizeTruncation(trunc, type));
+}
+
+bool IsModuloTruncation(const Truncation& truncation) {
+ return truncation.IsUsedAsWord32() || truncation.IsUsedAsWord64() ||
+ Truncation::Any().IsLessGeneralThan(truncation);
+}
+
+Truncation SimplifiedLoweringVerifier::GeneralizeTruncation(
+ const Truncation& truncation, const Type& type) const {
+ IdentifyZeros identify_zeros = truncation.identify_zeros();
+ if (!type.Maybe(Type::MinusZero())) {
+ identify_zeros = IdentifyZeros::kDistinguishZeros;
+ }
+
+ switch (truncation.kind()) {
+ case Truncation::TruncationKind::kAny: {
+ return Truncation::Any(identify_zeros);
+ }
+ case Truncation::TruncationKind::kWord32: {
+ if (type.Is(Type::Signed32OrMinusZero()) ||
+ type.Is(Type::Unsigned32OrMinusZero())) {
+ return Truncation::Any(identify_zeros);
+ }
+ return Truncation(Truncation::TruncationKind::kWord32, identify_zeros);
+ }
+ case Truncation::TruncationKind::kWord64: {
+ if (type.Is(Type::BigInt())) {
+ DCHECK_EQ(identify_zeros, IdentifyZeros::kDistinguishZeros);
+ if (type.Is(Type::SignedBigInt64()) ||
+ type.Is(Type::UnsignedBigInt64())) {
+ return Truncation::Any(IdentifyZeros::kDistinguishZeros);
+ }
+ } else if (type.Is(TypeCache::Get()->kSafeIntegerOrMinusZero)) {
+ return Truncation::Any(identify_zeros);
+ }
+ return Truncation(Truncation::TruncationKind::kWord64, identify_zeros);
+ }
+
+ default:
+ // TODO(nicohartmann): Support remaining truncations.
+ UNREACHABLE();
+ }
+}
+
+void SimplifiedLoweringVerifier::VisitNode(Node* node,
+ OperationTyper& op_typer) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt64Constant: {
+ // Constants might be untyped, because they are cached in the graph and
+ // used in different contexts such that no single type can be assigned.
+ // Their type is provided by an introduced TypeGuard where necessary.
+ break;
+ }
+ case IrOpcode::kCheckedFloat64ToInt32: {
+ Type input_type = InputType(node, 0);
+ DCHECK(input_type.Is(Type::Number()));
+
+ const auto& p = CheckMinusZeroParametersOf(node->op());
+ if (p.mode() == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Remove -0 from input_type.
+ input_type =
+ Type::Intersect(input_type, Type::Signed32(), graph_zone());
+ } else {
+ input_type = Type::Intersect(input_type, Type::Signed32OrMinusZero(),
+ graph_zone());
+ }
+ CheckAndSet(node, input_type, Truncation::Word32());
+ break;
+ }
+ case IrOpcode::kInt32Add: {
+ Type output_type =
+ op_typer.NumberAdd(InputType(node, 0), InputType(node, 1));
+ Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
+ InputTruncation(node, 1),
+ Truncation::Word32());
+ CHECK(IsModuloTruncation(output_trunc));
+ CheckAndSet(node, output_type, output_trunc);
+ break;
+ }
+ case IrOpcode::kInt32Sub: {
+ Type output_type =
+ op_typer.NumberSubtract(InputType(node, 0), InputType(node, 1));
+ Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
+ InputTruncation(node, 1),
+ Truncation::Word32());
+ CHECK(IsModuloTruncation(output_trunc));
+ CheckAndSet(node, output_type, output_trunc);
+ break;
+ }
+ case IrOpcode::kChangeInt31ToTaggedSigned: {
+ // ChangeInt31ToTaggedSigned is not truncating any values, so we can
+ // simply forward input.
+ CheckAndSet(node, InputType(node, 0), InputTruncation(node, 0));
+ break;
+ }
+ case IrOpcode::kChangeInt32ToTagged: {
+ // ChangeInt32ToTagged is not truncating any values, so we can simply
+ // forward input.
+ CheckAndSet(node, InputType(node, 0), InputTruncation(node, 0));
+ break;
+ }
+ case IrOpcode::kInt64Add: {
+ Type left_type = InputType(node, 0);
+ Type right_type = InputType(node, 1);
+
+ Type output_type;
+ if (left_type.Is(Type::BigInt()) && right_type.Is(Type::BigInt())) {
+ // BigInt x BigInt -> BigInt
+ output_type = op_typer.BigIntAdd(left_type, right_type);
+ } else if (left_type.Is(Type::Number()) &&
+ right_type.Is(Type::Number())) {
+ // Number x Number -> Number
+ output_type = op_typer.NumberAdd(left_type, right_type);
+ } else {
+ // Invalid type combination.
+ std::ostringstream left_str, right_str;
+ left_type.PrintTo(left_str);
+ right_type.PrintTo(right_str);
+ FATAL(
+ "SimplifiedLoweringVerifierError: invalid combination of input "
+ "types "
+ "%s and %s for node #%d:%s",
+ left_str.str().c_str(), right_str.str().c_str(), node->id(),
+ node->op()->mnemonic());
+ }
+
+ Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
+ InputTruncation(node, 1),
+ Truncation::Word64());
+ CHECK(IsModuloTruncation(output_trunc));
+ CheckAndSet(node, output_type, output_trunc);
+ break;
+ }
+ case IrOpcode::kChangeInt32ToInt64: {
+ // ChangeInt32ToInt64 is not truncating any values, so we can simply
+ // forward input.
+ CheckAndSet(node, InputType(node, 0), InputTruncation(node, 0));
+ break;
+ }
+ case IrOpcode::kDeadValue: {
+ CheckAndSet(node, Type::None(), Truncation::Any());
+ break;
+ }
+ case IrOpcode::kTypeGuard: {
+ Type input_type = Type::Any();
+ if (is_recorded_type_guard(node)) {
+ // If this TypeGuard is recorded, it means that it has been introduced
+ // during lowering to provide type information for nodes that cannot be
+ // typed directly (e.g. constants), so we cannot assume the input node
+ // is typed.
+ if (NodeProperties::IsTyped(node->InputAt(0))) {
+ input_type = InputType(node, 0);
+ }
+ } else {
+ input_type = InputType(node, 0);
+ }
+ Type output_type = op_typer.TypeTypeGuard(node->op(), input_type);
+
+ // TypeGuard has no effect on trunction, but the restricted type may help
+ // generalize it.
+ CheckAndSet(node, output_type, InputTruncation(node, 0));
+ break;
+ }
+ case IrOpcode::kTruncateBigIntToWord64: {
+ Type input_type = InputType(node, 0);
+ CHECK(input_type.Is(Type::BigInt()));
+ CHECK(Truncation::Word64().IsLessGeneralThan(InputTruncation(node, 0)));
+ CheckAndSet(node, input_type, Truncation::Word64());
+ break;
+ }
+ case IrOpcode::kChangeTaggedSignedToInt64: {
+ Type input_type = InputType(node, 0);
+ CHECK(input_type.Is(Type::Number()));
+ Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
+ Truncation::Word64());
+ CheckAndSet(node, input_type, output_trunc);
+ break;
+ }
+ case IrOpcode::kCheckBigInt: {
+ Type input_type = InputType(node, 0);
+ input_type = Type::Intersect(input_type, Type::BigInt(), graph_zone());
+ CheckAndSet(node, input_type, InputTruncation(node, 0));
+ break;
+ }
+ case IrOpcode::kReturn: {
+ const int return_value_count = ValueInputCountOfReturn(node->op());
+ for (int i = 0; i < return_value_count; ++i) {
+ Type input_type = InputType(node, 1 + i);
+ Truncation input_trunc = InputTruncation(node, 1 + i);
+ input_trunc = GeneralizeTruncation(input_trunc, input_type);
+ // No values must be lost due to truncation.
+ CHECK_EQ(input_trunc, Truncation::Any());
+ }
+ break;
+ }
+
+ default:
+ // TODO(nicohartmann): Support remaining operators.
+ break;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-lowering-verifier.h b/deps/v8/src/compiler/simplified-lowering-verifier.h
new file mode 100644
index 0000000000..ceff65dfdd
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-lowering-verifier.h
@@ -0,0 +1,93 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_VERIFIER_H_
+#define V8_COMPILER_SIMPLIFIED_LOWERING_VERIFIER_H_
+
+#include "src/compiler/representation-change.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class OperationTyper;
+
+class SimplifiedLoweringVerifier final {
+ public:
+ struct PerNodeData {
+ Truncation truncation = Truncation::Any(IdentifyZeros::kDistinguishZeros);
+ };
+
+ SimplifiedLoweringVerifier(Zone* zone, Graph* graph)
+ : type_guards_(zone), data_(zone), graph_(graph) {}
+
+ void VisitNode(Node* node, OperationTyper& op_typer);
+
+ void RecordTypeGuard(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kTypeGuard);
+ DCHECK(!is_recorded_type_guard(node));
+ type_guards_.insert(node);
+ }
+ const ZoneUnorderedSet<Node*>& recorded_type_guards() const {
+ return type_guards_;
+ }
+
+ private:
+ bool is_recorded_type_guard(Node* node) const {
+ return type_guards_.find(node) != type_guards_.end();
+ }
+
+ Type InputType(Node* node, int input_index) const {
+ // TODO(nicohartmann): Check that inputs are typed, once all operators are
+ // supported.
+ Node* input = node->InputAt(input_index);
+ if (NodeProperties::IsTyped(input)) {
+ return NodeProperties::GetType(input);
+ }
+ return Type::None();
+ }
+
+ void SetTruncation(Node* node, const Truncation& truncation) {
+ if (data_.size() <= node->id()) {
+ data_.resize(node->id() + 1);
+ }
+ DCHECK_EQ(data_[node->id()].truncation,
+ Truncation::Any(IdentifyZeros::kDistinguishZeros));
+ data_[node->id()].truncation = truncation;
+ }
+
+ Truncation InputTruncation(Node* node, int input_index) const {
+ static const Truncation any_truncation =
+ Truncation::Any(IdentifyZeros::kDistinguishZeros);
+
+ Node* input = node->InputAt(input_index);
+ if (input->id() < data_.size()) {
+ return data_[input->id()].truncation;
+ }
+ return any_truncation;
+ }
+
+ void CheckAndSet(Node* node, const Type& type, const Truncation& trunc);
+
+ // Generalize to a less strict truncation in the context of a given type. For
+ // example, a Truncation::kWord32[kIdentifyZeros] does not have any effect on
+ // a type Range(0, 100), because all equivalence classes are singleton, for
+ // the values of the given type. We can use Truncation::Any[kDistinguishZeros]
+ // instead to avoid a combinatorial explosion of occurring type-truncation-
+ // pairs.
+ Truncation GeneralizeTruncation(const Truncation& truncation,
+ const Type& type) const;
+
+ Zone* graph_zone() const { return graph_->zone(); }
+
+ ZoneUnorderedSet<Node*> type_guards_;
+ ZoneVector<PerNodeData> data_;
+ Graph* graph_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMPLIFIED_LOWERING_VERIFIER_H_
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index bbdbdfefd8..15d682bd29 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -23,6 +23,7 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering-verifier.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/numbers/conversions-inl.h"
@@ -160,7 +161,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
return UseInfo::Bool();
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCagedPointer:
+ case MachineRepresentation::kSandboxedPointer:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
break;
@@ -312,7 +313,8 @@ class RepresentationSelector {
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
TickCounter* tick_counter, Linkage* linkage,
- ObserveNodeManager* observe_node_manager)
+ ObserveNodeManager* observe_node_manager,
+ SimplifiedLoweringVerifier* verifier)
: jsgraph_(jsgraph),
zone_(zone),
might_need_revisit_(zone),
@@ -331,9 +333,12 @@ class RepresentationSelector {
op_typer_(broker, graph_zone()),
tick_counter_(tick_counter),
linkage_(linkage),
- observe_node_manager_(observe_node_manager) {
+ observe_node_manager_(observe_node_manager),
+ verifier_(verifier) {
}
+ bool verification_enabled() const { return verifier_ != nullptr; }
+
void ResetNodeInfoState() {
// Clean up for the next phase.
for (NodeInfo& info : info_) {
@@ -553,6 +558,12 @@ class RepresentationSelector {
// Generates a pre-order traversal of the nodes, starting with End.
void GenerateTraversal() {
+ // Reset previous state.
+ ResetNodeInfoState();
+ traversal_nodes_.clear();
+ count_ = graph()->NodeCount();
+ info_.resize(count_);
+
ZoneStack<NodeState> stack(zone_);
stack.push({graph()->end(), 0});
@@ -710,11 +721,46 @@ class RepresentationSelector {
}
}
+ void RunVerifyPhase() {
+ DCHECK_NOT_NULL(verifier_);
+
+ TRACE("--{Verify Phase}--\n");
+
+ // Generate a new traversal containing all the new nodes created during
+ // lowering.
+ GenerateTraversal();
+
+ // Set node types to the refined types computed during retyping.
+ for (Node* node : traversal_nodes_) {
+ NodeInfo* info = GetInfo(node);
+ if (!info->feedback_type().IsInvalid()) {
+ NodeProperties::SetType(node, info->feedback_type());
+ }
+ }
+
+ // Verify all nodes.
+ for (Node* node : traversal_nodes_) verifier_->VisitNode(node, op_typer_);
+
+ // Eliminate all introduced TypeGuard nodes.
+ for (Node* node : verifier_->recorded_type_guards()) {
+ Node* input = node->InputAt(0);
+ DCHECK_EQ(node->InputAt(1), graph()->start());
+ DCHECK_EQ(node->InputAt(2), graph()->start());
+ DisconnectFromEffectAndControl(node);
+ node->ReplaceUses(input);
+ node->Kill();
+ }
+ }
+
void Run(SimplifiedLowering* lowering) {
GenerateTraversal();
RunPropagatePhase();
RunRetypePhase();
RunLowerPhase(lowering);
+
+ if (verification_enabled()) {
+ RunVerifyPhase();
+ }
}
// Just assert for Retype and Lower. Propagate specialized below.
@@ -1074,8 +1120,7 @@ class RepresentationSelector {
} else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) {
return MachineRepresentation::kWord64;
} else if (type.Is(Type::ExternalPointer()) ||
- type.Is(Type::SandboxedExternalPointer()) ||
- type.Is(Type::CagedPointer())) {
+ type.Is(Type::SandboxedPointer())) {
return MachineType::PointerRepresentation();
}
return MachineRepresentation::kTagged;
@@ -2061,7 +2106,10 @@ class RepresentationSelector {
VisitLeaf<T>(node, MachineRepresentation::kTaggedSigned);
if (lower<T>()) {
intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
- DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi));
+ Node* constant = InsertTypeGuardForVerifier(
+ NodeProperties::GetType(node),
+ lowering->jsgraph()->IntPtrConstant(smi));
+ DeferReplacement(node, constant);
}
return;
}
@@ -2100,17 +2148,6 @@ class RepresentationSelector {
//------------------------------------------------------------------
// JavaScript operators.
//------------------------------------------------------------------
- case IrOpcode::kToBoolean: {
- if (truncation.IsUsedAsBool()) {
- ProcessInput<T>(node, 0, UseInfo::Bool());
- SetOutput<T>(node, MachineRepresentation::kBit);
- if (lower<T>()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitInputs<T>(node);
- SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
- }
- return;
- }
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
@@ -2135,6 +2172,17 @@ class RepresentationSelector {
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
+ case IrOpcode::kToBoolean: {
+ if (truncation.IsUsedAsBool()) {
+ ProcessInput<T>(node, 0, UseInfo::Bool());
+ SetOutput<T>(node, MachineRepresentation::kBit);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitInputs<T>(node);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
case IrOpcode::kBooleanNot: {
if (lower<T>()) {
NodeInfo* input_info = GetInfo(node->InputAt(0));
@@ -3075,22 +3123,6 @@ class RepresentationSelector {
return VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
- case IrOpcode::kTierUpCheck: {
- ProcessInput<T>(node, 0, UseInfo::AnyTagged());
- ProcessInput<T>(node, 1, UseInfo::AnyTagged());
- ProcessInput<T>(node, 2, UseInfo::AnyTagged());
- ProcessInput<T>(node, 3, UseInfo::TruncatingWord32());
- ProcessInput<T>(node, 4, UseInfo::AnyTagged());
- ProcessRemainingInputs<T>(node, 5);
- SetOutput<T>(node, MachineRepresentation::kNone);
- return;
- }
- case IrOpcode::kUpdateInterruptBudget: {
- ProcessInput<T>(node, 0, UseInfo::AnyTagged());
- ProcessRemainingInputs<T>(node, 1);
- SetOutput<T>(node, MachineRepresentation::kNone);
- return;
- }
case IrOpcode::kNewConsString: {
ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); // length
ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // first
@@ -4058,6 +4090,16 @@ class RepresentationSelector {
NotifyNodeReplaced(node, replacement);
}
+ Node* InsertTypeGuardForVerifier(const Type& type, Node* node) {
+ if (verification_enabled()) {
+ DCHECK(!type.IsInvalid());
+ node = graph()->NewNode(common()->TypeGuard(type), node, graph()->start(),
+ graph()->start());
+ verifier_->RecordTypeGuard(node);
+ }
+ return node;
+ }
+
private:
void ChangeOp(Node* node, const Operator* new_op) {
compiler::NodeProperties::ChangeOp(node, new_op);
@@ -4077,7 +4119,7 @@ class RepresentationSelector {
Zone* zone_; // Temporary zone.
// Map from node to its uses that might need to be revisited.
ZoneMap<Node*, ZoneVector<Node*>> might_need_revisit_;
- size_t const count_; // number of nodes in the graph
+ size_t count_; // number of nodes in the graph
ZoneVector<NodeInfo> info_; // node id -> usage information
#ifdef DEBUG
ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
@@ -4104,6 +4146,7 @@ class RepresentationSelector {
TickCounter* const tick_counter_;
Linkage* const linkage_;
ObserveNodeManager* const observe_node_manager_;
+ SimplifiedLoweringVerifier* verifier_; // Used to verify output graph.
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -4286,10 +4329,14 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
observe_node_manager_(observe_node_manager) {}
void SimplifiedLowering::LowerAllNodes() {
- RepresentationChanger changer(jsgraph(), broker_);
+ SimplifiedLoweringVerifier* verifier = nullptr;
+ if (FLAG_verify_simplified_lowering) {
+ verifier = zone_->New<SimplifiedLoweringVerifier>(zone_, graph());
+ }
+ RepresentationChanger changer(jsgraph(), broker_, verifier);
RepresentationSelector selector(
jsgraph(), broker_, zone_, &changer, source_positions_, node_origins_,
- tick_counter_, linkage_, observe_node_manager_);
+ tick_counter_, linkage_, observe_node_manager_, verifier);
selector.Run(this);
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 10e719431f..e387ea75c3 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -91,6 +91,9 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
if (access.is_store_in_literal) {
os << " (store in literal)";
}
+ if (access.maybe_initializing_or_transitioning_store) {
+ os << " (initializing or transitioning store)";
+ }
os << "]";
return os;
}
@@ -160,7 +163,9 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
const ObjectAccess& ObjectAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadFromObject ||
- op->opcode() == IrOpcode::kStoreToObject);
+ op->opcode() == IrOpcode::kLoadImmutableFromObject ||
+ op->opcode() == IrOpcode::kStoreToObject ||
+ op->opcode() == IrOpcode::kInitializeImmutableInObject);
return OpParameter<ObjectAccess>(op);
}
@@ -1315,18 +1320,6 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAsUintN(
SpeculativeBigIntAsNParameters(bits, feedback));
}
-const Operator* SimplifiedOperatorBuilder::UpdateInterruptBudget(int delta) {
- return zone()->New<Operator1<int>>(
- IrOpcode::kUpdateInterruptBudget, Operator::kNoThrow | Operator::kNoDeopt,
- "UpdateInterruptBudget", 1, 1, 1, 0, 1, 0, delta);
-}
-
-const Operator* SimplifiedOperatorBuilder::TierUpCheck() {
- return zone()->New<Operator>(IrOpcode::kTierUpCheck,
- Operator::kNoThrow | Operator::kNoDeopt,
- "TierUpCheck", 5, 1, 1, 0, 1, 0);
-}
-
const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
DCHECK(type.CanBeAsserted());
return zone()->New<Operator1<Type>>(IrOpcode::kAssertType,
@@ -1829,11 +1822,6 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
const Operator* SimplifiedOperatorBuilder::AllocateRaw(
Type type, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
- // We forbid optimized allocations to allocate in a different generation than
- // requested.
- DCHECK(!(allow_large_objects == AllowLargeObjects::kTrue &&
- allocation == AllocationType::kYoung &&
- !FLAG_young_generation_large_objects));
return zone()->New<Operator1<AllocateParameters>>(
IrOpcode::kAllocateRaw, Operator::kEliminatable, "AllocateRaw", 1, 1, 1,
1, 1, 1, AllocateParameters(type, allocation, allow_large_objects));
@@ -1878,16 +1866,17 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
UNREACHABLE();
}
-#define ACCESS_OP_LIST(V) \
- V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
- V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
- V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
- V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
- V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
- V(LoadFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
- V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
- V(StoreToObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
- V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+#define ACCESS_OP_LIST(V) \
+ V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
+ V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
+ V(LoadFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(StoreToObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadImmutableFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(InitializeImmutableInObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
@@ -1902,6 +1891,17 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::StoreField(
+ const FieldAccess& access, bool maybe_initializing_or_transitioning) {
+ FieldAccess store_access = access;
+ store_access.maybe_initializing_or_transitioning_store =
+ maybe_initializing_or_transitioning;
+ return zone()->New<Operator1<FieldAccess>>(
+ IrOpcode::kStoreField,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead, "StoreField",
+ 2, 1, 1, 0, 1, 0, store_access);
+}
+
const Operator* SimplifiedOperatorBuilder::LoadMessage() {
return zone()->New<Operator>(IrOpcode::kLoadMessage, Operator::kEliminatable,
"LoadMessage", 1, 1, 1, 1, 1, 0);
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index a69628c4cb..a9ea33531c 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -81,9 +81,13 @@ struct FieldAccess {
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
ExternalPointerTag external_pointer_tag = kExternalPointerNullTag;
#endif
+ bool maybe_initializing_or_transitioning_store; // store is potentially
+ // initializing a newly
+ // allocated object or part
+ // of a map transition.
FieldAccess()
: base_is_tagged(kTaggedBase),
@@ -92,18 +96,18 @@ struct FieldAccess {
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
const_field_info(ConstFieldInfo::None()),
- is_store_in_literal(false) {}
+ is_store_in_literal(false),
+ maybe_initializing_or_transitioning_store(false) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
- bool is_store_in_literal = false
-#ifdef V8_HEAP_SANDBOX
- ,
- ExternalPointerTag external_pointer_tag = kExternalPointerNullTag
+ bool is_store_in_literal = false,
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ExternalPointerTag external_pointer_tag = kExternalPointerNullTag,
#endif
- )
+ bool maybe_initializing_or_transitioning_store = false)
: base_is_tagged(base_is_tagged),
offset(offset),
name(name),
@@ -112,12 +116,12 @@ struct FieldAccess {
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
const_field_info(const_field_info),
- is_store_in_literal(is_store_in_literal)
-#ifdef V8_HEAP_SANDBOX
- ,
- external_pointer_tag(external_pointer_tag)
+ is_store_in_literal(is_store_in_literal),
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ external_pointer_tag(external_pointer_tag),
#endif
- {
+ maybe_initializing_or_transitioning_store(
+ maybe_initializing_or_transitioning_store) {
DCHECK_GE(offset, 0);
DCHECK_IMPLIES(
machine_type.IsMapWord(),
@@ -851,20 +855,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TypeOf();
- // Adds the given delta to the current feedback vector's interrupt budget,
- // and calls the runtime profiler in case the budget is exhausted. A note on
- // the delta parameter: the interrupt budget mechanism originates in the
- // interpreter and thus still refers to 'bytecodes' even though we are
- // generating native code. The interrupt budget essentially corresponds to
- // the number of bytecodes we can execute before calling the profiler. The
- // delta parameter represents the executed bytecodes since the last update.
- const Operator* UpdateInterruptBudget(int delta);
-
- // Takes the current feedback vector as input 0, and generates a check of the
- // vector's marker. Depending on the marker's value, we either do nothing,
- // trigger optimized compilation, or install a finished code object.
- const Operator* TierUpCheck();
-
const Operator* ToBoolean();
const Operator* StringConcat();
@@ -1043,7 +1033,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
- const Operator* StoreField(FieldAccess const&);
+ const Operator* StoreField(FieldAccess const&,
+ bool maybe_initializing_or_transitioning = true);
// load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
@@ -1068,10 +1059,22 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
Type value_type);
// load-from-object [base + offset]
+ // This operator comes in two flavors: LoadImmutableFromObject guarantees that
+ // the underlying object field will be initialized at most once for the
+ // duration of the program. This enables more optimizations in
+ // CsaLoadElimination.
+ // Note: LoadImmutableFromObject is unrelated to LoadImmutable and is lowered
+ // into a regular Load.
const Operator* LoadFromObject(ObjectAccess const&);
+ const Operator* LoadImmutableFromObject(ObjectAccess const&);
// store-to-object [base + offset], value
+ // This operator comes in two flavors: InitializeImmutableInObject guarantees
+ // that the underlying object field has not and will not be initialized again
+ // for the duration of the program. This enables more optimizations in
+ // CsaLoadElimination.
const Operator* StoreToObject(ObjectAccess const&);
+ const Operator* InitializeImmutableInObject(ObjectAccess const&);
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
@@ -1214,37 +1217,6 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
}
};
-class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
- public:
- explicit constexpr TierUpCheckNode(Node* node)
- : SimplifiedNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kTierUpCheck, node->opcode());
- }
-
-#define INPUTS(V) \
- V(FeedbackVector, feedback_vector, 0, FeedbackVector) \
- V(Target, target, 1, JSReceiver) \
- V(NewTarget, new_target, 2, Object) \
- V(InputCount, input_count, 3, UntaggedT) \
- V(Context, context, 4, Context)
- INPUTS(DEFINE_INPUT_ACCESSORS)
-#undef INPUTS
-};
-
-class UpdateInterruptBudgetNode final : public SimplifiedNodeWrapperBase {
- public:
- explicit constexpr UpdateInterruptBudgetNode(Node* node)
- : SimplifiedNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kUpdateInterruptBudget, node->opcode());
- }
-
- int delta() const { return OpParameter<int>(node()->op()); }
-
-#define INPUTS(V) V(FeedbackCell, feedback_cell, 0, FeedbackCell)
- INPUTS(DEFINE_INPUT_ACCESSORS)
-#undef INPUTS
-};
-
#undef DEFINE_INPUT_ACCESSORS
} // namespace compiler
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index d38d7f3ddf..55073bc752 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -4,6 +4,7 @@
#include "src/compiler/state-values-utils.h"
+#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/common-operator.h"
#include "src/utils/bit-vector.h"
@@ -137,8 +138,7 @@ Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
- Node** values, size_t count, const BitVector* liveness,
- int liveness_offset) {
+ Node** values, size_t count, const BytecodeLivenessState* liveness) {
SparseInputMask::BitMaskType input_mask = 0;
// Virtual nodes are the live nodes plus the implicit optimized out nodes,
@@ -150,7 +150,7 @@ SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
if (liveness == nullptr ||
- liveness->Contains(liveness_offset + static_cast<int>(*values_idx))) {
+ liveness->RegisterIsLive(static_cast<int>(*values_idx))) {
input_mask |= 1 << (virtual_node_count);
(*node_buffer)[(*node_count)++] = values[*values_idx];
}
@@ -169,15 +169,16 @@ SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
}
Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
- size_t count, const BitVector* liveness,
- int liveness_offset, size_t level) {
+ size_t count,
+ const BytecodeLivenessState* liveness,
+ size_t level) {
WorkingBuffer* node_buffer = GetWorkingSpace(level);
size_t node_count = 0;
SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
if (level == 0) {
input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
- values, count, liveness, liveness_offset);
+ values, count, liveness);
// Make sure we returned a sparse input mask.
DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
} else {
@@ -189,9 +190,8 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
// remaining live nodes.
size_t previous_input_count = node_count;
- input_mask =
- FillBufferWithValues(node_buffer, &node_count, values_idx, values,
- count, liveness, liveness_offset);
+ input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+ values, count, liveness);
// Make sure we have exhausted our values.
DCHECK_EQ(*values_idx, count);
// Make sure we returned a sparse input mask.
@@ -207,8 +207,8 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
} else {
// Otherwise, add the values to a subtree and add that as an input.
- Node* subtree = BuildTree(values_idx, values, count, liveness,
- liveness_offset, level - 1);
+ Node* subtree =
+ BuildTree(values_idx, values, count, liveness, level - 1);
(*node_buffer)[node_count++] = subtree;
// Don't touch the bitmask, so that it stays dense.
}
@@ -231,7 +231,7 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
namespace {
void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
- const BitVector* liveness, int liveness_offset) {
+ const BytecodeLivenessState* liveness) {
DCHECK_EQ(count, StateValuesAccess(tree).size());
int i;
@@ -239,7 +239,7 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
auto it = access.begin();
auto itend = access.end();
for (i = 0; it != itend; ++it, ++i) {
- if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
+ if (liveness == nullptr || liveness->RegisterIsLive(i)) {
DCHECK_EQ(it.node(), values[i]);
} else {
DCHECK_NULL(it.node());
@@ -251,9 +251,8 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
} // namespace
#endif
-Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
- const BitVector* liveness,
- int liveness_offset) {
+Node* StateValuesCache::GetNodeForValues(
+ Node** values, size_t count, const BytecodeLivenessState* liveness) {
#if DEBUG
// Check that the values represent actual values, and not a tree of values.
for (size_t i = 0; i < count; i++) {
@@ -263,10 +262,10 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
}
}
if (liveness != nullptr) {
- DCHECK_LE(liveness_offset + count, static_cast<size_t>(liveness->length()));
+ DCHECK_LE(count, static_cast<size_t>(liveness->register_count()));
for (size_t i = 0; i < count; i++) {
- if (liveness->Contains(liveness_offset + static_cast<int>(i))) {
+ if (liveness->RegisterIsLive(static_cast<int>(i))) {
DCHECK_NOT_NULL(values[i]);
}
}
@@ -289,8 +288,7 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
}
size_t values_idx = 0;
- Node* tree =
- BuildTree(&values_idx, values, count, liveness, liveness_offset, height);
+ Node* tree = BuildTree(&values_idx, values, count, liveness, height);
// The values should be exhausted by the end of BuildTree.
DCHECK_EQ(values_idx, count);
@@ -298,7 +296,7 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
#if DEBUG
- CheckTreeContainsValues(tree, values, count, liveness, liveness_offset);
+ CheckTreeContainsValues(tree, values, count, liveness);
#endif
return tree;
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 78d57a92b9..9c74ffdc46 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -20,14 +20,14 @@ class BitVector;
namespace compiler {
class Graph;
+class BytecodeLivenessState;
class V8_EXPORT_PRIVATE StateValuesCache {
public:
explicit StateValuesCache(JSGraph* js_graph);
Node* GetNodeForValues(Node** values, size_t count,
- const BitVector* liveness = nullptr,
- int liveness_offset = 0);
+ const BytecodeLivenessState* liveness = nullptr);
private:
static const size_t kMaxInputCount = 8;
@@ -57,15 +57,12 @@ class V8_EXPORT_PRIVATE StateValuesCache {
// at {values_idx}, sparsely encoding according to {liveness}. {node_count} is
// updated with the new number of inputs in {node_buffer}, and a bitmask of
// the sparse encoding is returned.
- SparseInputMask::BitMaskType FillBufferWithValues(WorkingBuffer* node_buffer,
- size_t* node_count,
- size_t* values_idx,
- Node** values, size_t count,
- const BitVector* liveness,
- int liveness_offset);
+ SparseInputMask::BitMaskType FillBufferWithValues(
+ WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
+ Node** values, size_t count, const BytecodeLivenessState* liveness);
Node* BuildTree(size_t* values_idx, Node** values, size_t count,
- const BitVector* liveness, int liveness_offset, size_t level);
+ const BytecodeLivenessState* liveness, size_t level);
WorkingBuffer* GetWorkingSpace(size_t level);
Node* GetEmptyStateValues();
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index f0763436b7..14309aa9e3 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -50,6 +50,7 @@ using StoreOffset = uint32_t;
struct UnobservableStore {
NodeId id_;
StoreOffset offset_;
+ bool maybe_gc_observable_ = false;
bool operator==(const UnobservableStore other) const {
return (id_ == other.id_) && (offset_ == other.offset_);
@@ -76,13 +77,15 @@ size_t hash_value(const UnobservableStore& p) {
// an UnobservablesSet allocates no memory.
class UnobservablesSet final {
private:
- using KeyT = UnobservableStore;
- using ValueT = bool; // Emulates set semantics in the map.
+ enum ObservableState {
+ kObservable = 0, // We haven't seen a store to this offset before.
+ kUnobservable = 1, // Stores to the same offset can be eliminated.
+ kGCObservable = 2 // Stores to the same offset can only be eliminated,
+ // if they are not initializing or transitioning.
+ };
- // The PersistentMap uses a special value to signify 'not present'. We use
- // a boolean value to emulate set semantics.
- static constexpr ValueT kNotPresent = false;
- static constexpr ValueT kPresent = true;
+ using KeyT = UnobservableStore;
+ using ValueT = ObservableState;
public:
using SetT = PersistentMap<KeyT, ValueT>;
@@ -97,13 +100,17 @@ class UnobservablesSet final {
UnobservablesSet& operator=(const UnobservablesSet& other)
V8_NOEXCEPT = default;
+ // Computes the intersection of two states.
+ ObservableState Intersect(const ObservableState state1,
+ const ObservableState state2) const;
+
// Computes the intersection of two UnobservablesSets. If one of the sets is
// empty, will return empty.
UnobservablesSet Intersect(const UnobservablesSet& other,
const UnobservablesSet& empty, Zone* zone) const;
// Returns a set that it is the current one, plus the observation obs passed
- // as parameter. If said obs it's already in the set, we don't have to
+ // as parameter. If said obs it's already unobservable, we don't have to
// create a new one.
UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
@@ -113,18 +120,43 @@ class UnobservablesSet final {
// This can probably be done better if the observations are stored first by
// offset and then by node.
// We are removing all nodes with offset off since different nodes may
- // alias one another, and we currently we don't have the means to know if
+ // alias one another, and currently we don't have the means to know if
// two nodes are definitely the same value.
UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+ // Returns a new set where all observations are marked as being observable
+ // by GC.
+ UnobservablesSet MarkGCObservable(Zone* zone) const;
+
const SetT* set() const { return set_; }
bool IsUnvisited() const { return set_ == nullptr; }
bool IsEmpty() const {
return set_ == nullptr || set_->begin() == set_->end();
}
- bool Contains(UnobservableStore obs) const {
- return set_ != nullptr && set_->Get(obs) != kNotPresent;
+
+ // We need to guarantee that objects are fully initialized and fields are in
+ // sync with their map when a GC is triggered (potentially by any allocation).
+ // Therefore initializing or transitioning stores are observable if they are
+ // observable by GC. All other stores are not relevant for correct GC
+ // behaviour and can be eliminated even if they are observable by GC.
+ bool IsUnobservable(UnobservableStore obs,
+ bool maybe_initializing_or_transitioning) const {
+ if (set_ == nullptr) return false;
+ ObservableState state = set_->Get(obs);
+ switch (state) {
+ case kUnobservable:
+ return true;
+ case kObservable:
+ return false;
+ case kGCObservable:
+ return !maybe_initializing_or_transitioning;
+ }
+ UNREACHABLE();
+ }
+
+ bool IsGCObservable(UnobservableStore obs) const {
+ return set_ != nullptr && set_->Get(obs) == kGCObservable;
}
bool operator==(const UnobservablesSet& other) const {
@@ -145,22 +177,22 @@ class UnobservablesSet final {
explicit UnobservablesSet(const SetT* set) : set_(set) {}
static SetT* NewSet(Zone* zone) {
- return zone->New<UnobservablesSet::SetT>(zone, kNotPresent);
+ return zone->New<UnobservablesSet::SetT>(zone, kObservable);
}
- static void SetAdd(SetT* set, const KeyT& key) { set->Set(key, kPresent); }
+ static void SetAdd(SetT* set, const KeyT& key) {
+ set->Set(key, kUnobservable);
+ }
static void SetErase(SetT* set, const KeyT& key) {
- set->Set(key, kNotPresent);
+ set->Set(key, kObservable);
+ }
+ static void SetGCObservable(SetT* set, const KeyT& key) {
+ set->Set(key, kGCObservable);
}
const SetT* set_ = nullptr;
};
-// These definitions are here in order to please the linker, which in debug mode
-// sometimes requires static constants to be defined in .cc files.
-constexpr UnobservablesSet::ValueT UnobservablesSet::kNotPresent;
-constexpr UnobservablesSet::ValueT UnobservablesSet::kPresent;
-
class RedundantStoreFinder final {
public:
// Note that we Initialize unobservable_ with js_graph->graph->NodeCount()
@@ -286,7 +318,8 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
StoreOffset offset = ToOffset(access);
UnobservableStore observation = {stored_to->id(), offset};
- bool is_not_observable = uses.Contains(observation);
+ const bool is_not_observable = uses.IsUnobservable(
+ observation, access.maybe_initializing_or_transitioning_store);
if (is_not_observable) {
TRACE(" #%d is StoreField[+%d,%s](#%d), unobservable", node->id(),
@@ -295,10 +328,29 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
to_remove().insert(node);
return uses;
} else {
- TRACE(" #%d is StoreField[+%d,%s](#%d), observable, recording in set",
- node->id(), offset,
- MachineReprToString(access.machine_type.representation()),
- stored_to->id());
+ const bool is_gc_observable =
+ access.maybe_initializing_or_transitioning_store &&
+ uses.IsGCObservable(observation);
+ // A GC observable store could have been unobservable in a previous
+ // visit. This is possible if the node that previously shadowed the
+ // initializing store is now unobservable, due to additional stores
+ // added to the unobservables set. Example:
+ // StoreA --> StoreB (init)
+ // ^
+ // |
+ // PathX --> Allocate <-- StoreC <-- PathY
+ // When traversing PathX, StoreA will shadow StoreB and we will
+ // eliminate StoreB. When traversing PathY, StoreA will be shadowed by
+ // StoreC and we will eliminate StoreA, but StoreB is now observable by
+ // GC and should not be eliminated.
+ if (is_gc_observable) {
+ to_remove().erase(node);
+ }
+ TRACE(
+ " #%d is StoreField[+%d,%s](#%d), observable%s, recording in set",
+ node->id(), offset,
+ MachineReprToString(access.machine_type.representation()),
+ stored_to->id(), is_gc_observable ? " by GC" : "");
return uses.Add(observation, temp_zone());
}
}
@@ -316,6 +368,17 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
return uses.RemoveSameOffset(offset, temp_zone());
}
+ case IrOpcode::kAllocate:
+ case IrOpcode::kAllocateRaw: {
+ // Allocations can trigger a GC, therefore stores observable by allocation
+ // can not be eliminated, if they are initializing or tranisitioning
+ // stores.
+ TRACE(
+ " #%d is Allocate or AllocateRaw, marking recorded offsets as "
+ "observable by GC",
+ node->id());
+ return uses.MarkGCObservable(temp_zone());
+ }
default:
if (CannotObserveStoreField(node)) {
TRACE(" #%d:%s can observe nothing, set stays unchanged", node->id(),
@@ -444,6 +507,16 @@ UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
return UnobservablesSet(NewSet(zone));
}
+UnobservablesSet::ObservableState UnobservablesSet::Intersect(
+ const ObservableState state1, const ObservableState state2) const {
+ if (state1 == state2) return state1;
+ if (state1 == kObservable || state2 == kObservable) return kObservable;
+ if (state1 == kGCObservable || state2 == kGCObservable) {
+ return kGCObservable;
+ }
+ UNREACHABLE();
+}
+
UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
const UnobservablesSet& empty,
Zone* zone) const {
@@ -451,9 +524,9 @@ UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
UnobservablesSet::SetT* intersection = NewSet(zone);
for (auto triple : set()->Zip(*other.set())) {
- if (std::get<1>(triple) && std::get<2>(triple)) {
- intersection->Set(std::get<0>(triple), kPresent);
- }
+ ObservableState new_state =
+ Intersect(std::get<1>(triple), std::get<2>(triple));
+ intersection->Set(std::get<0>(triple), new_state);
}
return UnobservablesSet(intersection);
@@ -461,7 +534,7 @@ UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
Zone* zone) const {
- if (set()->Get(obs) != kNotPresent) return *this;
+ if (set()->Get(obs) == kUnobservable) return *this;
UnobservablesSet::SetT* new_set = NewSet(zone);
*new_set = *set();
@@ -484,6 +557,19 @@ UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
return UnobservablesSet(new_set);
}
+UnobservablesSet UnobservablesSet::MarkGCObservable(Zone* zone) const {
+ UnobservablesSet::SetT* new_set = NewSet(zone);
+ *new_set = *set();
+
+ // Mark all elements as observable by GC.
+ for (auto entry : *new_set) {
+ const UnobservableStore& obs = entry.first;
+ SetGCObservable(new_set, obs);
+ }
+
+ return UnobservablesSet(new_set);
+}
+
} // namespace
// static
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 6b87797311..123518685d 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1215,9 +1215,6 @@ Type Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
-Type Typer::Visitor::TypeTierUpCheck(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeUpdateInterruptBudget(Node* node) { UNREACHABLE(); }
-
// JS conversion operators.
Type Typer::Visitor::TypeToBoolean(Node* node) {
@@ -1262,7 +1259,13 @@ Type Typer::Visitor::TypeJSCreateGeneratorObject(Node* node) {
}
Type Typer::Visitor::TypeJSCreateClosure(Node* node) {
- return Type::Function();
+ SharedFunctionInfoRef shared =
+ JSCreateClosureNode{node}.Parameters().shared_info(typer_->broker());
+ if (IsClassConstructor(shared.kind())) {
+ return Type::ClassConstructor();
+ } else {
+ return Type::CallableFunction();
+ }
}
Type Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
@@ -1427,17 +1430,17 @@ Type Typer::Visitor::Weaken(Node* node, Type current_type, Type previous_type) {
typer_->zone());
}
-Type Typer::Visitor::TypeJSStoreProperty(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeJSSetKeyedProperty(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeJSDefineProperty(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeJSDefineKeyedOwnProperty(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeJSStoreNamed(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeJSSetNamedProperty(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeJSStoreGlobal(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeJSStoreNamedOwn(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeJSDefineNamedOwnProperty(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
+Type Typer::Visitor::TypeJSDefineKeyedOwnPropertyInLiteral(Node* node) {
UNREACHABLE();
}
@@ -2142,7 +2145,17 @@ Type Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
return type;
}
-Type Typer::Visitor::TypeCheckClosure(Node* node) { return Type::Function(); }
+Type Typer::Visitor::TypeCheckClosure(Node* node) {
+ FeedbackCellRef cell = MakeRef(typer_->broker(), FeedbackCellOf(node->op()));
+ base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ if (!shared.has_value()) return Type::Function();
+
+ if (IsClassConstructor(shared->kind())) {
+ return Type::ClassConstructor();
+ } else {
+ return Type::CallableFunction();
+ }
+}
Type Typer::Visitor::TypeConvertReceiver(Node* node) {
Type arg = Operand(node, 0);
@@ -2185,6 +2198,7 @@ Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
}
Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeLoadImmutableFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
@@ -2215,6 +2229,9 @@ Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeInitializeImmutableInObject(Node* node) {
+ UNREACHABLE();
+}
Type Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
UNREACHABLE();
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 1d051774da..365eb3c13f 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -196,6 +196,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_STRING_ITERATOR_PROTOTYPE_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ERROR_TYPE:
+ case JS_EXTERNAL_OBJECT_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
@@ -257,6 +258,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
+ case JS_SHADOW_REALM_TYPE:
+ case JS_SHARED_STRUCT_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
case JS_TEMPORAL_DURATION_TYPE:
case JS_TEMPORAL_INSTANT_TYPE:
@@ -269,13 +272,14 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_TEMPORAL_ZONED_DATE_TIME_TYPE:
#if V8_ENABLE_WEBASSEMBLY
case WASM_ARRAY_TYPE:
- case WASM_TAG_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_INSTANCE_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_STRUCT_TYPE:
+ case WASM_SUSPENDER_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_TAG_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
#endif // V8_ENABLE_WEBASSEMBLY
case WEAK_CELL_TYPE:
@@ -285,8 +289,10 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_BOUND_FUNCTION_TYPE:
DCHECK(!map.is_undetectable());
return kBoundFunction;
+ case JS_WRAPPED_FUNCTION_TYPE:
+ DCHECK(!map.is_undetectable());
+ return kOtherCallable;
case JS_FUNCTION_TYPE:
- case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -295,7 +301,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
DCHECK(!map.is_undetectable());
- return kFunction;
+ return kCallableFunction;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
+ return kClassConstructor;
case JS_PROXY_TYPE:
DCHECK(!map.is_undetectable());
if (map.is_callable()) return kCallableProxy;
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index d4b129f242..c809c5d63f 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -117,25 +117,23 @@ namespace compiler {
V(OtherUndetectable, uint64_t{1} << 17) \
V(CallableProxy, uint64_t{1} << 18) \
V(OtherProxy, uint64_t{1} << 19) \
- V(Function, uint64_t{1} << 20) \
- V(BoundFunction, uint64_t{1} << 21) \
- V(Hole, uint64_t{1} << 22) \
- V(OtherInternal, uint64_t{1} << 23) \
- V(ExternalPointer, uint64_t{1} << 24) \
- V(Array, uint64_t{1} << 25) \
- V(UnsignedBigInt63, uint64_t{1} << 26) \
- V(OtherUnsignedBigInt64, uint64_t{1} << 27) \
- V(NegativeBigInt63, uint64_t{1} << 28) \
- V(OtherBigInt, uint64_t{1} << 29) \
- /* TODO(v8:10391): Remove this type once all ExternalPointer usages are */ \
- /* sandbox-ready. */ \
- V(SandboxedExternalPointer, uint64_t{1} << 30) \
- V(CagedPointer, uint64_t{1} << 31)
+ V(CallableFunction, uint64_t{1} << 20) \
+ V(ClassConstructor, uint64_t{1} << 21) \
+ V(BoundFunction, uint64_t{1} << 22) \
+ V(Hole, uint64_t{1} << 23) \
+ V(OtherInternal, uint64_t{1} << 24) \
+ V(ExternalPointer, uint64_t{1} << 25) \
+ V(Array, uint64_t{1} << 26) \
+ V(UnsignedBigInt63, uint64_t{1} << 27) \
+ V(OtherUnsignedBigInt64, uint64_t{1} << 28) \
+ V(NegativeBigInt63, uint64_t{1} << 29) \
+ V(OtherBigInt, uint64_t{1} << 30) \
+ V(WasmObject, uint64_t{1} << 31)
// We split the macro list into two parts because the Torque equivalent in
// turbofan-types.tq uses two 32bit bitfield structs.
-#define PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \
- V(WasmObject, uint64_t{1} << 32)
+#define PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \
+ V(SandboxedPointer, uint64_t{1} << 32)
#define PROPER_BITSET_TYPE_LIST(V) \
V(None, uint64_t{0}) \
@@ -190,6 +188,7 @@ namespace compiler {
V(Proxy, kCallableProxy | kOtherProxy) \
V(ArrayOrOtherObject, kArray | kOtherObject) \
V(ArrayOrProxy, kArray | kProxy) \
+ V(Function, kCallableFunction | kClassConstructor) \
V(DetectableCallable, kFunction | kBoundFunction | \
kOtherCallable | kCallableProxy) \
V(Callable, kDetectableCallable | kOtherUndetectable) \
@@ -208,8 +207,7 @@ namespace compiler {
V(Unique, kBoolean | kUniqueName | kNull | \
kUndefined | kHole | kReceiver) \
V(Internal, kHole | kExternalPointer | \
- kSandboxedExternalPointer | kCagedPointer | \
- kOtherInternal) \
+ kSandboxedPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonBigInt, kNonBigIntPrimitive | kReceiver) \
V(NonNumber, kBigInt | kUnique | kString | kInternal) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 4bae72c9e9..91d160a055 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -49,6 +49,9 @@ class Verifier::Visitor {
private:
void CheckNotTyped(Node* node) {
+ // Verification of simplified lowering sets types of many additional nodes.
+ if (FLAG_verify_simplified_lowering) return;
+
if (NodeProperties::IsTyped(node)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -736,30 +739,31 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Any());
CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
break;
- case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSSetKeyedProperty:
CheckNotTyped(node);
CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
break;
- case IrOpcode::kJSDefineProperty:
+ case IrOpcode::kJSDefineKeyedOwnProperty:
CheckNotTyped(node);
CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
break;
- case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSSetNamedProperty:
CheckNotTyped(node);
break;
case IrOpcode::kJSStoreGlobal:
CheckNotTyped(node);
CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
break;
- case IrOpcode::kJSStoreNamedOwn:
+ case IrOpcode::kJSDefineNamedOwnProperty:
CheckNotTyped(node);
- CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
+ CHECK(
+ DefineNamedOwnPropertyParametersOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSGetIterator:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Any());
break;
- case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSDefineKeyedOwnPropertyInLiteral:
case IrOpcode::kJSStoreInArrayLiteral:
CheckNotTyped(node);
CHECK(FeedbackParameterOf(node->op()).feedback().IsValid());
@@ -774,11 +778,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTypeOf:
CheckTypeIs(node, Type::InternalizedString());
break;
- case IrOpcode::kTierUpCheck:
- case IrOpcode::kUpdateInterruptBudget:
- CheckValueInputIs(node, 0, Type::Any());
- CheckNotTyped(node);
- break;
case IrOpcode::kJSGetSuperConstructor:
// We don't check the input for Type::Function because this_function can
// be context-allocated.
@@ -880,13 +879,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kJSAsyncFunctionReject:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Any());
- CheckValueInputIs(node, 2, Type::Boolean());
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSAsyncFunctionResolve:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Any());
- CheckValueInputIs(node, 2, Type::Boolean());
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSFulfillPromise:
@@ -1562,6 +1559,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, ElementAccessOf(node->op()).type));
break;
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject:
CheckValueInputIs(node, 0, Type::Receiver());
break;
case IrOpcode::kLoadTypedElement:
@@ -1584,6 +1582,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
case IrOpcode::kStoreToObject:
+ case IrOpcode::kInitializeImmutableInObject:
// TODO(gsps): Can we check some types here?
break;
case IrOpcode::kTransitionAndStoreElement:
@@ -2080,9 +2079,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
if (idom == nullptr) continue;
BitVector* block_doms = dominators[block->id().ToSize()];
- for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
- BasicBlock* dom =
- schedule->GetBlockById(BasicBlock::Id::FromInt(it.Current()));
+ for (int id : *block_doms) {
+ BasicBlock* dom = schedule->GetBlockById(BasicBlock::Id::FromInt(id));
if (dom != idom &&
!dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
FATAL("Block B%d is not immediately dominated by B%d",
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 07bb413588..0afc124ec8 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "src/api/api-inl.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
@@ -25,6 +26,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
+#include "src/compiler/fast-api-calls.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/graph.h"
@@ -37,10 +39,12 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/zone-stats.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
@@ -84,28 +88,25 @@ MachineType assert_size(int expected_size, MachineType type) {
assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
-// TODO(11510): Using LoadImmutable for tagged values causes registers to be
-// spilled and added to the safepoint table, resulting in large code size
-// regressions. A possible solution would be to not spill the register at all,
-// but rather reload the value from memory. This will require non-trivial
-// changes in the register allocator and instuction selector.
-#define LOAD_INSTANCE_FIELD(name, type) \
- (CanBeTaggedOrCompressedPointer((type).representation()) \
- ? LOAD_MUTABLE_INSTANCE_FIELD(name, type) \
- : gasm_->LoadImmutable( \
- assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
- GetInstance(), \
- wasm::ObjectAccess::ToTagged( \
- WasmInstanceObject::k##name##Offset)))
-
-#define LOAD_ROOT(root_name, factory_name) \
- (parameter_mode_ == kNoSpecialParameterMode \
- ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
- isolate_->factory()->factory_name())) \
- : gasm_->LoadImmutable(/* Root pointers do not get compressed. */ \
- MachineType::Pointer(), BuildLoadIsolateRoot(), \
- IsolateData::root_slot_offset( \
- RootIndex::k##root_name)))
+#define LOAD_INSTANCE_FIELD(name, type) \
+ gasm_->LoadImmutable( \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
+ wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
+
+#define LOAD_INSTANCE_FIELD_NO_ELIMINATION(name, type) \
+ gasm_->Load( \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
+ wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
+
+// Use MachineType::Pointer() over Tagged() to load root pointers because they
+// do not get compressed.
+#define LOAD_ROOT(root_name, factory_name) \
+ (parameter_mode_ == kNoSpecialParameterMode \
+ ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
+ isolate_->factory()->factory_name())) \
+ : gasm_->LoadImmutable( \
+ MachineType::Pointer(), BuildLoadIsolateRoot(), \
+ IsolateData::root_slot_offset(RootIndex::k##root_name)))
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
@@ -177,10 +178,11 @@ class WasmGraphAssembler : public GraphAssembler {
: GraphAssembler(mcgraph, zone), simplified_(zone) {}
template <typename... Args>
- Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id, Args*... args) {
+ Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id,
+ Operator::Properties properties, Args*... args) {
auto* call_descriptor = GetBuiltinCallDescriptor(
WasmRuntimeStubIdToBuiltinName(stub_id), temp_zone(),
- StubCallMode::kCallWasmRuntimeStub);
+ StubCallMode::kCallWasmRuntimeStub, false, properties);
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched at relocation.
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
@@ -263,6 +265,16 @@ class WasmGraphAssembler : public GraphAssembler {
return LoadFromObject(type, base, IntPtrConstant(offset));
}
+ Node* LoadImmutableFromObject(MachineType type, Node* base, Node* offset) {
+ return AddNode(graph()->NewNode(simplified_.LoadImmutableFromObject(
+ ObjectAccess(type, kNoWriteBarrier)),
+ base, offset, effect(), control()));
+ }
+
+ Node* LoadImmutableFromObject(MachineType type, Node* base, int offset) {
+ return LoadImmutableFromObject(type, base, IntPtrConstant(offset));
+ }
+
Node* LoadImmutable(LoadRepresentation rep, Node* base, Node* offset) {
return AddNode(graph()->NewNode(mcgraph()->machine()->LoadImmutable(rep),
base, offset));
@@ -283,6 +295,19 @@ class WasmGraphAssembler : public GraphAssembler {
return StoreToObject(access, base, IntPtrConstant(offset), value);
}
+ Node* InitializeImmutableInObject(ObjectAccess access, Node* base,
+ Node* offset, Node* value) {
+ return AddNode(
+ graph()->NewNode(simplified_.InitializeImmutableInObject(access), base,
+ offset, value, effect(), control()));
+ }
+
+ Node* InitializeImmutableInObject(ObjectAccess access, Node* base, int offset,
+ Node* value) {
+ return InitializeImmutableInObject(access, base, IntPtrConstant(offset),
+ value);
+ }
+
Node* IsI31(Node* object) {
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)),
@@ -295,8 +320,9 @@ class WasmGraphAssembler : public GraphAssembler {
// Maps and their contents.
Node* LoadMap(Node* object) {
- Node* map_word = LoadFromObject(MachineType::TaggedPointer(), object,
- HeapObject::kMapOffset - kHeapObjectTag);
+ Node* map_word =
+ LoadImmutableFromObject(MachineType::TaggedPointer(), object,
+ HeapObject::kMapOffset - kHeapObjectTag);
#ifdef V8_MAP_PACKING
return UnpackMapWord(map_word);
#else
@@ -309,23 +335,23 @@ class WasmGraphAssembler : public GraphAssembler {
#ifdef V8_MAP_PACKING
map = PackMapWord(TNode<Map>::UncheckedCast(map));
#endif
- StoreToObject(access, heap_object, HeapObject::kMapOffset - kHeapObjectTag,
- map);
+ InitializeImmutableInObject(access, heap_object,
+ HeapObject::kMapOffset - kHeapObjectTag, map);
}
Node* LoadInstanceType(Node* map) {
- return LoadFromObject(
+ return LoadImmutableFromObject(
MachineType::Uint16(), map,
wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
}
Node* LoadWasmTypeInfo(Node* map) {
int offset = Map::kConstructorOrBackPointerOrNativeContextOffset;
- return LoadFromObject(MachineType::TaggedPointer(), map,
- wasm::ObjectAccess::ToTagged(offset));
+ return LoadImmutableFromObject(MachineType::TaggedPointer(), map,
+ wasm::ObjectAccess::ToTagged(offset));
}
Node* LoadSupertypes(Node* wasm_type_info) {
- return LoadFromObject(
+ return LoadImmutableFromObject(
MachineType::TaggedPointer(), wasm_type_info,
wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
}
@@ -333,7 +359,7 @@ class WasmGraphAssembler : public GraphAssembler {
// FixedArrays.
Node* LoadFixedArrayLengthAsSmi(Node* fixed_array) {
- return LoadFromObject(
+ return LoadImmutableFromObject(
MachineType::TaggedSigned(), fixed_array,
wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
}
@@ -346,6 +372,15 @@ class WasmGraphAssembler : public GraphAssembler {
return LoadFromObject(type, fixed_array, offset);
}
+ Node* LoadImmutableFixedArrayElement(
+ Node* fixed_array, Node* index_intptr,
+ MachineType type = MachineType::AnyTagged()) {
+ Node* offset = IntAdd(
+ IntMul(index_intptr, IntPtrConstant(kTaggedSize)),
+ IntPtrConstant(wasm::ObjectAccess::ToTagged(FixedArray::kHeaderSize)));
+ return LoadImmutableFromObject(type, fixed_array, offset);
+ }
+
Node* LoadFixedArrayElement(Node* array, int index, MachineType type) {
return LoadFromObject(
type, array,
@@ -404,14 +439,16 @@ class WasmGraphAssembler : public GraphAssembler {
}
Node* LoadExportedFunctionIndexAsSmi(Node* exported_function_data) {
- return LoadFromObject(MachineType::TaggedSigned(), exported_function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kFunctionIndexOffset));
+ return LoadImmutableFromObject(
+ MachineType::TaggedSigned(), exported_function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kFunctionIndexOffset));
}
Node* LoadExportedFunctionInstance(Node* exported_function_data) {
- return LoadFromObject(MachineType::TaggedPointer(), exported_function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kInstanceOffset));
+ return LoadImmutableFromObject(
+ MachineType::TaggedPointer(), exported_function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kInstanceOffset));
}
// JavaScript objects.
@@ -431,13 +468,18 @@ class WasmGraphAssembler : public GraphAssembler {
Node* StoreStructField(Node* struct_object, const wasm::StructType* type,
uint32_t field_index, Node* value) {
- return StoreToObject(ObjectAccessForGCStores(type->field(field_index)),
- struct_object, FieldOffset(type, field_index), value);
+ ObjectAccess access = ObjectAccessForGCStores(type->field(field_index));
+ return type->mutability(field_index)
+ ? StoreToObject(access, struct_object,
+ FieldOffset(type, field_index), value)
+ : InitializeImmutableInObject(access, struct_object,
+ FieldOffset(type, field_index),
+ value);
}
Node* WasmArrayElementOffset(Node* index, wasm::ValueType element_type) {
Node* index_intptr =
- mcgraph()->machine()->Is64() ? ChangeInt32ToInt64(index) : index;
+ mcgraph()->machine()->Is64() ? ChangeUint32ToUint64(index) : index;
return IntAdd(
IntPtrConstant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)),
IntMul(index_intptr,
@@ -445,7 +487,7 @@ class WasmGraphAssembler : public GraphAssembler {
}
Node* LoadWasmArrayLength(Node* array) {
- return LoadFromObject(
+ return LoadImmutableFromObject(
MachineType::Uint32(), array,
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
}
@@ -636,6 +678,7 @@ Node* WasmGraphBuilder::RefNull() { return LOAD_ROOT(NullValue, null_value); }
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRefFunc,
+ Operator::kNoThrow,
gasm_->Uint32Constant(function_index));
}
@@ -660,9 +703,9 @@ Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
// that the generated code is Isolate independent.
return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
case kWasmApiFunctionRefMode:
- // Note: Even if V8_HEAP_SANDBOX, the pointer to the isolate root is not
- // encoded, much like the case above. TODO(manoskouk): Decode the pointer
- // here if that changes.
+ // Note: Even if V8_SANDBOXED_EXTERNAL_POINTERS, the pointer to the
+ // isolate root is not encoded, much like the case above. TODO(manoskouk):
+ // Decode the pointer here if that changes.
return gasm_->Load(
MachineType::Pointer(), Param(0),
wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kIsolateRootOffset));
@@ -683,7 +726,9 @@ Node* WasmGraphBuilder::UndefinedValue() {
return LOAD_ROOT(UndefinedValue, undefined_value);
}
-void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
+void WasmGraphBuilder::StackCheck(
+ WasmInstanceCacheNodes* shared_memory_instance_cache,
+ wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(env_); // Wrappers don't get stack checks.
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) {
return;
@@ -699,7 +744,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
Node* if_true;
Node* if_false;
- gasm_->Branch(check, &if_true, &if_false, BranchHint::kTrue);
+ BranchExpectTrue(check, &if_true, &if_false);
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
@@ -710,12 +755,18 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
+ constexpr Operator::Properties properties =
+ Operator::kNoThrow | Operator::kNoWrite;
+ // If we ever want to mark this call as kNoDeopt, we'll have to make it
+ // non-eliminatable some other way.
+ STATIC_ASSERT((properties & Operator::kEliminatable) !=
+ Operator::kEliminatable);
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
NoContextDescriptor{}, // descriptor
0, // stack parameter count
CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
+ properties, // properties
StubCallMode::kCallWasmRuntimeStub); // stub call mode
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
@@ -725,11 +776,28 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
stack_check_code_node_.get(), effect(), if_false);
SetSourcePosition(call, position);
- DCHECK_GT(call->op()->ControlOutputCount(), 0);
- Node* merge = graph()->NewNode(mcgraph()->common()->Merge(2), if_true, call);
DCHECK_GT(call->op()->EffectOutputCount(), 0);
- Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), effect(),
- call, merge);
+ DCHECK_EQ(call->op()->ControlOutputCount(), 0);
+
+ SetEffectControl(call, if_false);
+
+ Node* merge = Merge(if_true, control());
+ Node* ephi_inputs[] = {check, effect(), merge};
+ Node* ephi = EffectPhi(2, ephi_inputs);
+
+ // We only need to refresh the size of a shared memory, as its start can never
+ // change.
+ if (shared_memory_instance_cache != nullptr) {
+ // We handle caching of the instance cache nodes manually, and we may reload
+ // them in contexts where load elimination would eliminate the reload.
+ // Therefore, we use plain Load nodes which are not subject to load
+ // elimination.
+ Node* new_memory_size =
+ LOAD_INSTANCE_FIELD_NO_ELIMINATION(MemorySize, MachineType::UintPtr());
+ shared_memory_instance_cache->mem_size = CreateOrMergeIntoPhi(
+ MachineType::PointerRepresentation(), merge,
+ shared_memory_instance_cache->mem_size, new_memory_size);
+ }
SetEffectControl(ephi, merge);
}
@@ -743,9 +811,11 @@ void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
SetEffectControl(dummy);
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
- StackCheck(0);
+ // We pass the null instance cache, as we are at the beginning of the function
+ // and do not need to update it.
+ StackCheck(nullptr, 0);
- // In testing, no steck checks were emitted. Nothing to rewire then.
+ // In testing, no stack checks were emitted. Nothing to rewire then.
if (effect() == dummy) return;
// Now patch all control uses of {start} to use {control} and all effect uses
@@ -2389,7 +2459,8 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) {
needs_stack_check_ = true;
if (!env_->module->is_memory64) {
// For 32-bit memories, just call the builtin.
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow,
+ Operator::kNoThrow, input);
}
// If the input is not a positive int32, growing will always fail
@@ -2403,7 +2474,8 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) {
SetControl(is_32_bit.if_true);
Node* grow_result = gasm_->ChangeInt32ToInt64(gasm_->CallRuntimeStub(
- wasm::WasmCode::kWasmMemoryGrow, gasm_->TruncateInt64ToInt32(input)));
+ wasm::WasmCode::kWasmMemoryGrow, Operator::kNoThrow,
+ gasm_->TruncateInt64ToInt32(input)));
Node* diamond_result = is_32_bit.Phi(MachineRepresentation::kWord64,
grow_result, gasm_->Int64Constant(-1));
@@ -2417,9 +2489,9 @@ Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag,
needs_stack_check_ = true;
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(tag);
- Node* values_array =
- gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAllocateFixedArray,
- gasm_->IntPtrConstant(encoded_size));
+ Node* values_array = gasm_->CallRuntimeStub(
+ wasm::WasmCode::kWasmAllocateFixedArray, Operator::kNoThrow,
+ gasm_->IntPtrConstant(encoded_size));
SetSourcePosition(values_array, position);
uint32_t index = 0;
@@ -2462,7 +2534,6 @@ Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag,
case wasm::kRef:
case wasm::kOptRef:
case wasm::kRtt:
- case wasm::kRttWithDepth:
gasm_->StoreFixedArrayElementAny(values_array, index, value);
++index;
break;
@@ -2478,6 +2549,7 @@ Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag,
Node* exception_tag = LoadTagFromTable(tag_index);
Node* throw_call = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmThrow,
+ Operator::kNoProperties,
exception_tag, values_array);
SetSourcePosition(throw_call, position);
return throw_call;
@@ -2524,7 +2596,8 @@ Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
// TODO(v8:8091): Currently the message of the original exception is not being
// preserved when rethrown to the console. The pending message will need to be
// saved when caught and restored here while being rethrown.
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRethrow, except_obj);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRethrow,
+ Operator::kNoProperties, except_obj);
}
Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
@@ -2592,7 +2665,6 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
case wasm::kRef:
case wasm::kOptRef:
case wasm::kRtt:
- case wasm::kRttWithDepth:
value = gasm_->LoadFixedArrayElementAny(values_array, index);
++index;
break;
@@ -3029,7 +3101,7 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = gasm_->LoadFromObject(
+ Node* target_node = gasm_->LoadImmutableFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
@@ -3185,19 +3257,25 @@ Node* WasmGraphBuilder::BuildIndirectCall(
}
}
-Node* WasmGraphBuilder::BuildUnsandboxExternalPointer(Node* external_pointer) {
-#ifdef V8_HEAP_SANDBOX
+Node* WasmGraphBuilder::BuildLoadExternalPointerFromObject(
+ Node* object, int offset, ExternalPointerTag tag) {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ Node* external_pointer = gasm_->LoadFromObject(
+ MachineType::Uint32(), object, wasm::ObjectAccess::ToTagged(offset));
+ STATIC_ASSERT(kExternalPointerIndexShift > kSystemPointerSizeLog2);
+ Node* shift_amount =
+ gasm_->Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
+ Node* scaled_index = gasm_->Word32Shr(external_pointer, shift_amount);
Node* isolate_root = BuildLoadIsolateRoot();
Node* table =
gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
IsolateData::external_pointer_table_offset() +
Internals::kExternalPointerTableBufferOffset);
- Node* offset = gasm_->Int32Mul(external_pointer, gasm_->Int32Constant(8));
- Node* decoded_ptr = gasm_->Load(MachineType::Pointer(), table, offset);
- Node* tag = gasm_->IntPtrConstant(~kForeignForeignAddressTag);
- return gasm_->WordAnd(decoded_ptr, tag);
+ Node* decoded_ptr = gasm_->Load(MachineType::Pointer(), table, scaled_index);
+ return gasm_->WordAnd(decoded_ptr, gasm_->IntPtrConstant(~tag));
#else
- return external_pointer;
+ return gasm_->LoadFromObject(MachineType::Pointer(), object,
+ wasm::ObjectAccess::ToTagged(offset));
#endif
}
@@ -3206,11 +3284,8 @@ Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
Node* internal = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmExportedFunctionData::kInternalOffset));
- Node* external_pointer =
- gasm_->LoadFromObject(MachineType::Pointer(), internal,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kForeignAddressOffset));
- return BuildUnsandboxExternalPointer(external_pointer);
+ return BuildLoadExternalPointerFromObject(
+ internal, WasmInternalFunction::kForeignAddressOffset);
}
// TODO(9495): Support CAPI function refs.
@@ -3229,31 +3304,25 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* real_sig,
auto load_target = gasm_->MakeLabel();
auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation());
- Node* ref_node = gasm_->LoadFromObject(
+ Node* ref_node = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
- Node* external_target =
- gasm_->LoadFromObject(MachineType::Pointer(), function,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kForeignAddressOffset));
-
- Node* target = BuildUnsandboxExternalPointer(external_target);
+ Node* target = BuildLoadExternalPointerFromObject(
+ function, WasmInternalFunction::kForeignAddressOffset);
Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0));
gasm_->GotoIfNot(is_null_target, &end_label, target);
{
// Compute the call target from the (on-heap) wrapper code. The cached
// target can only be null for WasmJSFunctions.
- Node* wrapper_code = gasm_->LoadFromObject(
+ Node* wrapper_code = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
Node* call_target;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CHECK(!V8_HEAP_SANDBOX_BOOL); // Not supported yet.
- call_target =
- gasm_->LoadFromObject(MachineType::Pointer(), wrapper_code,
- wasm::ObjectAccess::ToTagged(
- CodeDataContainer::kCodeEntryPointOffset));
+ call_target = BuildLoadExternalPointerFromObject(
+ wrapper_code, CodeDataContainer::kCodeEntryPointOffset,
+ kCodeEntryPointTag);
} else {
call_target = gasm_->IntAdd(
@@ -3278,7 +3347,7 @@ void WasmGraphBuilder::CompareToInternalFunctionAtIndex(
Node** failure_control) {
// Since we are comparing to a function reference, it is guaranteed that
// instance->wasm_internal_functions() has been initialized.
- Node* internal_functions = gasm_->LoadFromObject(
+ Node* internal_functions = gasm_->LoadImmutable(
MachineType::TaggedPointer(), GetInstance(),
wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kWasmInternalFunctionsOffset));
@@ -3444,14 +3513,23 @@ Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
void WasmGraphBuilder::InitInstanceCache(
WasmInstanceCacheNodes* instance_cache) {
+ // We handle caching of the instance cache nodes manually, and we may reload
+ // them in contexts where load elimination would eliminate the reload.
+ // Therefore, we use plain Load nodes which are not subject to load
+ // elimination.
// Load the memory start.
+#ifdef V8_SANDBOXED_POINTERS
+ instance_cache->mem_start = LOAD_INSTANCE_FIELD_NO_ELIMINATION(
+ MemoryStart, MachineType::SandboxedPointer());
+#else
instance_cache->mem_start =
- LOAD_MUTABLE_INSTANCE_FIELD(MemoryStart, MachineType::UintPtr());
+ LOAD_INSTANCE_FIELD_NO_ELIMINATION(MemoryStart, MachineType::UintPtr());
+#endif
// Load the memory size.
instance_cache->mem_size =
- LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
+ LOAD_INSTANCE_FIELD_NO_ELIMINATION(MemorySize, MachineType::UintPtr());
}
void WasmGraphBuilder::PrepareInstanceCacheForLoop(
@@ -3547,58 +3625,6 @@ void WasmGraphBuilder::SetEffectControl(Node* effect, Node* control) {
gasm_->InitializeEffectControl(effect, control);
}
-Node* WasmGraphBuilder::GetImportedMutableGlobals() {
- return LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr());
-}
-
-void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
- const wasm::WasmGlobal& global,
- Node** base_node,
- Node** offset_node) {
- if (global.mutability && global.imported) {
- *base_node = gasm_->LoadFromObject(
- MachineType::UintPtr(), GetImportedMutableGlobals(),
- Int32Constant(global.index * sizeof(Address)));
- *offset_node = Int32Constant(0);
- } else {
- Node* globals_start =
- LOAD_INSTANCE_FIELD(GlobalsStart, MachineType::UintPtr());
- *base_node = globals_start;
- *offset_node = Int32Constant(global.offset);
-
- if (mem_type == MachineType::Simd128() && global.offset != 0) {
- // TODO(titzer,bbudge): code generation for SIMD memory offsets is broken.
- *base_node = gasm_->IntAdd(*base_node, *offset_node);
- *offset_node = Int32Constant(0);
- }
- }
-}
-
-void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
- const wasm::WasmGlobal& global, Node** base, Node** offset) {
- // Load the base from the ImportedMutableGlobalsBuffer of the instance.
- Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
- MachineType::TaggedPointer());
- *base = gasm_->LoadFixedArrayElementAny(buffers, global.index);
-
- // For the offset we need the index of the global in the buffer, and then
- // calculate the actual offset from the index. Load the index from the
- // ImportedMutableGlobals array of the instance.
- Node* index =
- gasm_->LoadFromObject(MachineType::UintPtr(), GetImportedMutableGlobals(),
- Int32Constant(global.index * sizeof(Address)));
-
- // From the index, calculate the actual offset in the FixedArray. This
- // is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
- // wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
- Node* index_times_tagged_size = gasm_->IntMul(
- BuildChangeUint32ToUintPtr(index), Int32Constant(kTaggedSize));
- *offset = gasm_->IntAdd(
- index_times_tagged_size,
- mcgraph()->IntPtrConstant(
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
-}
-
Node* WasmGraphBuilder::MemBuffer(uintptr_t offset) {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_start = instance_cache_->mem_start;
@@ -3636,9 +3662,9 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
DCHECK_EQ(1, fun->result_size);
auto centry_id =
Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
- Node* centry_stub =
- gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
- IsolateData::BuiltinSlotOffset(centry_id));
+ int builtin_slot_offset = IsolateData::BuiltinSlotOffset(centry_id);
+ Node* centry_stub = gasm_->LoadFromObject(MachineType::Pointer(),
+ isolate_root, builtin_slot_offset);
// TODO(titzer): allow arbitrary number of runtime arguments
// At the moment we only allow 5 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3667,74 +3693,74 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
parameter_count);
}
-Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
- const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.is_reference()) {
- if (global.mutability && global.imported) {
- Node* base = nullptr;
- Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
- return gasm_->LoadFromObject(MachineType::AnyTagged(), base, offset);
+void WasmGraphBuilder::GetGlobalBaseAndOffset(const wasm::WasmGlobal& global,
+ Node** base, Node** offset) {
+ if (global.mutability && global.imported) {
+ Node* base_or_index = gasm_->LoadFromObject(
+ MachineType::UintPtr(),
+ LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr()),
+ Int32Constant(global.index * kSystemPointerSize));
+ if (global.type.is_reference()) {
+ // Load the base from the ImportedMutableGlobalsBuffer of the instance.
+ Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
+ MachineType::TaggedPointer());
+ *base = gasm_->LoadFixedArrayElementAny(buffers, global.index);
+
+ // For this case, {base_or_index} gives the index of the global in the
+ // buffer. From the index, calculate the actual offset in the FixedArray.
+ // This is kHeaderSize + (index * kTaggedSize).
+ *offset = gasm_->IntAdd(
+ gasm_->IntMul(base_or_index, gasm_->IntPtrConstant(kTaggedSize)),
+ gasm_->IntPtrConstant(
+ wasm::ObjectAccess::ToTagged(FixedArray::kObjectsOffset)));
+ } else {
+ *base = base_or_index;
+ *offset = gasm_->IntPtrConstant(0);
}
- Node* globals_buffer =
+ } else if (global.type.is_reference()) {
+ *base =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
- return gasm_->LoadFixedArrayElementAny(globals_buffer, global.offset);
+ *offset = gasm_->IntPtrConstant(
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global.offset));
+ } else {
+ *base = LOAD_INSTANCE_FIELD(GlobalsStart, MachineType::UintPtr());
+ *offset = gasm_->IntPtrConstant(global.offset);
}
+}
- MachineType mem_type = global.type.machine_type();
- if (mem_type.representation() == MachineRepresentation::kSimd128) {
- has_simd_ = true;
- }
+Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
+ const wasm::WasmGlobal& global = env_->module->globals[index];
+ if (global.type == wasm::kWasmS128) has_simd_ = true;
Node* base = nullptr;
Node* offset = nullptr;
- GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- // TODO(manoskouk): Cannot use LoadFromObject here due to
- // GetGlobalBaseAndOffset pointer arithmetic.
- return gasm_->Load(mem_type, base, offset);
+ GetGlobalBaseAndOffset(global, &base, &offset);
+ MachineType mem_type = global.type.machine_type();
+ return global.mutability ? gasm_->LoadFromObject(mem_type, base, offset)
+ : gasm_->LoadImmutable(mem_type, base, offset);
}
void WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.is_reference()) {
- if (global.mutability && global.imported) {
- Node* base = nullptr;
- Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
-
- gasm_->StoreToObject(
- ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier), base,
- offset, val);
- return;
- }
- Node* globals_buffer =
- LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
- gasm_->StoreFixedArrayElementAny(globals_buffer, global.offset, val);
- return;
- }
-
- MachineType mem_type = global.type.machine_type();
- if (mem_type.representation() == MachineRepresentation::kSimd128) {
- has_simd_ = true;
- }
+ if (global.type == wasm::kWasmS128) has_simd_ = true;
Node* base = nullptr;
Node* offset = nullptr;
- GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- auto store_rep =
- StoreRepresentation(mem_type.representation(), kNoWriteBarrier);
- // TODO(manoskouk): Cannot use StoreToObject here due to
- // GetGlobalBaseAndOffset pointer arithmetic.
- gasm_->Store(store_rep, base, offset, val);
+ GetGlobalBaseAndOffset(global, &base, &offset);
+ ObjectAccess access(global.type.machine_type(), global.type.is_reference()
+ ? kFullWriteBarrier
+ : kNoWriteBarrier);
+ gasm_->StoreToObject(access, base, offset, val);
}
Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableGet,
+ Operator::kNoThrow,
gasm_->IntPtrConstant(table_index), index);
}
void WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
- gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet,
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet, Operator::kNoThrow,
gasm_->IntPtrConstant(table_index), index, val);
}
@@ -4003,96 +4029,6 @@ MemoryAccessKind GetMemoryAccessKind(
}
} // namespace
-// S390 simulator does not execute BE code, hence needs to also check if we are
-// running on a LE simulator.
-// TODO(miladfar): Remove SIM once V8_TARGET_BIG_ENDIAN includes the Sim.
-#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
-Node* WasmGraphBuilder::LoadTransformBigEndian(
- wasm::ValueType type, MachineType memtype,
- wasm::LoadTransformationKind transform, Node* index, uint64_t offset,
- uint32_t alignment, wasm::WasmCodePosition position) {
-#define LOAD_EXTEND(num_lanes, bytes_per_load, replace_lane) \
- result = graph()->NewNode(mcgraph()->machine()->S128Zero()); \
- Node* values[num_lanes]; \
- for (int i = 0; i < num_lanes; i++) { \
- values[i] = LoadMem(type, memtype, index, offset + i * bytes_per_load, \
- alignment, position); \
- if (memtype.IsSigned()) { \
- /* sign extend */ \
- values[i] = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), \
- values[i]); \
- } else { \
- /* zero extend */ \
- values[i] = graph()->NewNode( \
- mcgraph()->machine()->ChangeUint32ToUint64(), values[i]); \
- } \
- } \
- for (int lane = 0; lane < num_lanes; lane++) { \
- result = graph()->NewNode(mcgraph()->machine()->replace_lane(lane), \
- result, values[lane]); \
- }
- Node* result;
- LoadTransformation transformation = GetLoadTransformation(memtype, transform);
-
- switch (transformation) {
- case LoadTransformation::kS128Load8Splat: {
- result = LoadMem(type, memtype, index, offset, alignment, position);
- result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), result);
- break;
- }
- case LoadTransformation::kS128Load8x8S:
- case LoadTransformation::kS128Load8x8U: {
- LOAD_EXTEND(8, 1, I16x8ReplaceLane)
- break;
- }
- case LoadTransformation::kS128Load16Splat: {
- result = LoadMem(type, memtype, index, offset, alignment, position);
- result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), result);
- break;
- }
- case LoadTransformation::kS128Load16x4S:
- case LoadTransformation::kS128Load16x4U: {
- LOAD_EXTEND(4, 2, I32x4ReplaceLane)
- break;
- }
- case LoadTransformation::kS128Load32Splat: {
- result = LoadMem(type, memtype, index, offset, alignment, position);
- result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), result);
- break;
- }
- case LoadTransformation::kS128Load32x2S:
- case LoadTransformation::kS128Load32x2U: {
- LOAD_EXTEND(2, 4, I64x2ReplaceLane)
- break;
- }
- case LoadTransformation::kS128Load64Splat: {
- result = LoadMem(type, memtype, index, offset, alignment, position);
- result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), result);
- break;
- }
- case LoadTransformation::kS128Load32Zero: {
- result = graph()->NewNode(mcgraph()->machine()->S128Zero());
- result = graph()->NewNode(
- mcgraph()->machine()->I32x4ReplaceLane(0), result,
- LoadMem(type, memtype, index, offset, alignment, position));
- break;
- }
- case LoadTransformation::kS128Load64Zero: {
- result = graph()->NewNode(mcgraph()->machine()->S128Zero());
- result = graph()->NewNode(
- mcgraph()->machine()->I64x2ReplaceLane(0), result,
- LoadMem(type, memtype, index, offset, alignment, position));
- break;
- }
- default:
- UNREACHABLE();
- }
-
- return result;
-#undef LOAD_EXTEND
-}
-#endif
-
Node* WasmGraphBuilder::LoadLane(wasm::ValueType type, MachineType memtype,
Node* value, Node* index, uint64_t offset,
uint32_t alignment, uint8_t laneidx,
@@ -4106,24 +4042,6 @@ Node* WasmGraphBuilder::LoadLane(wasm::ValueType type, MachineType memtype,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
-#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
- load = LoadMem(type, memtype, index, offset, alignment, position);
- if (memtype == MachineType::Int8()) {
- load = graph()->NewNode(mcgraph()->machine()->I8x16ReplaceLane(laneidx),
- value, load);
- } else if (memtype == MachineType::Int16()) {
- load = graph()->NewNode(mcgraph()->machine()->I16x8ReplaceLane(laneidx),
- value, load);
- } else if (memtype == MachineType::Int32()) {
- load = graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(laneidx),
- value, load);
- } else if (memtype == MachineType::Int64()) {
- load = graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(laneidx),
- value, load);
- } else {
- UNREACHABLE();
- }
-#else
MemoryAccessKind load_kind = GetMemoryAccessKind(
mcgraph_, memtype.representation(), bounds_check_result);
@@ -4134,7 +4052,6 @@ Node* WasmGraphBuilder::LoadLane(wasm::ValueType type, MachineType memtype,
if (load_kind == MemoryAccessKind::kProtected) {
SetSourcePosition(load, position);
}
-#endif
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
position);
@@ -4154,14 +4071,6 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
-#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
- // LoadTransform cannot efficiently be executed on BE machines as a
- // single operation since loaded bytes need to be reversed first,
- // therefore we divide them into separate "load" and "operation" nodes.
- load = LoadTransformBigEndian(type, memtype, transform, index, offset,
- alignment, position);
- USE(GetMemoryAccessKind);
-#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
@@ -4184,7 +4093,6 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
if (load_kind == MemoryAccessKind::kProtected) {
SetSourcePosition(load, position);
}
-#endif
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
@@ -4259,25 +4167,6 @@ void WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
-#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
- Node* output;
- if (mem_rep == MachineRepresentation::kWord8) {
- output =
- graph()->NewNode(mcgraph()->machine()->I8x16ExtractLaneS(laneidx), val);
- } else if (mem_rep == MachineRepresentation::kWord16) {
- output =
- graph()->NewNode(mcgraph()->machine()->I16x8ExtractLaneS(laneidx), val);
- } else if (mem_rep == MachineRepresentation::kWord32) {
- output =
- graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(laneidx), val);
- } else if (mem_rep == MachineRepresentation::kWord64) {
- output =
- graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(laneidx), val);
- } else {
- UNREACHABLE();
- }
- StoreMem(mem_rep, index, offset, alignment, output, position, type);
-#else
MemoryAccessKind load_kind =
GetMemoryAccessKind(mcgraph_, mem_rep, bounds_check_result);
@@ -4288,7 +4177,6 @@ void WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
if (load_kind == MemoryAccessKind::kProtected) {
SetSourcePosition(store, position);
}
-#endif
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
}
@@ -5484,7 +5372,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
switch (opcode) {
case wasm::kExprAtomicNotify:
return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAtomicNotify,
- effective_offset, inputs[1]);
+ Operator::kNoThrow, effective_offset,
+ inputs[1]);
case wasm::kExprI32AtomicWait: {
auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
@@ -5532,17 +5421,20 @@ void WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_init());
+ MemTypeToUintPtrOrOOBTrap({&dst}, position);
+
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
- {MachineRepresentation::kWord32, dst},
+ {MachineType::PointerRepresentation(), dst},
{MachineRepresentation::kWord32, src},
{MachineRepresentation::kWord32,
gasm_->Uint32Constant(data_segment_index)},
{MachineRepresentation::kWord32, size}});
- MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
- MachineSignature sig(1, 1, sig_types);
+ auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
+ .Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
+ // TODO(manoskouk): Also throw kDataSegmentOutOfBounds.
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
@@ -5572,26 +5464,48 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
for (auto arg : args) {
MachineRepresentation type = arg.first;
Node* value = arg.second;
- gasm_->Store(StoreRepresentation(type, kNoWriteBarrier), stack_slot,
- Int32Constant(offset), value);
+ gasm_->StoreUnaligned(type, stack_slot, Int32Constant(offset), value);
offset += ElementSizeInBytes(type);
}
return stack_slot;
}
+void WasmGraphBuilder::MemTypeToUintPtrOrOOBTrap(
+ std::initializer_list<Node**> nodes, wasm::WasmCodePosition position) {
+ if (!env_->module->is_memory64) {
+ for (Node** node : nodes) {
+ *node = BuildChangeUint32ToUintPtr(*node);
+ }
+ return;
+ }
+ if (kSystemPointerSize == kInt64Size) return; // memory64 on 64-bit
+ Node* any_high_word = nullptr;
+ for (Node** node : nodes) {
+ Node* high_word =
+ gasm_->TruncateInt64ToInt32(gasm_->Word64Shr(*node, Int32Constant(32)));
+ any_high_word =
+ any_high_word ? gasm_->Word32Or(any_high_word, high_word) : high_word;
+ // Only keep the low word as uintptr_t.
+ *node = gasm_->TruncateInt64ToInt32(*node);
+ }
+ TrapIfTrue(wasm::kTrapMemOutOfBounds, any_high_word, position);
+}
+
void WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_copy());
+ MemTypeToUintPtrOrOOBTrap({&dst, &src, &size}, position);
+
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
- {MachineRepresentation::kWord32, dst},
- {MachineRepresentation::kWord32, src},
- {MachineRepresentation::kWord32, size}});
+ {MachineType::PointerRepresentation(), dst},
+ {MachineType::PointerRepresentation(), src},
+ {MachineType::PointerRepresentation(), size}});
- MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
- MachineSignature sig(1, 1, sig_types);
+ auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
+ .Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
@@ -5601,14 +5515,16 @@ void WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_fill());
+ MemTypeToUintPtrOrOOBTrap({&dst, &size}, position);
+
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
- {MachineRepresentation::kWord32, dst},
+ {MachineType::PointerRepresentation(), dst},
{MachineRepresentation::kWord32, value},
- {MachineRepresentation::kWord32, size}});
+ {MachineType::PointerRepresentation(), size}});
- MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
- MachineSignature sig(1, 1, sig_types);
+ auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
+ .Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
@@ -5617,8 +5533,8 @@ void WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, dst, src, size,
- gasm_->NumberConstant(table_index),
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, Operator::kNoThrow,
+ dst, src, size, gasm_->NumberConstant(table_index),
gasm_->NumberConstant(elem_segment_index));
}
@@ -5639,15 +5555,15 @@ void WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
void WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
uint32_t table_src_index, Node* dst, Node* src,
Node* size, wasm::WasmCodePosition position) {
- gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, dst, src, size,
- gasm_->NumberConstant(table_dst_index),
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, Operator::kNoThrow,
+ dst, src, size, gasm_->NumberConstant(table_dst_index),
gasm_->NumberConstant(table_src_index));
}
Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
Node* delta) {
return BuildChangeSmiToInt32(gasm_->CallRuntimeStub(
- wasm::WasmCode::kWasmTableGrow,
+ wasm::WasmCode::kWasmTableGrow, Operator::kNoThrow,
graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), delta,
value));
}
@@ -5668,7 +5584,7 @@ Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
void WasmGraphBuilder::TableFill(uint32_t table_index, Node* start, Node* value,
Node* count) {
gasm_->CallRuntimeStub(
- wasm::WasmCode::kWasmTableFill,
+ wasm::WasmCode::kWasmTableFill, Operator::kNoThrow,
graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), start,
count, value);
}
@@ -5680,7 +5596,7 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
int size = WasmStruct::Size(type);
Node* s = gasm_->Allocate(size);
gasm_->StoreMap(s, rtt);
- gasm_->StoreToObject(
+ gasm_->InitializeImmutableInObject(
ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), s,
wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
@@ -5752,8 +5668,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
-Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
- const wasm::ArrayType* type, Node* rtt,
+Node* WasmGraphBuilder::ArrayInit(const wasm::ArrayType* type, Node* rtt,
base::Vector<Node*> elements) {
wasm::ValueType element_type = type->element_type();
// TODO(7748): Consider using gasm_->Allocate().
@@ -5765,25 +5680,44 @@ Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
for (int i = 0; i < static_cast<int>(elements.size()); i++) {
Node* offset =
gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
- gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
- elements[i]);
+ if (type->mutability()) {
+ gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
+ elements[i]);
+ } else {
+ gasm_->InitializeImmutableInObject(ObjectAccessForGCStores(element_type),
+ array, offset, elements[i]);
+ }
}
return array;
}
+Node* WasmGraphBuilder::ArrayInitFromData(const wasm::ArrayType* type,
+ uint32_t data_segment, Node* offset,
+ Node* length, Node* rtt,
+ wasm::WasmCodePosition position) {
+ Node* array = gasm_->CallBuiltin(
+ Builtin::kWasmArrayInitFromData, Operator::kNoDeopt | Operator::kNoThrow,
+ gasm_->Uint32Constant(data_segment), offset, length, rtt);
+ TrapIfTrue(wasm::kTrapArrayTooLarge,
+ gasm_->TaggedEqual(
+ array, gasm_->NumberConstant(
+ wasm::kArrayInitFromDataArrayTooLargeErrorCode)),
+ position);
+ TrapIfTrue(
+ wasm::kTrapDataSegmentOutOfBounds,
+ gasm_->TaggedEqual(
+ array, gasm_->NumberConstant(
+ wasm::kArrayInitFromDataSegmentOutOfBoundsErrorCode)),
+ position);
+ return array;
+}
+
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
- return gasm_->LoadFixedArrayElementPtr(maps_list, type_index);
-}
-
-Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt,
- WasmRttSubMode mode) {
- Builtin target = mode == WasmRttSubMode::kCanonicalize
- ? Builtin::kWasmAllocateRtt
- : Builtin::kWasmAllocateFreshRtt;
- return gasm_->CallBuiltin(target, Operator::kEliminatable,
- Int32Constant(type_index), parent_rtt);
+ return gasm_->LoadImmutable(
+ MachineType::TaggedPointer(), maps_list,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index));
}
WasmGraphBuilder::Callbacks WasmGraphBuilder::TestCallbacks(
@@ -5876,22 +5810,18 @@ void WasmGraphBuilder::TypeCheck(
Node* type_info = gasm_->LoadWasmTypeInfo(map);
Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* rtt_depth =
- config.rtt_depth >= 0
- ? gasm_->IntPtrConstant(config.rtt_depth)
- : BuildChangeSmiToIntPtr(gasm_->LoadFixedArrayLengthAsSmi(
- gasm_->LoadSupertypes(gasm_->LoadWasmTypeInfo(rtt))));
+ Node* rtt_depth = gasm_->UintPtrConstant(config.rtt_depth);
+
// If the depth of the rtt is known to be less that the minimum supertype
// array length, we can access the supertype without bounds-checking the
// supertype array.
- if (config.rtt_depth < 0 || static_cast<uint32_t>(config.rtt_depth) >=
- wasm::kMinimumSupertypeArraySize) {
+ if (config.rtt_depth >= wasm::kMinimumSupertypeArraySize) {
Node* supertypes_length =
BuildChangeSmiToIntPtr(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
callbacks.fail_if_not(gasm_->UintLessThan(rtt_depth, supertypes_length),
BranchHint::kTrue);
}
- Node* maybe_match = gasm_->LoadFixedArrayElement(
+ Node* maybe_match = gasm_->LoadImmutableFixedArrayElement(
supertypes, rtt_depth, MachineType::TaggedPointer());
callbacks.fail_if_not(gasm_->TaggedEqual(maybe_match, rtt),
@@ -5908,15 +5838,16 @@ void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
callbacks.fail_if_not(gasm_->IsDataRefMap(map), BranchHint::kTrue);
}
-void WasmGraphBuilder::FuncCheck(Node* object, bool object_can_be_null,
- Callbacks callbacks) {
+void WasmGraphBuilder::ManagedObjectInstanceCheck(Node* object,
+ bool object_can_be_null,
+ InstanceType instance_type,
+ Callbacks callbacks) {
if (object_can_be_null) {
callbacks.fail_if(IsNull(object), BranchHint::kFalse);
}
callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
- callbacks.fail_if_not(
- gasm_->HasInstanceType(object, WASM_INTERNAL_FUNCTION_TYPE),
- BranchHint::kTrue);
+ callbacks.fail_if_not(gasm_->HasInstanceType(object, instance_type),
+ BranchHint::kTrue);
}
void WasmGraphBuilder::BrOnCastAbs(
@@ -6009,7 +5940,8 @@ void WasmGraphBuilder::BrOnData(Node* object, Node* /*rtt*/,
Node* WasmGraphBuilder::RefIsFunc(Node* object, bool object_can_be_null) {
auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- FuncCheck(object, object_can_be_null, TestCallbacks(&done));
+ ManagedObjectInstanceCheck(object, object_can_be_null,
+ WASM_INTERNAL_FUNCTION_TYPE, TestCallbacks(&done));
gasm_->Goto(&done, Int32Constant(1));
gasm_->Bind(&done);
return done.PhiAt(0);
@@ -6018,7 +5950,9 @@ Node* WasmGraphBuilder::RefIsFunc(Node* object, bool object_can_be_null) {
Node* WasmGraphBuilder::RefAsFunc(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position) {
auto done = gasm_->MakeLabel();
- FuncCheck(object, object_can_be_null, CastCallbacks(&done, position));
+ ManagedObjectInstanceCheck(object, object_can_be_null,
+ WASM_INTERNAL_FUNCTION_TYPE,
+ CastCallbacks(&done, position));
gasm_->Goto(&done);
gasm_->Bind(&done);
return object;
@@ -6031,7 +5965,41 @@ void WasmGraphBuilder::BrOnFunc(Node* object, Node* /*rtt*/,
Node** no_match_effect) {
BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
[=](Callbacks callbacks) -> void {
- return FuncCheck(object, config.object_can_be_null, callbacks);
+ return ManagedObjectInstanceCheck(
+ object, config.object_can_be_null,
+ WASM_INTERNAL_FUNCTION_TYPE, callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsArray(Node* object, bool object_can_be_null) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ ManagedObjectInstanceCheck(object, object_can_be_null, WASM_ARRAY_TYPE,
+ TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefAsArray(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ ManagedObjectInstanceCheck(object, object_can_be_null, WASM_ARRAY_TYPE,
+ CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+void WasmGraphBuilder::BrOnArray(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ return ManagedObjectInstanceCheck(object,
+ config.object_can_be_null,
+ WASM_ARRAY_TYPE, callbacks);
});
}
@@ -6069,7 +6037,10 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
MachineType machine_type = MachineType::TypeForRepresentation(
struct_type->field(field_index).machine_representation(), is_signed);
Node* offset = gasm_->FieldOffset(struct_type, field_index);
- return gasm_->LoadFromObject(machine_type, struct_object, offset);
+ return struct_type->mutability(field_index)
+ ? gasm_->LoadFromObject(machine_type, struct_object, offset)
+ : gasm_->LoadImmutableFromObject(machine_type, struct_object,
+ offset);
}
void WasmGraphBuilder::StructSet(Node* struct_object,
@@ -6114,7 +6085,10 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
MachineType machine_type = MachineType::TypeForRepresentation(
type->element_type().machine_representation(), is_signed);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
- return gasm_->LoadFromObject(machine_type, array_object, offset);
+ return type->mutability()
+ ? gasm_->LoadFromObject(machine_type, array_object, offset)
+ : gasm_->LoadImmutableFromObject(machine_type, array_object,
+ offset);
}
void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,
@@ -6410,17 +6384,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
int AddArgumentNodes(base::Vector<Node*> args, int pos, int param_count,
- const wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig, Node* context) {
// Convert wasm numbers to JS values.
for (int i = 0; i < param_count; ++i) {
Node* param =
Param(i + 1); // Start from index 1 to drop the instance_node.
- args[pos++] = ToJS(param, sig->GetParam(i));
+ args[pos++] = ToJS(param, sig->GetParam(i), context);
}
return pos;
}
- Node* ToJS(Node* node, wasm::ValueType type) {
+ Node* ToJS(Node* node, wasm::ValueType type, Node* context) {
switch (type.kind()) {
case wasm::kI32:
return BuildChangeInt32ToNumber(node);
@@ -6433,8 +6407,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kRef:
case wasm::kOptRef:
switch (type.heap_representation()) {
- case wasm::HeapType::kExtern:
- return node;
case wasm::HeapType::kFunc: {
if (type.kind() == wasm::kOptRef) {
auto done =
@@ -6455,8 +6427,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
WasmInternalFunction::kExternalOffset));
}
}
- case wasm::HeapType::kData:
case wasm::HeapType::kEq:
+ case wasm::HeapType::kData:
+ case wasm::HeapType::kArray:
case wasm::HeapType::kI31:
// TODO(7748): Update this when JS interop is settled.
if (type.kind() == wasm::kOptRef) {
@@ -6464,30 +6437,40 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
// Do not wrap {null}.
gasm_->GotoIf(IsNull(node), &done, node);
- gasm_->Goto(&done, BuildAllocateObjectWrapper(node));
+ gasm_->Goto(&done, BuildAllocateObjectWrapper(node, context));
gasm_->Bind(&done);
return done.PhiAt(0);
} else {
- return BuildAllocateObjectWrapper(node);
+ return BuildAllocateObjectWrapper(node, context);
}
case wasm::HeapType::kAny: {
- // Wrap {node} in object wrapper if it is an array/struct/i31.
+ if (!enabled_features_.has_gc()) return node;
+ // Wrap {node} in object wrapper if it is an array/struct.
// Extract external function if this is a WasmInternalFunction.
// Otherwise (i.e. null and external refs), return input.
+ // Treat i31 as externref because they are indistinguishable from
+ // Smis.
// TODO(7748): Update this when JS interop is settled.
+ auto wrap = gasm_->MakeLabel();
+ auto function = gasm_->MakeLabel();
auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
- gasm_->GotoIf(IsSmi(node), &done, BuildAllocateObjectWrapper(node));
- // This includes the case where {node == null}.
- gasm_->GotoIf(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &done,
- BuildAllocateObjectWrapper(node));
+ gasm_->GotoIf(IsSmi(node), &done, node);
+ gasm_->GotoIf(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &wrap);
gasm_->GotoIf(
gasm_->HasInstanceType(node, WASM_INTERNAL_FUNCTION_TYPE),
- &done,
- gasm_->LoadFromObject(
- MachineType::TaggedPointer(), node,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kExternalOffset)));
+ &function);
+ // This includes the case where {node == null}.
gasm_->Goto(&done, node);
+
+ gasm_->Bind(&wrap);
+ gasm_->Goto(&done, BuildAllocateObjectWrapper(node, context));
+
+ gasm_->Bind(&function);
+ gasm_->Goto(&done, gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset)));
+
gasm_->Bind(&done);
return done.PhiAt(0);
}
@@ -6506,7 +6489,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
UNREACHABLE();
}
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kI8:
case wasm::kI16:
case wasm::kS128:
@@ -6521,18 +6503,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// TODO(7748): Temporary solution to allow round-tripping of Wasm objects
// through JavaScript, where they show up as opaque boxes. This will disappear
// once we have a proper WasmGC <-> JS interaction story.
- Node* BuildAllocateObjectWrapper(Node* input) {
+ Node* BuildAllocateObjectWrapper(Node* input, Node* context) {
if (FLAG_wasm_gc_js_interop) return input;
- return gasm_->CallBuiltin(
- Builtin::kWasmAllocateObjectWrapper, Operator::kEliminatable, input,
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ return gasm_->CallBuiltin(Builtin::kWasmAllocateObjectWrapper,
+ Operator::kEliminatable, input, context);
}
// Assumes {input} has been checked for validity against the target wasm type.
// If {input} is a function, returns the WasmInternalFunction associated with
// it. If {input} has the {wasm_wrapped_object_symbol} property, returns the
// value of that property. Otherwise, returns {input}.
- Node* BuildUnpackObjectWrapper(Node* input) {
+ Node* BuildUnpackObjectWrapper(Node* input, Node* context) {
auto not_a_function = gasm_->MakeLabel();
auto end = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
@@ -6552,7 +6533,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* obj = gasm_->CallBuiltin(
Builtin::kWasmGetOwnProperty, Operator::kEliminatable, input,
LOAD_ROOT(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol),
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ context);
// Invalid object wrappers (i.e. any other JS object that doesn't have the
// magic hidden property) will return {undefined}. Map that to {input}.
Node* is_undefined = gasm_->TaggedEqual(obj, UndefinedValue());
@@ -6636,27 +6617,27 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kRef:
case wasm::kOptRef: {
switch (type.heap_representation()) {
- case wasm::HeapType::kExtern:
- return input;
case wasm::HeapType::kAny:
+ if (!enabled_features_.has_gc()) return input;
// If this is a wrapper for arrays/structs/i31s, unpack it.
// TODO(7748): Update this when JS interop has settled.
- return BuildUnpackObjectWrapper(input);
+ return BuildUnpackObjectWrapper(input, js_context);
case wasm::HeapType::kFunc:
BuildCheckValidRefValue(input, js_context, type);
- return BuildUnpackObjectWrapper(input);
+ return BuildUnpackObjectWrapper(input, js_context);
case wasm::HeapType::kData:
+ case wasm::HeapType::kArray:
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
// TODO(7748): Update this when JS interop has settled.
BuildCheckValidRefValue(input, js_context, type);
// This will just return {input} if the object is not wrapped, i.e.
// if it is null (given the check just above).
- return BuildUnpackObjectWrapper(input);
+ return BuildUnpackObjectWrapper(input, js_context);
default:
if (module_->has_signature(type.ref_index())) {
BuildCheckValidRefValue(input, js_context, type);
- return BuildUnpackObjectWrapper(input);
+ return BuildUnpackObjectWrapper(input, js_context);
}
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
@@ -6678,7 +6659,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildChangeBigIntToInt64(input, js_context, frame_state);
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kS128:
case wasm::kI8:
case wasm::kI16:
@@ -6734,7 +6714,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kOptRef:
case wasm::kI64:
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kS128:
case wasm::kI8:
case wasm::kI16:
@@ -6858,11 +6837,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* internal = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset));
- Node* sandboxed_pointer = gasm_->LoadFromObject(
- MachineType::Pointer(), internal,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kForeignAddressOffset));
- args[0] = BuildUnsandboxExternalPointer(sandboxed_pointer);
+ args[0] = BuildLoadExternalPointerFromObject(
+ internal, WasmInternalFunction::kForeignAddressOffset);
Node* instance_node = gasm_->LoadFromObject(
MachineType::TaggedPointer(), internal,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
@@ -6877,7 +6853,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} else if (sig_->return_count() == 1) {
jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion()
? rets[0]
- : ToJS(rets[0], sig_->GetReturn());
+ : ToJS(rets[0], sig_->GetReturn(), js_context);
} else {
int32_t return_count = static_cast<int32_t>(sig_->return_count());
Node* size = gasm_->NumberConstant(return_count);
@@ -6887,7 +6863,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* fixed_array = gasm_->LoadJSArrayElements(jsval);
for (int i = 0; i < return_count; ++i) {
- Node* value = ToJS(rets[i], sig_->GetReturn(i));
+ Node* value = ToJS(rets[i], sig_->GetReturn(i), js_context);
gasm_->StoreFixedArrayElementAny(fixed_array, i, value);
}
}
@@ -6903,7 +6879,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kOptRef:
case wasm::kI64:
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kS128:
case wasm::kI8:
case wasm::kI16:
@@ -6954,7 +6929,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kOptRef:
case wasm::kI64:
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kS128:
case wasm::kI8:
case wasm::kI16:
@@ -7085,8 +7059,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
global_proxy);
}
+ Node* BuildSuspend(Node* value, Node* api_function_ref,
+ MachineRepresentation rep) {
+ // If value is a promise, suspend to the js-to-wasm prompt, and resume later
+ // with the promise's resolved value.
+ auto resume = gasm_->MakeLabel(rep);
+ gasm_->GotoIf(IsSmi(value), &resume, value);
+ gasm_->GotoIfNot(gasm_->HasInstanceType(value, JS_PROMISE_TYPE), &resume,
+ BranchHint::kTrue, value);
+ Node* suspender = gasm_->Load(
+ MachineType::TaggedPointer(), api_function_ref,
+ wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kSuspenderOffset));
+ auto* call_descriptor = GetBuiltinCallDescriptor(
+ Builtin::kWasmSuspend, zone_, StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmSuspend, RelocInfo::WASM_STUB_CALL);
+ Node* args[] = {value, suspender};
+ Node* chained_promise =
+ BuildCallToRuntime(Runtime::kWasmCreateResumePromise, args, 2);
+ Node* resolved =
+ gasm_->Call(call_descriptor, call_target, chained_promise, suspender);
+ gasm_->Goto(&resume, resolved);
+ gasm_->Bind(&resume);
+ return resume.PhiAt(0);
+ }
+
// For wasm-to-js wrappers, parameter 0 is a WasmApiFunctionRef.
- bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity) {
+ bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity,
+ wasm::Suspend suspend) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -7136,7 +7136,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
+ pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_,
+ native_context);
args[pos++] = undefined_node; // new target
args[pos++] =
@@ -7147,6 +7148,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK_EQ(pos, args.size());
call = gasm_->Call(call_descriptor, pos, args.begin());
+ if (suspend == wasm::kSuspend) {
+ MachineRepresentation rep =
+ sig_->return_count() >= 1
+ ? sig_->GetReturn(0).machine_representation()
+ : MachineRepresentation::kNone;
+ call = BuildSuspend(call, Param(0), rep);
+ }
break;
}
// =======================================================================
@@ -7163,7 +7171,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildReceiverNode(callable_node, native_context, undefined_node);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
+ pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_,
+ native_context);
for (int i = wasm_count; i < expected_arity; ++i) {
args[pos++] = undefined_node;
}
@@ -7181,6 +7190,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = Linkage::GetJSCallDescriptor(
graph()->zone(), false, pushed_count + 1, CallDescriptor::kNoFlags);
call = gasm_->Call(call_descriptor, pos, args.begin());
+ // TODO(12191): Handle suspending wrapper.
break;
}
// =======================================================================
@@ -7202,7 +7212,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
StubCallMode::kCallBuiltinPointer);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
+ pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_,
+ native_context);
// The native_context is sufficient here, because all kind of callables
// which depend on the context provide their own context. The context
@@ -7215,6 +7226,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK_EQ(pos, args.size());
call = gasm_->Call(call_descriptor, pos, args.begin());
+ // TODO(12191): Handle suspending wrapper.
break;
}
default:
@@ -7351,6 +7363,110 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
}
+ void BuildJSFastApiCallWrapper(Handle<JSFunction> target) {
+ // Here 'callable_node' must be equal to 'target' but we cannot pass a
+ // HeapConstant(target) because WasmCode::Validate() fails with
+ // Unexpected mode: FULL_EMBEDDED_OBJECT.
+ Node* callable_node = gasm_->Load(
+ MachineType::TaggedPointer(), Param(0),
+ wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kCallableOffset));
+ Node* native_context = gasm_->Load(
+ MachineType::TaggedPointer(), Param(0),
+ wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kNativeContextOffset));
+ Node* undefined_node = UndefinedValue();
+ Node* receiver_node =
+ BuildReceiverNode(callable_node, native_context, undefined_node);
+
+ SharedFunctionInfo shared = target->shared();
+ FunctionTemplateInfo api_func_data = shared.get_api_func_data();
+ const Address c_address = api_func_data.GetCFunction(0);
+ const v8::CFunctionInfo* c_signature = api_func_data.GetCSignature(0);
+ int c_arg_count = c_signature->ArgumentCount();
+
+ BuildModifyThreadInWasmFlag(false);
+
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ Address c_functions[] = {c_address};
+ const v8::CFunctionInfo* const c_signatures[] = {c_signature};
+ target->GetIsolate()->simulator_data()->RegisterFunctionsAndSignatures(
+ c_functions, c_signatures, 1);
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+
+ MachineSignature::Builder builder(graph()->zone(), 1, c_arg_count);
+ builder.AddReturn(MachineType::TypeForCType(c_signature->ReturnInfo()));
+ for (int i = 0; i < c_arg_count; i += 1) {
+ builder.AddParam(MachineType::TypeForCType(c_signature->ArgumentInfo(i)));
+ }
+
+ base::SmallVector<Node*, 16> args(c_arg_count + 3);
+ int pos = 0;
+
+ args[pos++] = mcgraph()->ExternalConstant(
+ ExternalReference::Create(c_address, ExternalReference::FAST_C_CALL));
+
+ auto store_stack = [this](Node* node) -> Node* {
+ constexpr int kAlign = alignof(uintptr_t);
+ constexpr int kSize = sizeof(uintptr_t);
+ Node* stack_slot = gasm_->StackSlot(kSize, kAlign);
+
+ gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot, 0, node);
+ return stack_slot;
+ };
+
+ // Set receiver.
+ args[pos++] = store_stack(receiver_node);
+
+ for (int i = 1; i < c_arg_count; i += 1) {
+ switch (c_signature->ArgumentInfo(i).GetType()) {
+ case CTypeInfo::Type::kV8Value:
+ args[pos++] = store_stack(Param(i));
+ break;
+ default:
+ args[pos++] = Param(i);
+ break;
+ }
+ }
+ DCHECK(!c_signature->HasOptions());
+ args[pos++] = effect();
+ args[pos++] = control();
+
+ auto call_descriptor =
+ Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
+
+ // CPU profiler support.
+ Node* target_address = gasm_->ExternalConstant(
+ ExternalReference::fast_api_call_target_address(target->GetIsolate()));
+ gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ target_address, 0, gasm_->IntPtrConstant(c_address));
+
+ // Disable JS execution.
+ Node* javascript_execution_assert = gasm_->ExternalConstant(
+ ExternalReference::javascript_execution_assert(target->GetIsolate()));
+ gasm_->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ javascript_execution_assert, 0, gasm_->Int32Constant(0));
+
+ // Execute the fast call API.
+ Node* call = gasm_->Call(call_descriptor, pos, args.begin());
+
+ // Reenable JS execution.
+ gasm_->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ javascript_execution_assert, 0, gasm_->Int32Constant(1));
+
+ // Reset the CPU profiler target address.
+ gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ target_address, 0, gasm_->IntPtrConstant(0));
+
+ BuildModifyThreadInWasmFlag(true);
+
+ Return(call);
+ }
+
void BuildJSToJSWrapper() {
int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -7398,8 +7514,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert parameter JS values to wasm numbers and back to JS values.
for (int i = 0; i < wasm_count; ++i) {
Node* param = Param(i + 1); // Start from index 1 to skip receiver.
- args[pos++] =
- ToJS(FromJS(param, context, sig_->GetParam(i)), sig_->GetParam(i));
+ args[pos++] = ToJS(FromJS(param, context, sig_->GetParam(i)),
+ sig_->GetParam(i), context);
}
args[pos++] = context;
@@ -7414,7 +7530,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (sig_->return_count() == 0) {
jsval = UndefinedValue();
} else if (sig_->return_count() == 1) {
- jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+ jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn(),
+ context);
} else {
Node* fixed_array =
BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
@@ -7425,7 +7542,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (unsigned i = 0; i < sig_->return_count(); ++i) {
const auto& type = sig_->GetReturn(i);
Node* elem = gasm_->LoadFixedArrayElementAny(fixed_array, i);
- Node* cast = ToJS(FromJS(elem, context, type), type);
+ Node* cast = ToJS(FromJS(elem, context, type), type, context);
gasm_->StoreFixedArrayElementAny(result_fixed_array, i, cast);
}
}
@@ -7581,46 +7698,172 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
std::move(debug_name), WasmAssemblerOptions());
}
-std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
+static MachineRepresentation NormalizeFastApiRepresentation(
+ const CTypeInfo& info) {
+ MachineType t = MachineType::TypeForCType(info);
+ // Wasm representation of bool is i32 instead of i1.
+ if (t.semantic() == MachineSemantic::kBool) {
+ return MachineRepresentation::kWord32;
+ }
+ return t.representation();
+}
+
+static bool IsSupportedWasmFastApiFunction(
+ const wasm::FunctionSig* expected_sig, Handle<SharedFunctionInfo> shared) {
+ if (!shared->IsApiFunction()) {
+ return false;
+ }
+ if (shared->get_api_func_data().GetCFunctionsCount() == 0) {
+ return false;
+ }
+ if (!shared->get_api_func_data().accept_any_receiver()) {
+ return false;
+ }
+ if (!shared->get_api_func_data().signature().IsUndefined()) {
+ // TODO(wasm): CFunctionInfo* signature check.
+ return false;
+ }
+ const CFunctionInfo* info = shared->get_api_func_data().GetCSignature(0);
+ if (!fast_api_call::CanOptimizeFastSignature(info)) {
+ return false;
+ }
+ // Options are not supported yet.
+ if (info->HasOptions()) {
+ return false;
+ }
+
+ const auto log_imported_function_mismatch = [&shared]() {
+ if (FLAG_trace_opt) {
+ CodeTracer::Scope scope(shared->GetIsolate()->GetCodeTracer());
+ PrintF(scope.file(), "[disabled optimization for ");
+ shared->ShortPrint(scope.file());
+ PrintF(scope.file(),
+ ", reason: the signature of the imported function in the Wasm "
+ "module doesn't match that of the Fast API function]\n");
+ }
+ };
+
+ // C functions only have one return value.
+ if (expected_sig->return_count() > 1) {
+ // Here and below, we log when the function we call is declared as an Api
+ // function but we cannot optimize the call, which might be unxepected. In
+ // that case we use the "slow" path making a normal Wasm->JS call and
+ // calling the "slow" callback specified in FunctionTemplate::New().
+ log_imported_function_mismatch();
+ return false;
+ }
+ CTypeInfo return_info = info->ReturnInfo();
+ // Unsupported if return type doesn't match.
+ if (expected_sig->return_count() == 0 &&
+ return_info.GetType() != CTypeInfo::Type::kVoid) {
+ log_imported_function_mismatch();
+ return false;
+ }
+ // Unsupported if return type doesn't match.
+ if (expected_sig->return_count() == 1) {
+ if (return_info.GetType() == CTypeInfo::Type::kVoid) {
+ log_imported_function_mismatch();
+ return false;
+ }
+ if (NormalizeFastApiRepresentation(return_info) !=
+ expected_sig->GetReturn(0).machine_type().representation()) {
+ log_imported_function_mismatch();
+ return false;
+ }
+ }
+ // Unsupported if arity doesn't match.
+ if (expected_sig->parameter_count() != info->ArgumentCount() - 1) {
+ log_imported_function_mismatch();
+ return false;
+ }
+ // Unsupported if any argument types don't match.
+ for (unsigned int i = 0; i < expected_sig->parameter_count(); i += 1) {
+ // Arg 0 is the receiver, skip over it since wasm doesn't
+ // have a concept of receivers.
+ CTypeInfo arg = info->ArgumentInfo(i + 1);
+ if (NormalizeFastApiRepresentation(arg) !=
+ expected_sig->GetParam(i).machine_type().representation()) {
+ log_imported_function_mismatch();
+ return false;
+ }
+ }
+ return true;
+}
+
+WasmImportData ResolveWasmImportCall(
Handle<JSReceiver> callable, const wasm::FunctionSig* expected_sig,
const wasm::WasmModule* module,
const wasm::WasmFeatures& enabled_features) {
+ Isolate* isolate = callable->GetIsolate();
+ Handle<HeapObject> no_suspender;
if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
auto imported_function = Handle<WasmExportedFunction>::cast(callable);
if (!imported_function->MatchesSignature(module, expected_sig)) {
- return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ return {WasmImportCallKind::kLinkError, callable, no_suspender};
}
uint32_t func_index =
static_cast<uint32_t>(imported_function->function_index());
if (func_index >=
imported_function->instance().module()->num_imported_functions) {
- return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
+ return {WasmImportCallKind::kWasmToWasm, callable, no_suspender};
}
- Isolate* isolate = callable->GetIsolate();
// Resolve the shortcut to the underlying callable and continue.
Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
ImportedFunctionEntry entry(instance, func_index);
callable = handle(entry.callable(), isolate);
}
+ Handle<HeapObject> suspender = isolate->factory()->undefined_value();
if (WasmJSFunction::IsWasmJSFunction(*callable)) {
auto js_function = Handle<WasmJSFunction>::cast(callable);
- if (!js_function->MatchesSignature(expected_sig)) {
- return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ suspender = handle(js_function->GetSuspender(), isolate);
+ if ((!suspender->IsUndefined() &&
+ !js_function->MatchesSignatureForSuspend(expected_sig)) ||
+ (suspender->IsUndefined() &&
+ !js_function->MatchesSignature(expected_sig))) {
+ return {WasmImportCallKind::kLinkError, callable, no_suspender};
}
- Isolate* isolate = callable->GetIsolate();
// Resolve the short-cut to the underlying callable and continue.
callable = handle(js_function->GetCallable(), isolate);
}
if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
auto capi_function = Handle<WasmCapiFunction>::cast(callable);
if (!capi_function->MatchesSignature(expected_sig)) {
- return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ return {WasmImportCallKind::kLinkError, callable, no_suspender};
}
- return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
+ return {WasmImportCallKind::kWasmToCapi, callable, no_suspender};
}
// Assuming we are calling to JS, check whether this would be a runtime error.
if (!wasm::IsJSCompatibleSignature(expected_sig, module, enabled_features)) {
- return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
+ return {WasmImportCallKind::kRuntimeTypeError, callable, no_suspender};
+ }
+ // Check if this can be a JS fast API call.
+ if (FLAG_turbo_fast_api_calls &&
+ (callable->IsJSFunction() || callable->IsJSBoundFunction())) {
+ Handle<JSFunction> target;
+ if (callable->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> bound_target =
+ Handle<JSBoundFunction>::cast(callable);
+ // Nested bound functions and arguments not supported yet.
+ if (bound_target->bound_arguments().length() == 0 &&
+ !bound_target->bound_target_function().IsJSBoundFunction()) {
+ Handle<JSReceiver> bound_target_function = handle(
+ bound_target->bound_target_function(), callable->GetIsolate());
+ if (bound_target_function->IsJSFunction()) {
+ target = Handle<JSFunction>::cast(bound_target_function);
+ }
+ }
+ } else {
+ DCHECK(callable->IsJSFunction());
+ target = Handle<JSFunction>::cast(callable);
+ }
+
+ if (!target.is_null()) {
+ Handle<SharedFunctionInfo> shared(target->shared(), target->GetIsolate());
+
+ if (IsSupportedWasmFastApiFunction(expected_sig, shared)) {
+ return {WasmImportCallKind::kWasmToJSFastApi, target, no_suspender};
+ }
+ }
}
// For JavaScript calls, determine whether the target has an arity match.
if (callable->IsJSFunction()) {
@@ -7636,7 +7879,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
DCHECK_NOT_NULL(sig); \
if (*expected_sig == *sig) { \
- return std::make_pair(WasmImportCallKind::k##name, callable); \
+ return {WasmImportCallKind::k##name, callable, no_suspender}; \
} \
}
#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
@@ -7681,13 +7924,12 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
if (IsClassConstructor(shared->kind())) {
// Class constructor will throw anyway.
- return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
+ return {WasmImportCallKind::kUseCallBuiltin, callable, suspender};
}
if (shared->internal_formal_parameter_count_without_receiver() ==
expected_sig->parameter_count()) {
- return std::make_pair(WasmImportCallKind::kJSFunctionArityMatch,
- callable);
+ return {WasmImportCallKind::kJSFunctionArityMatch, callable, suspender};
}
// If function isn't compiled, compile it now.
@@ -7698,11 +7940,10 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
&is_compiled_scope);
}
- return std::make_pair(WasmImportCallKind::kJSFunctionArityMismatch,
- callable);
+ return {WasmImportCallKind::kJSFunctionArityMismatch, callable, suspender};
}
// Unknown case. Use the call builtin.
- return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
+ return {WasmImportCallKind::kUseCallBuiltin, callable, suspender};
}
namespace {
@@ -7808,9 +8049,11 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::CompilationEnv* env, WasmImportCallKind kind,
- const wasm::FunctionSig* sig, bool source_positions, int expected_arity) {
+ const wasm::FunctionSig* sig, bool source_positions, int expected_arity,
+ wasm::Suspend suspend) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
+ DCHECK_NE(WasmImportCallKind::kWasmToJSFastApi, kind);
// Check for math intrinsics first.
if (FLAG_wasm_math_intrinsics &&
@@ -7821,6 +8064,11 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileWasmImportCallWrapper");
+ base::TimeTicks start_time;
+ if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) {
+ start_time = base::TimeTicks::Now();
+ }
+
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -7840,7 +8088,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
&zone, mcgraph, sig, env->module,
WasmGraphBuilder::kWasmApiFunctionRefMode, nullptr, source_position_table,
StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
- builder.BuildWasmToJSWrapper(kind, expected_arity);
+ builder.BuildWasmToJSWrapper(kind, expected_arity, suspend);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -7856,9 +8104,19 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- return Pipeline::GenerateCodeForWasmNativeStub(
+ wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name,
WasmStubAssemblerOptions(), source_position_table);
+
+ if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) {
+ base::TimeDelta time = base::TimeTicks::Now() - start_time;
+ int codesize = result.code_desc.body_size();
+ StdoutStream{} << "Compiled WasmToJS wrapper " << func_name << ", took "
+ << time.InMilliseconds() << " ms; codesize " << codesize
+ << std::endl;
+ }
+
+ return result;
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
@@ -7909,10 +8167,63 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
return published_code;
}
+wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
+ const wasm::FunctionSig* sig,
+ Handle<JSFunction> target) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmJSFastCallWrapper");
+
+ Zone zone(wasm::GetWasmEngine()->allocator(), ZONE_NAME, kCompressGraphZone);
+
+ // TODO(jkummerow): Extract common code into helper method.
+ SourcePositionTable* source_positions = nullptr;
+ MachineGraph* mcgraph = zone.New<MachineGraph>(
+ zone.New<Graph>(&zone), zone.New<CommonOperatorBuilder>(&zone),
+ zone.New<MachineOperatorBuilder>(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
+
+ WasmWrapperGraphBuilder builder(
+ &zone, mcgraph, sig, native_module->module(),
+ WasmGraphBuilder::kWasmApiFunctionRefMode, nullptr, source_positions,
+ StubCallMode::kCallWasmRuntimeStub, native_module->enabled_features());
+
+ // Set up the graph start.
+ int param_count = static_cast<int>(sig->parameter_count()) +
+ 1 /* offset for first parameter index being -1 */ +
+ 1 /* Wasm instance */ + 1 /* kExtraCallableParam */;
+ builder.Start(param_count);
+ builder.BuildJSFastApiCallWrapper(target);
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* call_descriptor =
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper);
+ if (mcgraph->machine()->Is32()) {
+ call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
+ }
+
+ const char* debug_name = "WasmJSFastApiCall";
+ wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
+ call_descriptor, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, debug_name,
+ WasmStubAssemblerOptions(), source_positions);
+ {
+ wasm::CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper,
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ return native_module->PublishCode(std::move(wasm_code));
+ }
+}
+
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
WasmImportCallKind kind,
- int expected_arity) {
+ int expected_arity,
+ wasm::Suspend suspend) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -7930,7 +8241,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
nullptr, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
- builder.BuildWasmToJSWrapper(kind, expected_arity);
+ builder.BuildWasmToJSWrapper(kind, expected_arity, suspend);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -8060,11 +8371,7 @@ Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
-#ifdef V8_EXTERNAL_CODE_SPACE
- return handle(ToCodeT(*job->compilation_info()->code()), isolate);
-#else
- return job->compilation_info()->code();
-#endif
+ return ToCodeT(job->compilation_info()->code(), isolate);
}
namespace {
@@ -8099,8 +8406,23 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
return true;
}
-base::Vector<const char> GetDebugName(Zone* zone, int index) {
- // TODO(herhut): Use name from module if available.
+base::Vector<const char> GetDebugName(Zone* zone,
+ const wasm::WasmModule* module,
+ const wasm::WireBytesStorage* wire_bytes,
+ int index) {
+ base::Optional<wasm::ModuleWireBytes> module_bytes =
+ wire_bytes->GetModuleBytes();
+ if (module_bytes.has_value() &&
+ (FLAG_trace_turbo || FLAG_trace_turbo_scheduled ||
+ FLAG_trace_turbo_graph || FLAG_print_wasm_code)) {
+ wasm::WireBytesRef name = module->lazily_generated_names.LookupFunctionName(
+ module_bytes.value(), index);
+ int name_len = name.length();
+ char* index_name = zone->NewArray<char>(name_len);
+ memcpy(index_name, module_bytes->start() + name.offset(), name.length());
+ return base::Vector<const char>(index_name, name_len);
+ }
+
constexpr int kBufferLength = 24;
base::EmbeddedVector<char, kBufferLength> name_vector;
@@ -8115,7 +8437,7 @@ base::Vector<const char> GetDebugName(Zone* zone, int index) {
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_bytes_storage,
+ wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_byte_storage,
const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -8129,8 +8451,9 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()));
- OptimizedCompilationInfo info(GetDebugName(&zone, func_index), &zone,
- CodeKind::WASM_FUNCTION);
+ OptimizedCompilationInfo info(
+ GetDebugName(&zone, env->module, wire_byte_storage, func_index), &zone,
+ CodeKind::WASM_FUNCTION);
if (env->runtime_exception_support) {
info.set_wasm_runtime_exception_support();
}
@@ -8172,10 +8495,9 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
return wasm::WasmCompilationResult{};
}
- Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
- call_descriptor, source_positions,
- node_origins, func_body, env->module,
- func_index, &loop_infos);
+ Pipeline::GenerateCodeForWasmFunction(
+ &info, env, wire_byte_storage, mcgraph, call_descriptor, source_positions,
+ node_origins, func_body, env->module, func_index, &loop_infos);
if (counters) {
int zone_bytes =
@@ -8299,8 +8621,8 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
int return_slots = rets.NumStackSlots();
- const RegList kCalleeSaveRegisters = 0;
- const RegList kCalleeSaveFPRegisters = 0;
+ const RegList kCalleeSaveRegisters;
+ const DoubleRegList kCalleeSaveFPRegisters;
// The target for wasm calls is always a code object.
MachineType target_type = MachineType::Pointer();
@@ -8332,7 +8654,7 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
"wasm-call", // debug name
StackArgumentOrder::kDefault, // order of the arguments in the stack
fsig, // signature
- 0, // allocatable registers
+ RegList{}, // allocatable registers
return_slots); // return slot count
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 93b08a0dd9..9fa017ef84 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -15,6 +15,7 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/base/small-vector.h"
+#include "src/objects/js-function.h"
#include "src/runtime/runtime.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
@@ -55,6 +56,7 @@ class WasmCode;
class WasmFeatures;
class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
+enum Suspend : bool { kSuspend = false, kNoSuspend = true };
} // namespace wasm
namespace compiler {
@@ -71,6 +73,7 @@ enum class WasmImportCallKind : uint8_t {
kLinkError, // static Wasm->Wasm type error
kRuntimeTypeError, // runtime Wasm->JS type error
kWasmToCapi, // fast Wasm->C-API call
+ kWasmToJSFastApi, // fast Wasm->JS Fast API C call
kWasmToWasm, // fast Wasm->Wasm call
kJSFunctionArityMatch, // fast Wasm->JS call
kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
@@ -107,24 +110,34 @@ enum class WasmImportCallKind : uint8_t {
constexpr WasmImportCallKind kDefaultImportCallKind =
WasmImportCallKind::kJSFunctionArityMatch;
+struct WasmImportData {
+ WasmImportCallKind kind;
+ Handle<JSReceiver> callable;
+ Handle<HeapObject> suspender;
+};
// Resolves which import call wrapper is required for the given JS callable.
-// Returns the kind of wrapper need and the ultimate target callable. Note that
-// some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just wrap
-// another target, which is why the ultimate target is returned as well.
-V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
-ResolveWasmImportCall(Handle<JSReceiver> callable, const wasm::FunctionSig* sig,
- const wasm::WasmModule* module,
- const wasm::WasmFeatures& enabled_features);
+// Returns the kind of wrapper needed, the ultimate target callable, and the
+// suspender object if applicable. Note that some callables (e.g. a
+// {WasmExportedFunction} or {WasmJSFunction}) just wrap another target, which
+// is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE WasmImportData ResolveWasmImportCall(
+ Handle<JSReceiver> callable, const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module, const wasm::WasmFeatures& enabled_features);
// Compiles an import call wrapper, which allows Wasm to call imports.
V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::CompilationEnv* env, WasmImportCallKind, const wasm::FunctionSig*,
- bool source_positions, int expected_arity);
+ bool source_positions, int expected_arity, wasm::Suspend);
// Compiles a host call wrapper, which allows Wasm to call host functions.
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule*,
const wasm::FunctionSig*);
+// Compiles a wrapper to call a Fast API function from Wasm.
+wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule*,
+ const wasm::FunctionSig*,
+ Handle<JSFunction> target);
+
// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, const wasm::FunctionSig* sig,
@@ -134,7 +147,8 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
WasmImportCallKind kind,
- int expected_arity);
+ int expected_arity,
+ wasm::Suspend suspend);
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
@@ -183,12 +197,14 @@ struct WasmInstanceCacheNodes {
struct WasmLoopInfo {
Node* header;
uint32_t nesting_depth;
- bool is_innermost;
+ // This loop has, to our best knowledge, no other loops nested within it. A
+ // loop can obtain inner loops despite this after inlining.
+ bool can_be_innermost;
- WasmLoopInfo(Node* header, uint32_t nesting_depth, bool is_innermost)
+ WasmLoopInfo(Node* header, uint32_t nesting_depth, bool can_be_innermost)
: header(header),
nesting_depth(nesting_depth),
- is_innermost(is_innermost) {}
+ can_be_innermost(can_be_innermost) {}
};
// Abstracts details of building TurboFan graph nodes for wasm to separate
@@ -214,7 +230,7 @@ class WasmGraphBuilder {
struct ObjectReferenceKnowledge {
bool object_can_be_null;
ReferenceKind reference_kind;
- int8_t rtt_depth;
+ uint8_t rtt_depth;
};
enum EnforceBoundsCheck : bool { // --
kNeedsBoundsCheck = true,
@@ -290,7 +306,8 @@ class WasmGraphBuilder {
void AppendToMerge(Node* merge, Node* from);
void AppendToPhi(Node* phi, Node* from);
- void StackCheck(wasm::WasmCodePosition);
+ void StackCheck(WasmInstanceCacheNodes* shared_memory_instance_cache,
+ wasm::WasmCodePosition);
void PatchInStackCheckIfNeeded();
@@ -410,14 +427,6 @@ class WasmGraphBuilder {
return effect_and_control;
}
- Node* GetImportedMutableGlobals();
-
- void GetGlobalBaseAndOffset(MachineType mem_type, const wasm::WasmGlobal&,
- Node** base_node, Node** offset_node);
-
- void GetBaseAndOffsetForImportedMutableExternRefGlobal(
- const wasm::WasmGlobal& global, Node** base, Node** offset);
-
// Utilities to manipulate sets of instance cache nodes.
void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
@@ -497,13 +506,15 @@ class WasmGraphBuilder {
void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
Node* src_array, Node* src_index, CheckForNull src_null_check,
Node* length, wasm::WasmCodePosition position);
- Node* ArrayInit(uint32_t array_index, const wasm::ArrayType* type, Node* rtt,
+ Node* ArrayInit(const wasm::ArrayType* type, Node* rtt,
base::Vector<Node*> elements);
+ Node* ArrayInitFromData(const wasm::ArrayType* type, uint32_t data_segment,
+ Node* offset, Node* length, Node* rtt,
+ wasm::WasmCodePosition position);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
Node* RttCanon(uint32_t type_index);
- Node* RttSub(uint32_t type_index, Node* parent_rtt, WasmRttSubMode mode);
Node* RefTest(Node* object, Node* rtt, ObjectReferenceKnowledge config);
Node* RefCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
@@ -523,6 +534,12 @@ class WasmGraphBuilder {
void BrOnFunc(Node* object, Node* rtt, ObjectReferenceKnowledge config,
Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect);
+ Node* RefIsArray(Node* object, bool object_can_be_null);
+ Node* RefAsArray(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position);
+ void BrOnArray(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
Node* RefIsI31(Node* object);
Node* RefAsI31(Node* object, wasm::WasmCodePosition position);
void BrOnI31(Node* object, Node* rtt, ObjectReferenceKnowledge config,
@@ -691,8 +708,14 @@ class WasmGraphBuilder {
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
+ void MemTypeToUintPtrOrOOBTrap(std::initializer_list<Node**> nodes,
+ wasm::WasmCodePosition position);
+
Node* IsNull(Node* object);
+ void GetGlobalBaseAndOffset(const wasm::WasmGlobal&, Node** base_node,
+ Node** offset_node);
+
using BranchBuilder = std::function<void(Node*, BranchHint)>;
struct Callbacks {
BranchBuilder succeed_if;
@@ -717,7 +740,9 @@ class WasmGraphBuilder {
void TypeCheck(Node* object, Node* rtt, ObjectReferenceKnowledge config,
bool null_succeeds, Callbacks callbacks);
void DataCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
- void FuncCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
+ void ManagedObjectInstanceCheck(Node* object, bool object_can_be_null,
+ InstanceType instance_type,
+ Callbacks callbacks);
void BrOnCastAbs(Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect,
@@ -757,7 +782,9 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
- Node* BuildUnsandboxExternalPointer(Node* external_pointer);
+ Node* BuildLoadExternalPointerFromObject(
+ Node* object, int offset,
+ ExternalPointerTag tag = kForeignForeignAddressTag);
Node* BuildLoadCallTargetFromExportedFunctionData(Node* function_data);
diff --git a/deps/v8/src/compiler/wasm-escape-analysis.cc b/deps/v8/src/compiler/wasm-escape-analysis.cc
index e05a792fba..450590d7d0 100644
--- a/deps/v8/src/compiler/wasm-escape-analysis.cc
+++ b/deps/v8/src/compiler/wasm-escape-analysis.cc
@@ -29,8 +29,8 @@ Reduction WasmEscapeAnalysis::ReduceAllocateRaw(Node* node) {
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsValueEdge(edge)) {
if (edge.index() != 0 ||
- edge.from()->opcode() != IrOpcode::kStoreToObject) {
- // The allocated object is used for something other than storing into.
+ (edge.from()->opcode() != IrOpcode::kStoreToObject &&
+ edge.from()->opcode() != IrOpcode::kInitializeImmutableInObject)) {
return NoChange();
}
value_edges.push_back(edge);
@@ -43,7 +43,8 @@ Reduction WasmEscapeAnalysis::ReduceAllocateRaw(Node* node) {
DCHECK_EQ(edge.index(), 0);
Node* use = edge.from();
DCHECK(!use->IsDead());
- DCHECK_EQ(use->opcode(), IrOpcode::kStoreToObject);
+ DCHECK(use->opcode() == IrOpcode::kStoreToObject ||
+ use->opcode() == IrOpcode::kInitializeImmutableInObject);
// The value stored by this StoreToObject node might be another allocation
// which has no more uses. Therefore we have to revisit it. Note that this
// will not happen automatically: ReplaceWithValue does not trigger revisits
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index 05e65951b3..e8acab6739 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -151,7 +151,7 @@ void WasmInliner::Finalize() {
wasm::WasmFeatures detected;
WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
source_positions_);
- std::vector<WasmLoopInfo> infos;
+ std::vector<WasmLoopInfo> inlinee_loop_infos;
size_t subgraph_min_node_id = graph()->NodeCount();
Node* inlinee_start;
@@ -160,8 +160,11 @@ void WasmInliner::Finalize() {
Graph::SubgraphScope scope(graph());
wasm::DecodeResult result = wasm::BuildTFGraph(
zone()->allocator(), env_->enabled_features, module(), &builder,
- &detected, inlinee_body, &infos, node_origins_,
- candidate.inlinee_index, wasm::kInlinedFunction);
+ &detected, inlinee_body, &inlinee_loop_infos, node_origins_,
+ candidate.inlinee_index,
+ NodeProperties::IsExceptionalCall(call)
+ ? wasm::kInlinedHandledCall
+ : wasm::kInlinedNonHandledCall);
if (result.failed()) {
// This can happen if the inlinee has never been compiled before and is
// invalid. Return, as there is no point to keep optimizing.
@@ -193,6 +196,8 @@ void WasmInliner::Finalize() {
InlineTailCall(call, inlinee_start, inlinee_end);
}
call->Kill();
+ loop_infos_->insert(loop_infos_->end(), inlinee_loop_infos.begin(),
+ inlinee_loop_infos.end());
// Returning after only one inlining has been tried and found worse.
}
}
@@ -218,7 +223,11 @@ void WasmInliner::RewireFunctionEntry(Node* call, Node* callee_start) {
if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
} else if (NodeProperties::IsControlEdge(edge)) {
- edge.UpdateTo(control);
+ // Projections pointing to the inlinee start are floating control.
+ // They should point to the graph's start.
+ edge.UpdateTo(use->opcode() == IrOpcode::kProjection
+ ? graph()->start()
+ : control);
} else {
UNREACHABLE();
}
@@ -230,7 +239,7 @@ void WasmInliner::RewireFunctionEntry(Node* call, Node* callee_start) {
void WasmInliner::InlineTailCall(Node* call, Node* callee_start,
Node* callee_end) {
- DCHECK(call->opcode() == IrOpcode::kTailCall);
+ DCHECK_EQ(call->opcode(), IrOpcode::kTailCall);
// 1) Rewire function entry.
RewireFunctionEntry(call, callee_start);
// 2) For tail calls, all we have to do is rewire all terminators of the
@@ -248,22 +257,59 @@ void WasmInliner::InlineTailCall(Node* call, Node* callee_start,
Revisit(graph()->end());
}
+namespace {
+// graph-builder-interface generates a dangling exception handler for each
+// throwing call in the inlinee. This might be followed by a LoopExit node.
+Node* DanglingHandler(Node* call) {
+ Node* if_exception = nullptr;
+ for (Node* use : call->uses()) {
+ if (use->opcode() == IrOpcode::kIfException) {
+ if_exception = use;
+ break;
+ }
+ }
+ DCHECK_NOT_NULL(if_exception);
+
+ // If this handler is dangling, return it.
+ if (if_exception->UseCount() == 0) return if_exception;
+
+ for (Node* use : if_exception->uses()) {
+ // Otherwise, look for a LoopExit use of this handler.
+ if (use->opcode() == IrOpcode::kLoopExit) {
+ for (Node* loop_exit_use : use->uses()) {
+ if (loop_exit_use->opcode() != IrOpcode::kLoopExitEffect &&
+ loop_exit_use->opcode() != IrOpcode::kLoopExitValue) {
+ // This LoopExit has a use other than LoopExitEffect/Value, so it is
+ // not dangling.
+ return nullptr;
+ }
+ }
+ return use;
+ }
+ }
+
+ return nullptr;
+}
+} // namespace
+
void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
const wasm::FunctionSig* inlinee_sig,
size_t subgraph_min_node_id) {
- DCHECK(call->opcode() == IrOpcode::kCall);
+ DCHECK_EQ(call->opcode(), IrOpcode::kCall);
// 0) Before doing anything, if {call} has an exception handler, collect all
// unhandled calls in the subgraph.
Node* handler = nullptr;
- std::vector<Node*> unhandled_subcalls;
+ std::vector<Node*> dangling_handlers;
if (NodeProperties::IsExceptionalCall(call, &handler)) {
AllNodes subgraph_nodes(zone(), callee_end, graph());
for (Node* node : subgraph_nodes.reachable) {
if (node->id() >= subgraph_min_node_id &&
- !node->op()->HasProperty(Operator::kNoThrow) &&
- !NodeProperties::IsExceptionalCall(node)) {
- unhandled_subcalls.push_back(node);
+ !node->op()->HasProperty(Operator::kNoThrow)) {
+ Node* dangling_handler = DanglingHandler(node);
+ if (dangling_handler != nullptr) {
+ dangling_handlers.push_back(dangling_handler);
+ }
}
}
}
@@ -326,29 +372,37 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
callee_end->Kill();
// 3) Rewire unhandled calls to the handler.
- std::vector<Node*> on_exception_nodes;
- for (Node* subcall : unhandled_subcalls) {
- Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
- NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
- NodeProperties::ReplaceControlInput(on_success, subcall);
- Node* on_exception =
- graph()->NewNode(common()->IfException(), subcall, subcall);
- on_exception_nodes.push_back(on_exception);
- }
+ int handler_count = static_cast<int>(dangling_handlers.size());
- int subcall_count = static_cast<int>(on_exception_nodes.size());
-
- if (subcall_count > 0) {
+ if (handler_count > 0) {
Node* control_output =
- graph()->NewNode(common()->Merge(subcall_count), subcall_count,
- on_exception_nodes.data());
- on_exception_nodes.push_back(control_output);
+ graph()->NewNode(common()->Merge(handler_count), handler_count,
+ dangling_handlers.data());
+ std::vector<Node*> effects;
+ std::vector<Node*> values;
+ for (Node* control : dangling_handlers) {
+ if (control->opcode() == IrOpcode::kIfException) {
+ effects.push_back(control);
+ values.push_back(control);
+ } else {
+ DCHECK_EQ(control->opcode(), IrOpcode::kLoopExit);
+ Node* if_exception = control->InputAt(0);
+ DCHECK_EQ(if_exception->opcode(), IrOpcode::kIfException);
+ effects.push_back(graph()->NewNode(common()->LoopExitEffect(),
+ if_exception, control));
+ values.push_back(graph()->NewNode(
+ common()->LoopExitValue(MachineRepresentation::kTagged),
+ if_exception, control));
+ }
+ }
+
+ effects.push_back(control_output);
+ values.push_back(control_output);
Node* value_output = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, subcall_count),
- subcall_count + 1, on_exception_nodes.data());
- Node* effect_output =
- graph()->NewNode(common()->EffectPhi(subcall_count), subcall_count + 1,
- on_exception_nodes.data());
+ common()->Phi(MachineRepresentation::kTagged, handler_count),
+ handler_count + 1, values.data());
+ Node* effect_output = graph()->NewNode(common()->EffectPhi(handler_count),
+ handler_count + 1, effects.data());
ReplaceWithValue(handler, value_output, effect_output, control_output);
} else if (handler != nullptr) {
// Nothing in the inlined function can throw. Remove the handler.
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index 0a2b9d2c51..0ded2ac0f4 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -29,17 +29,17 @@ namespace compiler {
class NodeOriginTable;
class SourcePositionTable;
+struct WasmLoopInfo;
// The WasmInliner provides the core graph inlining machinery for Webassembly
-// graphs. Note that this class only deals with the mechanics of how to inline
-// one graph into another; heuristics that decide what and how much to inline
-// are provided by {WasmInliningHeuristics}.
+// graphs.
class WasmInliner final : public AdvancedReducer {
public:
WasmInliner(Editor* editor, wasm::CompilationEnv* env,
uint32_t function_index, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, MachineGraph* mcgraph,
- const wasm::WireBytesStorage* wire_bytes)
+ const wasm::WireBytesStorage* wire_bytes,
+ std::vector<WasmLoopInfo>* loop_infos)
: AdvancedReducer(editor),
env_(env),
function_index_(function_index),
@@ -47,6 +47,7 @@ class WasmInliner final : public AdvancedReducer {
node_origins_(node_origins),
mcgraph_(mcgraph),
wire_bytes_(wire_bytes),
+ loop_infos_(loop_infos),
initial_graph_size_(mcgraph->graph()->NodeCount()),
current_graph_size_(initial_graph_size_),
inlining_candidates_() {}
@@ -143,6 +144,7 @@ class WasmInliner final : public AdvancedReducer {
NodeOriginTable* const node_origins_;
MachineGraph* const mcgraph_;
const wasm::WireBytesStorage* const wire_bytes_;
+ std::vector<WasmLoopInfo>* const loop_infos_;
const size_t initial_graph_size_;
size_t current_graph_size_;
std::priority_queue<CandidateInfo, std::vector<CandidateInfo>,
diff --git a/deps/v8/src/compiler/wasm-loop-peeling.cc b/deps/v8/src/compiler/wasm-loop-peeling.cc
new file mode 100644
index 0000000000..65d87e3672
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-loop-peeling.cc
@@ -0,0 +1,133 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-loop-peeling.h"
+
+#include "src/base/small-vector.h"
+#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/loop-peeling.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void PeelWasmLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, Graph* graph,
+ CommonOperatorBuilder* common, Zone* tmp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins) {
+ DCHECK_EQ(loop_node->opcode(), IrOpcode::kLoop);
+ DCHECK_NOT_NULL(loop);
+ // No back-jump to the loop header means this is not really a loop.
+ if (loop_node->InputCount() < 2) return;
+
+ uint32_t copied_size = static_cast<uint32_t>(loop->size()) * 2;
+
+ NodeVector copied_nodes(tmp_zone);
+
+ NodeCopier copier(graph, copied_size, &copied_nodes, 1);
+ source_positions->AddDecorator();
+ copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
+ base::make_iterator_range(loop->begin(), loop->end()),
+ source_positions, node_origins);
+ source_positions->RemoveDecorator();
+
+ Node* peeled_iteration_header = copier.map(loop_node);
+
+ // The terminator nodes in the copies need to get connected to the graph's end
+ // node, except Terminate nodes which will be deleted anyway.
+ for (Node* node : copied_nodes) {
+ if (IrOpcode::IsGraphTerminator(node->opcode()) &&
+ node->opcode() != IrOpcode::kTerminate && node->UseCount() == 0) {
+ NodeProperties::MergeControlToEnd(graph, common, node);
+ }
+ }
+
+ // Step 1: Create merges for loop exits.
+ for (Node* node : loop_node->uses()) {
+ // We do not need the Terminate node for the peeled iteration.
+ if (node->opcode() == IrOpcode::kTerminate) {
+ copier.map(node)->Kill();
+ continue;
+ }
+ if (node->opcode() != IrOpcode::kLoopExit) continue;
+ DCHECK_EQ(node->InputAt(1), loop_node);
+ // Create a merge node for the peeled iteration and main loop. Skip the
+ // LoopExit node in the peeled iteration, use its control input instead.
+ Node* merge_node =
+ graph->NewNode(common->Merge(2), node, copier.map(node)->InputAt(0));
+ // Replace all uses of the loop exit with the merge node.
+ for (Edge use_edge : node->use_edges()) {
+ Node* use = use_edge.from();
+ if (loop->count(use) == 1) {
+ // Uses within the loop will be LoopExitEffects and LoopExitValues.
+ // Those are used by nodes outside the loop. We need to create phis from
+ // the main loop and peeled iteration to replace loop exits.
+ DCHECK(use->opcode() == IrOpcode::kLoopExitEffect ||
+ use->opcode() == IrOpcode::kLoopExitValue);
+ const Operator* phi_operator =
+ use->opcode() == IrOpcode::kLoopExitEffect
+ ? common->EffectPhi(2)
+ : common->Phi(LoopExitValueRepresentationOf(use->op()), 2);
+ Node* phi = graph->NewNode(phi_operator, use,
+ copier.map(use)->InputAt(0), merge_node);
+ use->ReplaceUses(phi);
+ // Fix the input of phi we just broke.
+ phi->ReplaceInput(0, use);
+ copier.map(use)->Kill();
+ } else if (use != merge_node) {
+ // For uses outside the loop, simply redirect them to the merge.
+ use->ReplaceInput(use_edge.index(), merge_node);
+ }
+ }
+ copier.map(node)->Kill();
+ }
+
+ // Step 2: The peeled iteration is not a loop anymore. Any control uses of
+ // its loop header should now point to its non-recursive input. Any phi uses
+ // should use the value coming from outside the loop.
+ for (Edge use_edge : peeled_iteration_header->use_edges()) {
+ if (NodeProperties::IsPhi(use_edge.from())) {
+ use_edge.from()->ReplaceUses(use_edge.from()->InputAt(0));
+ } else {
+ use_edge.UpdateTo(loop_node->InputAt(0));
+ }
+ }
+
+ // We are now left with an unconnected subgraph of the peeled Loop node and
+ // its phi uses.
+
+ // Step 3: Rewire the peeled iteration to flow into the main loop.
+
+ // We are reusing the Loop node of the peeled iteration and its phis as the
+ // merge and phis which flow from the peeled iteration into the main loop.
+ // First, remove the non-recursive input.
+ peeled_iteration_header->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ peeled_iteration_header,
+ common->Merge(peeled_iteration_header->InputCount()));
+
+ // Remove the non-recursive input.
+ for (Edge use_edge : peeled_iteration_header->use_edges()) {
+ DCHECK(NodeProperties::IsPhi(use_edge.from()));
+ use_edge.from()->RemoveInput(0);
+ const Operator* phi = common->ResizeMergeOrPhi(
+ use_edge.from()->op(),
+ use_edge.from()->InputCount() - /* control input */ 1);
+ NodeProperties::ChangeOp(use_edge.from(), phi);
+ }
+
+ // In the main loop, change inputs to the merge and phis above.
+ loop_node->ReplaceInput(0, peeled_iteration_header);
+ for (Edge use_edge : loop_node->use_edges()) {
+ if (NodeProperties::IsPhi(use_edge.from())) {
+ use_edge.from()->ReplaceInput(0, copier.map(use_edge.from()));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-loop-peeling.h b/deps/v8/src/compiler/wasm-loop-peeling.h
new file mode 100644
index 0000000000..848dcceba0
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-loop-peeling.h
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_LOOP_PEELING_H_
+#define V8_COMPILER_WASM_LOOP_PEELING_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/loop-analysis.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Loop peeling is an optimization that copies the body of a loop, creating
+// a new copy of the body called the "peeled iteration" that represents the
+// first iteration. It enables a kind of loop hoisting: repeated computations
+// without side-effects in the body of the loop can be computed in the first
+// iteration only and reused in the next iterations.
+void PeelWasmLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, Graph* graph,
+ CommonOperatorBuilder* common, Zone* tmp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins);
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_LOOP_PEELING_H_
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index c69c00f9a0..0949c431b6 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_ZONE_STATS_H_
#include <map>
-#include <set>
#include <vector>
#include "src/common/globals.h"
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index 13b67ce8ea..eed6e19cf3 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -13,6 +13,69 @@
namespace v8 {
+namespace {
+AsyncHooksWrap* UnwrapHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ Local<Object> hook = args.This();
+
+ AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
+
+ if (!hooks->async_hook_ctor.Get(isolate)->HasInstance(hook)) {
+ isolate->ThrowError("Invalid 'this' passed instead of AsyncHooks instance");
+ return nullptr;
+ }
+
+ Local<External> wrap = hook->GetInternalField(0).As<External>();
+ void* ptr = wrap->Value();
+ return static_cast<AsyncHooksWrap*>(ptr);
+}
+
+void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ AsyncHooksWrap* wrap = UnwrapHook(args);
+ if (wrap) wrap->Enable();
+}
+
+void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ AsyncHooksWrap* wrap = UnwrapHook(args);
+ if (wrap) wrap->Disable();
+}
+
+} // namespace
+
+AsyncHooks::AsyncHooks(Isolate* isolate) : isolate_(isolate) {
+ AsyncContext ctx;
+ ctx.execution_async_id = 1;
+ ctx.trigger_async_id = 0;
+ asyncContexts.push(ctx);
+ current_async_id = 1;
+
+ HandleScope handle_scope(isolate_);
+
+ async_hook_ctor.Reset(isolate_, FunctionTemplate::New(isolate_));
+ async_hook_ctor.Get(isolate_)->SetClassName(
+ String::NewFromUtf8Literal(isolate_, "AsyncHook"));
+
+ async_hooks_templ.Reset(isolate_,
+ async_hook_ctor.Get(isolate_)->InstanceTemplate());
+ async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
+ async_hooks_templ.Get(isolate_)->Set(
+ isolate_, "enable", FunctionTemplate::New(isolate_, EnableHook));
+ async_hooks_templ.Get(isolate_)->Set(
+ isolate_, "disable", FunctionTemplate::New(isolate_, DisableHook));
+
+ async_id_smb.Reset(isolate_, Private::New(isolate_));
+ trigger_id_smb.Reset(isolate_, Private::New(isolate_));
+
+ isolate_->SetPromiseHook(ShellPromiseHook);
+}
+
+AsyncHooks::~AsyncHooks() {
+ isolate_->SetPromiseHook(nullptr);
+ base::RecursiveMutexGuard lock_guard(&async_wraps_mutex_);
+ async_wraps_.clear();
+}
+
void AsyncHooksWrap::Enable() { enabled_ = true; }
void AsyncHooksWrap::Disable() { enabled_ = false; }
@@ -43,38 +106,6 @@ void AsyncHooksWrap::set_promiseResolve_function(
promiseResolve_function_.Reset(isolate_, value);
}
-static AsyncHooksWrap* UnwrapHook(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = args.GetIsolate();
- HandleScope scope(isolate);
- Local<Object> hook = args.This();
-
- AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
-
- if (!hooks->async_hook_ctor.Get(isolate)->HasInstance(hook)) {
- isolate->ThrowError("Invalid 'this' passed instead of AsyncHooks instance");
- return nullptr;
- }
-
- Local<External> wrap = hook->GetInternalField(0).As<External>();
- void* ptr = wrap->Value();
- return static_cast<AsyncHooksWrap*>(ptr);
-}
-
-static void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
- AsyncHooksWrap* wrap = UnwrapHook(args);
- if (wrap) {
- wrap->Enable();
- }
-}
-
-static void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
- AsyncHooksWrap* wrap = UnwrapHook(args);
- if (wrap) {
- wrap->Disable();
- }
-}
-
async_id_t AsyncHooks::GetExecutionAsyncId() const {
return asyncContexts.top().execution_async_id;
}
@@ -95,7 +126,8 @@ Local<Object> AsyncHooks::CreateHook(
return Local<Object>();
}
- AsyncHooksWrap* wrap = new AsyncHooksWrap(isolate);
+ std::unique_ptr<AsyncHooksWrap> wrap =
+ std::make_unique<AsyncHooksWrap>(isolate);
Local<Object> fn_obj = args[0].As<Object>();
@@ -113,12 +145,15 @@ Local<Object> AsyncHooks::CreateHook(
SET_HOOK_FN(promiseResolve);
#undef SET_HOOK_FN
- async_wraps_.push_back(wrap);
-
Local<Object> obj = async_hooks_templ.Get(isolate)
->NewInstance(currentContext)
.ToLocalChecked();
- obj->SetInternalField(0, External::New(isolate, wrap));
+ obj->SetInternalField(0, External::New(isolate, wrap.get()));
+
+ {
+ base::RecursiveMutexGuard lock_guard(&async_wraps_mutex_);
+ async_wraps_.push_back(std::move(wrap));
+ }
return handle_scope.Escape(obj);
}
@@ -184,8 +219,9 @@ void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
hooks->asyncContexts.pop();
}
if (!i::StackLimitCheck{i_isolate}.HasOverflowed()) {
- for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
- PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ base::RecursiveMutexGuard lock_guard(&hooks->async_wraps_mutex_);
+ for (const auto& wrap : hooks->async_wraps_) {
+ PromiseHookDispatch(type, promise, parent, *wrap, hooks);
if (try_catch.HasCaught()) break;
}
if (try_catch.HasCaught()) Shell::ReportException(isolate, &try_catch);
@@ -196,39 +232,12 @@ void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
}
}
-void AsyncHooks::Initialize() {
- HandleScope handle_scope(isolate_);
-
- async_hook_ctor.Reset(isolate_, FunctionTemplate::New(isolate_));
- async_hook_ctor.Get(isolate_)->SetClassName(
- String::NewFromUtf8Literal(isolate_, "AsyncHook"));
-
- async_hooks_templ.Reset(isolate_,
- async_hook_ctor.Get(isolate_)->InstanceTemplate());
- async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
- async_hooks_templ.Get(isolate_)->Set(
- isolate_, "enable", FunctionTemplate::New(isolate_, EnableHook));
- async_hooks_templ.Get(isolate_)->Set(
- isolate_, "disable", FunctionTemplate::New(isolate_, DisableHook));
-
- async_id_smb.Reset(isolate_, Private::New(isolate_));
- trigger_id_smb.Reset(isolate_, Private::New(isolate_));
-
- isolate_->SetPromiseHook(ShellPromiseHook);
-}
-
-void AsyncHooks::Deinitialize() {
- isolate_->SetPromiseHook(nullptr);
- for (AsyncHooksWrap* wrap : async_wraps_) {
- delete wrap;
- }
-}
-
void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
Local<Promise> promise,
- Local<Value> parent, AsyncHooksWrap* wrap,
+ Local<Value> parent,
+ const AsyncHooksWrap& wrap,
AsyncHooks* hooks) {
- if (!wrap->IsEnabled()) return;
+ if (!wrap.IsEnabled()) return;
v8::Isolate* v8_isolate = hooks->isolate_;
HandleScope handle_scope(v8_isolate);
@@ -239,35 +248,30 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
.ToLocalChecked();
Local<Value> args[1] = {async_id};
- // This is unused. It's here to silence the warning about
- // not using the MaybeLocal return value from Call.
- MaybeLocal<Value> result;
-
- // Sacrifice the brevity for readability and debugfulness
switch (type) {
case PromiseHookType::kInit:
- if (!wrap->init_function().IsEmpty()) {
+ if (!wrap.init_function().IsEmpty()) {
Local<Value> initArgs[4] = {
async_id, String::NewFromUtf8Literal(v8_isolate, "PROMISE"),
promise->GetPrivate(context, hooks->trigger_id_smb.Get(v8_isolate))
.ToLocalChecked(),
promise};
- result = wrap->init_function()->Call(context, rcv, 4, initArgs);
+ USE(wrap.init_function()->Call(context, rcv, 4, initArgs));
}
break;
case PromiseHookType::kBefore:
- if (!wrap->before_function().IsEmpty()) {
- result = wrap->before_function()->Call(context, rcv, 1, args);
+ if (!wrap.before_function().IsEmpty()) {
+ USE(wrap.before_function()->Call(context, rcv, 1, args));
}
break;
case PromiseHookType::kAfter:
- if (!wrap->after_function().IsEmpty()) {
- result = wrap->after_function()->Call(context, rcv, 1, args);
+ if (!wrap.after_function().IsEmpty()) {
+ USE(wrap.after_function()->Call(context, rcv, 1, args));
}
break;
case PromiseHookType::kResolve:
- if (!wrap->promiseResolve_function().IsEmpty()) {
- result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
+ if (!wrap.promiseResolve_function().IsEmpty()) {
+ USE(wrap.promiseResolve_function()->Call(context, rcv, 1, args));
}
}
}
diff --git a/deps/v8/src/d8/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index 23cc0be9c0..cbc42a901d 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -28,10 +28,8 @@ struct AsyncContext {
class AsyncHooksWrap {
public:
- explicit AsyncHooksWrap(Isolate* isolate) {
- enabled_ = false;
- isolate_ = isolate;
- }
+ explicit AsyncHooksWrap(Isolate* isolate)
+ : isolate_(isolate), enabled_(false) {}
void Enable();
void Disable();
bool IsEnabled() const { return enabled_; }
@@ -58,18 +56,8 @@ class AsyncHooksWrap {
class AsyncHooks {
public:
- explicit AsyncHooks(Isolate* isolate) {
- isolate_ = isolate;
-
- AsyncContext ctx;
- ctx.execution_async_id = 1;
- ctx.trigger_async_id = 0;
- asyncContexts.push(ctx);
- current_async_id = 1;
-
- Initialize();
- }
- ~AsyncHooks() { Deinitialize(); }
+ explicit AsyncHooks(Isolate* isolate);
+ ~AsyncHooks();
async_id_t GetExecutionAsyncId() const;
async_id_t GetTriggerAsyncId() const;
@@ -79,19 +67,18 @@ class AsyncHooks {
Persistent<FunctionTemplate> async_hook_ctor;
private:
- std::vector<AsyncHooksWrap*> async_wraps_;
+ base::RecursiveMutex async_wraps_mutex_;
+ std::vector<std::unique_ptr<AsyncHooksWrap>> async_wraps_;
Isolate* isolate_;
Persistent<ObjectTemplate> async_hooks_templ;
Persistent<Private> async_id_smb;
Persistent<Private> trigger_id_smb;
- void Initialize();
- void Deinitialize();
-
static void ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent);
static void PromiseHookDispatch(PromiseHookType type, Local<Promise> promise,
- Local<Value> parent, AsyncHooksWrap* wrap,
+ Local<Value> parent,
+ const AsyncHooksWrap& wrap,
AsyncHooks* hooks);
std::stack<AsyncContext> asyncContexts;
diff --git a/deps/v8/src/d8/cov.h b/deps/v8/src/d8/cov.h
index 0c7dc6bac4..454c45dc53 100644
--- a/deps/v8/src/d8/cov.h
+++ b/deps/v8/src/d8/cov.h
@@ -10,6 +10,7 @@
// memory
// https://clang.llvm.org/docs/SanitizerCoverage.html
+#include <cstdint>
#include <vector>
void sanitizer_cov_reset_edgeguards();
diff --git a/deps/v8/src/d8/d8-console.cc b/deps/v8/src/d8/d8-console.cc
index fc76bab7ab..bd395145e5 100644
--- a/deps/v8/src/d8/d8-console.cc
+++ b/deps/v8/src/d8/d8-console.cc
@@ -34,7 +34,7 @@ void WriteToFile(const char* prefix, FILE* file, Isolate* isolate,
} // anonymous namespace
D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
- default_timer_ = base::TimeTicks::HighResolutionNow();
+ default_timer_ = base::TimeTicks::Now();
}
void D8Console::Assert(const debug::ConsoleCallArguments& args,
@@ -75,7 +75,7 @@ void D8Console::Time(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
if (internal::FLAG_correctness_fuzzer_suppressions) return;
if (args.Length() == 0) {
- default_timer_ = base::TimeTicks::HighResolutionNow();
+ default_timer_ = base::TimeTicks::Now();
} else {
Local<Value> arg = args[0];
Local<String> label;
@@ -85,10 +85,10 @@ void D8Console::Time(const debug::ConsoleCallArguments& args,
std::string string(*utf8);
auto find = timers_.find(string);
if (find != timers_.end()) {
- find->second = base::TimeTicks::HighResolutionNow();
+ find->second = base::TimeTicks::Now();
} else {
timers_.insert(std::pair<std::string, base::TimeTicks>(
- string, base::TimeTicks::HighResolutionNow()));
+ string, base::TimeTicks::Now()));
}
}
}
@@ -98,10 +98,10 @@ void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
if (internal::FLAG_correctness_fuzzer_suppressions) return;
base::TimeDelta delta;
if (args.Length() == 0) {
- delta = base::TimeTicks::HighResolutionNow() - default_timer_;
+ delta = base::TimeTicks::Now() - default_timer_;
printf("console.timeEnd: default, %f\n", delta.InMillisecondsF());
} else {
- base::TimeTicks now = base::TimeTicks::HighResolutionNow();
+ base::TimeTicks now = base::TimeTicks::Now();
Local<Value> arg = args[0];
Local<String> label;
v8::TryCatch try_catch(isolate_);
@@ -120,7 +120,7 @@ void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
void D8Console::TimeStamp(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
if (internal::FLAG_correctness_fuzzer_suppressions) return;
- base::TimeDelta delta = base::TimeTicks::HighResolutionNow() - default_timer_;
+ base::TimeDelta delta = base::TimeTicks::Now() - default_timer_;
if (args.Length() == 0) {
printf("console.timeStamp: default, %f\n", delta.InMillisecondsF());
} else {
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index d5af43b532..270d51f7fe 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -39,6 +39,8 @@ namespace {
class FastCApiObject {
public:
+ static FastCApiObject& instance();
+
#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
static AnyCType AddAllFastCallbackPatch(AnyCType receiver,
AnyCType should_fallback,
@@ -65,16 +67,58 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
} else {
- options.fallback = 0;
+ options.fallback = false;
}
return static_cast<double>(arg_i32) + static_cast<double>(arg_u32) +
static_cast<double>(arg_i64) + static_cast<double>(arg_u64) +
static_cast<double>(arg_f32) + arg_f64;
}
+
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static AnyCType AddAllFastCallbackNoOptionsPatch(
+ AnyCType receiver, AnyCType should_fallback, AnyCType arg_i32,
+ AnyCType arg_u32, AnyCType arg_i64, AnyCType arg_u64, AnyCType arg_f32,
+ AnyCType arg_f64) {
+ AnyCType ret;
+ ret.double_value = AddAllFastCallbackNoOptions(
+ receiver.object_value, should_fallback.bool_value, arg_i32.int32_value,
+ arg_u32.uint32_value, arg_i64.int64_value, arg_u64.uint64_value,
+ arg_f32.float_value, arg_f64.double_value);
+ return ret;
+ }
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static double AddAllFastCallbackNoOptions(Local<Object> receiver,
+ bool should_fallback,
+ int32_t arg_i32, uint32_t arg_u32,
+ int64_t arg_i64, uint64_t arg_u64,
+ float arg_f32, double arg_f64) {
+ FastCApiObject* self;
+
+ // For Wasm call, we don't pass FastCApiObject as the receiver, so we need
+ // to retrieve the FastCApiObject instance from a static variable.
+ if (Utils::OpenHandle(*receiver)->IsJSGlobalProxy() ||
+ Utils::OpenHandle(*receiver)->IsUndefined()) {
+ // Note: FastCApiObject::instance() returns the reference of an object
+ // allocated in thread-local storage, its value cannot be stored in a
+ // static variable here.
+ self = &FastCApiObject::instance();
+ } else {
+ // Fuzzing code can call this function from JS; in this case the receiver
+ // should be a FastCApiObject.
+ self = UnwrapObject(receiver);
+ CHECK_NOT_NULL(self);
+ }
+ self->fast_call_count_++;
+
+ return static_cast<double>(arg_i32) + static_cast<double>(arg_u32) +
+ static_cast<double>(arg_i64) + static_cast<double>(arg_u64) +
+ static_cast<double>(arg_f32) + arg_f64;
+ }
+
static void AddAllSlowCallback(const FunctionCallbackInfo<Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -143,13 +187,13 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
uint32_t length = seq_arg->Length();
if (length > 1024) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
@@ -157,7 +201,7 @@ class FastCApiObject {
bool result = TryToCopyAndConvertArrayToCppBuffer<
CTypeInfoBuilder<Type>::Build().GetId(), Type>(seq_arg, buffer, 1024);
if (!result) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
DCHECK_EQ(seq_arg->Length(), length);
@@ -288,7 +332,7 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
@@ -385,7 +429,7 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
@@ -444,7 +488,7 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return 0;
}
@@ -504,7 +548,7 @@ class FastCApiObject {
self->fast_call_count_++;
if (should_fallback) {
- options.fallback = 1;
+ options.fallback = true;
return false;
}
@@ -620,6 +664,9 @@ class FastCApiObject {
thread_local FastCApiObject kFastCApiObject;
} // namespace
+// static
+FastCApiObject& FastCApiObject::instance() { return kFastCApiObject; }
+
void CreateFastCAPIObject(const FunctionCallbackInfo<Value>& info) {
if (!info.IsConstructCall()) {
info.GetIsolate()->ThrowError(
@@ -765,6 +812,7 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
FastCApiObject::AddAll32BitIntFastCallback_5ArgsPatch));
const CFunction c_function_overloads[] = {add_all_32bit_int_6args_c_func,
add_all_32bit_int_5args_c_func};
+
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "overloaded_add_all_32bit_int",
FunctionTemplate::NewWithCFunctionOverloads(
@@ -772,6 +820,16 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, {c_function_overloads, 2}));
+ CFunction add_all_no_options_c_func = CFunction::Make(
+ FastCApiObject::AddAllFastCallbackNoOptions V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllFastCallbackNoOptionsPatch));
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_all_no_options",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::AddAllSlowCallback, Local<Value>(),
+ Local<Signature>(), 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &add_all_no_options_c_func));
+
CFunction add_32bit_int_c_func = CFunction::Make(
FastCApiObject::Add32BitIntFastCallback V8_IF_USE_SIMULATOR(
FastCApiObject::Add32BitIntFastCallbackPatch));
@@ -781,6 +839,7 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
isolate, FastCApiObject::Add32BitIntSlowCallback, Local<Value>(),
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_32bit_int_c_func));
+
CFunction is_valid_api_object_c_func =
CFunction::Make(FastCApiObject::IsFastCApiObjectFastCallback);
api_obj_ctor->PrototypeTemplate()->Set(
@@ -789,6 +848,7 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
isolate, FastCApiObject::IsFastCApiObjectSlowCallback,
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &is_valid_api_object_c_func));
+
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
FunctionTemplate::New(
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 770fcdd0b8..dd55dfa004 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -28,6 +28,7 @@
#include "include/v8-initialization.h"
#include "include/v8-inspector.h"
#include "include/v8-json.h"
+#include "include/v8-locker.h"
#include "include/v8-profiler.h"
#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
@@ -45,9 +46,11 @@
#include "src/debug/debug-interface.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/basic-block-profiler.h"
+#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/flags/flags.h"
#include "src/handles/maybe-handles.h"
+#include "src/heap/parked-scope.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
@@ -181,7 +184,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
- CHECK(i::FreePages(page_allocator, data, allocated));
+ i::FreePages(page_allocator, data, allocated);
}
};
@@ -460,8 +463,7 @@ base::OS::MemoryMappedFile* Shell::counters_file_ = nullptr;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
base::LazyMutex Shell::context_mutex_;
-const base::TimeTicks Shell::kInitialTicks =
- base::TimeTicks::HighResolutionNow();
+const base::TimeTicks Shell::kInitialTicks = base::TimeTicks::Now();
Global<Function> Shell::stringify_function_;
base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
@@ -1265,6 +1267,11 @@ void Shell::HostInitializeImportMetaObject(Local<Context> context,
meta->CreateDataProperty(context, url_key, url).ToChecked();
}
+MaybeLocal<Context> Shell::HostCreateShadowRealmContext(
+ Local<Context> initiator_context) {
+ return v8::Context::New(initiator_context->GetIsolate());
+}
+
void Shell::DoHostImportModuleDynamically(void* import_data) {
DynamicImportData* import_data_ =
static_cast<DynamicImportData*>(import_data);
@@ -1365,11 +1372,15 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
// isolate->ReportPendingMessages().
TryCatch try_catch(isolate);
+ ModuleEmbedderData* d = GetModuleDataFromContext(realm);
Local<Module> root_module;
-
- if (!FetchModuleTree(Local<Module>(), realm, absolute_path,
- ModuleType::kJavaScript)
- .ToLocal(&root_module)) {
+ auto module_it = d->module_map.find(
+ std::make_pair(absolute_path, ModuleType::kJavaScript));
+ if (module_it != d->module_map.end()) {
+ root_module = module_it->second.Get(isolate);
+ } else if (!FetchModuleTree(Local<Module>(), realm, absolute_path,
+ ModuleType::kJavaScript)
+ .ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
return false;
@@ -1390,11 +1401,9 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
}
// Loop until module execution finishes
- // TODO(cbruni): This is a bit wonky. "Real" engines would not be
- // able to just busy loop waiting for execution to finish.
Local<Promise> result_promise(result.As<Promise>());
while (result_promise->State() == Promise::kPending) {
- isolate->PerformMicrotaskCheckpoint();
+ Shell::CompleteMessageLoop(isolate);
}
if (result_promise->State() == Promise::kRejected) {
@@ -1421,6 +1430,8 @@ bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
Context::Scope context_scope(realm);
+ TryCatch try_catch(isolate);
+ bool success = false;
std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
@@ -1428,12 +1439,49 @@ bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
std::unique_ptr<uint8_t[]> snapshot_data(
reinterpret_cast<uint8_t*>(ReadChars(absolute_path.c_str(), &length)));
if (length == 0) {
- isolate->ThrowError("Error reading the web snapshot");
- return false;
+ isolate->ThrowError("Could not read the web snapshot file");
+ } else {
+ i::WebSnapshotDeserializer deserializer(isolate, snapshot_data.get(),
+ static_cast<size_t>(length));
+ success = deserializer.Deserialize();
}
- i::WebSnapshotDeserializer deserializer(isolate);
- return deserializer.UseWebSnapshot(snapshot_data.get(),
- static_cast<size_t>(length));
+ if (!success) {
+ CHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ }
+ return success;
+}
+
+// Treat every line as a JSON value and parse it.
+bool Shell::LoadJSON(Isolate* isolate, const char* file_name) {
+ HandleScope handle_scope(isolate);
+ PerIsolateData* isolate_data = PerIsolateData::Get(isolate);
+ Local<Context> realm =
+ isolate_data->realms_[isolate_data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+ TryCatch try_catch(isolate);
+
+ std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
+ int length = 0;
+ std::unique_ptr<char[]> data(ReadChars(absolute_path.c_str(), &length));
+ if (length == 0) {
+ printf("Error reading '%s'\n", file_name);
+ base::OS::ExitProcess(1);
+ }
+ std::stringstream stream(data.get());
+ std::string line;
+ while (std::getline(stream, line, '\n')) {
+ Local<String> source =
+ String::NewFromUtf8(isolate, line.c_str()).ToLocalChecked();
+ MaybeLocal<Value> maybe_value = JSON::Parse(realm, source);
+ Local<Value> value;
+ if (!maybe_value.ToLocal(&value)) {
+ DCHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ }
+ return true;
}
PerIsolateData::PerIsolateData(Isolate* isolate)
@@ -1620,8 +1668,7 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i::FLAG_verify_predictable) {
args.GetReturnValue().Set(g_platform->MonotonicallyIncreasingTime());
} else {
- base::TimeDelta delta =
- base::TimeTicks::HighResolutionNow() - kInitialTicks;
+ base::TimeDelta delta = base::TimeTicks::Now() - kInitialTicks;
args.GetReturnValue().Set(delta.InMillisecondsF());
}
}
@@ -1985,9 +2032,10 @@ void Shell::RealmUseWebSnapshot(
// Deserialize the snapshot in the specified Realm.
{
PerIsolateData::ExplicitRealmScope realm_scope(data, index);
- i::WebSnapshotDeserializer deserializer(isolate);
- bool success = deserializer.UseWebSnapshot(
- snapshot_data_shared->buffer, snapshot_data_shared->buffer_size);
+ i::WebSnapshotDeserializer deserializer(isolate,
+ snapshot_data_shared->buffer,
+ snapshot_data_shared->buffer_size);
+ bool success = deserializer.Deserialize();
args.GetReturnValue().Set(success);
}
}
@@ -2039,8 +2087,9 @@ void Shell::TestVerifySourcePositions(
return;
}
auto arg_handle = Utils::OpenHandle(*args[0]);
- if (!arg_handle->IsHeapObject() || !i::Handle<i::HeapObject>::cast(arg_handle)
- ->IsJSFunctionOrBoundFunction()) {
+ if (!arg_handle->IsHeapObject() ||
+ !i::Handle<i::HeapObject>::cast(arg_handle)
+ ->IsJSFunctionOrBoundFunctionOrWrappedFunction()) {
isolate->ThrowError("Expected function as single argument.");
return;
}
@@ -2048,18 +2097,21 @@ void Shell::TestVerifySourcePositions(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope handle_scope(isolate);
- auto callable = i::Handle<i::JSFunctionOrBoundFunction>::cast(arg_handle);
+ auto callable =
+ i::Handle<i::JSFunctionOrBoundFunctionOrWrappedFunction>::cast(
+ arg_handle);
while (callable->IsJSBoundFunction()) {
internal::DisallowGarbageCollection no_gc;
auto bound_function = i::Handle<i::JSBoundFunction>::cast(callable);
auto bound_target = bound_function->bound_target_function();
- if (!bound_target.IsJSFunctionOrBoundFunction()) {
+ if (!bound_target.IsJSFunctionOrBoundFunctionOrWrappedFunction()) {
internal::AllowGarbageCollection allow_gc;
isolate->ThrowError("Expected function as bound target.");
return;
}
- callable =
- handle(i::JSFunctionOrBoundFunction::cast(bound_target), i_isolate);
+ callable = handle(
+ i::JSFunctionOrBoundFunctionOrWrappedFunction::cast(bound_target),
+ i_isolate);
}
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(callable);
@@ -2164,6 +2216,7 @@ void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
"--correctness-fuzzer-suppressions");
return;
}
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
Local<Context> context = isolate->GetCurrentContext();
HandleScope handle_scope(isolate);
@@ -2174,6 +2227,11 @@ void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
args[3]->IsFunction() ? args[3].As<Function>() : Local<Function>());
args.GetReturnValue().Set(v8::Undefined(isolate));
+#else // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
+ isolate->ThrowError(
+ "d8.promise.setHooks is disabled due to missing build flag "
+ "v8_enabale_javascript_in_promise_hooks");
+#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
}
void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -2572,11 +2630,18 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
->Int32Value(args->GetIsolate()->GetCurrentContext())
.FromMaybe(0);
WaitForRunningWorkers();
- args->GetIsolate()->Exit();
+ Isolate* isolate = args->GetIsolate();
+ isolate->Exit();
+
// As we exit the process anyway, we do not dispose the platform and other
- // global data. Other isolates might still be running, so disposing here can
- // cause them to crash.
- OnExit(args->GetIsolate(), false);
+ // global data and manually unlock to quell DCHECKs. Other isolates might
+ // still be running, so disposing here can cause them to crash.
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (i_isolate->thread_manager()->IsLockedByCurrentThread()) {
+ i_isolate->thread_manager()->Unlock();
+ }
+
+ OnExit(isolate, false);
base::OS::ExitProcess(exit_code);
}
@@ -3173,6 +3238,8 @@ void Shell::Initialize(Isolate* isolate, D8Console* console,
Shell::HostImportModuleDynamically);
isolate->SetHostInitializeImportMetaObjectCallback(
Shell::HostInitializeImportMetaObject);
+ isolate->SetHostCreateShadowRealmContextCallback(
+ Shell::HostCreateShadowRealmContext);
#ifdef V8_FUZZILLI
// Let the parent process (Fuzzilli) know we are ready.
@@ -3213,8 +3280,8 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
isolate->SetWasmLoadSourceMapCallback(Shell::WasmLoadSourceMapCallback);
}
InitializeModuleEmbedderData(context);
+ Context::Scope scope(context);
if (options.include_arguments) {
- Context::Scope scope(context);
const std::vector<const char*>& args = options.arguments;
int size = static_cast<int>(args.size());
Local<Array> array = Array::New(isolate, size);
@@ -3228,6 +3295,15 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
isolate, "arguments", NewStringType::kInternalized);
context->Global()->Set(context, name, array).FromJust();
}
+ {
+ // setup console global.
+ Local<String> name = String::NewFromUtf8Literal(
+ isolate, "console", NewStringType::kInternalized);
+ Local<Value> console =
+ context->GetExtrasBindingObject()->Get(context, name).ToLocalChecked();
+ context->Global()->Set(context, name, console).FromJust();
+ }
+
return handle_scope.Escape(context);
}
@@ -3537,11 +3613,12 @@ char* Shell::ReadChars(const char* name, int* size_out) {
MaybeLocal<PrimitiveArray> Shell::ReadLines(Isolate* isolate,
const char* name) {
int length;
- const char* data = reinterpret_cast<const char*>(ReadChars(name, &length));
- if (data == nullptr) {
+ std::unique_ptr<char[]> data(ReadChars(name, &length));
+
+ if (data.get() == nullptr) {
return MaybeLocal<PrimitiveArray>();
}
- std::stringstream stream(data);
+ std::stringstream stream(data.get());
std::string line;
std::vector<std::string> lines;
while (std::getline(stream, line, '\n')) {
@@ -3889,6 +3966,15 @@ bool SourceGroup::Execute(Isolate* isolate) {
break;
}
continue;
+ } else if (strcmp(arg, "--json") == 0 && i + 1 < end_offset_) {
+ // Treat the next file as a JSON file.
+ arg = argv_[++i];
+ Shell::set_script_executed();
+ if (!Shell::LoadJSON(isolate, arg)) {
+ success = false;
+ break;
+ }
+ continue;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
continue;
@@ -3928,7 +4014,11 @@ void SourceGroup::ExecuteInThread() {
Shell::Initialize(isolate, &console, false);
for (int i = 0; i < Shell::options.stress_runs; ++i) {
- next_semaphore_.Wait();
+ {
+ i::ParkedScope parked_scope(
+ reinterpret_cast<i::Isolate*>(isolate)->main_thread_local_isolate());
+ next_semaphore_.Wait();
+ }
{
Isolate::Scope iscope(isolate);
PerIsolateData data(isolate);
@@ -4001,8 +4091,8 @@ Worker::Worker(const char* script) : script_(i::StrDup(script)) {
}
Worker::~Worker() {
+ CHECK(state_.load() == State::kTerminated);
DCHECK_NULL(isolate_);
-
delete thread_;
thread_ = nullptr;
delete[] script_;
@@ -4013,16 +4103,15 @@ bool Worker::is_running() const { return state_.load() == State::kRunning; }
bool Worker::StartWorkerThread(std::shared_ptr<Worker> worker) {
auto expected = State::kReady;
- CHECK(worker->state_.compare_exchange_strong(expected, State::kRunning));
+ CHECK(
+ worker->state_.compare_exchange_strong(expected, State::kPrepareRunning));
auto thread = new WorkerThread(worker);
worker->thread_ = thread;
- if (thread->Start()) {
- // Wait until the worker is ready to receive messages.
- worker->started_semaphore_.Wait();
- Shell::AddRunningWorker(std::move(worker));
- return true;
- }
- return false;
+ if (!thread->Start()) return false;
+ // Wait until the worker is ready to receive messages.
+ worker->started_semaphore_.Wait();
+ Shell::AddRunningWorker(std::move(worker));
+ return true;
}
void Worker::WorkerThread::Run() {
@@ -4054,10 +4143,8 @@ class ProcessMessageTask : public i::CancelableTask {
};
void Worker::PostMessage(std::unique_ptr<SerializationData> data) {
- if (!is_running()) return;
- // Hold the worker_mutex_ so that the worker thread can't delete task_runner_
- // after we've checked is_running().
base::MutexGuard lock_guard(&worker_mutex_);
+ if (!is_running()) return;
std::unique_ptr<v8::Task> task(new ProcessMessageTask(
task_manager_, shared_from_this(), std::move(data)));
task_runner_->PostNonNestableTask(std::move(task));
@@ -4071,11 +4158,8 @@ class TerminateTask : public i::CancelableTask {
void RunInternal() override {
auto expected = Worker::State::kTerminating;
- if (!worker_->state_.compare_exchange_strong(expected,
- Worker::State::kTerminated)) {
- // Thread was joined in the meantime.
- CHECK_EQ(worker_->state_.load(), Worker::State::kTerminatingAndJoining);
- }
+ CHECK(worker_->state_.compare_exchange_strong(expected,
+ Worker::State::kTerminated));
}
private:
@@ -4095,34 +4179,19 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
void Worker::TerminateAndWaitForThread() {
Terminate();
- // Don't double-join a terminated thread.
- auto expected = State::kTerminating;
- if (!state_.compare_exchange_strong(expected,
- State::kTerminatingAndJoining)) {
- expected = State::kTerminated;
- if (!state_.compare_exchange_strong(expected,
- State::kTerminatingAndJoining)) {
- // Avoid double-joining thread.
- DCHECK(state_.load() == State::kTerminatingAndJoining ||
- state_.load() == State::kTerminatedAndJoined);
- return;
- }
+ {
+ base::MutexGuard lock_guard(&worker_mutex_);
+ // Prevent double-joining.
+ if (is_joined_) return;
+ is_joined_ = true;
}
-
thread_->Join();
- expected = State::kTerminatingAndJoining;
- CHECK(state_.compare_exchange_strong(expected, State::kTerminatedAndJoined));
}
void Worker::Terminate() {
+ base::MutexGuard lock_guard(&worker_mutex_);
auto expected = State::kRunning;
if (!state_.compare_exchange_strong(expected, State::kTerminating)) return;
- // Hold the worker_mutex_ so that the worker thread can't delete task_runner_
- // after we've checked state_.
- base::MutexGuard lock_guard(&worker_mutex_);
- CHECK(state_.load() == State::kTerminating ||
- state_.load() == State::kTerminatingAndJoining);
- // Post a task to wake up the worker thread.
std::unique_ptr<v8::Task> task(
new TerminateTask(task_manager_, shared_from_this()));
task_runner_->PostTask(std::move(task));
@@ -4141,9 +4210,7 @@ void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
context, String::NewFromUtf8Literal(isolate_, "onmessage",
NewStringType::kInternalized));
Local<Value> onmessage;
- if (!maybe_onmessage.ToLocal(&onmessage) || !onmessage->IsFunction()) {
- return;
- }
+ if (!maybe_onmessage.ToLocal(&onmessage) || !onmessage->IsFunction()) return;
Local<Function> onmessage_fun = onmessage.As<Function>();
v8::TryCatch try_catch(isolate_);
@@ -4174,12 +4241,14 @@ void Worker::ExecuteInThread() {
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
create_params.experimental_attach_to_shared_isolate = Shell::shared_isolate;
isolate_ = Isolate::New(create_params);
- {
- base::MutexGuard lock_guard(&worker_mutex_);
- task_runner_ = g_default_platform->GetForegroundTaskRunner(isolate_);
- task_manager_ =
- reinterpret_cast<i::Isolate*>(isolate_)->cancelable_task_manager();
- }
+
+ task_runner_ = g_default_platform->GetForegroundTaskRunner(isolate_);
+ task_manager_ =
+ reinterpret_cast<i::Isolate*>(isolate_)->cancelable_task_manager();
+
+ auto expected = State::kPrepareRunning;
+ CHECK(state_.compare_exchange_strong(expected, State::kRunning));
+
// The Worker is now ready to receive messages.
started_semaphore_.Signal();
@@ -4236,17 +4305,15 @@ void Worker::ExecuteInThread() {
}
Shell::CollectGarbage(isolate_);
}
- // TODO(cbruni): Check for unhandled promises here.
+
{
- // Hold the mutex to ensure task_runner_ changes state
- // atomically (see Worker::PostMessage which reads them).
base::MutexGuard lock_guard(&worker_mutex_);
- // Mark worker as terminated if it's still running.
- auto expected = State::kRunning;
- state_.compare_exchange_strong(expected, State::kTerminated);
+ state_.store(State::kTerminated);
+ CHECK(!is_running());
task_runner_.reset();
task_manager_ = nullptr;
}
+
context_.Reset();
platform::NotifyIsolateShutdown(g_default_platform, isolate_);
isolate_->Dispose();
@@ -4331,7 +4398,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
argv[i] = nullptr;
} else if (strcmp(argv[i], "--invoke-weak-callbacks") == 0) {
options.invoke_weak_callbacks = true;
- // TODO(jochen) See issue 3351
+ // TODO(v8:3351): Invoking weak callbacks does not always collect all
+ // available garbage.
options.send_idle_notification = true;
argv[i] = nullptr;
} else if (strcmp(argv[i], "--omit-quit") == 0) {
@@ -4544,7 +4612,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current++;
current->Begin(argv, i + 1);
} else if (strcmp(str, "--module") == 0 ||
- strcmp(str, "--web-snapshot") == 0) {
+ strcmp(str, "--web-snapshot") == 0 ||
+ strcmp(str, "--json") == 0) {
// Pass on to SourceGroup, which understands these options.
} else if (strncmp(str, "--", 2) == 0) {
if (!i::FLAG_correctness_fuzzer_suppressions) {
@@ -4609,6 +4678,12 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
}
}
CollectGarbage(isolate);
+
+ // Park the main thread here to prevent deadlocks in shared GCs when waiting
+ // in JoinThread.
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::ParkedScope parked(i_isolate->main_thread_local_isolate());
+
for (int i = 1; i < options.num_isolates; ++i) {
if (last_run) {
options.isolate_sources[i].JoinThread();
@@ -4860,6 +4935,30 @@ class Serializer : public ValueSerializer::Delegate {
void FreeBufferMemory(void* buffer) override { base::Free(buffer); }
+ bool SupportsSharedValues() const override { return true; }
+
+ Maybe<uint32_t> GetSharedValueId(Isolate* isolate,
+ Local<Value> shared_value) override {
+ DCHECK_NOT_NULL(data_);
+ for (size_t index = 0; index < data_->shared_values_.size(); ++index) {
+ if (data_->shared_values_[index] == shared_value) {
+ return Just<uint32_t>(static_cast<uint32_t>(index));
+ }
+ }
+
+ size_t index = data_->shared_values_.size();
+ // Shared values in transit are kept alive by global handles in the shared
+ // isolate. No code ever runs in the shared Isolate, so locking it does not
+ // contend with long-running tasks.
+ {
+ DCHECK_EQ(reinterpret_cast<i::Isolate*>(isolate)->shared_isolate(),
+ reinterpret_cast<i::Isolate*>(Shell::shared_isolate));
+ v8::Locker locker(Shell::shared_isolate);
+ data_->shared_values_.emplace_back(Shell::shared_isolate, shared_value);
+ }
+ return Just<uint32_t>(static_cast<uint32_t>(index));
+ }
+
private:
Maybe<bool> PrepareTransfer(Local<Context> context, Local<Value> transfer) {
if (transfer->IsArray()) {
@@ -4926,6 +5025,12 @@ class Serializer : public ValueSerializer::Delegate {
size_t current_memory_usage_;
};
+void SerializationData::ClearSharedValuesUnderLockIfNeeded() {
+ if (shared_values_.empty()) return;
+ v8::Locker locker(Shell::shared_isolate);
+ shared_values_.clear();
+}
+
class Deserializer : public ValueDeserializer::Delegate {
public:
Deserializer(Isolate* isolate, std::unique_ptr<SerializationData> data)
@@ -4935,6 +5040,12 @@ class Deserializer : public ValueDeserializer::Delegate {
deserializer_.SetSupportsLegacyWireFormat(true);
}
+ ~Deserializer() {
+ DCHECK_EQ(reinterpret_cast<i::Isolate*>(isolate_)->shared_isolate(),
+ reinterpret_cast<i::Isolate*>(Shell::shared_isolate));
+ data_->ClearSharedValuesUnderLockIfNeeded();
+ }
+
Deserializer(const Deserializer&) = delete;
Deserializer& operator=(const Deserializer&) = delete;
@@ -4972,6 +5083,17 @@ class Deserializer : public ValueDeserializer::Delegate {
isolate_, data_->compiled_wasm_modules().at(transfer_id));
}
+ bool SupportsSharedValues() const override { return true; }
+
+ MaybeLocal<Value> GetSharedValueFromId(Isolate* isolate,
+ uint32_t id) override {
+ DCHECK_NOT_NULL(data_);
+ if (id < data_->shared_values().size()) {
+ return data_->shared_values().at(id).Get(isolate);
+ }
+ return MaybeLocal<Value>();
+ }
+
private:
Isolate* isolate_;
ValueDeserializer deserializer_;
@@ -5077,7 +5199,9 @@ void Shell::WaitForRunningWorkers() {
namespace {
-bool HasFlagThatRequiresSharedIsolate() { return i::FLAG_shared_string_table; }
+bool HasFlagThatRequiresSharedIsolate() {
+ return i::FLAG_shared_string_table || i::FLAG_harmony_struct;
+}
} // namespace
@@ -5169,9 +5293,9 @@ int Shell::Main(int argc, char* argv[]) {
V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
v8::V8::Initialize();
@@ -5308,24 +5432,31 @@ int Shell::Main(int argc, char* argv[]) {
}
} else if (options.code_cache_options !=
ShellOptions::CodeCacheOptions::kNoProduceCache) {
- printf("============ Run: Produce code cache ============\n");
- // First run to produce the cache
- Isolate::CreateParams create_params2;
- create_params2.array_buffer_allocator = Shell::array_buffer_allocator;
- create_params2.experimental_attach_to_shared_isolate =
- Shell::shared_isolate;
- i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* isolate2 = Isolate::New(create_params2);
- i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
{
- D8Console console2(isolate2);
- Initialize(isolate2, &console2);
- PerIsolateData data2(isolate2);
- Isolate::Scope isolate_scope(isolate2);
-
- result = RunMain(isolate2, false);
+ // Park the main thread here in case the new isolate wants to perform
+ // a shared GC to prevent a deadlock.
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::ParkedScope parked(i_isolate->main_thread_local_isolate());
+
+ printf("============ Run: Produce code cache ============\n");
+ // First run to produce the cache
+ Isolate::CreateParams create_params2;
+ create_params2.array_buffer_allocator = Shell::array_buffer_allocator;
+ create_params2.experimental_attach_to_shared_isolate =
+ Shell::shared_isolate;
+ i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
+ Isolate* isolate2 = Isolate::New(create_params2);
+ i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
+ {
+ D8Console console2(isolate2);
+ Initialize(isolate2, &console2);
+ PerIsolateData data2(isolate2);
+ Isolate::Scope isolate_scope(isolate2);
+
+ result = RunMain(isolate2, false);
+ }
+ isolate2->Dispose();
}
- isolate2->Dispose();
// Change the options to consume cache
DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 61f44455fb..7c42c5e7e2 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -151,6 +151,11 @@ class SerializationData {
const std::vector<CompiledWasmModule>& compiled_wasm_modules() {
return compiled_wasm_modules_;
}
+ const std::vector<v8::Global<v8::Value>>& shared_values() {
+ return shared_values_;
+ }
+
+ void ClearSharedValuesUnderLockIfNeeded();
private:
struct DataDeleter {
@@ -162,6 +167,7 @@ class SerializationData {
std::vector<std::shared_ptr<v8::BackingStore>> backing_stores_;
std::vector<std::shared_ptr<v8::BackingStore>> sab_backing_stores_;
std::vector<CompiledWasmModule> compiled_wasm_modules_;
+ std::vector<v8::Global<v8::Value>> shared_values_;
private:
friend class Serializer;
@@ -211,11 +217,10 @@ class Worker : public std::enable_shared_from_this<Worker> {
enum class State {
kReady,
+ kPrepareRunning,
kRunning,
kTerminating,
kTerminated,
- kTerminatingAndJoining,
- kTerminatedAndJoined
};
bool is_running() const;
@@ -242,6 +247,7 @@ class Worker : public std::enable_shared_from_this<Worker> {
base::Thread* thread_ = nullptr;
char* script_;
std::atomic<State> state_;
+ bool is_joined_ = false;
// For signalling that the worker has started.
base::Semaphore started_semaphore_{0};
@@ -485,6 +491,7 @@ class Shell : public i::AllStatic {
ProcessMessageQueue process_message_queue);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
static bool ExecuteWebSnapshot(Isolate* isolate, const char* file_name);
+ static bool LoadJSON(Isolate* isolate, const char* file_name);
static void ReportException(Isolate* isolate, Local<Message> message,
Local<Value> exception);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
@@ -636,6 +643,8 @@ class Shell : public i::AllStatic {
static void HostInitializeImportMetaObject(Local<Context> context,
Local<Module> module,
Local<Object> meta);
+ static MaybeLocal<Context> HostCreateShadowRealmContext(
+ Local<Context> initiator_context);
#ifdef V8_FUZZILLI
static void Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 122a7a2213..222f2cf2fa 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -545,7 +545,12 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
count =
static_cast<uint32_t>(func.feedback_vector().invocation_count());
} else if (func.raw_feedback_cell().interrupt_budget() <
- FLAG_budget_for_feedback_vector_allocation) {
+ FLAG_interrupt_budget_for_feedback_allocation) {
+ // TODO(jgruber): The condition above is no longer precise since we
+ // may use either the fixed interrupt_budget or
+ // FLAG_interrupt_budget_factor_for_feedback_allocation. If the
+ // latter, we may incorrectly set a count of 1.
+ //
// We haven't allocated feedback vector, but executed the function
// atleast once. We don't have precise invocation count here.
count = 1;
@@ -607,7 +612,8 @@ std::unique_ptr<Coverage> Coverage::CollectPrecise(Isolate* isolate) {
isolate->is_block_binary_code_coverage())) {
// We do not have to hold onto feedback vectors for invocations we already
// reported. So we can reset the list.
- isolate->SetFeedbackVectorsForProfilingTools(*ArrayList::New(isolate, 0));
+ isolate->SetFeedbackVectorsForProfilingTools(
+ ReadOnlyRoots(isolate).empty_array_list());
}
return result;
}
@@ -805,7 +811,7 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
IsCompiledScope is_compiled_scope(
func->shared().is_compiled_scope(isolate));
CHECK(is_compiled_scope.is_compiled());
- JSFunction::EnsureFeedbackVector(func, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, func, &is_compiled_scope);
}
// Root all feedback vectors to avoid early collection.
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 6f3f79f9ef..184929c80b 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -283,8 +283,8 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
for (int i = 0; i < keys->length(); i++) {
DCHECK(keys->get(i).IsString());
Handle<String> key(String::cast(keys->get(i)), isolate_);
- Handle<Object> value =
- JSReceiver::GetDataProperty(element.materialized_object, key);
+ Handle<Object> value = JSReceiver::GetDataProperty(
+ isolate_, element.materialized_object, key);
scope_iterator_.SetVariableValue(key, value);
}
}
@@ -433,8 +433,8 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
// Loads.
case Bytecode::kLdaLookupSlot:
case Bytecode::kLdaGlobal:
- case Bytecode::kLdaNamedProperty:
- case Bytecode::kLdaKeyedProperty:
+ case Bytecode::kGetNamedProperty:
+ case Bytecode::kGetKeyedProperty:
case Bytecode::kLdaGlobalInsideTypeof:
case Bytecode::kLdaLookupSlotInsideTypeof:
case Bytecode::kGetIterator:
@@ -831,12 +831,22 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kErrorConstructor:
// RegExp builtins.
case Builtin::kRegExpConstructor:
+ // Reflect builtins.
+ case Builtin::kReflectApply:
+ case Builtin::kReflectConstruct:
+ case Builtin::kReflectGetOwnPropertyDescriptor:
+ case Builtin::kReflectGetPrototypeOf:
+ case Builtin::kReflectHas:
+ case Builtin::kReflectIsExtensible:
+ case Builtin::kReflectOwnKeys:
// Internal.
case Builtin::kStrictPoisonPillThrower:
case Builtin::kAllocateInYoungGeneration:
case Builtin::kAllocateInOldGeneration:
case Builtin::kAllocateRegularInYoungGeneration:
case Builtin::kAllocateRegularInOldGeneration:
+ case Builtin::kConstructVarargs:
+ case Builtin::kConstructWithArrayLike:
return DebugInfo::kHasNoSideEffect;
#ifdef V8_INTL_SUPPORT
@@ -975,11 +985,11 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
using interpreter::Bytecode;
switch (bytecode) {
- case Bytecode::kStaNamedProperty:
- case Bytecode::kStaNamedOwnProperty:
- case Bytecode::kStaKeyedProperty:
+ case Bytecode::kSetNamedProperty:
+ case Bytecode::kDefineNamedOwnProperty:
+ case Bytecode::kSetKeyedProperty:
case Bytecode::kStaInArrayLiteral:
- case Bytecode::kStaDataPropertyInLiteral:
+ case Bytecode::kDefineKeyedOwnPropertyInLiteral:
case Bytecode::kStaCurrentContextSlot:
return true;
default:
@@ -1188,7 +1198,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
if (state != DebugInfo::kHasNoSideEffect) continue;
- Code code = isolate->builtins()->code(caller);
+ Code code = FromCodeT(isolate->builtins()->code(caller));
int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index bcb8da6652..70d185c309 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -94,9 +94,7 @@ bool FrameInspector::IsJavaScript() { return frame_->is_java_script(); }
bool FrameInspector::ParameterIsShadowedByContextLocal(
Handle<ScopeInfo> info, Handle<String> parameter_name) {
- VariableLookupResult lookup_result;
- return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &lookup_result) !=
- -1;
+ return info->ContextSlotIndex(parameter_name) != -1;
}
RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index 1ec2f04bc7..a09a21252a 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -15,8 +15,8 @@
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/heap.h"
#include "src/objects/js-generator-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/strings/string-builder-inl.h"
@@ -112,17 +112,15 @@ void CollectPrivateMethodsAndAccessorsFromContext(
i::IsStaticFlag is_static_flag, std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out) {
i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::VariableMode mode = scope_info->ContextLocalMode(j);
- i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(j);
+ for (auto it : i::ScopeInfo::IterateLocalNames(scope_info)) {
+ i::Handle<i::String> name(it->name(), isolate);
+ i::VariableMode mode = scope_info->ContextLocalMode(it->index());
+ i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(it->index());
if (!i::IsPrivateMethodOrAccessorVariableMode(mode) ||
flag != is_static_flag) {
continue;
}
-
- i::Handle<i::String> name(scope_info->ContextLocalName(j), isolate);
- int context_index = scope_info->ContextHeaderLength() + j;
+ int context_index = scope_info->ContextHeaderLength() + it->index();
i::Handle<i::Object> slot_value(context->get(context_index), isolate);
DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
slot_value->IsJSFunction());
@@ -295,8 +293,43 @@ void SetTerminateOnResume(Isolate* v8_isolate) {
bool CanBreakProgram(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
- return isolate->debug()->AllFramesOnStackAreBlackboxed();
+ return !isolate->debug()->AllFramesOnStackAreBlackboxed();
+}
+
+size_t ScriptSource::Length() const {
+ i::Handle<i::HeapObject> source = Utils::OpenHandle(this);
+ if (source->IsString()) return i::Handle<i::String>::cast(source)->length();
+ return Size();
+}
+
+size_t ScriptSource::Size() const {
+#if V8_ENABLE_WEBASSEMBLY
+ MemorySpan<const uint8_t> wasm_bytecode;
+ if (WasmBytecode().To(&wasm_bytecode)) {
+ return wasm_bytecode.size();
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ i::Handle<i::HeapObject> source = Utils::OpenHandle(this);
+ if (!source->IsString()) return 0;
+ i::Handle<i::String> string = i::Handle<i::String>::cast(source);
+ return string->length() * (string->IsTwoByteRepresentation() ? 2 : 1);
+}
+
+MaybeLocal<String> ScriptSource::JavaScriptCode() const {
+ i::Handle<i::HeapObject> source = Utils::OpenHandle(this);
+ if (!source->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(i::Handle<i::String>::cast(source));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+Maybe<MemorySpan<const uint8_t>> ScriptSource::WasmBytecode() const {
+ i::Handle<i::HeapObject> source = Utils::OpenHandle(this);
+ if (!source->IsForeign()) return Nothing<MemorySpan<const uint8_t>>();
+ base::Vector<const uint8_t> wire_bytes =
+ i::Managed<i::wasm::NativeModule>::cast(*source).raw()->wire_bytes();
+ return Just(MemorySpan<const uint8_t>{wire_bytes.begin(), wire_bytes.size()});
}
+#endif // V8_ENABLE_WEBASSEMBLY
Isolate* Script::GetIsolate() const {
return reinterpret_cast<Isolate*>(Utils::OpenHandle(this)->GetIsolate());
@@ -319,81 +352,89 @@ bool Script::IsEmbedded() const {
int Script::Id() const { return Utils::OpenHandle(this)->id(); }
-int Script::LineOffset() const {
- return Utils::OpenHandle(this)->line_offset();
-}
+int Script::StartLine() const { return Utils::OpenHandle(this)->line_offset(); }
-int Script::ColumnOffset() const {
+int Script::StartColumn() const {
return Utils::OpenHandle(this)->column_offset();
}
-std::vector<int> Script::LineEnds() const {
+int Script::EndLine() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
#if V8_ENABLE_WEBASSEMBLY
- if (script->type() == i::Script::TYPE_WASM) return {};
+ if (script->type() == i::Script::TYPE_WASM) return 0;
#endif // V8_ENABLE_WEBASSEMBLY
-
+ if (!script->source().IsString()) {
+ return script->line_offset();
+ }
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
- i::Script::InitLineEnds(isolate, script);
- CHECK(script->line_ends().IsFixedArray());
- i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
- isolate);
- std::vector<int> result(line_ends->length());
- for (int i = 0; i < line_ends->length(); ++i) {
- i::Smi line_end = i::Smi::cast(line_ends->get(i));
- result[i] = line_end.value();
+ i::Script::PositionInfo info;
+ i::Script::GetPositionInfo(script, i::String::cast(script->source()).length(),
+ &info, i::Script::WITH_OFFSET);
+ return info.line;
+}
+
+int Script::EndColumn() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+#if V8_ENABLE_WEBASSEMBLY
+ if (script->type() == i::Script::TYPE_WASM) {
+ return script->wasm_native_module()->wire_bytes().length();
}
- return result;
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (!script->source().IsString()) {
+ return script->column_offset();
+ }
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Script::PositionInfo info;
+ i::Script::GetPositionInfo(script, i::String::cast(script->source()).length(),
+ &info, i::Script::WITH_OFFSET);
+ return info.column;
}
MaybeLocal<String> Script::Name() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->name(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+ return Utils::ToLocal(i::Handle<i::String>::cast(value));
}
MaybeLocal<String> Script::SourceURL() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->source_url(), isolate);
+ i::Handle<i::PrimitiveHeapObject> value(script->source_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+ return Utils::ToLocal(i::Handle<i::String>::cast(value));
}
MaybeLocal<String> Script::SourceMappingURL() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source_mapping_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+ return Utils::ToLocal(i::Handle<i::String>::cast(value));
}
Maybe<int> Script::ContextId() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Object value = script->context_data();
if (value.IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
-MaybeLocal<String> Script::Source() const {
+Local<ScriptSource> Script::Source() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->source(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+#if V8_ENABLE_WEBASSEMBLY
+ if (script->type() == i::Script::TYPE_WASM) {
+ i::Handle<i::Object> wasm_native_module(
+ script->wasm_managed_native_module(), isolate);
+ return Utils::Convert<i::Object, ScriptSource>(wasm_native_module);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ i::Handle<i::PrimitiveHeapObject> source(script->source(), isolate);
+ return Utils::Convert<i::PrimitiveHeapObject, ScriptSource>(source);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -506,6 +547,10 @@ Location Script::GetSourceLocation(int offset) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Script::PositionInfo info;
i::Script::GetPositionInfo(script, offset, &info, i::Script::WITH_OFFSET);
+ if (script->HasSourceURLComment()) {
+ info.line -= script->line_offset();
+ if (info.line == 0) info.column -= script->column_offset();
+ }
return Location(info.line, info.column);
}
@@ -530,12 +575,12 @@ bool Script::SetBreakpoint(Local<String> condition, Location* location,
return true;
}
-bool Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
+bool Script::SetInstrumentationBreakpoint(BreakpointId* id) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (script->type() == i::Script::TYPE_WASM) {
- isolate->debug()->SetOnEntryBreakpointForWasmScript(script, id);
+ isolate->debug()->SetInstrumentationBreakpointForWasmScript(script, id);
return true;
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -543,7 +588,8 @@ bool Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
for (i::SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
if (sfi.is_toplevel()) {
return isolate->debug()->SetBreakpointForFunction(
- handle(sfi, isolate), isolate->factory()->empty_string(), id);
+ handle(sfi, isolate), isolate->factory()->empty_string(), id,
+ internal::Debug::kInstrumentation);
}
}
return false;
@@ -568,8 +614,9 @@ Platform* GetCurrentPlatform() { return i::V8::GetCurrentPlatform(); }
void ForceGarbageCollection(
Isolate* isolate,
EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
- i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
+ i::EmbedderStackStateScope stack_scope(
+ reinterpret_cast<i::Isolate*>(isolate)->heap(),
+ i::EmbedderStackStateScope::kImplicitThroughTask, embedder_stack_state);
isolate->LowMemoryNotification();
}
@@ -629,13 +676,6 @@ int WasmScript::NumImportedFunctions() const {
return static_cast<int>(module->num_imported_functions);
}
-MemorySpan<const uint8_t> WasmScript::Bytecode() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- base::Vector<const uint8_t> wire_bytes =
- script->wasm_native_module()->wire_bytes();
- return {wire_bytes.begin(), wire_bytes.size()};
-}
-
std::pair<int, int> WasmScript::GetFunctionRange(int function_index) const {
i::DisallowGarbageCollection no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -875,16 +915,14 @@ ConsoleCallArguments::ConsoleCallArguments(
args.length() > 1 ? args.address_of_first_argument() : nullptr,
args.length() - 1) {}
-v8::Local<v8::StackTrace> GetDetailedStackTrace(
- Isolate* v8_isolate, v8::Local<v8::Object> v8_error) {
+v8::Local<v8::Message> CreateMessageFromException(
+ Isolate* v8_isolate, v8::Local<v8::Value> v8_error) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*v8_error);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::Handle<i::JSReceiver> error = Utils::OpenHandle(*v8_error);
- if (!error->IsJSObject()) {
- return v8::Local<v8::StackTrace>();
- }
- i::Handle<i::FixedArray> stack_trace =
- isolate->GetDetailedStackTrace(i::Handle<i::JSObject>::cast(error));
- return Utils::StackTraceToLocal(stack_trace);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::HandleScope scope(isolate);
+ return Utils::MessageToLocal(
+ scope.CloseAndEscape(isolate->CreateMessageFromException(obj)));
}
MaybeLocal<Script> GeneratorObject::Script() {
@@ -1000,11 +1038,9 @@ void GlobalLexicalScopeNames(v8::Local<v8::Context> v8_context,
i::ScriptContextTable::GetContext(isolate, table, i);
DCHECK(script_context->IsScriptContext());
i::Handle<i::ScopeInfo> scope_info(script_context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::String name = scope_info->ContextLocalName(j);
- if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
- names->Append(Utils::ToLocal(handle(name, isolate)));
+ for (auto it : i::ScopeInfo::IterateLocalNames(scope_info)) {
+ if (i::ScopeInfo::VariableIsSynthetic(it->name())) continue;
+ names->Append(Utils::ToLocal(handle(it->name(), isolate)));
}
}
}
@@ -1226,7 +1262,7 @@ MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
i::Handle<i::Symbol> key = isolate->factory()->promise_debug_message_symbol();
i::Handle<i::Object> maybeMessage =
- i::JSReceiver::GetDataProperty(promise, key);
+ i::JSReceiver::GetDataProperty(isolate, promise, key);
if (!maybeMessage->IsJSMessageObject(isolate)) return MaybeLocal<Message>();
return ToApiHandle<Message>(
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 125623afea..e099cb699a 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -161,6 +161,28 @@ struct LiveEditResult {
};
/**
+ * An internal representation of the source for a given
+ * `v8::debug::Script`, which can be a `v8::String`, in
+ * which case it represents JavaScript source, or it can
+ * be a managed pointer to a native Wasm module, or it
+ * can be undefined to indicate that source is unavailable.
+ */
+class V8_EXPORT_PRIVATE ScriptSource {
+ public:
+ // The number of characters in case of JavaScript or
+ // the size of the memory in case of WebAssembly.
+ size_t Length() const;
+
+ // The actual size of the source in bytes.
+ size_t Size() const;
+
+ MaybeLocal<String> JavaScriptCode() const;
+#if V8_ENABLE_WEBASSEMBLY
+ Maybe<MemorySpan<const uint8_t>> WasmBytecode() const;
+#endif // V8_ENABLE_WEBASSEMBLY
+};
+
+/**
* Native wrapper around v8::internal::Script object.
*/
class V8_EXPORT_PRIVATE Script {
@@ -171,14 +193,15 @@ class V8_EXPORT_PRIVATE Script {
bool WasCompiled() const;
bool IsEmbedded() const;
int Id() const;
- int LineOffset() const;
- int ColumnOffset() const;
- std::vector<int> LineEnds() const;
+ int StartLine() const;
+ int StartColumn() const;
+ int EndLine() const;
+ int EndColumn() const;
MaybeLocal<String> Name() const;
MaybeLocal<String> SourceURL() const;
MaybeLocal<String> SourceMappingURL() const;
Maybe<int> ContextId() const;
- MaybeLocal<String> Source() const;
+ Local<ScriptSource> Source() const;
bool IsModule() const;
bool GetPossibleBreakpoints(
const debug::Location& start, const debug::Location& end,
@@ -194,7 +217,7 @@ class V8_EXPORT_PRIVATE Script {
bool IsWasm() const;
void RemoveWasmBreakpoint(BreakpointId id);
#endif // V8_ENABLE_WEBASSEMBLY
- bool SetBreakpointOnScriptEntry(BreakpointId* id) const;
+ bool SetInstrumentationBreakpoint(BreakpointId* id) const;
};
#if V8_ENABLE_WEBASSEMBLY
@@ -208,7 +231,6 @@ class WasmScript : public Script {
MemorySpan<const char> ExternalSymbolsURL() const;
int NumFunctions() const;
int NumImportedFunctions() const;
- MemorySpan<const uint8_t> Bytecode() const;
std::pair<int, int> GetFunctionRange(int function_index) const;
int GetContainingFunction(int byte_offset) const;
@@ -239,6 +261,9 @@ class DebugDelegate {
v8::Local<v8::Context> paused_context,
const std::vector<debug::BreakpointId>& inspector_break_points_hit,
base::EnumSet<BreakReason> break_reasons = {}) {}
+ virtual void BreakOnInstrumentation(
+ v8::Local<v8::Context> paused_context,
+ const debug::BreakpointId instrumentationId) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught,
@@ -284,8 +309,8 @@ Local<Function> GetBuiltin(Isolate* isolate, Builtin builtin);
V8_EXPORT_PRIVATE void SetConsoleDelegate(Isolate* isolate,
ConsoleDelegate* delegate);
-v8::Local<v8::StackTrace> GetDetailedStackTrace(Isolate* isolate,
- v8::Local<v8::Object> error);
+V8_EXPORT_PRIVATE v8::Local<v8::Message> CreateMessageFromException(
+ Isolate* isolate, v8::Local<v8::Value> error);
/**
* Native wrapper around v8::internal::JSGeneratorObject object.
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index 40406deacf..951ec028c0 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -124,7 +124,15 @@ v8::Maybe<v8::PropertyAttribute> DebugPropertyIterator::attributes() {
// If you are running into this problem, check your embedder implementation
// and verify that the data from both sides matches. If there is a mismatch,
// V8 will crash.
- DCHECK(result.FromJust() != ABSENT);
+
+#if DEBUG
+ base::ScopedVector<char> property_message(128);
+ base::ScopedVector<char> name_buffer(100);
+ raw_name()->NameShortPrint(name_buffer);
+ v8::base::SNPrintF(property_message, "Invalid result for property \"%s\"\n",
+ name_buffer.begin());
+ DCHECK_WITH_MSG(result.FromJust() != ABSENT, property_message.begin());
+#endif
return Just(static_cast<v8::PropertyAttribute>(result.FromJust()));
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 93f7667486..bc21f68137 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -565,7 +565,7 @@ Handle<JSObject> ScopeIterator::ScopeObject(Mode mode) {
if (value->IsTheHole(isolate_)) {
// Reflect variables under TDZ as undefined in scope object.
if (scope_type == ScopeTypeScript &&
- JSReceiver::HasOwnProperty(scope, name).FromMaybe(true)) {
+ JSReceiver::HasOwnProperty(isolate_, scope, name).FromMaybe(true)) {
// We also use the hole to represent overridden let-declarations via
// REPL mode in a script context. Catch this case.
return false;
@@ -780,10 +780,10 @@ bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
Handle<Context> context,
ScopeType scope_type) const {
// Fill all context locals to the context extension.
- for (int i = 0; i < scope_info->ContextLocalCount(); ++i) {
- Handle<String> name(scope_info->ContextLocalName(i), isolate_);
+ for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
+ Handle<String> name(it->name(), isolate_);
if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- int context_index = scope_info->ContextHeaderLength() + i;
+ int context_index = scope_info->ContextHeaderLength() + it->index();
Handle<Object> value(context->get(context_index), isolate_);
if (visitor(name, value, scope_type)) return true;
}
@@ -964,7 +964,8 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode,
// Names of variables introduced by eval are strings.
DCHECK(keys->get(i).IsString());
Handle<String> key(String::cast(keys->get(i)), isolate_);
- Handle<Object> value = JSReceiver::GetDataProperty(extension, key);
+ Handle<Object> value =
+ JSReceiver::GetDataProperty(isolate_, extension, key);
if (visitor(key, value, scope_type)) return;
}
}
@@ -1062,11 +1063,8 @@ bool ScopeIterator::SetContextExtensionValue(Handle<String> variable_name,
bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- VariableLookupResult lookup_result;
- int slot_index = ScopeInfo::ContextSlotIndex(context_->scope_info(),
- *variable_name, &lookup_result);
+ int slot_index = context_->scope_info().ContextSlotIndex(variable_name);
if (slot_index < 0) return false;
-
context_->set(slot_index, *new_value);
return true;
}
@@ -1098,8 +1096,7 @@ bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
context_->global_object().native_context().script_context_table(),
isolate_);
VariableLookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate_, *script_contexts, *variable_name,
- &lookup_result)) {
+ if (script_contexts->Lookup(variable_name, &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate_, script_contexts, lookup_result.context_index);
script_context->set(lookup_result.slot_index, *new_value);
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 7cc0e1ed16..a499a6f399 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -5,8 +5,6 @@
#ifndef V8_DEBUG_DEBUG_SCOPES_H_
#define V8_DEBUG_DEBUG_SCOPES_H_
-#include <vector>
-
#include "src/debug/debug-frames.h"
#include "src/parsing/parse-info.h"
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 81b2487a5b..2425e8bed5 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -101,10 +101,8 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
return v8::MaybeLocal<v8::Value>();
}
DisallowGarbageCollection no_gc;
- VariableLookupResult lookup_result;
- int slot_index = ScopeInfo::ContextSlotIndex(
- context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
- &lookup_result);
+ int slot_index = context->scope_info().ContextSlotIndex(
+ ReadOnlyRoots(isolate_).this_string_handle());
if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
Handle<Object> value = handle(context->get(slot_index), isolate_);
if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index 2a65ffc34b..70fef6a943 100644
--- a/deps/v8/src/debug/debug-wasm-objects.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -649,7 +649,8 @@ class ContextProxy {
GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance);
JSObject::AddProperty(isolate, object, "functions", functions, FROZEN);
Handle<JSObject> prototype = ContextProxyPrototype::Create(isolate);
- JSObject::SetPrototype(object, prototype, false, kDontThrow).Check();
+ JSObject::SetPrototype(isolate, object, prototype, false, kDontThrow)
+ .Check();
return object;
}
};
@@ -770,68 +771,23 @@ Handle<String> WasmSimd128ToString(Isolate* isolate, wasm::Simd128 s128) {
Handle<String> GetRefTypeName(Isolate* isolate, wasm::ValueType type,
wasm::NativeModule* module) {
- bool is_nullable = type.kind() == wasm::kOptRef;
- const char* null_str = is_nullable ? " null" : "";
- // This length only needs to be enough for generated names like
- // "(ref null $type12345)". For names coming from the name section,
- // we'll dynamically allocate an appropriately sized vector.
- base::EmbeddedVector<char, 32> type_name;
- size_t len;
+ DCHECK(type.is_object_reference());
+ std::ostringstream name;
if (type.heap_type().is_generic()) {
- const char* generic_name = "";
- wasm::HeapType::Representation heap_rep = type.heap_representation();
- switch (heap_rep) {
- case wasm::HeapType::kFunc:
- generic_name = "func";
- break;
- case wasm::HeapType::kExtern:
- generic_name = "extern";
- break;
- case wasm::HeapType::kEq:
- generic_name = "eq";
- break;
- case wasm::HeapType::kI31:
- generic_name = "i31";
- break;
- case wasm::HeapType::kData:
- generic_name = "data";
- break;
- case wasm::HeapType::kAny:
- generic_name = "any";
- break;
- default:
- UNREACHABLE();
- }
- len = SNPrintF(type_name, "(ref%s %s)", null_str, generic_name);
+ name << type.name();
} else {
- int type_index = type.ref_index();
+ name << "(ref " << (type.is_nullable() ? "null " : "") << "$";
wasm::ModuleWireBytes module_wire_bytes(module->wire_bytes());
- base::Vector<const char> name_vec = module_wire_bytes.GetNameOrNull(
- module->GetDebugInfo()->GetTypeName(type_index));
- if (name_vec.empty()) {
- len = SNPrintF(type_name, "(ref%s $type%u)", null_str, type_index);
+ base::Vector<const char> module_name = module_wire_bytes.GetNameOrNull(
+ module->GetDebugInfo()->GetTypeName(type.ref_index()));
+ if (module_name.empty()) {
+ name << "type" << type.ref_index();
} else {
- size_t required_length =
- name_vec.size() + // length of provided name
- 7 + // length of "(ref $)"
- (is_nullable ? 5 : 0); // length of " null" (optional)
- base::Vector<char> long_type_name =
- base::Vector<char>::New(required_length);
- len = SNPrintF(long_type_name, "(ref%s $", null_str);
- base::Vector<char> suffix =
- long_type_name.SubVector(len, long_type_name.size());
- // StrNCpy requires that there is room for an assumed trailing \0...
- DCHECK_EQ(suffix.size(), name_vec.size() + 1);
- base::StrNCpy(suffix, name_vec.data(), name_vec.size());
- // ...but we actually write ')' into that byte.
- long_type_name[required_length - 1] = ')';
- Handle<String> result =
- isolate->factory()->InternalizeString(long_type_name);
- long_type_name.Dispose();
- return result;
+ name.write(module_name.begin(), module_name.size());
}
+ name << ")";
}
- return isolate->factory()->InternalizeString(type_name.SubVector(0, len));
+ return isolate->factory()->InternalizeString(base::VectorOf(name.str()));
}
} // namespace
@@ -1013,13 +969,6 @@ Handle<WasmValueObject> WasmValueObject::New(
break;
}
case wasm::kOptRef:
- if (value.type().is_reference_to(wasm::HeapType::kExtern)) {
- t = isolate->factory()->InternalizeString(
- base::StaticCharVector("externref"));
- v = value.to_ref();
- break;
- }
- V8_FALLTHROUGH;
case wasm::kRef: {
t = GetRefTypeName(isolate, value.type(), module_object->native_module());
Handle<Object> ref = value.to_ref();
@@ -1027,11 +976,12 @@ Handle<WasmValueObject> WasmValueObject::New(
v = StructProxy::Create(isolate, value, module_object);
} else if (ref->IsWasmArray()) {
v = ArrayProxy::Create(isolate, value, module_object);
- } else if (ref->IsJSFunction() || ref->IsSmi() || ref->IsNull()) {
- v = ref;
} else if (ref->IsWasmInternalFunction()) {
v = handle(Handle<WasmInternalFunction>::cast(ref)->external(),
isolate);
+ } else if (ref->IsJSFunction() || ref->IsSmi() || ref->IsNull() ||
+ value.type().is_reference_to(wasm::HeapType::kAny)) {
+ v = ref;
} else {
// Fail gracefully.
base::EmbeddedVector<char, 64> error;
@@ -1041,8 +991,7 @@ Handle<WasmValueObject> WasmValueObject::New(
}
break;
}
- case wasm::kRtt:
- case wasm::kRttWithDepth: {
+ case wasm::kRtt: {
// TODO(7748): Expose RTTs to DevTools.
t = isolate->factory()->InternalizeString(base::StaticCharVector("rtt"));
v = isolate->factory()->InternalizeString(
@@ -1149,8 +1098,8 @@ Handle<ArrayList> AddWasmTableObjectInternalProperties(
}
Handle<JSArray> final_entries = isolate->factory()->NewJSArrayWithElements(
entries, i::PACKED_ELEMENTS, length);
- JSObject::SetPrototype(final_entries, isolate->factory()->null_value(), false,
- kDontThrow)
+ JSObject::SetPrototype(isolate, final_entries,
+ isolate->factory()->null_value(), false, kDontThrow)
.Check();
Handle<String> entries_string =
isolate->factory()->NewStringFromStaticChars("[[Entries]]");
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 48d0086155..d19cd0712e 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -124,6 +124,35 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
return it.GetBreakLocation();
}
+MaybeHandle<FixedArray> Debug::CheckBreakPointsForLocations(
+ Handle<DebugInfo> debug_info, std::vector<BreakLocation>& break_locations,
+ bool* has_break_points) {
+ Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(
+ debug_info->GetBreakPointCount(isolate_));
+ int break_points_hit_count = 0;
+ bool has_break_points_at_all = false;
+ for (size_t i = 0; i < break_locations.size(); i++) {
+ bool location_has_break_points;
+ MaybeHandle<FixedArray> check_result = CheckBreakPoints(
+ debug_info, &break_locations[i], &location_has_break_points);
+ has_break_points_at_all |= location_has_break_points;
+ if (!check_result.is_null()) {
+ Handle<FixedArray> break_points_current_hit =
+ check_result.ToHandleChecked();
+ int num_objects = break_points_current_hit->length();
+ for (int j = 0; j < num_objects; ++j) {
+ break_points_hit->set(break_points_hit_count++,
+ break_points_current_hit->get(j));
+ }
+ }
+ }
+ *has_break_points = has_break_points_at_all;
+ if (break_points_hit_count == 0) return {};
+
+ break_points_hit->Shrink(isolate_, break_points_hit_count);
+ return break_points_hit;
+}
+
void BreakLocation::AllAtCurrentStatement(
Handle<DebugInfo> debug_info, JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out) {
@@ -440,9 +469,22 @@ void Debug::Unload() {
ClearStepping();
RemoveAllCoverageInfos();
ClearAllDebuggerHints();
+ ClearGlobalPromiseStack();
debug_delegate_ = nullptr;
}
+void Debug::OnInstrumentationBreak() {
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
+ if (!debug_delegate_) return;
+ DCHECK(in_debug_scope());
+ HandleScope scope(isolate_);
+ DisableBreak no_recursive_break(this);
+
+ Handle<Context> native_context(isolate_->native_context());
+ debug_delegate_->BreakOnInstrumentation(v8::Utils::ToLocal(native_context),
+ kInstrumentationId);
+}
+
void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
// Just continue if breaks are disabled or debugger cannot be loaded.
@@ -461,10 +503,16 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Find the break location where execution has stopped.
BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
+ const bool hitInstrumentationBreak =
+ IsBreakOnInstrumentation(debug_info, location);
+ if (hitInstrumentationBreak) {
+ OnInstrumentationBreak();
+ }
// Find actual break points, if any, and trigger debug break event.
+ bool has_break_points;
MaybeHandle<FixedArray> break_points_hit =
- CheckBreakPoints(debug_info, &location);
+ CheckBreakPoints(debug_info, &location, &has_break_points);
if (!break_points_hit.is_null() || break_on_next_function_call()) {
StepAction lastStepAction = last_step_action();
// Clear all current stepping setup.
@@ -494,7 +542,9 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// StepOut at not return position was requested and return break locations
// were flooded with one shots.
if (thread_local_.fast_forward_to_return_) {
- DCHECK(location.IsReturnOrSuspend());
+ // We might hit an instrumentation breakpoint before running into a
+ // return/suspend location.
+ DCHECK(location.IsReturnOrSuspend() || hitInstrumentationBreak);
// We have to ignore recursive calls to function.
if (current_frame_count > target_frame_count) return;
ClearStepping();
@@ -552,19 +602,51 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
}
}
+bool Debug::IsBreakOnInstrumentation(Handle<DebugInfo> debug_info,
+ const BreakLocation& location) {
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
+ bool has_break_points_to_check =
+ break_points_active_ && location.HasBreakPoint(isolate_, debug_info);
+ if (!has_break_points_to_check) return {};
+
+ Handle<Object> break_points =
+ debug_info->GetBreakPoints(isolate_, location.position());
+ DCHECK(!break_points->IsUndefined(isolate_));
+ if (!break_points->IsFixedArray()) {
+ const Handle<BreakPoint> break_point =
+ Handle<BreakPoint>::cast(break_points);
+ return break_point->id() == kInstrumentationId;
+ }
+
+ Handle<FixedArray> array(FixedArray::cast(*break_points), isolate_);
+ for (int i = 0; i < array->length(); ++i) {
+ const Handle<BreakPoint> break_point =
+ Handle<BreakPoint>::cast(handle(array->get(i), isolate_));
+ if (break_point->id() == kInstrumentationId) {
+ return true;
+ }
+ }
+ return false;
+}
+
// Find break point objects for this location, if any, and evaluate them.
// Return an array of break point objects that evaluated true, or an empty
// handle if none evaluated true.
+// has_break_points will be true, if there is any (non-instrumentation)
+// breakpoint.
MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
BreakLocation* location,
bool* has_break_points) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
bool has_break_points_to_check =
break_points_active_ && location->HasBreakPoint(isolate_, debug_info);
- if (has_break_points) *has_break_points = has_break_points_to_check;
- if (!has_break_points_to_check) return {};
+ if (!has_break_points_to_check) {
+ *has_break_points = false;
+ return {};
+ }
- return Debug::GetHitBreakPoints(debug_info, location->position());
+ return Debug::GetHitBreakPoints(debug_info, location->position(),
+ has_break_points);
}
bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
@@ -595,31 +677,8 @@ MaybeHandle<FixedArray> Debug::GetHitBreakpointsAtCurrentStatement(
DebugScope debug_scope(this);
std::vector<BreakLocation> break_locations;
BreakLocation::AllAtCurrentStatement(debug_info, frame, &break_locations);
-
- Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(
- debug_info->GetBreakPointCount(isolate_));
- int break_points_hit_count = 0;
- bool has_break_points_at_all = false;
- for (size_t i = 0; i < break_locations.size(); i++) {
- bool location_has_break_points;
- MaybeHandle<FixedArray> check_result = CheckBreakPoints(
- debug_info, &break_locations[i], &location_has_break_points);
- has_break_points_at_all |= location_has_break_points;
- if (!check_result.is_null()) {
- Handle<FixedArray> break_points_current_hit =
- check_result.ToHandleChecked();
- int num_objects = break_points_current_hit->length();
- for (int j = 0; j < num_objects; ++j) {
- break_points_hit->set(break_points_hit_count++,
- break_points_current_hit->get(j));
- }
- }
- }
- *has_break_points = has_break_points_at_all;
- if (break_points_hit_count == 0) return {};
-
- break_points_hit->Shrink(isolate_, break_points_hit_count);
- return break_points_hit;
+ return CheckBreakPointsForLocations(debug_info, break_locations,
+ has_break_points);
}
// Check whether a single break point object is triggered.
@@ -628,6 +687,11 @@ bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
HandleScope scope(isolate_);
+ // Instrumentation breakpoints are handled separately.
+ if (break_point->id() == kInstrumentationId) {
+ return false;
+ }
+
if (!break_point->condition().length()) return true;
Handle<String> condition(break_point->condition(), isolate_);
MaybeHandle<Object> maybe_result;
@@ -805,9 +869,14 @@ int Debug::GetFunctionDebuggingId(Handle<JSFunction> function) {
}
bool Debug::SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
- Handle<String> condition, int* id) {
+ Handle<String> condition, int* id,
+ BreakPointKind kind) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- *id = ++thread_local_.last_breakpoint_id_;
+ if (kind == kInstrumentation) {
+ *id = kInstrumentationId;
+ } else {
+ *id = ++thread_local_.last_breakpoint_id_;
+ }
Handle<BreakPoint> breakpoint =
isolate_->factory()->NewBreakPoint(*id, condition);
int source_position = 0;
@@ -834,14 +903,16 @@ void Debug::RemoveBreakpoint(int id) {
}
#if V8_ENABLE_WEBASSEMBLY
-void Debug::SetOnEntryBreakpointForWasmScript(Handle<Script> script, int* id) {
+void Debug::SetInstrumentationBreakpointForWasmScript(Handle<Script> script,
+ int* id) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
DCHECK_EQ(Script::TYPE_WASM, script->type());
- *id = ++thread_local_.last_breakpoint_id_;
+ *id = kInstrumentationId;
+
Handle<BreakPoint> break_point = isolate_->factory()->NewBreakPoint(
*id, isolate_->factory()->empty_string());
RecordWasmScriptWithBreakpoints(script);
- WasmScript::SetBreakPointOnEntry(script, break_point);
+ WasmScript::SetInstrumentationBreakpoint(script, break_point);
}
void Debug::RemoveBreakpointForWasmScript(Handle<Script> script, int id) {
@@ -942,14 +1013,17 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
- int position) {
+ int position,
+ bool* has_break_points) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
Handle<Object> break_points = debug_info->GetBreakPoints(isolate_, position);
bool is_break_at_entry = debug_info->BreakAtEntry();
DCHECK(!break_points->IsUndefined(isolate_));
if (!break_points->IsFixedArray()) {
- if (!CheckBreakPoint(Handle<BreakPoint>::cast(break_points),
- is_break_at_entry)) {
+ const Handle<BreakPoint> break_point =
+ Handle<BreakPoint>::cast(break_points);
+ *has_break_points = break_point->id() != kInstrumentationId;
+ if (!CheckBreakPoint(break_point, is_break_at_entry)) {
return {};
}
Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
@@ -962,10 +1036,12 @@ MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
Handle<FixedArray> break_points_hit =
isolate_->factory()->NewFixedArray(num_objects);
int break_points_hit_count = 0;
+ *has_break_points = false;
for (int i = 0; i < num_objects; ++i) {
- Handle<Object> break_point(array->get(i), isolate_);
- if (CheckBreakPoint(Handle<BreakPoint>::cast(break_point),
- is_break_at_entry)) {
+ Handle<BreakPoint> break_point =
+ Handle<BreakPoint>::cast(handle(array->get(i), isolate_));
+ *has_break_points |= break_point->id() != kInstrumentationId;
+ if (CheckBreakPoint(break_point, is_break_at_entry)) {
break_points_hit->set(break_points_hit_count++, *break_point);
}
}
@@ -1182,14 +1258,32 @@ void Debug::PrepareStep(StepAction step_action) {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_frame_count_ = -1;
- if (!shared.is_null() && !location.IsReturnOrSuspend() &&
- !IsBlackboxed(shared)) {
- // At not return position we flood return positions with one shots and
- // will repeat StepOut automatically at next break.
- thread_local_.target_frame_count_ = current_frame_count;
- thread_local_.fast_forward_to_return_ = true;
- FloodWithOneShot(shared, true);
- return;
+ if (!shared.is_null()) {
+ if (!location.IsReturnOrSuspend() && !IsBlackboxed(shared)) {
+ // At not return position we flood return positions with one shots and
+ // will repeat StepOut automatically at next break.
+ thread_local_.target_frame_count_ = current_frame_count;
+ thread_local_.fast_forward_to_return_ = true;
+ FloodWithOneShot(shared, true);
+ return;
+ }
+ if (IsAsyncFunction(shared->kind())) {
+ // Stepping out of an async function whose implicit promise is awaited
+ // by some other async function, should resume the latter. The return
+ // value here is either a JSPromise or a JSGeneratorObject (for the
+ // initial yield of async generators).
+ Handle<JSReceiver> return_value(
+ JSReceiver::cast(thread_local_.return_value_), isolate_);
+ Handle<Object> awaited_by = JSReceiver::GetDataProperty(
+ isolate_, return_value,
+ isolate_->factory()->promise_awaited_by_symbol());
+ if (awaited_by->IsJSGeneratorObject()) {
+ DCHECK(!has_suspended_generator());
+ thread_local_.suspended_generator_ = *awaited_by;
+ ClearStepping();
+ return;
+ }
+ }
}
// Skip the current frame, find the first frame we want to step out to
// and deoptimize every frame along the way.
@@ -1490,7 +1584,7 @@ void Debug::InstallDebugBreakTrampoline() {
if (!needs_to_use_trampoline) return;
- Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
+ Handle<CodeT> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
std::vector<Handle<JSFunction>> needs_compile;
{
HeapObjectIterator iterator(isolate_->heap());
@@ -1973,6 +2067,11 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
delete node;
}
+void Debug::ClearGlobalPromiseStack() {
+ while (isolate_->PopPromise()) {
+ }
+}
+
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
HandleScope scope(isolate_);
@@ -2047,7 +2146,8 @@ void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
// Check whether the promise has been marked as having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
if (!promise->IsJSObject() ||
- JSReceiver::GetDataProperty(Handle<JSObject>::cast(promise), key)
+ JSReceiver::GetDataProperty(isolate_, Handle<JSObject>::cast(promise),
+ key)
->IsUndefined(isolate_)) {
OnException(value, promise, v8::debug::kPromiseRejection);
}
@@ -2399,12 +2499,33 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode,
? IsBlackboxed(shared)
: AllFramesOnStackAreBlackboxed();
if (ignore_break) return;
- // Don't stop if the break location is muted.
- bool has_break_points;
- break_points =
- GetHitBreakpointsAtCurrentStatement(it.frame(), &has_break_points);
- bool is_muted = has_break_points && break_points.is_null();
- if (is_muted) return;
+ if (function->shared().HasBreakInfo()) {
+ Handle<DebugInfo> debug_info(function->shared().GetDebugInfo(),
+ isolate_);
+ // Enter the debugger.
+ DebugScope debug_scope(this);
+
+ std::vector<BreakLocation> break_locations;
+ BreakLocation::AllAtCurrentStatement(debug_info, it.frame(),
+ &break_locations);
+
+ for (size_t i = 0; i < break_locations.size(); i++) {
+ if (IsBreakOnInstrumentation(debug_info, break_locations[i])) {
+ OnInstrumentationBreak();
+ break;
+ }
+ }
+
+ bool has_break_points;
+ break_points = CheckBreakPointsForLocations(debug_info, break_locations,
+ &has_break_points);
+ bool is_muted = has_break_points && break_points.is_null();
+ // If we get to this point, a break was triggered because e.g. of a
+ // debugger statement, an assert, .. . However, we do not stop if this
+ // position "is muted", which happens if a conditional breakpoint at
+ // this point evaluates to false.
+ if (is_muted) return;
+ }
}
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index eba382258e..2747dab566 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -218,6 +218,7 @@ class V8_EXPORT_PRIVATE Debug {
// Debug event triggers.
void OnDebugBreak(Handle<FixedArray> break_points_hit, StepAction stepAction,
debug::BreakReasons break_reasons = {});
+ void OnInstrumentationBreak();
base::Optional<Object> OnThrow(Handle<Object> exception)
V8_WARN_UNUSED_RESULT;
@@ -236,6 +237,7 @@ class V8_EXPORT_PRIVATE Debug {
Handle<FixedArray> GetLoadedScripts();
// Break point handling.
+ enum BreakPointKind { kRegular, kInstrumentation };
bool SetBreakpoint(Handle<SharedFunctionInfo> shared,
Handle<BreakPoint> break_point, int* source_position);
void ClearBreakPoint(Handle<BreakPoint> break_point);
@@ -247,10 +249,12 @@ class V8_EXPORT_PRIVATE Debug {
bool SetBreakPointForScript(Handle<Script> script, Handle<String> condition,
int* source_position, int* id);
bool SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
- Handle<String> condition, int* id);
+ Handle<String> condition, int* id,
+ BreakPointKind kind = kRegular);
void RemoveBreakpoint(int id);
#if V8_ENABLE_WEBASSEMBLY
- void SetOnEntryBreakpointForWasmScript(Handle<Script> script, int* id);
+ void SetInstrumentationBreakpointForWasmScript(Handle<Script> script,
+ int* id);
void RemoveBreakpointForWasmScript(Handle<Script> script, int id);
void RecordWasmScriptWithBreakpoints(Handle<Script> script);
@@ -258,9 +262,11 @@ class V8_EXPORT_PRIVATE Debug {
// Find breakpoints from the debug info and the break location and check
// whether they are hit. Return an empty handle if not, or a FixedArray with
- // hit BreakPoint objects.
+ // hit BreakPoint objects. has_break_points is set to true if position has
+ // any non-instrumentation breakpoint.
MaybeHandle<FixedArray> GetHitBreakPoints(Handle<DebugInfo> debug_info,
- int position);
+ int position,
+ bool* has_break_points);
// Stepping handling.
void PrepareStep(StepAction step_action);
@@ -395,6 +401,9 @@ class V8_EXPORT_PRIVATE Debug {
// source position for break points.
static const int kBreakAtEntryPosition = 0;
+ // Use -1 to encode instrumentation breakpoints.
+ static const int kInstrumentationId = -1;
+
void RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info);
static char* Iterate(RootVisitor* v, char* thread_storage);
@@ -448,9 +457,15 @@ class V8_EXPORT_PRIVATE Debug {
bool IsFrameBlackboxed(JavaScriptFrame* frame);
void ActivateStepOut(StackFrame* frame);
+ bool IsBreakOnInstrumentation(Handle<DebugInfo> debug_info,
+ const BreakLocation& location);
MaybeHandle<FixedArray> CheckBreakPoints(Handle<DebugInfo> debug_info,
BreakLocation* location,
- bool* has_break_points = nullptr);
+ bool* has_break_points);
+ MaybeHandle<FixedArray> CheckBreakPointsForLocations(
+ Handle<DebugInfo> debug_info, std::vector<BreakLocation>& break_locations,
+ bool* has_break_points);
+
MaybeHandle<FixedArray> GetHitBreakpointsAtCurrentStatement(
JavaScriptFrame* frame, bool* hasBreakpoints);
@@ -475,6 +490,8 @@ class V8_EXPORT_PRIVATE Debug {
DebugInfoListNode** curr);
void FreeDebugInfoListNode(DebugInfoListNode* prev, DebugInfoListNode* node);
+ void ClearGlobalPromiseStack();
+
void SetTemporaryObjectTrackingDisabled(bool disabled);
bool GetTemporaryObjectTrackingDisabled() const;
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 8c8d4bf2ad..7fb95ef1fe 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -6,8 +6,6 @@
#define V8_DEBUG_INTERFACE_TYPES_H_
#include <cstdint>
-#include <string>
-#include <vector>
#include "include/v8-function-callback.h"
#include "include/v8-local-handle.h"
@@ -46,13 +44,12 @@ class V8_EXPORT_PRIVATE Location {
};
enum DebugAsyncActionType {
+ kDebugAwait,
kDebugPromiseThen,
kDebugPromiseCatch,
kDebugPromiseFinally,
kDebugWillHandle,
- kDebugDidHandle,
- kAsyncFunctionSuspended,
- kAsyncFunctionFinished
+ kDebugDidHandle
};
enum BreakLocationType {
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 4f090e918d..1cef52ac2f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1081,7 +1081,8 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
if (!js_function->is_compiled()) continue;
IsCompiledScope is_compiled_scope(
js_function->shared().is_compiled_scope(isolate));
- JSFunction::EnsureFeedbackVector(js_function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, js_function,
+ &is_compiled_scope);
}
if (!sfi->HasBytecodeArray()) continue;
@@ -1124,7 +1125,8 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
if (!js_function->is_compiled()) continue;
IsCompiledScope is_compiled_scope(
js_function->shared().is_compiled_scope(isolate));
- JSFunction::EnsureFeedbackVector(js_function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, js_function,
+ &is_compiled_scope);
}
}
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
diff --git a/deps/v8/src/debug/wasm/gdb-server/packet.h b/deps/v8/src/debug/wasm/gdb-server/packet.h
index 4308081cad..87b5b6b91a 100644
--- a/deps/v8/src/debug/wasm/gdb-server/packet.h
+++ b/deps/v8/src/debug/wasm/gdb-server/packet.h
@@ -6,7 +6,7 @@
#define V8_DEBUG_WASM_GDB_SERVER_PACKET_H_
#include <string>
-#include <vector>
+
#include "src/base/macros.h"
namespace v8 {
diff --git a/deps/v8/src/debug/wasm/gdb-server/transport.h b/deps/v8/src/debug/wasm/gdb-server/transport.h
index 9d44f81acb..279c8d3d84 100644
--- a/deps/v8/src/debug/wasm/gdb-server/transport.h
+++ b/deps/v8/src/debug/wasm/gdb-server/transport.h
@@ -6,7 +6,7 @@
#define V8_DEBUG_WASM_GDB_SERVER_TRANSPORT_H_
#include <sstream>
-#include <vector>
+
#include "src/base/macros.h"
#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
@@ -31,7 +31,6 @@ typedef int socklen_t;
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>
-#include <string>
typedef int SocketHandle;
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index d6195b333e..5b964e4c91 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -389,21 +389,21 @@ bool WasmModuleDebug::GetWasmValue(const wasm::WasmValue& wasm_value,
uint8_t* buffer, uint32_t buffer_size,
uint32_t* size) {
switch (wasm_value.type().kind()) {
- case wasm::kWasmI32.kind():
+ case wasm::kI32:
return StoreValue(wasm_value.to_i32(), buffer, buffer_size, size);
- case wasm::kWasmI64.kind():
+ case wasm::kI64:
return StoreValue(wasm_value.to_i64(), buffer, buffer_size, size);
- case wasm::kWasmF32.kind():
+ case wasm::kF32:
return StoreValue(wasm_value.to_f32(), buffer, buffer_size, size);
- case wasm::kWasmF64.kind():
+ case wasm::kF64:
return StoreValue(wasm_value.to_f64(), buffer, buffer_size, size);
- case wasm::kWasmS128.kind():
+ case wasm::kS128:
return StoreValue(wasm_value.to_s128(), buffer, buffer_size, size);
-
- case wasm::kWasmVoid.kind():
- case wasm::kWasmExternRef.kind():
- case wasm::kWasmBottom.kind():
- default:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kVoid:
+ case wasm::kBottom:
// Not supported
return false;
}
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 796dd072b3..b8b84a5872 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -431,7 +431,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
function.ResetIfCodeFlushed();
- if (code.is_null()) code = function.code();
+ if (code.is_null()) code = FromCodeT(function.code());
if (CodeKindCanDeoptimize(code.kind())) {
// Mark the code for deoptimization and unlink any functions that also
@@ -441,8 +441,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
// The code in the function's optimized code feedback vector slot might
// be different from the code on the function - evict it if necessary.
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function.raw_feedback_cell(), function.shared(),
- "unlinking code marked for deopt");
+ function.shared(), "unlinking code marked for deopt");
if (!code.deopt_already_counted()) {
code.set_deopt_already_counted(true);
}
@@ -461,13 +460,12 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
deoptimizer->DoComputeOutputFrames();
}
-const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
- DCHECK_IMPLIES(reuse_code, kind == DeoptimizeKind::kSoft);
+const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return "deopt-eager";
case DeoptimizeKind::kSoft:
- return reuse_code ? "bailout-soft" : "deopt-soft";
+ return "deopt-soft";
case DeoptimizeKind::kLazy:
return "deopt-lazy";
case DeoptimizeKind::kBailout:
@@ -526,9 +524,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
compiled_code_.set_deopt_already_counted(true);
{
HandleScope scope(isolate_);
- PROFILE(isolate_,
- CodeDeoptEvent(handle(compiled_code_, isolate_), kind, from_,
- fp_to_sp_delta_, should_reuse_code()));
+ PROFILE(isolate_, CodeDeoptEvent(handle(compiled_code_, isolate_), kind,
+ from_, fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
const int parameter_count =
@@ -594,16 +591,11 @@ Code Deoptimizer::FindOptimizedCode() {
Handle<JSFunction> Deoptimizer::function() const {
return Handle<JSFunction>(function_, isolate());
}
+
Handle<Code> Deoptimizer::compiled_code() const {
return Handle<Code>(compiled_code_, isolate());
}
-bool Deoptimizer::should_reuse_code() const {
- int count = compiled_code_.deoptimization_count();
- return deopt_kind_ == DeoptimizeKind::kSoft &&
- count < FLAG_reuse_opt_code_count;
-}
-
Deoptimizer::~Deoptimizer() {
DCHECK(input_ == nullptr && output_ == nullptr);
DCHECK_NULL(disallow_garbage_collection_);
@@ -728,8 +720,7 @@ void Deoptimizer::TraceDeoptBegin(int optimization_id,
Deoptimizer::DeoptInfo info =
Deoptimizer::GetDeoptInfo(compiled_code_, from_);
PrintF(file, "[bailout (kind: %s, reason: %s): begin. deoptimizing ",
- MessageFor(deopt_kind_, should_reuse_code()),
- DeoptimizeReasonToString(info.deopt_reason));
+ MessageFor(deopt_kind_), DeoptimizeReasonToString(info.deopt_reason));
if (function_.IsJSFunction()) {
function_.ShortPrint(file);
} else {
@@ -1063,7 +1054,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
!goto_catch_handler;
const bool is_baseline = shared.HasBaselineCode();
Code dispatch_builtin =
- builtins->code(DispatchBuiltinFor(is_baseline, advance_bc));
+ FromCodeT(builtins->code(DispatchBuiltinFor(is_baseline, advance_bc)));
if (verbose_tracing_enabled()) {
PrintF(trace_scope()->file(), " translating %s frame ",
@@ -1092,14 +1083,11 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
}
}
- // Note: parameters_count includes the receiver.
- // TODO(v8:11112): Simplify once the receiver is always included in argc.
if (verbose_tracing_enabled() && is_bottommost &&
- actual_argument_count_ - kJSArgcReceiverSlots > parameters_count - 1) {
- PrintF(
- trace_scope_->file(),
- " -- %d extra argument(s) already in the stack --\n",
- actual_argument_count_ - kJSArgcReceiverSlots - parameters_count + 1);
+ actual_argument_count_ > parameters_count) {
+ PrintF(trace_scope_->file(),
+ " -- %d extra argument(s) already in the stack --\n",
+ actual_argument_count_ - parameters_count);
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
@@ -1180,7 +1168,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
(translated_state_.frames()[frame_index - 1]).kind();
argc = previous_frame_kind == TranslatedFrame::kArgumentsAdaptor
? output_[frame_index - 1]->parameter_count()
- : parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
+ : parameters_count;
}
frame_writer.PushRawValue(argc, "actual argument count\n");
@@ -1306,7 +1294,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
- Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
+ CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@@ -1387,7 +1375,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
Builtins* builtins = isolate_->builtins();
- Code construct_stub = builtins->code(Builtin::kJSConstructStubGeneric);
+ Code construct_stub =
+ FromCodeT(builtins->code(Builtin::kJSConstructStubGeneric));
BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
const int parameters_count = translated_frame->height();
@@ -1466,7 +1455,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushTranslatedValue(value_iterator++, "context");
// Number of incoming arguments.
- const uint32_t argc = parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
+ const uint32_t argc = parameters_count;
frame_writer.PushRawObject(Smi::FromInt(argc), "argc\n");
// The constructor function was mentioned explicitly in the
@@ -1541,7 +1530,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
- Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
+ CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@@ -1965,7 +1954,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
// ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
// return value that we have already set.
- Code continue_to_builtin =
+ CodeT continue_to_builtin =
isolate()->builtins()->code(TrampolineForBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot() &&
!is_js_to_wasm_builtin_continuation));
@@ -1981,7 +1970,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
}
- Code continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
+ CodeT continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 173a8a4e02..abfd668dd9 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -49,14 +49,12 @@ class Deoptimizer : public Malloced {
Isolate* isolate, SharedFunctionInfo shared,
BytecodeOffset bytecode_offset);
- static const char* MessageFor(DeoptimizeKind kind, bool reuse_code);
+ static const char* MessageFor(DeoptimizeKind kind);
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
DeoptimizeKind deopt_kind() const { return deopt_kind_; }
- bool should_reuse_code() const;
-
static Deoptimizer* New(Address raw_function, DeoptimizeKind kind,
unsigned deopt_exit_index, Address from,
int fp_to_sp_delta, Isolate* isolate);
diff --git a/deps/v8/src/deoptimizer/frame-description.h b/deps/v8/src/deoptimizer/frame-description.h
index f7e79aec6c..1b004b5089 100644
--- a/deps/v8/src/deoptimizer/frame-description.h
+++ b/deps/v8/src/deoptimizer/frame-description.h
@@ -5,7 +5,7 @@
#ifndef V8_DEOPTIMIZER_FRAME_DESCRIPTION_H_
#define V8_DEOPTIMIZER_FRAME_DESCRIPTION_H_
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
#include "src/utils/boxed-float.h"
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index ab3a2d1275..c066e94c0b 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -516,7 +516,8 @@ Handle<Object> TranslatedValue::GetValue() {
// headers.
// TODO(hpayer): Find a cleaner way to support a group of
// non-fully-initialized objects.
- isolate()->heap()->mark_compact_collector()->EnsureSweepingCompleted();
+ isolate()->heap()->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// 2. Initialize the objects. If we have allocated only byte arrays
// for some objects, we now overwrite the byte arrays with the
diff --git a/deps/v8/src/deoptimizer/translation-array.h b/deps/v8/src/deoptimizer/translation-array.h
index c8b2e485cb..1634a8a6db 100644
--- a/deps/v8/src/deoptimizer/translation-array.h
+++ b/deps/v8/src/deoptimizer/translation-array.h
@@ -5,7 +5,7 @@
#ifndef V8_DEOPTIMIZER_TRANSLATION_ARRAY_H_
#define V8_DEOPTIMIZER_TRANSLATION_ARRAY_H_
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/deoptimizer/translation-opcode.h"
#include "src/objects/fixed-array.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index 81b5893226..55640459d7 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -278,7 +278,7 @@ static void PrintRelocInfo(std::ostringstream& out, Isolate* isolate,
Address addr = relocinfo->target_address();
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, addr, &type)) {
- out << " ;; " << Deoptimizer::MessageFor(type, false)
+ out << " ;; " << Deoptimizer::MessageFor(type)
<< " deoptimization bailout";
} else {
out << " ;; " << RelocInfo::RelocModeName(rmode);
diff --git a/deps/v8/src/diagnostics/eh-frame.h b/deps/v8/src/diagnostics/eh-frame.h
index 0d32085b0b..3cd3318f69 100644
--- a/deps/v8/src/diagnostics/eh-frame.h
+++ b/deps/v8/src/diagnostics/eh-frame.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/memory.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index e32309923d..3e4dc1b477 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -16,6 +16,7 @@
#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
@@ -53,12 +54,14 @@
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
+#include "src/objects/js-shadow-realms-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format-inl.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/js-segments-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-struct-inl.h"
#include "src/objects/js-temporal-objects-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
@@ -69,7 +72,6 @@
#include "src/objects/oddball-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module-inl.h"
@@ -97,7 +99,7 @@ namespace internal {
// every encountered tagged pointer.
// - Verification should be pushed down to the specific instance type if its
// integrity is independent of an outer object.
-// - In cases where the InstanceType is too genernic (e.g. FixedArray) the
+// - In cases where the InstanceType is too generic (e.g. FixedArray) the
// XXXVerify of the outer method has to do recursive verification.
// - If the corresponding objects have inheritence the parent's Verify method
// is called as well.
@@ -202,6 +204,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
case ORDERED_NAME_DICTIONARY_TYPE:
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ case REGISTERED_SYMBOL_TABLE_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -518,15 +522,24 @@ void Map::MapVerify(Isolate* isolate) {
}
}
SLOW_DCHECK(instance_descriptors(isolate).IsSortedNoDuplicates());
- DisallowGarbageCollection no_gc;
+ SLOW_DCHECK(TransitionsAccessor(isolate, *this).IsSortedNoDuplicates());
SLOW_DCHECK(
- TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
- SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
- .IsConsistentWithBackPointers());
+ TransitionsAccessor(isolate, *this).IsConsistentWithBackPointers());
// Only JSFunction maps have has_prototype_slot() bit set and constructible
// JSFunction objects must have prototype slot.
- CHECK_IMPLIES(has_prototype_slot(),
- InstanceTypeChecker::IsJSFunction(instance_type()));
+ CHECK_IMPLIES(has_prototype_slot(), IsJSFunctionMap());
+
+ if (IsJSObjectMap()) {
+ int header_end_offset = JSObject::GetHeaderSize(*this);
+ int inobject_fields_start_offset = GetInObjectPropertyOffset(0);
+ // Ensure that embedder fields are located exactly between header and
+ // inobject properties.
+ CHECK_EQ(header_end_offset, JSObject::GetEmbedderFieldsStartOffset(*this));
+ CHECK_EQ(header_end_offset +
+ JSObject::GetEmbedderFieldCount(*this) * kEmbedderDataSlotSize,
+ inobject_fields_start_offset);
+ }
+
if (!may_have_interesting_symbols()) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
@@ -570,6 +583,16 @@ void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
}
}
+void FixedArray::FixedArrayVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::FixedArrayVerify(*this, isolate);
+ if (*this == ReadOnlyRoots(isolate).empty_fixed_array()) {
+ CHECK_EQ(length(), 0);
+ CHECK_EQ(map(), ReadOnlyRoots(isolate).fixed_array_map());
+ } else if (IsArrayList()) {
+ ArrayList::cast(*this).ArrayListVerify(isolate);
+ }
+}
+
void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::WeakFixedArrayVerify(*this, isolate);
for (int i = 0; i < length(); i++) {
@@ -577,6 +600,17 @@ void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
}
}
+void ArrayList::ArrayListVerify(Isolate* isolate) {
+ // Avoid calling the torque-generated ArrayListVerify to prevent an endlessly
+ // recursion verification.
+ CHECK(IsArrayList());
+ CHECK_LE(ArrayList::kLengthIndex, length());
+ CHECK_LE(0, Length());
+ if (Length() == 0 && length() == ArrayList::kLengthIndex) {
+ CHECK_EQ(*this, ReadOnlyRoots(isolate).empty_array_list());
+ }
+}
+
void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::PropertyArrayVerify(*this, isolate);
if (length() == 0) {
@@ -845,7 +879,7 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
STATIC_ASSERT(JSFunction::TorqueGeneratedClass::kHeaderSize ==
8 * kTaggedSize);
- JSFunctionOrBoundFunctionVerify(isolate);
+ JSFunctionOrBoundFunctionOrWrappedFunctionVerify(isolate);
CHECK(IsJSFunction());
VerifyPointer(isolate, shared(isolate));
CHECK(shared(isolate).IsSharedFunctionInfo());
@@ -853,8 +887,8 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
CHECK(context(isolate, kRelaxedLoad).IsContext());
VerifyPointer(isolate, raw_feedback_cell(isolate));
CHECK(raw_feedback_cell(isolate).IsFeedbackCell());
- VerifyPointer(isolate, raw_code(isolate));
- CHECK(raw_code(isolate).IsCodeT());
+ VerifyPointer(isolate, code(isolate));
+ CHECK(code(isolate).IsCodeT());
CHECK(map(isolate).is_callable());
Handle<JSFunction> function(*this, isolate);
LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
@@ -891,7 +925,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
#if V8_ENABLE_WEBASSEMBLY
bool is_wasm = HasWasmExportedFunctionData() || HasAsmWasmData() ||
- HasWasmJSFunctionData() || HasWasmCapiFunctionData();
+ HasWasmJSFunctionData() || HasWasmCapiFunctionData() ||
+ HasWasmOnFulfilledData();
#else
bool is_wasm = false;
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1013,8 +1048,39 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(next_code_link().IsCodeT() || next_code_link().IsUndefined(isolate));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
if (raw_code() != Smi::zero()) {
- CHECK_EQ(code().InstructionStart(), code_entry_point());
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // kind and builtin_id() getters are not available on CodeDataContainer
+ // when external code space is not enabled.
+ CHECK_EQ(code().kind(), kind());
+ CHECK_EQ(code().builtin_id(), builtin_id());
+#endif // V8_EXTERNAL_CODE_SPACE
CHECK_EQ(code().code_data_container(kAcquireLoad), *this);
+
+ // Ensure the cached code entry point corresponds to the Code object
+ // associated with this CodeDataContainer.
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ if (V8_SHORT_BUILTIN_CALLS_BOOL) {
+ if (code().InstructionStart() == code_entry_point()) {
+ // Most common case, all good.
+ } else {
+ // When shared pointer compression cage is enabled and it has the
+ // embedded code blob copy then the Code::InstructionStart() might
+ // return address of the remapped builtin regardless of whether the
+ // builtins copy exsisted when the code_entry_point value was cached
+ // in the CodeDataContainer (see Code::OffHeapInstructionStart()).
+ // So, do a reverse Code object lookup via code_entry_point value to
+ // ensure it corresponds to the same Code object associated with this
+ // CodeDataContainer.
+ Code the_code = isolate->heap()->GcSafeFindCodeForInnerPointer(
+ code_entry_point());
+ CHECK_EQ(the_code, code());
+ }
+ } else {
+ CHECK_EQ(code().InstructionStart(), code_entry_point());
+ }
+#else
+ CHECK_EQ(code().InstructionStart(), code_entry_point());
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
}
}
}
@@ -1127,6 +1193,36 @@ void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(index().IsSmi());
}
+USE_TORQUE_VERIFIER(JSShadowRealm)
+USE_TORQUE_VERIFIER(JSWrappedFunction)
+
+void JSSharedStruct::JSSharedStructVerify(Isolate* isolate) {
+ CHECK(IsJSSharedStruct());
+ JSObjectVerify(isolate);
+ CHECK(HasFastProperties());
+ // Shared structs can only point to primitives or other shared HeapObjects,
+ // even internally.
+ // TODO(v8:12547): Generalize shared -> shared pointer verification.
+ Map struct_map = map();
+ CHECK(struct_map.InSharedHeap());
+ CHECK(struct_map.GetBackPointer().IsUndefined(isolate));
+ Object maybe_cell = struct_map.prototype_validity_cell();
+ if (maybe_cell.IsCell()) CHECK(maybe_cell.InSharedHeap());
+ CHECK(!struct_map.is_extensible());
+ CHECK(!struct_map.is_prototype_map());
+ CHECK(property_array().InSharedHeap());
+ DescriptorArray descriptors = struct_map.instance_descriptors(isolate);
+ CHECK(descriptors.InSharedHeap());
+ for (InternalIndex i : struct_map.IterateOwnDescriptors()) {
+ PropertyDetails details = descriptors.GetDetails(i);
+ CHECK_EQ(PropertyKind::kData, details.kind());
+ CHECK_EQ(PropertyLocation::kField, details.location());
+ CHECK(details.representation().IsTagged());
+ CHECK(
+ RawFastPropertyAt(FieldIndex::ForDescriptor(struct_map, i)).IsShared());
+ }
+}
+
void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(IsWeakCell());
@@ -1778,8 +1874,8 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) {
}
}
-void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::StackFrameInfoVerify(*this, isolate);
+void CallSiteInfo::CallSiteInfoVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::CallSiteInfoVerify(*this, isolate);
#if V8_ENABLE_WEBASSEMBLY
CHECK_IMPLIES(IsAsmJsWasm(), IsWasm());
CHECK_IMPLIES(IsWasm(), receiver_or_instance().IsWasmInstanceObject());
@@ -1796,6 +1892,16 @@ void FunctionTemplateRareData::FunctionTemplateRareDataVerify(
c_function_overloads().IsUndefined(isolate));
}
+void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::StackFrameInfoVerify(*this, isolate);
+}
+
+void ErrorStackData::ErrorStackDataVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::ErrorStackDataVerify(*this, isolate);
+ CHECK_IMPLIES(!call_site_infos_or_formatted_stack().IsFixedArray(),
+ limit_or_stack_frame_infos().IsFixedArray());
+}
+
// Helper class for verifying the string table.
class StringTableVerifier : public RootVisitor {
public:
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index cd44c9ff31..d89f9b4723 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -138,6 +138,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case HASH_TABLE_TYPE:
ObjectHashTable::cast(*this).ObjectHashTablePrint(os);
break;
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ NameToIndexHashTable::cast(*this).NameToIndexHashTablePrint(os);
+ break;
+ case REGISTERED_SYMBOL_TABLE_TYPE:
+ RegisteredSymbolTable::cast(*this).RegisteredSymbolTablePrint(os);
+ break;
case ORDERED_HASH_MAP_TYPE:
OrderedHashMap::cast(*this).OrderedHashMapPrint(os);
break;
@@ -600,7 +606,7 @@ static void JSObjectPrintBody(std::ostream& os, JSObject obj,
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
- Isolate* isolate = GetIsolateForHeapSandbox(obj);
+ Isolate* isolate = GetIsolateForSandbox(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
@@ -788,7 +794,7 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionPrint(
}
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Isolate* isolate = GetIsolateForSandbox(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
@@ -961,6 +967,16 @@ void ObjectHashTable::ObjectHashTablePrint(std::ostream& os) {
PrintHashMapContentsFull(os, *this);
}
+void NameToIndexHashTable::NameToIndexHashTablePrint(std::ostream& os) {
+ PrintHashTableHeader(os, *this, "NameToIndexHashTable");
+ PrintHashMapContentsFull(os, *this);
+}
+
+void RegisteredSymbolTable::RegisteredSymbolTablePrint(std::ostream& os) {
+ PrintHashTableHeader(os, *this, "RegisteredSymbolTable");
+ PrintHashMapContentsFull(os, *this);
+}
+
void NumberDictionary::NumberDictionaryPrint(std::ostream& os) {
PrintHashTableHeader(os, *this, "NumberDictionary");
PrintDictionaryContentsFull(os, *this);
@@ -1189,7 +1205,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {
os << "\n - no optimized code";
}
os << "\n - optimization marker: " << optimization_marker();
- os << "\n - optimization tier: " << optimization_tier();
+ os << "\n - maybe has optimized code: " << maybe_has_optimized_code();
os << "\n - invocation count: " << invocation_count();
os << "\n - profiler ticks: " << profiler_ticks();
os << "\n - closure feedback cell array: ";
@@ -1224,22 +1240,22 @@ void FeedbackNexus::Print(std::ostream& os) {
switch (kind()) {
case FeedbackSlotKind::kCall:
case FeedbackSlotKind::kCloneObject:
- case FeedbackSlotKind::kDefineOwnKeyed:
+ case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kLoadProperty:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed: {
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kDefineNamedOwn: {
os << InlineCacheState2String(ic_state());
break;
}
@@ -1391,6 +1407,18 @@ void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
+void JSShadowRealm::JSShadowRealmPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSShadowRealm");
+ os << "\n - native_context: " << Brief(native_context());
+ JSObjectPrintBody(os, *this);
+}
+
+void JSWrappedFunction::JSWrappedFunctionPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSWrappedFunction");
+ os << "\n - wrapped_target_function: " << Brief(wrapped_target_function());
+ JSObjectPrintBody(os, *this);
+}
+
void JSFinalizationRegistry::JSFinalizationRegistryPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSFinalizationRegistry");
os << "\n - native_context: " << Brief(native_context());
@@ -1411,6 +1439,14 @@ void JSFinalizationRegistry::JSFinalizationRegistryPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
+void JSSharedStruct::JSSharedStructPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSSharedStruct");
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
+ os << "\n - isolate: " << isolate;
+ if (isolate->is_shared()) os << " (shared)";
+ JSObjectPrintBody(os, *this);
+}
+
void JSWeakMap::JSWeakMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
@@ -1516,7 +1552,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {
<< shared().internal_formal_parameter_count_without_receiver();
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
- os << "\n - code: " << Brief(raw_code());
+ os << "\n - code: " << Brief(code());
if (code().kind() == CodeKind::FOR_TESTING) {
os << "\n - FOR_TESTING";
} else if (ActiveTierIsIgnition()) {
@@ -1580,6 +1616,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
if (has_static_private_methods_or_accessors()) {
os << "\n - has_static_private_methods_or_accessors";
}
+ if (private_name_lookup_skips_outer_class()) {
+ os << "\n - private_name_lookup_skips_outer_class";
+ }
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
os << "\n - function_map_index: " << function_map_index();
@@ -1796,10 +1835,16 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
break;
case wasm::kRef:
case wasm::kOptRef:
- case wasm::kRtt:
- case wasm::kRttWithDepth:
- os << Brief(base::ReadUnalignedValue<Object>(field_address));
+ case wasm::kRtt: {
+ Tagged_t raw = base::ReadUnalignedValue<Tagged_t>(field_address);
+#if V8_COMPRESS_POINTERS
+ Address obj = DecompressTaggedPointer(address(), raw);
+#else
+ Address obj = raw;
+#endif
+ os << Brief(Object(obj));
break;
+ }
case wasm::kS128:
os << "UNIMPLEMENTED"; // TODO(7748): Implement.
break;
@@ -1836,17 +1881,23 @@ void WasmArray::WasmArrayPrint(std::ostream& os) {
true);
break;
case wasm::kI8:
+ PrintTypedArrayElements(os, reinterpret_cast<int8_t*>(data_ptr), len,
+ true);
+ break;
case wasm::kI16:
+ PrintTypedArrayElements(os, reinterpret_cast<int16_t*>(data_ptr), len,
+ true);
+ break;
case wasm::kS128:
case wasm::kRef:
case wasm::kOptRef:
case wasm::kRtt:
- case wasm::kRttWithDepth:
- case wasm::kBottom:
- case wasm::kVoid:
os << "\n Printing elements of this type is unimplemented, sorry";
// TODO(7748): Implement.
break;
+ case wasm::kBottom:
+ case wasm::kVoid:
+ UNREACHABLE();
}
os << "\n";
}
@@ -1862,6 +1913,8 @@ void WasmContinuationObject::WasmContinuationObjectPrint(std::ostream& os) {
void WasmSuspenderObject::WasmSuspenderObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmSuspenderObject");
os << "\n - continuation: " << continuation();
+ os << "\n - parent: " << parent();
+ os << "\n - state: " << state();
os << "\n";
}
@@ -1929,6 +1982,7 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(std::ostream& os) {
os << "\n - function_index: " << function_index();
os << "\n - signature: " << Brief(signature());
os << "\n - wrapper_budget: " << wrapper_budget();
+ os << "\n - suspender: " << suspender();
os << "\n";
}
@@ -1941,11 +1995,18 @@ void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) {
os << "\n";
}
+void WasmOnFulfilledData::WasmOnFulfilledDataPrint(std::ostream& os) {
+ PrintHeader(os, "WasmOnFulfilledData");
+ os << "\n - suspender: " << Brief(suspender());
+ os << '\n';
+}
+
void WasmApiFunctionRef::WasmApiFunctionRefPrint(std::ostream& os) {
PrintHeader(os, "WasmApiFunctionRef");
os << "\n - isolate_root: " << reinterpret_cast<void*>(isolate_root());
os << "\n - native_context: " << Brief(native_context());
os << "\n - callable: " << Brief(callable());
+ os << "\n - suspender: " << Brief(suspender());
os << "\n";
}
@@ -2317,13 +2378,12 @@ void JSSegments::JSSegmentsPrint(std::ostream& os) {
namespace {
void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
const char* list_name, int length) {
+ DisallowGarbageCollection no_gc;
if (length <= 0) return;
os << "\n - " << list_name;
os << " {\n";
- for (int i = 0; i < length; ++i) {
- os << " - " << i << ": ";
- scope_info.context_local_names(i).ShortPrint(os);
- os << "\n";
+ for (auto it : ScopeInfo::IterateLocalNames(&scope_info, no_gc)) {
+ os << " - " << it->index() << ": " << it->name() << "\n";
}
os << " }";
}
@@ -2339,6 +2399,12 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
os << "\n - parameters: " << ParameterCount();
os << "\n - context locals : " << ContextLocalCount();
+ if (HasInlinedLocalNames()) {
+ os << "\n - inlined local names";
+ } else {
+ os << "\n - local names in a hashtable: "
+ << Brief(context_local_names_hashtable());
+ }
os << "\n - scope type: " << scope_type();
if (SloppyEvalCanExtendVars()) os << "\n - sloppy eval";
@@ -2347,8 +2413,8 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
if (HasReceiver()) {
os << "\n - receiver: " << ReceiverVariableBits::decode(flags);
}
- if (HasClassBrand()) os << "\n - has class brand";
- if (HasSavedClassVariableIndex()) os << "\n - has saved class variable index";
+ if (ClassScopeHasPrivateBrand()) os << "\n - class scope has private brand";
+ if (HasSavedClassVariable()) os << "\n - has saved class variable";
if (HasNewTarget()) os << "\n - needs new target";
if (HasFunctionName()) {
os << "\n - function name(" << FunctionVariableBits::decode(flags) << "): ";
@@ -2356,6 +2422,8 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
}
if (IsAsmModule()) os << "\n - asm module";
if (HasSimpleParameters()) os << "\n - simple parameters";
+ if (PrivateNameLookupSkipsOuterClass())
+ os << "\n - private name lookup skips outer class";
os << "\n - function kind: " << function_kind();
if (HasOuterScopeInfo()) {
os << "\n - outer scope info: " << Brief(OuterScopeInfo());
@@ -2546,8 +2614,7 @@ void Map::MapPrint(std::ostream& os) {
// the isolate to iterate over the transitions.
if (!IsReadOnlyHeapObject(*this)) {
Isolate* isolate = GetIsolateFromWritableObject(*this);
- DisallowGarbageCollection no_gc;
- TransitionsAccessor transitions(isolate, *this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this);
int nof_transitions = transitions.NumberOfTransitions();
if (nof_transitions > 0) {
os << "\n - transitions #" << nof_transitions << ": ";
@@ -2734,14 +2801,13 @@ void TransitionsAccessor::PrintTransitionTree(
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
- TransitionsAccessor transitions(isolate_, target, no_gc);
+ TransitionsAccessor transitions(isolate_, target);
transitions.PrintTransitionTree(os, level + 1, no_gc);
}
}
void JSObject::PrintTransitions(std::ostream& os) {
- DisallowGarbageCollection no_gc;
- TransitionsAccessor ta(GetIsolate(), map(), &no_gc);
+ TransitionsAccessor ta(GetIsolate(), map());
if (ta.NumberOfTransitions() == 0) return;
os << "\n - transitions";
ta.PrintTransitions(os);
@@ -2845,9 +2911,8 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
printf("Please provide a valid Map\n");
} else {
#if defined(DEBUG) || defined(OBJECT_PRINT)
- i::DisallowGarbageCollection no_gc;
i::Map map = i::Map::unchecked_cast(o);
- i::TransitionsAccessor transitions(i::Isolate::Current(), map, &no_gc);
+ i::TransitionsAccessor transitions(i::Isolate::Current(), map);
transitions.PrintTransitionTree();
#endif
}
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 35b47e2b1c..4bd99cbaca 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -116,10 +116,12 @@ const char PerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
// Extra padding for the PID in the filename
const int PerfJitLogger::kFilenameBufferPadding = 16;
-static const char kStringTerminator[] = "\0";
+static const char kStringTerminator[] = {'\0'};
+static const char kRepeatedNameMarker[] = {'\xff', '\0'};
base::LazyRecursiveMutex PerfJitLogger::file_mutex_;
// The following static variables are protected by PerfJitLogger::file_mutex_.
+int PerfJitLogger::process_id_ = 0;
uint64_t PerfJitLogger::reference_count_ = 0;
void* PerfJitLogger::marker_address_ = nullptr;
uint64_t PerfJitLogger::code_index_ = 0;
@@ -131,8 +133,7 @@ void PerfJitLogger::OpenJitDumpFile() {
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
base::ScopedVector<char> perf_dump_name(bufferSize);
- int size = SNPrintF(perf_dump_name, kFilenameFormatString,
- base::OS::GetCurrentProcessId());
+ int size = SNPrintF(perf_dump_name, kFilenameFormatString, process_id_);
CHECK_NE(size, -1);
int fd = open(perf_dump_name.begin(), O_CREAT | O_TRUNC | O_RDWR, 0666);
@@ -179,6 +180,7 @@ void PerfJitLogger::CloseMarkerFile(void* marker_address) {
PerfJitLogger::PerfJitLogger(Isolate* isolate) : CodeEventLogger(isolate) {
base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ process_id_ = base::OS::GetCurrentProcessId();
reference_count_++;
// If this is the first logger, open the file and write the header.
@@ -215,7 +217,6 @@ void PerfJitLogger::LogRecordedBuffer(
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
abstract_code->kind() != CodeKind::TURBOFAN &&
- abstract_code->kind() != CodeKind::TURBOPROP &&
abstract_code->kind() != CodeKind::BASELINE)) {
return;
}
@@ -256,9 +257,7 @@ void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
if (perf_output_handle_ == nullptr) return;
- if (FLAG_perf_prof_annotate_wasm) {
- LogWriteDebugInfo(code);
- }
+ if (FLAG_perf_prof_annotate_wasm) LogWriteDebugInfo(code);
WriteJitCodeLoadEntry(code->instructions().begin(),
code->instructions().length(), name, length);
@@ -272,8 +271,7 @@ void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
code_load.event_ = PerfJitCodeLoad::kLoad;
code_load.size_ = sizeof(code_load) + name_length + 1 + code_size;
code_load.time_stamp_ = GetTimestamp();
- code_load.process_id_ =
- static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+ code_load.process_id_ = static_cast<uint32_t>(process_id_);
code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
code_load.vma_ = reinterpret_cast<uint64_t>(code_pointer);
code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
@@ -284,7 +282,7 @@ void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
LogWriteBytes(name, name_length);
- LogWriteBytes(kStringTerminator, 1);
+ LogWriteBytes(kStringTerminator, sizeof(kStringTerminator));
LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
}
@@ -294,25 +292,12 @@ constexpr char kUnknownScriptNameString[] = "<unknown>";
constexpr size_t kUnknownScriptNameStringLen =
arraysize(kUnknownScriptNameString) - 1;
-size_t GetScriptNameLength(const SourcePositionInfo& info) {
- if (!info.script.is_null()) {
- Object name_or_url = info.script->GetNameOrSourceURL();
- if (name_or_url.IsString()) {
- String str = String::cast(name_or_url);
- if (str.IsOneByteRepresentation()) return str.length();
- int length;
- str.ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
- return static_cast<size_t>(length);
- }
- }
- return kUnknownScriptNameStringLen;
-}
-
-base::Vector<const char> GetScriptName(const SourcePositionInfo& info,
+namespace {
+base::Vector<const char> GetScriptName(Object maybeScript,
std::unique_ptr<char[]>* storage,
const DisallowGarbageCollection& no_gc) {
- if (!info.script.is_null()) {
- Object name_or_url = info.script->GetNameOrSourceURL();
+ if (maybeScript.IsScript()) {
+ Object name_or_url = Script::cast(maybeScript).GetNameOrSourceURL();
if (name_or_url.IsSeqOneByteString()) {
SeqOneByteString str = SeqOneByteString::cast(name_or_url);
return {reinterpret_cast<char*>(str.GetChars(no_gc)),
@@ -327,12 +312,14 @@ base::Vector<const char> GetScriptName(const SourcePositionInfo& info,
return {kUnknownScriptNameString, kUnknownScriptNameStringLen};
}
+} // namespace
+
SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
Handle<SharedFunctionInfo> function,
SourcePosition pos) {
+ DisallowGarbageCollection disallow;
if (code->is_turbofanned()) {
- DisallowGarbageCollection disallow;
- return pos.InliningStack(code)[0];
+ return pos.FirstInfo(code);
} else {
return SourcePositionInfo(pos, function);
}
@@ -342,38 +329,47 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
Handle<SharedFunctionInfo> shared) {
+ // Line ends of all scripts have been initialized prior to this.
DisallowGarbageCollection no_gc;
- // TODO(v8:11429,cbruni): add proper baseline source position iterator
+ // The WasmToJS wrapper stubs have source position entries.
+ if (!shared->HasSourceCode()) return;
+
+ PerfJitCodeDebugInfo debug_info;
+ uint32_t size = sizeof(debug_info);
+
ByteArray source_position_table = code->SourcePositionTable(*shared);
- // Compute the entry count and get the name of the script.
+ // Compute the entry count and get the names of all scripts.
+ // Avoid additional work if the script name is repeated. Multiple script
+ // names only occur for cross-script inlining.
uint32_t entry_count = 0;
+ Object last_script = Smi::zero();
+ std::vector<base::Vector<const char>> script_names;
for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
+ SourcePositionInfo info(
+ GetSourcePositionInfo(code, shared, iterator.source_position()));
+ Object current_script = *info.script;
+ if (current_script != last_script) {
+ std::unique_ptr<char[]> name_storage;
+ auto name = GetScriptName(shared->script(), &name_storage, no_gc);
+ script_names.push_back(name);
+ // Add the size of the name after each entry.
+ size += name.size() + sizeof(kStringTerminator);
+ last_script = current_script;
+ } else {
+ size += sizeof(kRepeatedNameMarker);
+ }
entry_count++;
}
if (entry_count == 0) return;
- // The WasmToJS wrapper stubs have source position entries.
- if (!shared->HasSourceCode()) return;
- Handle<Script> script(Script::cast(shared->script()), isolate_);
-
- PerfJitCodeDebugInfo debug_info;
debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
debug_info.time_stamp_ = GetTimestamp();
debug_info.address_ = code->InstructionStart();
debug_info.entry_count_ = entry_count;
- uint32_t size = sizeof(debug_info);
// Add the sizes of fixed parts of entries.
size += entry_count * sizeof(PerfJitDebugEntry);
- // Add the size of the name after each entry.
-
- for (SourcePositionTableIterator iterator(source_position_table);
- !iterator.done(); iterator.Advance()) {
- SourcePositionInfo info(
- GetSourcePositionInfo(code, shared, iterator.source_position()));
- size += GetScriptNameLength(info) + 1;
- }
int padding = ((size + 7) & (~7)) - size;
debug_info.size_ = size + padding;
@@ -381,6 +377,8 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
Address code_start = code->InstructionStart();
+ last_script = Smi::zero();
+ int script_names_index = 0;
for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(
@@ -393,12 +391,18 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
entry.line_number_ = info.line + 1;
entry.column_ = info.column + 1;
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
- std::unique_ptr<char[]> name_storage;
- base::Vector<const char> name_string =
- GetScriptName(info, &name_storage, no_gc);
- LogWriteBytes(name_string.begin(),
- static_cast<uint32_t>(name_string.size()));
- LogWriteBytes(kStringTerminator, 1);
+ Object current_script = *info.script;
+ if (current_script != last_script) {
+ auto name_string = script_names[script_names_index];
+ LogWriteBytes(name_string.begin(),
+ static_cast<uint32_t>(name_string.size()));
+ LogWriteBytes(kStringTerminator, sizeof(kStringTerminator));
+ script_names_index++;
+ last_script = current_script;
+ } else {
+ // Use the much shorter kRepeatedNameMarker for repeated names.
+ LogWriteBytes(kRepeatedNameMarker, sizeof(kRepeatedNameMarker));
+ }
}
char padding_bytes[8] = {0};
LogWriteBytes(padding_bytes, padding);
@@ -465,7 +469,7 @@ void PerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
std::string name_string = source_map->GetFilename(offset);
LogWriteBytes(name_string.c_str(), static_cast<int>(name_string.size()));
- LogWriteBytes(kStringTerminator, 1);
+ LogWriteBytes(kStringTerminator, sizeof(kStringTerminator));
}
char padding_bytes[8] = {0};
@@ -529,7 +533,7 @@ void PerfJitLogger::LogWriteHeader() {
header.size_ = sizeof(header);
header.elf_mach_target_ = GetElfMach();
header.reserved_ = 0xDEADBEEF;
- header.process_id_ = base::OS::GetCurrentProcessId();
+ header.process_id_ = process_id_;
header.time_stamp_ =
static_cast<uint64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis() *
base::Time::kMicrosecondsPerMillisecond);
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 47a6002b08..488b5a1461 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -135,6 +135,7 @@ class PerfJitLogger : public CodeEventLogger {
static uint64_t reference_count_;
static void* marker_address_;
static uint64_t code_index_;
+ static int process_id_;
};
} // namespace internal
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index 02d3bbd9cd..4d9a0d1748 100644
--- a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -2068,6 +2068,9 @@ void Decoder::DecodeRvvIVI(Instruction* instr) {
case RO_V_VSLIDEDOWN_VI:
Format(instr, "vslidedown.vi 'vd, 'vs2, 'uimm5'vm");
break;
+ case RO_V_VSLIDEUP_VI:
+ Format(instr, "vslideup.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
case RO_V_VSRL_VI:
Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
break;
@@ -2234,6 +2237,10 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
case RO_V_VWXUNARY0:
if (instr->Vs1Value() == 0x0) {
Format(instr, "vmv.x.s 'rd, 'vs2");
+ } else if (instr->Vs1Value() == 0b10001) {
+ Format(instr, "vfirst.m 'rd, 'vs2");
+ } else if (instr->Vs1Value() == 0b10000) {
+ Format(instr, "vcpop.m 'rd, 'vs2");
} else {
UNSUPPORTED_RISCV();
}
@@ -2397,6 +2404,12 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case VFSQRT_V:
Format(instr, "vfsqrt.v 'vd, 'vs2'vm");
break;
+ case VFRSQRT7_V:
+ Format(instr, "vfrsqrt7.v 'vd, 'vs2'vm");
+ break;
+ case VFREC7_V:
+ Format(instr, "vfrec7.v 'vd, 'vs2'vm");
+ break;
default:
break;
}
@@ -2482,6 +2495,39 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
+ case RO_V_VFWADD_VV:
+ Format(instr, "vfwadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWSUB_VV:
+ Format(instr, "vfwsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWADD_W_VV:
+ Format(instr, "vfwadd.wv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWSUB_W_VV:
+ Format(instr, "vfwsub.wv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWREDUSUM_VV:
+ Format(instr, "vfwredusum.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWREDOSUM_VV:
+ Format(instr, "vfwredosum.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWMUL_VV:
+ Format(instr, "vfwmul.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFWMACC_VV:
+ Format(instr, "vfwmacc.vv 'vd, 'vs1, 'vs2'vm");
+ break;
+ case RO_V_VFWNMACC_VV:
+ Format(instr, "vfwnmacc.vv 'vd, 'vs1, 'vs2'vm");
+ break;
+ case RO_V_VFWMSAC_VV:
+ Format(instr, "vfwmsac.vv 'vd, 'vs1, 'vs2'vm");
+ break;
+ case RO_V_VFWNMSAC_VV:
+ Format(instr, "vfwnmsac.vv 'vd, 'vs1, 'vs2'vm");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2527,6 +2573,33 @@ void Decoder::DecodeRvvFVF(Instruction* instr) {
case RO_V_VFNMSAC_VF:
Format(instr, "vfnmsac.vf 'vd, 'fs1, 'vs2'vm");
break;
+ case RO_V_VFWADD_VF:
+ Format(instr, "vfwadd.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFWSUB_VF:
+ Format(instr, "vfwsub.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFWADD_W_VF:
+ Format(instr, "vfwadd.wf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFWSUB_W_VF:
+ Format(instr, "vfwsub.wf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFWMUL_VF:
+ Format(instr, "vfwmul.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFWMACC_VF:
+ Format(instr, "vfwmacc.vf 'vd, 'fs1, 'vs2'vm");
+ break;
+ case RO_V_VFWNMACC_VF:
+ Format(instr, "vfwnmacc.vf 'vd, 'fs1, 'vs2'vm");
+ break;
+ case RO_V_VFWMSAC_VF:
+ Format(instr, "vfwmsac.vf 'vd, 'fs1, 'vs2'vm");
+ break;
+ case RO_V_VFWNMSAC_VF:
+ Format(instr, "vfwnmsac.vf 'vd, 'fs1, 'vs2'vm");
+ break;
default:
UNSUPPORTED_RISCV();
break;
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 40c435f0f3..12695fa39c 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -2001,6 +2001,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 2;
} else if (opcode == 0xE6) {
current += PrintOperands("cvtdq2pd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0xAE) {
+ // incssp[d|q]
+ AppendToBuffer("incssp%c ", operand_size_code());
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index 921f4f742a..6adcb7afc8 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,7 +1,7 @@
+bmeurer@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-delphick@chromium.org
verwaest@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/src/execution/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index 2f69cd7adc..96e96fd7fa 100644
--- a/deps/v8/src/execution/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -24,18 +24,27 @@ Arguments<T>::ChangeValueScope::ChangeValueScope(Isolate* isolate,
}
template <ArgumentsType T>
-int Arguments<T>::smi_at(int index) const {
- return Smi::ToInt(Object(*address_of_arg_at(index)));
+int Arguments<T>::smi_value_at(int index) const {
+ Object obj = (*this)[index];
+ int value = Smi::ToInt(obj);
+ DCHECK_IMPLIES(obj.IsTaggedIndex(), value == tagged_index_value_at(index));
+ return value;
}
template <ArgumentsType T>
-int Arguments<T>::tagged_index_at(int index) const {
- Address raw = *address_of_arg_at(index);
- return static_cast<int>(TaggedIndex(raw).value());
+uint32_t Arguments<T>::positive_smi_value_at(int index) const {
+ int value = smi_value_at(index);
+ DCHECK_LE(0, value);
+ return value;
}
template <ArgumentsType T>
-double Arguments<T>::number_at(int index) const {
+int Arguments<T>::tagged_index_value_at(int index) const {
+ return static_cast<int>(TaggedIndex::cast((*this)[index]).value());
+}
+
+template <ArgumentsType T>
+double Arguments<T>::number_value_at(int index) const {
return (*this)[index].Number();
}
diff --git a/deps/v8/src/execution/arguments.cc b/deps/v8/src/execution/arguments.cc
index 3268d6d151..36a69acd06 100644
--- a/deps/v8/src/execution/arguments.cc
+++ b/deps/v8/src/execution/arguments.cc
@@ -3,16 +3,3 @@
// found in the LICENSE file.
#include "src/execution/arguments.h"
-
-namespace v8 {
-namespace internal {
-
-double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
- // TODO(v8:11798): This clobbers only subset of registers depending on
- // compiler, Rewrite this in assembly to really clobber all registers. GCC for
- // ia32 uses the FPU and does not touch XMM registers.
- return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index e1cd8d8c5f..0cbdb18b24 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_ARGUMENTS_H_
#define V8_EXECUTION_ARGUMENTS_H_
+#include "src/execution/clobber-registers.h"
#include "src/handles/handles.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects.h"
@@ -50,24 +51,25 @@ class Arguments {
DCHECK_GE(length_, 0);
}
- Object operator[](int index) const {
+ V8_INLINE Object operator[](int index) const {
return Object(*address_of_arg_at(index));
}
template <class S = Object>
- inline Handle<S> at(int index) const;
+ V8_INLINE Handle<S> at(int index) const;
- inline int smi_at(int index) const;
+ V8_INLINE int smi_value_at(int index) const;
+ V8_INLINE uint32_t positive_smi_value_at(int index) const;
- inline int tagged_index_at(int index) const;
+ V8_INLINE int tagged_index_value_at(int index) const;
- inline double number_at(int index) const;
+ V8_INLINE double number_value_at(int index) const;
- inline FullObjectSlot slot_at(int index) const {
+ V8_INLINE FullObjectSlot slot_at(int index) const {
return FullObjectSlot(address_of_arg_at(index));
}
- inline Address* address_of_arg_at(int index) const {
+ V8_INLINE Address* address_of_arg_at(int index) const {
DCHECK_LE(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
uintptr_t offset = index * kSystemPointerSize;
if (arguments_type == ArgumentsType::kJS) {
@@ -78,7 +80,7 @@ class Arguments {
}
// Get the total number of arguments including the receiver.
- int length() const { return static_cast<int>(length_); }
+ V8_INLINE int length() const { return static_cast<int>(length_); }
// Arguments on the stack are in reverse order (compared to an array).
FullObjectSlot first_slot() const {
@@ -105,8 +107,6 @@ Handle<S> Arguments<T>::at(int index) const {
return Handle<S>::cast(obj);
}
-double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
-
#ifdef DEBUG
#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
#else
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index 2e3b1ed665..46bff6230d 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/arm/register-arm.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -77,17 +77,15 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
- static constexpr RegList kPushedGpRegs =
- Register::ListOf(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9);
+ static constexpr RegList kPushedGpRegs = {r0, r1, r2, r3, r4,
+ r5, r6, r7, r8, r9};
// d13: zero, d14-d15: scratch
- static constexpr RegList kPushedFpRegs = LowDwVfpRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+ static constexpr DoubleRegList kPushedFpRegs = {d0, d1, d2, d3, d4, d5, d6,
+ d7, d8, d9, d10, d11, d12};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-TypedFrameConstants::kFixedFrameSizeFromFp -
@@ -97,15 +95,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index 8810586360..80bcda9de2 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -7,7 +7,6 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
@@ -98,23 +97,21 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// x16: ip0, x17: ip1, x18: platform register, x26: root, x28: base, x29: fp,
// x30: lr, x31: xzr.
- static constexpr RegList kPushedGpRegs = CPURegister::ListOf(
- x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x19,
- x20, x21, x22, x23, x24, x25, x27);
+ static constexpr RegList kPushedGpRegs = {
+ x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x19, x20, x21, x22, x23, x24, x25, x27};
// We push FpRegs as 128-bit SIMD registers, so 16-byte frame alignment
// is guaranteed regardless of register count.
- static constexpr RegList kPushedFpRegs = CPURegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d16, d17,
- d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29);
+ static constexpr DoubleRegList kPushedFpRegs = {
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14,
+ d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
static_assert(kNumPushedGpRegisters % 2 == 0,
"stack frames need to be 16-byte aligned");
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
// Header is padded to 16 byte (see {MacroAssembler::EnterFrame}).
@@ -125,15 +122,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSimd128Size;
}
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index d48789969e..6299cb2141 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -187,7 +187,7 @@ int PopLowestIndexAsCode(CPURegList* list) {
if (list->IsEmpty()) {
return -1;
}
- RegList reg_list = list->list();
+ uint64_t reg_list = list->bits();
int index = base::bits::CountTrailingZeros(reg_list);
DCHECK((1LL << index) & reg_list);
list->Remove(index);
diff --git a/deps/v8/src/execution/clobber-registers.cc b/deps/v8/src/execution/clobber-registers.cc
new file mode 100644
index 0000000000..0aea68dbe4
--- /dev/null
+++ b/deps/v8/src/execution/clobber-registers.cc
@@ -0,0 +1,63 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/execution/clobber-registers.h"
+
+#include "src/base/build_config.h"
+
+#if V8_HOST_ARCH_ARM
+#include "src/codegen/arm/register-arm.h"
+#elif V8_HOST_ARCH_ARM64
+#include "src/codegen/arm64/register-arm64.h"
+#elif V8_HOST_ARCH_IA32
+#include "src/codegen/ia32/register-ia32.h"
+#elif V8_HOST_ARCH_X64
+#include "src/codegen/x64/register-x64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+#if V8_CC_MSVC
+// msvc only support inline assembly on x86
+#if V8_HOST_ARCH_IA32
+#define CLOBBER_REGISTER(R) __asm xorps R, R
+
+#endif
+
+#else // !V8_CC_MSVC
+
+#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32
+#define CLOBBER_REGISTER(R) \
+ __asm__ volatile( \
+ "xorps " \
+ "%%" #R \
+ "," \
+ "%%" #R :: \
+ :);
+
+#elif V8_HOST_ARCH_ARM64
+#define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);
+
+#endif // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64
+
+#endif // V8_CC_MSVC
+
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+ // clobber all double registers
+
+#ifdef CLOBBER_REGISTER
+ DOUBLE_REGISTERS(CLOBBER_REGISTER)
+#undef CLOBBER_REGISTER
+ return 0;
+
+#else
+ // TODO(v8:11798): This clobbers only subset of registers depending on
+ // compiler, Rewrite this in assembly to really clobber all registers. GCC for
+ // ia32 uses the FPU and does not touch XMM registers.
+ return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+#endif // CLOBBER_REGISTER
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/clobber-registers.h b/deps/v8/src/execution/clobber-registers.h
new file mode 100644
index 0000000000..9869fe8990
--- /dev/null
+++ b/deps/v8/src/execution/clobber-registers.h
@@ -0,0 +1,18 @@
+
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_CLOBBER_REGISTERS_H_
+#define V8_EXECUTION_CLOBBER_REGISTERS_H_
+
+namespace v8 {
+
+namespace internal {
+
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+}
+} // namespace v8
+
+#endif // V8_EXECUTION_CLOBBER_REGISTERS_H_
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 3683f80a4d..aa83922593 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -13,6 +13,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#include "src/wasm/code-space-access.h"
#include "src/wasm/wasm-engine.h"
#endif // V8_ENABLE_WEBASSEMBLY
@@ -169,8 +170,8 @@ InvokeParams InvokeParams::SetUpForRunMicrotasks(
return params;
}
-Handle<Code> JSEntry(Isolate* isolate, Execution::Target execution_target,
- bool is_construct) {
+Handle<CodeT> JSEntry(Isolate* isolate, Execution::Target execution_target,
+ bool is_construct) {
if (is_construct) {
DCHECK_EQ(Execution::Target::kCallable, execution_target);
return BUILTIN_CODE(isolate, JSConstructEntry);
@@ -207,11 +208,11 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
native_context->script_context_table(), isolate);
// Find name clashes.
- for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
- Handle<String> name(scope_info->ContextLocalName(var), isolate);
- VariableMode mode = scope_info->ContextLocalMode(var);
+ for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
+ Handle<String> name(it->name(), isolate);
+ VariableMode mode = scope_info->ContextLocalMode(it->index());
VariableLookupResult lookup;
- if (ScriptContextTable::Lookup(isolate, *script_context, *name, &lookup)) {
+ if (script_context->Lookup(name, &lookup)) {
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
Handle<Context> context = ScriptContextTable::GetContext(
isolate, script_context, lookup.context_index);
@@ -263,9 +264,12 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
isolate->factory()->NewScriptContext(native_context, scope_info);
result->Initialize(isolate);
-
+ // In REPL mode, we are allowed to add/modify let/const variables.
+ // We use the previous defined script context for those.
+ const bool ignore_duplicates = scope_info->IsReplModeScope();
Handle<ScriptContextTable> new_script_context_table =
- ScriptContextTable::Extend(script_context, result);
+ ScriptContextTable::Extend(isolate, script_context, result,
+ ignore_duplicates);
native_context->synchronized_set_script_context_table(
*new_script_context_table);
return result;
@@ -278,6 +282,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
DCHECK_LE(params.argc, FixedArray::kMaxLength);
#if V8_ENABLE_WEBASSEMBLY
+ // When executing JS code, there should be no {CodeSpaceWriteScope} open.
+ DCHECK(!wasm::CodeSpaceWriteScope::IsInScope());
// If we have PKU support for Wasm, ensure that code is currently write
// protected for this thread.
DCHECK_IMPLIES(wasm::GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
@@ -390,7 +396,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
- Handle<Code> code =
+ Handle<CodeT> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
// Save and restore context around invocation and block the
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 417350d93f..d171bdea0d 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -204,20 +204,18 @@ class BuiltinFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(2);
};
+// Fixed frame slots shared by the js-to-wasm wrapper, the
+// ReturnPromiseOnSuspend wrapper and the WasmResume wrapper.
class BuiltinWasmWrapperConstants : public TypedFrameConstants {
public:
// This slot contains the number of slots at the top of the frame that need to
// be scanned by the GC.
static constexpr int kGCScanSlotCountOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
-};
-
-class ReturnPromiseOnSuspendFrameConstants
- : public BuiltinWasmWrapperConstants {
- public:
- static constexpr int kParamCountOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static constexpr int kSpillAreaSize =
- -(kParamCountOffset - TypedFrameConstants::kFixedFrameSizeFromFp);
+ // The number of parameters passed to this function.
+ static constexpr int kInParamCountOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ // The number of parameters according to the signature.
+ static constexpr int kParamCountOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
};
class ConstructFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index 8db9f4dce4..32a974fbc3 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -239,8 +239,7 @@ inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
-inline ReturnPromiseOnSuspendFrame::ReturnPromiseOnSuspendFrame(
- StackFrameIteratorBase* iterator)
+inline StackSwitchFrame::StackSwitchFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index b102164b61..0fdc7e6311 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -28,6 +28,7 @@
#include "src/zone/zone-containers.h"
#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -112,7 +113,7 @@ StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
#if V8_ENABLE_WEBASSEMBLY
StackFrameIterator::StackFrameIterator(Isolate* isolate,
wasm::StackMemory* stack)
- : StackFrameIterator(isolate) {
+ : StackFrameIteratorBase(isolate, true) {
Reset(isolate->thread_local_top(), stack);
}
#endif
@@ -161,14 +162,15 @@ void StackFrameIterator::Reset(ThreadLocalTop* top) {
#if V8_ENABLE_WEBASSEMBLY
void StackFrameIterator::Reset(ThreadLocalTop* top, wasm::StackMemory* stack) {
- if (stack->jmpbuf()->sp == stack->base()) {
- // Empty stack.
+ if (stack->jmpbuf()->sp == kNullAddress) {
+ // A null SP indicates that the computation associated with this stack has
+ // returned, leaving the stack segment empty.
return;
}
StackFrame::State state;
- ReturnPromiseOnSuspendFrame::GetStateForJumpBuffer(stack->jmpbuf(), &state);
+ StackSwitchFrame::GetStateForJumpBuffer(stack->jmpbuf(), &state);
handler_ = StackHandler::FromAddress(Isolate::handler(top));
- frame_ = SingletonFor(StackFrame::RETURN_PROMISE_ON_SUSPEND, &state);
+ frame_ = SingletonFor(StackFrame::STACK_SWITCH, &state);
}
#endif
@@ -671,7 +673,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
return BUILTIN;
case CodeKind::TURBOFAN:
- case CodeKind::TURBOPROP:
+ case CodeKind::MAGLEV:
return OPTIMIZED;
case CodeKind::BASELINE:
return Type::BASELINE;
@@ -717,7 +719,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case WASM_EXIT:
case WASM_DEBUG_BREAK:
case JS_TO_WASM:
- case RETURN_PROMISE_ON_SUSPEND:
+ case STACK_SWITCH:
#endif // V8_ENABLE_WEBASSEMBLY
return candidate;
case OPTIMIZED:
@@ -756,7 +758,7 @@ void NativeFrame::ComputeCallerState(State* state) const {
}
Code EntryFrame::unchecked_code() const {
- return isolate()->builtins()->code(Builtin::kJSEntry);
+ return FromCodeT(isolate()->builtins()->code(Builtin::kJSEntry));
}
void EntryFrame::ComputeCallerState(State* state) const {
@@ -778,7 +780,7 @@ StackFrame::Type CWasmEntryFrame::GetCallerState(State* state) const {
#endif // V8_ENABLE_WEBASSEMBLY
Code ConstructEntryFrame::unchecked_code() const {
- return isolate()->builtins()->code(Builtin::kJSConstructEntry);
+ return FromCodeT(isolate()->builtins()->code(Builtin::kJSConstructEntry));
}
void ExitFrame::ComputeCallerState(State* state) const {
@@ -831,7 +833,7 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
case BUILTIN_EXIT:
#if V8_ENABLE_WEBASSEMBLY
case WASM_EXIT:
- case RETURN_PROMISE_ON_SUSPEND:
+ case STACK_SWITCH:
#endif // V8_ENABLE_WEBASSEMBLY
return frame_type;
default:
@@ -869,16 +871,24 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->constant_pool_address = nullptr;
}
+void BuiltinExitFrame::Summarize(std::vector<FrameSummary>* frames) const {
+ DCHECK(frames->empty());
+ Handle<FixedArray> parameters = GetParameters();
+ DisallowGarbageCollection no_gc;
+ Code code = LookupCode();
+ int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
+ FrameSummary::JavaScriptFrameSummary summary(
+ isolate(), receiver(), function(), AbstractCode::cast(code), code_offset,
+ IsConstructor(), *parameters);
+ frames->push_back(summary);
+}
+
JSFunction BuiltinExitFrame::function() const {
return JSFunction::cast(target_slot_object());
}
Object BuiltinExitFrame::receiver() const { return receiver_slot_object(); }
-bool BuiltinExitFrame::IsConstructor() const {
- return !new_target_slot_object().IsUndefined(isolate());
-}
-
Object BuiltinExitFrame::GetParameter(int i) const {
DCHECK(i >= 0 && i < ComputeParametersCount());
int offset =
@@ -896,6 +906,22 @@ int BuiltinExitFrame::ComputeParametersCount() const {
return argc;
}
+Handle<FixedArray> BuiltinExitFrame::GetParameters() const {
+ if (V8_LIKELY(!FLAG_detailed_error_stack_trace)) {
+ return isolate()->factory()->empty_fixed_array();
+ }
+ int param_count = ComputeParametersCount();
+ auto parameters = isolate()->factory()->NewFixedArray(param_count);
+ for (int i = 0; i < param_count; i++) {
+ parameters->set(i, GetParameter(i));
+ }
+ return parameters;
+}
+
+bool BuiltinExitFrame::IsConstructor() const {
+ return !new_target_slot_object().IsUndefined(isolate());
+}
+
namespace {
void PrintIndex(StringStream* accumulator, StackFrame::PrintMode mode,
int index) {
@@ -1035,10 +1061,10 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!is_wasm) {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- if (!entry->safepoint_entry.is_valid()) {
+ if (!entry->safepoint_entry.is_initialized()) {
entry->safepoint_entry =
entry->code.GetSafepointEntry(isolate(), inner_pointer);
- DCHECK(entry->safepoint_entry.is_valid());
+ DCHECK(entry->safepoint_entry.is_initialized());
} else {
DCHECK_EQ(entry->safepoint_entry,
entry->code.GetSafepointEntry(isolate(), inner_pointer));
@@ -1060,7 +1086,6 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
if (is_wasm_call) has_tagged_outgoing_params = false;
#endif // V8_ENABLE_WEBASSEMBLY
}
- uint32_t slot_space = stack_slots * kSystemPointerSize;
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
@@ -1082,7 +1107,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
case CONSTRUCT:
#if V8_ENABLE_WEBASSEMBLY
case JS_TO_WASM:
- case RETURN_PROMISE_ON_SUSPEND:
+ case STACK_SWITCH:
case C_WASM_ENTRY:
case WASM_DEBUG_BREAK:
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1118,14 +1143,22 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
UNREACHABLE();
}
}
- slot_space -=
+
+ // slot_space holds the actual number of spill slots, without fixed frame
+ // slots.
+ const uint32_t slot_space =
+ stack_slots * kSystemPointerSize -
(frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
+ // base <= limit.
+ // Fixed frame slots.
FullObjectSlot frame_header_base(&Memory<Address>(fp() - frame_header_size));
FullObjectSlot frame_header_limit(
&Memory<Address>(fp() - StandardFrameConstants::kCPSlotSize));
+ // Parameters passed to the callee.
FullObjectSlot parameters_base(&Memory<Address>(sp()));
FullObjectSlot parameters_limit(frame_header_base.address() - slot_space);
+ // Spill slots are in the region ]frame_header_base, parameters_limit];
// Visit the rest of the parameters if they are tagged.
if (has_tagged_outgoing_params) {
@@ -1140,7 +1173,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
PtrComprCageBase cage_base(isolate());
for (uint8_t bits : safepoint_entry.tagged_slots()) {
while (bits) {
- int bit = base::bits::CountTrailingZeros(bits);
+ const int bit = base::bits::CountTrailingZeros(bits);
bits &= ~(1 << bit);
FullObjectSlot spill_slot = parameters_limit + slot_offset + bit;
#ifdef V8_COMPRESS_POINTERS
@@ -1178,7 +1211,17 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
HeapObject forwarded = map_word.IsForwardingAddress()
? map_word.ToForwardingAddress()
: raw;
- CHECK(forwarded.map(cage_base).IsMap());
+ bool is_self_forwarded =
+ forwarded.map_word(cage_base, kRelaxedLoad).ptr() ==
+ forwarded.address();
+ if (is_self_forwarded) {
+ // The object might be in a self-forwarding state if it's located
+ // in new large object space. GC will fix this at a later stage.
+ CHECK(BasicMemoryChunk::FromHeapObject(forwarded)
+ ->InNewLargeObjectSpace());
+ } else {
+ CHECK(forwarded.map(cage_base).IsMap(cage_base));
+ }
}
}
} else {
@@ -1258,7 +1301,7 @@ bool JavaScriptFrame::HasInlinedFrames() const {
}
Code CommonFrameWithJSLinkage::unchecked_code() const {
- return function().code();
+ return FromCodeT(function().code());
}
int OptimizedFrame::ComputeParametersCount() const {
@@ -1568,6 +1611,31 @@ Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
return handle(function_->context().native_context(), isolate());
}
+Handle<StackFrameInfo>
+FrameSummary::JavaScriptFrameSummary::CreateStackFrameInfo() const {
+ Handle<SharedFunctionInfo> shared(function_->shared(), isolate());
+ Handle<Script> script(Script::cast(shared->script()), isolate());
+ Handle<String> function_name = JSFunction::GetDebugName(function_);
+ if (function_name->length() == 0 &&
+ script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+ function_name = isolate()->factory()->eval_string();
+ }
+ int bytecode_offset = code_offset();
+ if (bytecode_offset == kFunctionEntryBytecodeOffset) {
+ // For the special function entry bytecode offset (-1), which signals
+ // that the stack trace was captured while the function entry was
+ // executing (i.e. during the interrupt check), we cannot store this
+ // sentinel in the bit field, so we just eagerly lookup the source
+ // position within the script.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared);
+ int source_position = abstract_code()->SourcePosition(bytecode_offset);
+ return isolate()->factory()->NewStackFrameInfo(
+ script, source_position, function_name, is_constructor());
+ }
+ return isolate()->factory()->NewStackFrameInfo(
+ shared, bytecode_offset, function_name, is_constructor());
+}
+
#if V8_ENABLE_WEBASSEMBLY
FrameSummary::WasmFrameSummary::WasmFrameSummary(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
@@ -1604,6 +1672,14 @@ Handle<Script> FrameSummary::WasmFrameSummary::script() const {
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
return handle(wasm_instance()->native_context(), isolate());
}
+
+Handle<StackFrameInfo> FrameSummary::WasmFrameSummary::CreateStackFrameInfo()
+ const {
+ Handle<String> function_name =
+ GetWasmFunctionDebugName(isolate(), wasm_instance(), function_index());
+ return isolate()->factory()->NewStackFrameInfo(script(), SourcePosition(),
+ function_name, false);
+}
#endif // V8_ENABLE_WEBASSEMBLY
FrameSummary::~FrameSummary() {
@@ -1673,6 +1749,7 @@ FRAME_SUMMARY_DISPATCH(Handle<Object>, script)
FRAME_SUMMARY_DISPATCH(int, SourcePosition)
FRAME_SUMMARY_DISPATCH(int, SourceStatementPosition)
FRAME_SUMMARY_DISPATCH(Handle<Context>, native_context)
+FRAME_SUMMARY_DISPATCH(Handle<StackFrameInfo>, CreateStackFrameInfo)
#undef FRAME_SUMMARY_DISPATCH
@@ -1731,9 +1808,9 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
it->kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
code_offset = 0;
- abstract_code = handle(
- AbstractCode::cast(isolate()->builtins()->code(
- Builtins::GetBuiltinFromBytecodeOffset(it->bytecode_offset()))),
+ abstract_code = ToAbstractCode(
+ isolate()->builtins()->code_handle(
+ Builtins::GetBuiltinFromBytecodeOffset(it->bytecode_offset())),
isolate());
} else {
DCHECK_EQ(it->kind(), TranslatedFrame::kUnoptimizedFunction);
@@ -1783,7 +1860,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
DCHECK(is_optimized());
JSFunction opt_function = function();
- Code code = opt_function.code();
+ Code code = FromCodeT(opt_function.code());
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
@@ -1976,7 +2053,7 @@ void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
return;
}
wasm::WasmCodeRefScope code_ref_scope;
- accumulator->Add("WASM [");
+ accumulator->Add("Wasm [");
accumulator->PrintName(script().name());
Address instruction_start = wasm_code()->instruction_start();
base::Vector<const uint8_t> raw_func_name =
@@ -2103,7 +2180,7 @@ void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
- accumulator->Add("WASM DEBUG BREAK");
+ accumulator->Add("WasmDebugBreak");
if (mode != OVERVIEW) accumulator->Add("\n");
}
@@ -2142,7 +2219,7 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
spill_slot_limit);
}
-void ReturnPromiseOnSuspendFrame::Iterate(RootVisitor* v) const {
+void StackSwitchFrame::Iterate(RootVisitor* v) const {
// See JsToWasmFrame layout.
// We cannot DCHECK that the pc matches the expected builtin code here,
// because the return address is on a different stack.
@@ -2159,10 +2236,10 @@ void ReturnPromiseOnSuspendFrame::Iterate(RootVisitor* v) const {
}
// static
-void ReturnPromiseOnSuspendFrame::GetStateForJumpBuffer(
- wasm::JumpBuffer* jmpbuf, State* state) {
+void StackSwitchFrame::GetStateForJumpBuffer(wasm::JumpBuffer* jmpbuf,
+ State* state) {
DCHECK_NE(jmpbuf->fp, kNullAddress);
- DCHECK_EQ(ComputeFrameType(jmpbuf->fp), RETURN_PROMISE_ON_SUSPEND);
+ DCHECK_EQ(ComputeFrameType(jmpbuf->fp), STACK_SWITCH);
FillState(jmpbuf->fp, jmpbuf->sp, state);
DCHECK_NE(*state->pc_address, kNullAddress);
}
@@ -2283,12 +2360,12 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
if (heap_locals_count > 0) {
accumulator->Add(" // heap-allocated locals\n");
}
- for (int i = 0; i < heap_locals_count; i++) {
+ for (auto it : ScopeInfo::IterateLocalNames(&scope_info, no_gc)) {
accumulator->Add(" var ");
- accumulator->PrintName(scope_info.ContextLocalName(i));
+ accumulator->PrintName(it->name());
accumulator->Add(" = ");
if (!context.is_null()) {
- int slot_index = Context::MIN_CONTEXT_SLOTS + i;
+ int slot_index = Context::MIN_CONTEXT_SLOTS + it->index();
if (slot_index < context.length()) {
accumulator->Add("%o", context.get(slot_index));
} else {
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 878c265632..9d67d0fd57 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -52,7 +52,6 @@ struct JumpBuffer;
class StackMemory;
} // namespace wasm
-// Forward declarations.
class AbstractCode;
class Debug;
class ExternalCallbackScope;
@@ -61,6 +60,7 @@ class Isolate;
class ObjectVisitor;
class Register;
class RootVisitor;
+class StackFrameInfo;
class StackFrameIteratorBase;
class StringStream;
class ThreadLocalTop;
@@ -102,7 +102,7 @@ class StackHandler {
IF_WASM(V, WASM, WasmFrame) \
IF_WASM(V, WASM_TO_JS, WasmToJsFrame) \
IF_WASM(V, JS_TO_WASM, JsToWasmFrame) \
- IF_WASM(V, RETURN_PROMISE_ON_SUSPEND, ReturnPromiseOnSuspendFrame) \
+ IF_WASM(V, STACK_SWITCH, StackSwitchFrame) \
IF_WASM(V, WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
IF_WASM(V, C_WASM_ENTRY, CWasmEntryFrame) \
IF_WASM(V, WASM_EXIT, WasmExitFrame) \
@@ -394,6 +394,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
int SourceStatementPosition() const;
Handle<Object> script() const;
Handle<Context> native_context() const;
+ Handle<StackFrameInfo> CreateStackFrameInfo() const;
private:
Handle<Object> receiver_;
@@ -423,6 +424,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
Handle<WasmInstanceObject> wasm_instance() const { return wasm_instance_; }
Handle<Context> native_context() const;
bool at_to_number_conversion() const { return at_to_number_conversion_; }
+ Handle<StackFrameInfo> CreateStackFrameInfo() const;
private:
Handle<WasmInstanceObject> wasm_instance_;
@@ -456,6 +458,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
int SourcePosition() const;
int SourceStatementPosition() const;
Handle<Context> native_context() const;
+ Handle<StackFrameInfo> CreateStackFrameInfo() const;
#define FRAME_SUMMARY_CAST(kind_, type, field, desc) \
bool Is##desc() const { return base_.kind() == kind_; } \
@@ -756,32 +759,32 @@ class BuiltinExitFrame : public ExitFrame {
public:
Type type() const override { return BUILTIN_EXIT; }
- static BuiltinExitFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_builtin_exit());
- return static_cast<BuiltinExitFrame*>(frame);
- }
-
JSFunction function() const;
+
Object receiver() const;
+ Object GetParameter(int i) const;
+ int ComputeParametersCount() const;
+ Handle<FixedArray> GetParameters() const;
+
+ // Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
+ // Summarize Frame
+ void Summarize(std::vector<FrameSummary>* frames) const override;
+
protected:
inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
private:
- Object GetParameter(int i) const;
- int ComputeParametersCount() const;
-
inline Object receiver_slot_object() const;
inline Object argc_slot_object() const;
inline Object target_slot_object() const;
inline Object new_target_slot_object() const;
friend class StackFrameIteratorBase;
- friend class StackTraceBuilder;
};
class StubFrame : public TypedFrame {
@@ -1049,14 +1052,14 @@ class JsToWasmFrame : public StubFrame {
friend class StackFrameIteratorBase;
};
-class ReturnPromiseOnSuspendFrame : public ExitFrame {
+class StackSwitchFrame : public ExitFrame {
public:
- Type type() const override { return RETURN_PROMISE_ON_SUSPEND; }
+ Type type() const override { return STACK_SWITCH; }
void Iterate(RootVisitor* v) const override;
static void GetStateForJumpBuffer(wasm::JumpBuffer* jmpbuf, State* state);
protected:
- inline explicit ReturnPromiseOnSuspendFrame(StackFrameIteratorBase* iterator);
+ inline explicit StackSwitchFrame(StackFrameIteratorBase* iterator);
private:
friend class StackFrameIteratorBase;
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index c1120dd8eb..edec8818a3 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -297,7 +297,7 @@ Object FutexEmulation::WaitWasm32(Isolate* isolate,
size_t addr, int32_t value,
int64_t rel_timeout_ns) {
return Wait<int32_t>(isolate, WaitMode::kSync, array_buffer, addr, value,
- rel_timeout_ns >= 0, rel_timeout_ns);
+ rel_timeout_ns >= 0, rel_timeout_ns, CallType::kIsWasm);
}
Object FutexEmulation::WaitWasm64(Isolate* isolate,
@@ -305,7 +305,7 @@ Object FutexEmulation::WaitWasm64(Isolate* isolate,
size_t addr, int64_t value,
int64_t rel_timeout_ns) {
return Wait<int64_t>(isolate, WaitMode::kSync, array_buffer, addr, value,
- rel_timeout_ns >= 0, rel_timeout_ns);
+ rel_timeout_ns >= 0, rel_timeout_ns, CallType::kIsWasm);
}
template <typename T>
@@ -346,21 +346,22 @@ double WaitTimeoutInMs(double timeout_ns) {
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate, WaitMode mode,
Handle<JSArrayBuffer> array_buffer, size_t addr,
- T value, bool use_timeout, int64_t rel_timeout_ns) {
+ T value, bool use_timeout, int64_t rel_timeout_ns,
+ CallType call_type) {
if (mode == WaitMode::kSync) {
return WaitSync(isolate, array_buffer, addr, value, use_timeout,
- rel_timeout_ns);
+ rel_timeout_ns, call_type);
}
DCHECK_EQ(mode, WaitMode::kAsync);
return WaitAsync(isolate, array_buffer, addr, value, use_timeout,
- rel_timeout_ns);
+ rel_timeout_ns, call_type);
}
template <typename T>
Object FutexEmulation::WaitSync(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
T value, bool use_timeout,
- int64_t rel_timeout_ns) {
+ int64_t rel_timeout_ns, CallType call_type) {
VMState<ATOMICS_WAIT> state(isolate);
base::TimeDelta rel_timeout =
base::TimeDelta::FromNanoseconds(rel_timeout_ns);
@@ -398,7 +399,15 @@ Object FutexEmulation::WaitSync(Isolate* isolate,
FutexWaitListNode::ResetWaitingOnScopeExit reset_waiting(node);
std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(wait_location);
- if (p->load() != value) {
+ T loaded_value = p->load();
+#if defined(V8_TARGET_BIG_ENDIAN)
+ // If loading a Wasm value, it needs to be reversed on Big Endian platforms.
+ if (call_type == CallType::kIsWasm) {
+ DCHECK(sizeof(T) == kInt32Size || sizeof(T) == kInt64Size);
+ loaded_value = ByteReverse(loaded_value);
+ }
+#endif
+ if (loaded_value != value) {
result = handle(Smi::FromInt(WaitReturnValue::kNotEqual), isolate);
callback_result = AtomicsWaitEvent::kNotEqual;
break;
@@ -523,7 +532,7 @@ template <typename T>
Object FutexEmulation::WaitAsync(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
- int64_t rel_timeout_ns) {
+ int64_t rel_timeout_ns, CallType call_type) {
base::TimeDelta rel_timeout =
base::TimeDelta::FromNanoseconds(rel_timeout_ns);
@@ -543,7 +552,15 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
// 17. Let w be ! AtomicLoad(typedArray, i).
std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(
static_cast<int8_t*>(backing_store->buffer_start()) + addr);
- if (p->load() != value) {
+ T loaded_value = p->load();
+#if defined(V8_TARGET_BIG_ENDIAN)
+ // If loading a Wasm value, it needs to be reversed on Big Endian platforms.
+ if (call_type == CallType::kIsWasm) {
+ DCHECK(sizeof(T) == kInt32Size || sizeof(T) == kInt64Size);
+ loaded_value = ByteReverse(loaded_value);
+ }
+#endif
+ if (loaded_value != value) {
result_kind = ResultKind::kNotEqual;
} else if (use_timeout && rel_timeout_ns == 0) {
result_kind = ResultKind::kTimedOut;
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index 2ab84295e0..4747ed5f48 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -7,8 +7,6 @@
#include <stdint.h>
-#include <map>
-
#include "include/v8-persistent-handle.h"
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
@@ -141,6 +139,7 @@ class FutexWaitListNode {
class FutexEmulation : public AllStatic {
public:
enum WaitMode { kSync = 0, kAsync };
+ enum class CallType { kIsNotWasm = 0, kIsWasm };
// Pass to Wake() to wake all waiters.
static const uint32_t kWakeAll = UINT32_MAX;
@@ -214,17 +213,18 @@ class FutexEmulation : public AllStatic {
template <typename T>
static Object Wait(Isolate* isolate, WaitMode mode,
Handle<JSArrayBuffer> array_buffer, size_t addr, T value,
- bool use_timeout, int64_t rel_timeout_ns);
+ bool use_timeout, int64_t rel_timeout_ns,
+ CallType call_type = CallType::kIsNotWasm);
template <typename T>
static Object WaitSync(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
- int64_t rel_timeout_ns);
+ int64_t rel_timeout_ns, CallType call_type);
template <typename T>
static Object WaitAsync(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
- int64_t rel_timeout_ns);
+ int64_t rel_timeout_ns, CallType call_type);
// Resolve the Promises of the async waiters which belong to |isolate|.
static void ResolveAsyncWaiterPromises(Isolate* isolate);
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.h b/deps/v8/src/execution/ia32/frame-constants-ia32.h
index 45c7355979..0ad9f51e6e 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.h
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/ia32/register-ia32.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -53,17 +53,14 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// Omit ebx, which is the root register.
- static constexpr RegList kPushedGpRegs =
- Register::ListOf(eax, ecx, edx, esi, edi);
+ static constexpr RegList kPushedGpRegs = {eax, ecx, edx, esi, edi};
// Omit xmm7, which is the kScratchDoubleReg.
- static constexpr RegList kPushedFpRegs =
- DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6);
+ static constexpr DoubleRegList kPushedFpRegs = {xmm0, xmm1, xmm2, xmm3,
+ xmm4, xmm5, xmm6};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -72,15 +69,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSimd128Size;
}
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index ca514657de..671492bb95 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -12,7 +12,7 @@
#include "src/execution/thread-local-top.h"
#include "src/heap/linear-allocation-area.h"
#include "src/roots/roots.h"
-#include "src/security/external-pointer-table.h"
+#include "src/sandbox/external-pointer-table.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -52,25 +52,16 @@ class Isolate;
/* Linear allocation areas for the heap's new and old space */ \
V(kNewAllocationInfo, LinearAllocationArea::kSize, new_allocation_info) \
V(kOldAllocationInfo, LinearAllocationArea::kSize, old_allocation_info) \
- ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V) \
- ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
+ ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V) \
V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
-#ifdef V8_EXTERNAL_CODE_SPACE
-#define ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V) \
- V(kBuiltinCodeDataContainerTableOffset, \
- Builtins::kBuiltinCount* kSystemPointerSize, \
- builtin_code_data_container_table)
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V) \
+ V(kExternalPointerTableOffset, ExternalPointerTable::kSize, \
+ external_pointer_table)
#else
-#define ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V)
-#endif // V8_EXTERNAL_CODE_SPACE
-
-#ifdef V8_HEAP_SANDBOX
-#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
- V(kExternalPointerTableOffset, kSystemPointerSize * 3, external_pointer_table)
-#else
-#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V)
-#endif // V8_HEAP_SANDBOX
+#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V)
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including builtins, interpreter bytecode handlers and
@@ -118,17 +109,6 @@ class IsolateData final {
Builtins::ToInt(id) * kSystemPointerSize;
}
- static int BuiltinCodeDataContainerSlotOffset(Builtin id) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // TODO(v8:11880): implement table tiering once the builtin table containing
- // Code objects is no longer used.
- return builtin_code_data_container_table_offset() +
- Builtins::ToInt(id) * kSystemPointerSize;
-#else
- UNREACHABLE();
-#endif // V8_EXTERNAL_CODE_SPACE
- }
-
#define V(Offset, Size, Name) \
Address Name##_address() { return reinterpret_cast<Address>(&Name##_); }
ISOLATE_DATA_FIELDS(V)
@@ -151,13 +131,6 @@ class IsolateData final {
ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
Address* builtin_entry_table() { return builtin_entry_table_; }
Address* builtin_table() { return builtin_table_; }
- Address* builtin_code_data_container_table() {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return builtin_code_data_container_table_;
-#else
- UNREACHABLE();
-#endif
- }
uint8_t stack_is_iterable() const { return stack_is_iterable_; }
// Returns true if this address points to data stored in this instance. If
@@ -236,12 +209,8 @@ class IsolateData final {
LinearAllocationArea new_allocation_info_;
LinearAllocationArea old_allocation_info_;
-#ifdef V8_EXTERNAL_CODE_SPACE
- Address builtin_code_data_container_table_[Builtins::kBuiltinCount] = {};
-#endif
-
// Table containing pointers to external objects.
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
ExternalPointerTable external_pointer_table_;
#endif
@@ -282,7 +251,7 @@ void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}
-#undef ISOLATE_DATA_FIELDS_HEAP_SANDBOX
+#undef ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS
#undef ISOLATE_DATA_FIELDS
} // namespace internal
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index d044f3e646..44de6b52c8 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -86,10 +86,10 @@ V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
}
-// Use this function instead of Internals::GetIsolateForHeapSandbox for internal
+// Use this function instead of Internals::GetIsolateForSandbox for internal
// code, as this function is fully inlinable.
-V8_INLINE static Isolate* GetIsolateForHeapSandbox(HeapObject object) {
-#ifdef V8_HEAP_SANDBOX
+V8_INLINE static Isolate* GetIsolateForSandbox(HeapObject object) {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
return GetIsolateFromWritableObject(object);
#else
// Not used in non-sandbox mode.
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 89b0ba3c1a..2f643ef627 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -38,19 +38,23 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/date/date.h"
#include "src/debug/debug-frames.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/deoptimizer/materialized-object-store.h"
#include "src/diagnostics/basic-block-profiler.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/local-isolate.h"
#include "src/execution/messages.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/protectors-inl.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
+#include "src/execution/tiering-manager.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
@@ -73,6 +77,7 @@
#include "src/logging/runtime-call-stats-scope.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/backing-store.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/elements.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/hash-table-inl.h"
@@ -87,7 +92,6 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/source-text-module-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/visitors.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
@@ -112,6 +116,10 @@
#include "unicode/uobject.h"
#endif // V8_INTL_SUPPORT
+#if V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#endif // V8_ENABLE_MAGLEV
+
#if V8_ENABLE_WEBASSEMBLY
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-code-manager.h"
@@ -275,7 +283,7 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
// embedded blob may change (e.g. in tests or mksnapshot). If the blob is
// binary-embedded, it is immortal immovable.
const uint8_t* code =
- current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed);
+ current_embedded_blob_code_.load(std::memory_order_relaxed);
if (code == nullptr) return false;
return code == DefaultEmbeddedBlobCode();
}
@@ -352,26 +360,22 @@ uint32_t Isolate::embedded_blob_data_size() const {
// static
const uint8_t* Isolate::CurrentEmbeddedBlobCode() {
- return current_embedded_blob_code_.load(
- std::memory_order::memory_order_relaxed);
+ return current_embedded_blob_code_.load(std::memory_order_relaxed);
}
// static
uint32_t Isolate::CurrentEmbeddedBlobCodeSize() {
- return current_embedded_blob_code_size_.load(
- std::memory_order::memory_order_relaxed);
+ return current_embedded_blob_code_size_.load(std::memory_order_relaxed);
}
// static
const uint8_t* Isolate::CurrentEmbeddedBlobData() {
- return current_embedded_blob_data_.load(
- std::memory_order::memory_order_relaxed);
+ return current_embedded_blob_data_.load(std::memory_order_relaxed);
}
// static
uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
- return current_embedded_blob_data_size_.load(
- std::memory_order::memory_order_relaxed);
+ return current_embedded_blob_data_size_.load(std::memory_order_relaxed);
}
// static
@@ -419,7 +423,7 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
// Hash data sections of builtin code objects.
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = builtins()->code(builtin);
+ Code code = FromCodeT(builtins()->code(builtin));
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
uint8_t* const code_ptr =
@@ -456,9 +460,7 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-#if DEBUG
std::atomic<bool> Isolate::isolate_key_created_{false};
-#endif
namespace {
// A global counter for all generated Isolates, might overflow.
@@ -515,12 +517,20 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
void Isolate::InitializeOncePerProcess() {
isolate_key_ = base::Thread::CreateThreadLocalKey();
-#if DEBUG
bool expected = false;
- DCHECK_EQ(true, isolate_key_created_.compare_exchange_strong(
- expected, true, std::memory_order_relaxed));
-#endif
+ CHECK(isolate_key_created_.compare_exchange_strong(
+ expected, true, std::memory_order_relaxed));
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
+
+ Heap::InitializeOncePerProcess();
+}
+
+void Isolate::DisposeOncePerProcess() {
+ base::Thread::DeleteThreadLocalKey(isolate_key_);
+ bool expected = true;
+ CHECK(isolate_key_created_.compare_exchange_strong(
+ expected, false, std::memory_order_relaxed));
+ base::Thread::DeleteThreadLocalKey(per_isolate_thread_data_key_);
}
Address Isolate::get_address_from_id(IsolateAddressId id) {
@@ -638,10 +648,11 @@ void Isolate::PushStackTraceAndDie(void* ptr1, void* ptr2, void* ptr3,
base::OS::Abort();
}
-void Isolate::PushParamsAndDie(void* ptr1, void* ptr2, void* ptr3, void* ptr4) {
+void Isolate::PushParamsAndDie(void* ptr1, void* ptr2, void* ptr3, void* ptr4,
+ void* ptr5, void* ptr6) {
StackTraceFailureMessage message(
this, StackTraceFailureMessage::kDontIncludeStackTrace, ptr1, ptr2, ptr3,
- ptr4);
+ ptr4, ptr5, ptr6);
message.Print();
base::OS::Abort();
}
@@ -651,18 +662,20 @@ void StackTraceFailureMessage::Print() volatile {
// to force stack allocation.
base::OS::PrintError(
"Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
- "failure_message_object=%p\n%s",
- ptr1_, ptr2_, ptr3_, ptr4_, this, &js_stack_trace_[0]);
+ "ptr5=%p\n ptr6=%p\n failure_message_object=%p\n%s",
+ ptr1_, ptr2_, ptr3_, ptr4_, ptr5_, ptr6_, this, &js_stack_trace_[0]);
}
StackTraceFailureMessage::StackTraceFailureMessage(
Isolate* isolate, StackTraceFailureMessage::StackTraceMode mode, void* ptr1,
- void* ptr2, void* ptr3, void* ptr4) {
+ void* ptr2, void* ptr3, void* ptr4, void* ptr5, void* ptr6) {
isolate_ = isolate;
ptr1_ = ptr1;
ptr2_ = ptr2;
ptr3_ = ptr3;
ptr4_ = ptr4;
+ ptr5_ = ptr5;
+ ptr6_ = ptr6;
// Write a stracktrace into the {js_stack_trace_} buffer.
const size_t buffer_length = arraysize(js_stack_trace_);
memset(&js_stack_trace_, 0, buffer_length);
@@ -683,18 +696,19 @@ StackTraceFailureMessage::StackTraceFailureMessage(
}
}
-class StackTraceBuilder {
- public:
- enum FrameFilterMode { ALL, CURRENT_SECURITY_CONTEXT };
+bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
- StackTraceBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
- Handle<Object> caller, FrameFilterMode filter_mode)
+namespace {
+
+class CallSiteBuilder {
+ public:
+ CallSiteBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
+ Handle<Object> caller)
: isolate_(isolate),
mode_(mode),
limit_(limit),
caller_(caller),
- skip_next_frame_(mode != SKIP_NONE),
- check_security_context_(filter_mode == CURRENT_SECURITY_CONTEXT) {
+ skip_next_frame_(mode != SKIP_NONE) {
DCHECK_IMPLIES(mode_ == SKIP_UNTIL_SEEN, caller_->IsJSFunction());
// Modern web applications are usually built with multiple layers of
// framework and library code, and stack depth tends to be more than
@@ -703,11 +717,23 @@ class StackTraceBuilder {
elements_ = isolate->factory()->NewFixedArray(std::min(64, limit));
}
+ bool Visit(FrameSummary const& summary) {
+ if (Full()) return false;
+#if V8_ENABLE_WEBASSEMBLY
+ if (summary.IsWasm()) {
+ AppendWasmFrame(summary.AsWasm());
+ return true;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ AppendJavaScriptFrame(summary.AsJavaScript());
+ return true;
+ }
+
void AppendAsyncFrame(Handle<JSGeneratorObject> generator_object) {
Handle<JSFunction> function(generator_object->function(), isolate_);
if (!IsVisibleInStackTrace(function)) return;
- int flags = StackFrameInfo::kIsAsync;
- if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
+ int flags = CallSiteInfo::kIsAsync;
+ if (IsStrictFrame(function)) flags |= CallSiteInfo::kIsStrict;
Handle<Object> receiver(generator_object->receiver(), isolate_);
Handle<BytecodeArray> code(function->shared().GetBytecodeArray(isolate_),
@@ -732,11 +758,12 @@ class StackTraceBuilder {
Handle<JSFunction> combinator) {
if (!IsVisibleInStackTrace(combinator)) return;
int flags =
- StackFrameInfo::kIsAsync | StackFrameInfo::kIsSourcePositionComputed;
+ CallSiteInfo::kIsAsync | CallSiteInfo::kIsSourcePositionComputed;
Handle<Object> receiver(combinator->native_context().promise_function(),
isolate_);
- Handle<Code> code(combinator->code(), isolate_);
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ Handle<Code> code(FromCodeT(combinator->code()), isolate_);
// TODO(mmarchini) save Promises list from the Promise combinator
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
@@ -756,8 +783,8 @@ class StackTraceBuilder {
int flags = 0;
Handle<JSFunction> function = summary.function();
- if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
- if (summary.is_constructor()) flags |= StackFrameInfo::kIsConstructor;
+ if (IsStrictFrame(function)) flags |= CallSiteInfo::kIsStrict;
+ if (summary.is_constructor()) flags |= CallSiteInfo::kIsConstructor;
AppendFrame(summary.receiver(), function, summary.abstract_code(),
summary.code_offset(), flags, summary.parameters());
@@ -767,11 +794,11 @@ class StackTraceBuilder {
void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
if (summary.code()->kind() != wasm::WasmCode::kWasmFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
- int flags = StackFrameInfo::kIsWasm;
+ int flags = CallSiteInfo::kIsWasm;
if (instance->module_object().is_asm_js()) {
- flags |= StackFrameInfo::kIsAsmJsWasm;
+ flags |= CallSiteInfo::kIsAsmJsWasm;
if (summary.at_to_number_conversion()) {
- flags |= StackFrameInfo::kIsAsmJsAtNumberConversion;
+ flags |= CallSiteInfo::kIsAsmJsAtNumberConversion;
}
}
@@ -785,38 +812,6 @@ class StackTraceBuilder {
}
#endif // V8_ENABLE_WEBASSEMBLY
- void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
- Handle<JSFunction> function(exit_frame->function(), isolate_);
- if (!IsVisibleInStackTrace(function)) return;
-
- // TODO(szuend): Remove this check once the flag is enabled
- // by default.
- if (!FLAG_experimental_stack_trace_frames &&
- function->shared().IsApiFunction()) {
- return;
- }
-
- Handle<Object> receiver(exit_frame->receiver(), isolate_);
- Handle<Code> code(exit_frame->LookupCode(), isolate_);
- const int offset =
- code->GetOffsetFromInstructionStart(isolate_, exit_frame->pc());
-
- int flags = 0;
- if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
- if (exit_frame->IsConstructor()) flags |= StackFrameInfo::kIsConstructor;
-
- Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
- if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
- int param_count = exit_frame->ComputeParametersCount();
- parameters = isolate_->factory()->NewFixedArray(param_count);
- for (int i = 0; i < param_count; i++) {
- parameters->set(i, exit_frame->GetParameter(i));
- }
- }
-
- AppendFrame(receiver, function, code, offset, flags, parameters);
- }
-
bool Full() { return index_ >= limit_; }
Handle<FixedArray> Build() {
@@ -839,8 +834,7 @@ class StackTraceBuilder {
// Determines whether the given stack frame should be displayed in a stack
// trace.
bool IsVisibleInStackTrace(Handle<JSFunction> function) {
- return ShouldIncludeFrame(function) && IsNotHidden(function) &&
- IsInSameSecurityContext(function);
+ return ShouldIncludeFrame(function) && IsNotHidden(function);
}
// This mechanism excludes a number of uninteresting frames from the stack
@@ -866,6 +860,12 @@ class StackTraceBuilder {
}
bool IsNotHidden(Handle<JSFunction> function) {
+ // TODO(szuend): Remove this check once the flag is enabled
+ // by default.
+ if (!FLAG_experimental_stack_trace_frames &&
+ function->shared().IsApiFunction()) {
+ return false;
+ }
// Functions defined not in user scripts are not visible unless directly
// exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
@@ -877,28 +877,17 @@ class StackTraceBuilder {
return true;
}
- bool IsInSameSecurityContext(Handle<JSFunction> function) {
- if (!check_security_context_) return true;
- return isolate_->context().HasSameSecurityTokenAs(function->context());
- }
-
void AppendFrame(Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code, int offset, int flags,
Handle<FixedArray> parameters) {
- DCHECK_LE(index_, elements_->length());
- DCHECK_LE(elements_->length(), limit_);
- if (index_ == elements_->length()) {
- elements_ = isolate_->factory()->CopyFixedArrayAndGrow(
- elements_, std::min(16, limit_ - elements_->length()));
- }
if (receiver_or_instance->IsTheHole(isolate_)) {
// TODO(jgruber): Fix all cases in which frames give us a hole value
// (e.g. the receiver in RegExp constructor frames).
receiver_or_instance = isolate_->factory()->undefined_value();
}
- auto info = isolate_->factory()->NewStackFrameInfo(
+ auto info = isolate_->factory()->NewCallSiteInfo(
receiver_or_instance, function, code, offset, flags, parameters);
- elements_->set(index_++, *info);
+ elements_ = FixedArray::SetAndGrow(isolate_, elements_, index_++, info);
}
Isolate* isolate_;
@@ -908,16 +897,16 @@ class StackTraceBuilder {
const Handle<Object> caller_;
bool skip_next_frame_;
bool encountered_strict_function_ = false;
- const bool check_security_context_;
Handle<FixedArray> elements_;
};
bool GetStackTraceLimit(Isolate* isolate, int* result) {
- DCHECK(!FLAG_correctness_fuzzer_suppressions);
+ if (FLAG_correctness_fuzzer_suppressions) return false;
Handle<JSObject> error = isolate->error_function();
Handle<String> key = isolate->factory()->stackTraceLimit_string();
- Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(error, key);
+ Handle<Object> stack_trace_limit =
+ JSReceiver::GetDataProperty(isolate, error, key);
if (!stack_trace_limit->IsNumber()) return false;
// Ensure that limit is not negative.
@@ -930,10 +919,6 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) {
return true;
}
-bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
-
-namespace {
-
bool IsBuiltinFunction(Isolate* isolate, HeapObject object, Builtin builtin) {
if (!object.IsJSFunction()) return false;
JSFunction const function = JSFunction::cast(object);
@@ -941,7 +926,7 @@ bool IsBuiltinFunction(Isolate* isolate, HeapObject object, Builtin builtin) {
}
void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
- StackTraceBuilder* builder) {
+ CallSiteBuilder* builder) {
while (!builder->Full()) {
// Check that the {promise} is not settled.
if (promise->status() != Promise::kPending) return;
@@ -1051,40 +1036,73 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
}
}
-struct CaptureStackTraceOptions {
- int limit;
- // 'filter_mode' and 'skip_mode' are somewhat orthogonal. 'filter_mode'
- // specifies whether to capture all frames, or just frames in the same
- // security context. While 'skip_mode' allows skipping the first frame.
- FrameSkipMode skip_mode;
- StackTraceBuilder::FrameFilterMode filter_mode;
-
- bool capture_builtin_exit_frames;
- bool capture_only_frames_subject_to_debugging;
- bool async_stack_trace;
-};
+void CaptureAsyncStackTrace(Isolate* isolate, CallSiteBuilder* builder) {
+ Handle<Object> current_microtask = isolate->factory()->current_microtask();
+ if (current_microtask->IsPromiseReactionJobTask()) {
+ Handle<PromiseReactionJobTask> promise_reaction_job_task =
+ Handle<PromiseReactionJobTask>::cast(current_microtask);
+ // Check if the {reaction} has one of the known async function or
+ // async generator continuations as its fulfill handler.
+ if (IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
+ Builtin::kAsyncFunctionAwaitResolveClosure) ||
+ IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
+ Builtin::kAsyncGeneratorAwaitResolveClosure) ||
+ IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
+ Builtin::kAsyncGeneratorYieldResolveClosure) ||
+ IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
+ Builtin::kAsyncFunctionAwaitRejectClosure) ||
+ IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
+ Builtin::kAsyncGeneratorAwaitRejectClosure)) {
+ // Now peek into the handlers' AwaitContext to get to
+ // the JSGeneratorObject for the async function.
+ Handle<Context> context(
+ JSFunction::cast(promise_reaction_job_task->handler()).context(),
+ isolate);
+ Handle<JSGeneratorObject> generator_object(
+ JSGeneratorObject::cast(context->extension()), isolate);
+ if (generator_object->is_executing()) {
+ if (generator_object->IsJSAsyncFunctionObject()) {
+ Handle<JSAsyncFunctionObject> async_function_object =
+ Handle<JSAsyncFunctionObject>::cast(generator_object);
+ Handle<JSPromise> promise(async_function_object->promise(), isolate);
+ CaptureAsyncStackTrace(isolate, promise, builder);
+ } else {
+ Handle<JSAsyncGeneratorObject> async_generator_object =
+ Handle<JSAsyncGeneratorObject>::cast(generator_object);
+ Handle<Object> queue(async_generator_object->queue(), isolate);
+ if (!queue->IsUndefined(isolate)) {
+ Handle<AsyncGeneratorRequest> async_generator_request =
+ Handle<AsyncGeneratorRequest>::cast(queue);
+ Handle<JSPromise> promise(
+ JSPromise::cast(async_generator_request->promise()), isolate);
+ CaptureAsyncStackTrace(isolate, promise, builder);
+ }
+ }
+ }
+ } else {
+ // The {promise_reaction_job_task} doesn't belong to an await (or
+ // yield inside an async generator), but we might still be able to
+ // find an async frame if we follow along the chain of promises on
+ // the {promise_reaction_job_task}.
+ Handle<HeapObject> promise_or_capability(
+ promise_reaction_job_task->promise_or_capability(), isolate);
+ if (promise_or_capability->IsJSPromise()) {
+ Handle<JSPromise> promise =
+ Handle<JSPromise>::cast(promise_or_capability);
+ CaptureAsyncStackTrace(isolate, promise, builder);
+ }
+ }
+ }
+}
-Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
- CaptureStackTraceOptions options) {
+template <typename Visitor>
+void VisitStack(Isolate* isolate, Visitor* visitor,
+ StackTrace::StackTraceOptions options = StackTrace::kDetailed) {
DisallowJavascriptExecution no_js(isolate);
-
- TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "CaptureStackTrace", "maxFrameCount", options.limit);
-
-#if V8_ENABLE_WEBASSEMBLY
- wasm::WasmCodeRefScope code_ref_scope;
-#endif // V8_ENABLE_WEBASSEMBLY
-
- StackTraceBuilder builder(isolate, options.skip_mode, options.limit, caller,
- options.filter_mode);
-
- // Build the regular stack trace, and remember the last relevant
- // frame ID and inlined index (for the async stack trace handling
- // below, which starts from this last frame).
- for (StackFrameIterator it(isolate); !it.done() && !builder.Full();
- it.Advance()) {
- StackFrame* const frame = it.frame();
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ StackFrame* frame = it.frame();
switch (frame->type()) {
+ case StackFrame::BUILTIN_EXIT:
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
@@ -1097,178 +1115,152 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
{
// A standard frame may include many summarized frames (due to
// inlining).
- std::vector<FrameSummary> frames;
- CommonFrame::cast(frame)->Summarize(&frames);
- for (size_t i = frames.size(); i-- != 0 && !builder.Full();) {
- auto& summary = frames[i];
- if (options.capture_only_frames_subject_to_debugging &&
- !summary.is_subject_to_debugging()) {
+ std::vector<FrameSummary> summaries;
+ CommonFrame::cast(frame)->Summarize(&summaries);
+ for (auto rit = summaries.rbegin(); rit != summaries.rend(); ++rit) {
+ FrameSummary& summary = *rit;
+ // Skip frames from other origins when asked to do so.
+ if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+ !summary.native_context()->HasSameSecurityTokenAs(
+ isolate->context())) {
continue;
}
-
- if (summary.IsJavaScript()) {
- //=========================================================
- // Handle a JavaScript frame.
- //=========================================================
- auto const& java_script = summary.AsJavaScript();
- builder.AppendJavaScriptFrame(java_script);
-#if V8_ENABLE_WEBASSEMBLY
- } else if (summary.IsWasm()) {
- //=========================================================
- // Handle a Wasm frame.
- //=========================================================
- auto const& wasm = summary.AsWasm();
- builder.AppendWasmFrame(wasm);
-#endif // V8_ENABLE_WEBASSEMBLY
- }
+ if (!visitor->Visit(summary)) return;
}
break;
}
- case StackFrame::BUILTIN_EXIT:
- if (!options.capture_builtin_exit_frames) continue;
-
- // BuiltinExitFrames are not standard frames, so they do not have
- // Summarize(). However, they may have one JS frame worth showing.
- builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame));
- break;
-
default:
break;
}
}
+}
+
+Handle<FixedArray> CaptureSimpleStackTrace(Isolate* isolate, int limit,
+ FrameSkipMode mode,
+ Handle<Object> caller) {
+ TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__,
+ "maxFrameCount", limit);
+
+#if V8_ENABLE_WEBASSEMBLY
+ wasm::WasmCodeRefScope code_ref_scope;
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CallSiteBuilder builder(isolate, mode, limit, caller);
+ VisitStack(isolate, &builder);
// If --async-stack-traces are enabled and the "current microtask" is a
// PromiseReactionJobTask, we try to enrich the stack trace with async
// frames.
- if (options.async_stack_trace) {
- Handle<Object> current_microtask = isolate->factory()->current_microtask();
- if (current_microtask->IsPromiseReactionJobTask()) {
- Handle<PromiseReactionJobTask> promise_reaction_job_task =
- Handle<PromiseReactionJobTask>::cast(current_microtask);
- // Check if the {reaction} has one of the known async function or
- // async generator continuations as its fulfill handler.
- if (IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
- Builtin::kAsyncFunctionAwaitResolveClosure) ||
- IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
- Builtin::kAsyncGeneratorAwaitResolveClosure) ||
- IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
- Builtin::kAsyncGeneratorYieldResolveClosure) ||
- IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
- Builtin::kAsyncFunctionAwaitRejectClosure) ||
- IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
- Builtin::kAsyncGeneratorAwaitRejectClosure)) {
- // Now peek into the handlers' AwaitContext to get to
- // the JSGeneratorObject for the async function.
- Handle<Context> context(
- JSFunction::cast(promise_reaction_job_task->handler()).context(),
- isolate);
- Handle<JSGeneratorObject> generator_object(
- JSGeneratorObject::cast(context->extension()), isolate);
- if (generator_object->is_executing()) {
- if (generator_object->IsJSAsyncFunctionObject()) {
- Handle<JSAsyncFunctionObject> async_function_object =
- Handle<JSAsyncFunctionObject>::cast(generator_object);
- Handle<JSPromise> promise(async_function_object->promise(),
- isolate);
- CaptureAsyncStackTrace(isolate, promise, &builder);
- } else {
- Handle<JSAsyncGeneratorObject> async_generator_object =
- Handle<JSAsyncGeneratorObject>::cast(generator_object);
- Handle<Object> queue(async_generator_object->queue(), isolate);
- if (!queue->IsUndefined(isolate)) {
- Handle<AsyncGeneratorRequest> async_generator_request =
- Handle<AsyncGeneratorRequest>::cast(queue);
- Handle<JSPromise> promise(
- JSPromise::cast(async_generator_request->promise()), isolate);
- CaptureAsyncStackTrace(isolate, promise, &builder);
- }
- }
- }
- } else {
- // The {promise_reaction_job_task} doesn't belong to an await (or
- // yield inside an async generator), but we might still be able to
- // find an async frame if we follow along the chain of promises on
- // the {promise_reaction_job_task}.
- Handle<HeapObject> promise_or_capability(
- promise_reaction_job_task->promise_or_capability(), isolate);
- if (promise_or_capability->IsJSPromise()) {
- Handle<JSPromise> promise =
- Handle<JSPromise>::cast(promise_or_capability);
- CaptureAsyncStackTrace(isolate, promise, &builder);
- }
- }
- }
+ if (FLAG_async_stack_traces) {
+ CaptureAsyncStackTrace(isolate, &builder);
}
Handle<FixedArray> stack_trace = builder.Build();
- TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "CaptureStackTrace", "frameCount", stack_trace->length());
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__,
+ "frameCount", stack_trace->length());
return stack_trace;
}
} // namespace
-Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
- FrameSkipMode mode,
- Handle<Object> caller) {
- int limit;
- if (FLAG_correctness_fuzzer_suppressions ||
- !GetStackTraceLimit(this, &limit)) {
- return factory()->undefined_value();
+MaybeHandle<JSObject> Isolate::CaptureAndSetErrorStack(
+ Handle<JSObject> error_object, FrameSkipMode mode, Handle<Object> caller) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__);
+ Handle<Object> error_stack = factory()->undefined_value();
+
+ // Capture the "simple stack trace" for the error.stack property,
+ // which can be disabled by setting Error.stackTraceLimit to a non
+ // number value or simply deleting the property. If the inspector
+ // is active, and requests more stack frames than the JavaScript
+ // program itself, we collect up to the maximum.
+ int stack_trace_limit = 0;
+ if (GetStackTraceLimit(this, &stack_trace_limit)) {
+ int limit = stack_trace_limit;
+ if (capture_stack_trace_for_uncaught_exceptions_ &&
+ !(stack_trace_for_uncaught_exceptions_options_ &
+ StackTrace::kExposeFramesAcrossSecurityOrigins)) {
+ // Collect up to the maximum of what the JavaScript program and
+ // the inspector want. There's a special case here where the API
+ // can ask the stack traces to also include cross-origin frames,
+ // in which case we collect a separate trace below. Note that
+ // the inspector doesn't use this option, so we could as well
+ // just deprecate this in the future.
+ if (limit < stack_trace_for_uncaught_exceptions_frame_limit_) {
+ limit = stack_trace_for_uncaught_exceptions_frame_limit_;
+ }
+ }
+ error_stack = CaptureSimpleStackTrace(this, limit, mode, caller);
}
- CaptureStackTraceOptions options;
- options.limit = limit;
- options.skip_mode = mode;
- options.capture_builtin_exit_frames = true;
- options.async_stack_trace = FLAG_async_stack_traces;
- options.filter_mode = StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
- options.capture_only_frames_subject_to_debugging = false;
-
- return CaptureStackTrace(this, caller, options);
-}
-
-MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
- Handle<JSReceiver> error_object) {
+ // Next is the inspector part: Depending on whether we got a "simple
+ // stack trace" above and whether that's usable (meaning the API
+ // didn't request to include cross-origin frames), we remember the
+ // cap for the stack trace (either a positive limit indicating that
+ // the Error.stackTraceLimit value was below what was requested via
+ // the API, or a negative limit to indicate the opposite), or we
+ // collect a "detailed stack trace" eagerly and stash that away.
if (capture_stack_trace_for_uncaught_exceptions_) {
- // Capture stack trace for a detailed exception message.
- Handle<Name> key = factory()->detailed_stack_trace_symbol();
- Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit_,
- stack_trace_for_uncaught_exceptions_options_);
- RETURN_ON_EXCEPTION(
- this,
- Object::SetProperty(this, error_object, key, stack_trace,
- StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError)),
- JSReceiver);
+ Handle<Object> limit_or_stack_frame_infos;
+ if (error_stack->IsUndefined(this) ||
+ (stack_trace_for_uncaught_exceptions_options_ &
+ StackTrace::kExposeFramesAcrossSecurityOrigins)) {
+ limit_or_stack_frame_infos = CaptureDetailedStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit_,
+ stack_trace_for_uncaught_exceptions_options_);
+ } else {
+ int limit =
+ stack_trace_limit > stack_trace_for_uncaught_exceptions_frame_limit_
+ ? -stack_trace_for_uncaught_exceptions_frame_limit_
+ : stack_trace_limit;
+ limit_or_stack_frame_infos = handle(Smi::FromInt(limit), this);
+ }
+ error_stack =
+ factory()->NewErrorStackData(error_stack, limit_or_stack_frame_infos);
}
- return error_object;
-}
-MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
- Handle<JSReceiver> error_object, FrameSkipMode mode,
- Handle<Object> caller) {
- // Capture stack trace for simple stack trace string formatting.
- Handle<Name> key = factory()->stack_trace_symbol();
- Handle<Object> stack_trace =
- CaptureSimpleStackTrace(error_object, mode, caller);
- RETURN_ON_EXCEPTION(this,
- Object::SetProperty(this, error_object, key, stack_trace,
- StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError)),
- JSReceiver);
+ RETURN_ON_EXCEPTION(
+ this,
+ JSObject::SetProperty(this, error_object, factory()->error_stack_symbol(),
+ error_stack, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
+ JSObject);
return error_object;
}
Handle<FixedArray> Isolate::GetDetailedStackTrace(
- Handle<JSObject> error_object) {
- Handle<Name> key_detailed = factory()->detailed_stack_trace_symbol();
- Handle<Object> stack_trace =
- JSReceiver::GetDataProperty(error_object, key_detailed);
- if (stack_trace->IsFixedArray()) return Handle<FixedArray>::cast(stack_trace);
- return Handle<FixedArray>();
+ Handle<JSReceiver> error_object) {
+ Handle<Object> error_stack = JSReceiver::GetDataProperty(
+ this, error_object, factory()->error_stack_symbol());
+ if (!error_stack->IsErrorStackData()) {
+ return Handle<FixedArray>();
+ }
+ Handle<ErrorStackData> error_stack_data =
+ Handle<ErrorStackData>::cast(error_stack);
+ ErrorStackData::EnsureStackFrameInfos(this, error_stack_data);
+ if (!error_stack_data->limit_or_stack_frame_infos().IsFixedArray()) {
+ return Handle<FixedArray>();
+ }
+ return handle(
+ FixedArray::cast(error_stack_data->limit_or_stack_frame_infos()), this);
+}
+
+Handle<FixedArray> Isolate::GetSimpleStackTrace(
+ Handle<JSReceiver> error_object) {
+ Handle<Object> error_stack = JSReceiver::GetDataProperty(
+ this, error_object, factory()->error_stack_symbol());
+ if (error_stack->IsFixedArray()) {
+ return Handle<FixedArray>::cast(error_stack);
+ }
+ if (!error_stack->IsErrorStackData()) {
+ return factory()->empty_fixed_array();
+ }
+ Handle<ErrorStackData> error_stack_data =
+ Handle<ErrorStackData>::cast(error_stack);
+ if (!error_stack_data->HasCallSiteInfos()) {
+ return factory()->empty_fixed_array();
+ }
+ return handle(error_stack_data->call_site_infos(), this);
}
Address Isolate::GetAbstractPC(int* line, int* column) {
@@ -1308,20 +1300,90 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
return frame->pc();
}
-Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions stack_trace_options) {
- CaptureStackTraceOptions options;
- options.limit = std::max(frame_limit, 0); // Ensure no negative values.
- options.skip_mode = SKIP_NONE;
- options.capture_builtin_exit_frames = false;
- options.async_stack_trace = false;
- options.filter_mode =
- (stack_trace_options & StackTrace::kExposeFramesAcrossSecurityOrigins)
- ? StackTraceBuilder::ALL
- : StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
- options.capture_only_frames_subject_to_debugging = true;
+namespace {
+
+class StackFrameBuilder {
+ public:
+ StackFrameBuilder(Isolate* isolate, int limit)
+ : isolate_(isolate),
+ frames_(isolate_->factory()->empty_fixed_array()),
+ index_(0),
+ limit_(limit) {}
+
+ bool Visit(FrameSummary& summary) {
+ // Check if we have enough capacity left.
+ if (index_ >= limit_) return false;
+ // Skip frames that aren't subject to debugging.
+ if (!summary.is_subject_to_debugging()) return true;
+ Handle<StackFrameInfo> frame = summary.CreateStackFrameInfo();
+ frames_ = FixedArray::SetAndGrow(isolate_, frames_, index_++, frame);
+ return true;
+ }
- return CaptureStackTrace(this, factory()->undefined_value(), options);
+ Handle<FixedArray> Build() {
+ return FixedArray::ShrinkOrEmpty(isolate_, frames_, index_);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<FixedArray> frames_;
+ int index_;
+ int limit_;
+};
+
+} // namespace
+
+Handle<FixedArray> Isolate::CaptureDetailedStackTrace(
+ int limit, StackTrace::StackTraceOptions options) {
+ TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__,
+ "maxFrameCount", limit);
+ StackFrameBuilder builder(this, limit);
+ VisitStack(this, &builder, options);
+ Handle<FixedArray> stack_trace = builder.Build();
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__,
+ "frameCount", stack_trace->length());
+ return stack_trace;
+}
+
+namespace {
+
+class CurrentScriptNameStackVisitor {
+ public:
+ explicit CurrentScriptNameStackVisitor(Isolate* isolate)
+ : isolate_(isolate) {}
+
+ bool Visit(FrameSummary& summary) {
+ // Skip frames that aren't subject to debugging. Keep this in sync with
+ // StackFrameBuilder::Visit so both visitors visit the same frames.
+ if (!summary.is_subject_to_debugging()) return true;
+
+ // Frames that are subject to debugging always have a valid script object.
+ Handle<Script> script = Handle<Script>::cast(summary.script());
+ Handle<Object> name_or_url_obj =
+ handle(script->GetNameOrSourceURL(), isolate_);
+ if (!name_or_url_obj->IsString()) return true;
+
+ Handle<String> name_or_url = Handle<String>::cast(name_or_url_obj);
+ if (!name_or_url->length()) return true;
+
+ name_or_url_ = name_or_url;
+ return false;
+ }
+
+ Handle<String> CurrentScriptNameOrSourceURL() const { return name_or_url_; }
+
+ private:
+ Isolate* const isolate_;
+ Handle<String> name_or_url_;
+};
+
+} // namespace
+
+Handle<String> Isolate::CurrentScriptNameOrSourceURL() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__);
+ CurrentScriptNameStackVisitor visitor(this);
+ VisitStack(this, &visitor);
+ return visitor.CurrentScriptNameOrSourceURL();
}
void Isolate::PrintStack(FILE* out, PrintStackMode mode) {
@@ -1489,7 +1551,7 @@ Object Isolate::StackOverflow() {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
this, exception,
ErrorUtils::Construct(this, fun, fun, msg, options, SKIP_NONE, no_caller,
- ErrorUtils::StackTraceCollection::kSimple));
+ ErrorUtils::StackTraceCollection::kEnabled));
JSObject::AddProperty(this, exception, factory()->wasm_uncatchable_symbol(),
factory()->true_value(), NONE);
@@ -1790,6 +1852,8 @@ class SetThreadInWasmFlagScope {
} // namespace
Object Isolate::UnwindAndFindHandler() {
+ // TODO(v8:12676): Fix gcmole failures in this function.
+ DisableGCMole no_gcmole;
#if V8_ENABLE_WEBASSEMBLY
// Create the {SetThreadInWasmFlagScope} first in this function so that its
// destructor gets called after all the other destructors. It is important
@@ -1804,7 +1868,7 @@ Object Isolate::UnwindAndFindHandler() {
auto FoundHandler = [&](Context context, Address instruction_start,
intptr_t handler_offset,
Address constant_pool_address, Address handler_sp,
- Address handler_fp) {
+ Address handler_fp, int num_frames_above_handler) {
// Store information to be consumed by the CEntry.
thread_local_top()->pending_handler_context_ = context;
thread_local_top()->pending_handler_entrypoint_ =
@@ -1812,6 +1876,8 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->pending_handler_constant_pool_ = constant_pool_address;
thread_local_top()->pending_handler_fp_ = handler_fp;
thread_local_top()->pending_handler_sp_ = handler_sp;
+ thread_local_top()->num_frames_above_pending_handler_ =
+ num_frames_above_handler;
// Return and clear pending exception. The contract is that:
// (1) the pending exception is stored in one place (no duplication), and
@@ -1826,10 +1892,11 @@ Object Isolate::UnwindAndFindHandler() {
// Special handling of termination exceptions, uncatchable by JavaScript and
// Wasm code, we unwind the handlers until the top ENTRY handler is found.
bool catchable_by_js = is_catchable_by_javascript(exception);
+ int visited_frames = 0;
// Compute handler and stack unwinding information by performing a full walk
// over the stack and dispatching according to the frame type.
- for (StackFrameIterator iter(this);; iter.Advance()) {
+ for (StackFrameIterator iter(this);; iter.Advance(), visited_frames++) {
// Handler must exist.
DCHECK(!iter.done());
@@ -1850,7 +1917,7 @@ Object Isolate::UnwindAndFindHandler() {
return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
table.LookupReturn(0), code.constant_pool(),
handler->address() + StackHandlerConstants::kSize,
- 0);
+ 0, visited_frames);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -1869,7 +1936,8 @@ Object Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
code.stack_slots() * kSystemPointerSize;
return FoundHandler(Context(), instruction_start, handler_offset,
- code.constant_pool(), return_sp, frame->fp());
+ code.constant_pool(), return_sp, frame->fp(),
+ visited_frames);
}
case StackFrame::WASM: {
@@ -1897,7 +1965,8 @@ Object Isolate::UnwindAndFindHandler() {
// destructors have been executed.
set_thread_in_wasm_flag_scope.Enable();
return FoundHandler(Context(), wasm_code->instruction_start(), offset,
- wasm_code->constant_pool(), return_sp, frame->fp());
+ wasm_code->constant_pool(), return_sp, frame->fp(),
+ visited_frames);
}
case StackFrame::WASM_COMPILE_LAZY: {
@@ -1934,7 +2003,7 @@ Object Isolate::UnwindAndFindHandler() {
return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
offset, code.constant_pool(), return_sp,
- frame->fp());
+ frame->fp(), visited_frames);
}
case StackFrame::STUB: {
@@ -1962,7 +2031,7 @@ Object Isolate::UnwindAndFindHandler() {
return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
offset, code.constant_pool(), return_sp,
- frame->fp());
+ frame->fp(), visited_frames);
}
case StackFrame::INTERPRETED:
@@ -2002,14 +2071,24 @@ Object Isolate::UnwindAndFindHandler() {
sp_frame->PatchContext(context);
return FoundHandler(
Context(), code.InstructionStart(this, sp_frame->sp()), pc_offset,
- code.constant_pool(), return_sp, sp_frame->fp());
+ code.constant_pool(), return_sp, sp_frame->fp(), visited_frames);
} else {
InterpretedFrame::cast(js_frame)->PatchBytecodeOffset(
static_cast<int>(offset));
- Code code = builtins()->code(Builtin::kInterpreterEnterAtBytecode);
+ Code code =
+ FromCodeT(builtins()->code(Builtin::kInterpreterEnterAtBytecode));
+ // We subtract a frame from visited_frames because otherwise the
+ // shadow stack will drop the underlying interpreter entry trampoline
+ // in which the handler runs.
+ //
+ // An interpreted frame cannot be the first frame we look at
+ // because at a minimum, an exit frame into C++ has to separate
+ // it and the context in which this C++ code runs.
+ CHECK_GE(visited_frames, 1);
return FoundHandler(context, code.InstructionStart(), 0,
- code.constant_pool(), return_sp, frame->fp());
+ code.constant_pool(), return_sp, frame->fp(),
+ visited_frames - 1);
}
}
@@ -2032,7 +2111,8 @@ Object Isolate::UnwindAndFindHandler() {
Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
Code code = js_frame->LookupCode();
return FoundHandler(Context(), code.InstructionStart(), 0,
- code.constant_pool(), return_sp, frame->fp());
+ code.constant_pool(), return_sp, frame->fp(),
+ visited_frames);
}
default:
@@ -2227,21 +2307,13 @@ Object Isolate::PromoteScheduledException() {
}
void Isolate::PrintCurrentStackTrace(std::ostream& out) {
- CaptureStackTraceOptions options;
- options.limit = 0;
- options.skip_mode = SKIP_NONE;
- options.capture_builtin_exit_frames = true;
- options.async_stack_trace = FLAG_async_stack_traces;
- options.filter_mode = StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
- options.capture_only_frames_subject_to_debugging = false;
-
- Handle<FixedArray> frames =
- CaptureStackTrace(this, this->factory()->undefined_value(), options);
+ Handle<FixedArray> frames = CaptureSimpleStackTrace(
+ this, FixedArray::kMaxLength, SKIP_NONE, factory()->undefined_value());
IncrementalStringBuilder builder(this);
for (int i = 0; i < frames->length(); ++i) {
- Handle<StackFrameInfo> frame(StackFrameInfo::cast(frames->get(i)), this);
- SerializeStackFrameInfo(this, frame, &builder);
+ Handle<CallSiteInfo> frame(CallSiteInfo::cast(frames->get(i)), this);
+ SerializeCallSiteInfo(this, frame, &builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2284,19 +2356,19 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
Handle<Name> start_pos_symbol = factory()->error_start_pos_symbol();
Handle<Object> start_pos = JSReceiver::GetDataProperty(
- Handle<JSObject>::cast(exception), start_pos_symbol);
+ this, Handle<JSObject>::cast(exception), start_pos_symbol);
if (!start_pos->IsSmi()) return false;
int start_pos_value = Handle<Smi>::cast(start_pos)->value();
Handle<Name> end_pos_symbol = factory()->error_end_pos_symbol();
Handle<Object> end_pos = JSReceiver::GetDataProperty(
- Handle<JSObject>::cast(exception), end_pos_symbol);
+ this, Handle<JSObject>::cast(exception), end_pos_symbol);
if (!end_pos->IsSmi()) return false;
int end_pos_value = Handle<Smi>::cast(end_pos)->value();
Handle<Name> script_symbol = factory()->error_script_symbol();
Handle<Object> script = JSReceiver::GetDataProperty(
- Handle<JSObject>::cast(exception), script_symbol);
+ this, Handle<JSObject>::cast(exception), script_symbol);
if (!script->IsScript()) return false;
Handle<Script> cast_script(Script::cast(*script), this);
@@ -2304,21 +2376,40 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
return true;
}
-bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
- Handle<Object> exception) {
- if (!exception->IsJSObject()) return false;
- Handle<Name> key = factory()->stack_trace_symbol();
- Handle<Object> property =
- JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
- if (!property->IsFixedArray()) return false;
- Handle<FixedArray> stack = Handle<FixedArray>::cast(property);
- for (int i = 0; i < stack->length(); i++) {
- Handle<StackFrameInfo> frame(StackFrameInfo::cast(stack->get(i)), this);
- if (StackFrameInfo::ComputeLocation(frame, target)) return true;
+bool Isolate::ComputeLocationFromSimpleStackTrace(MessageLocation* target,
+ Handle<Object> exception) {
+ if (!exception->IsJSReceiver()) {
+ return false;
+ }
+ Handle<FixedArray> call_site_infos =
+ GetSimpleStackTrace(Handle<JSReceiver>::cast(exception));
+ for (int i = 0; i < call_site_infos->length(); ++i) {
+ Handle<CallSiteInfo> call_site_info(
+ CallSiteInfo::cast(call_site_infos->get(i)), this);
+ if (CallSiteInfo::ComputeLocation(call_site_info, target)) {
+ return true;
+ }
}
return false;
}
+bool Isolate::ComputeLocationFromDetailedStackTrace(MessageLocation* target,
+ Handle<Object> exception) {
+ if (!exception->IsJSReceiver()) return false;
+
+ Handle<FixedArray> stack_frame_infos =
+ GetDetailedStackTrace(Handle<JSReceiver>::cast(exception));
+ if (stack_frame_infos.is_null() || stack_frame_infos->length() == 0) {
+ return false;
+ }
+
+ Handle<StackFrameInfo> info(StackFrameInfo::cast(stack_frame_infos->get(0)),
+ this);
+ const int pos = StackFrameInfo::GetSourcePosition(info);
+ *target = MessageLocation(handle(info->script(), this), pos, pos + 1);
+ return true;
+}
+
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<FixedArray> stack_trace_object;
@@ -2333,7 +2424,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
}
if (stack_trace_object.is_null()) {
// Not an error object, we capture stack and location at throw site.
- stack_trace_object = CaptureCurrentStackTrace(
+ stack_trace_object = CaptureDetailedStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
}
@@ -2341,7 +2432,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation computed_location;
if (location == nullptr &&
(ComputeLocationFromException(&computed_location, exception) ||
- ComputeLocationFromStackTrace(&computed_location, exception) ||
+ ComputeLocationFromSimpleStackTrace(&computed_location, exception) ||
ComputeLocation(&computed_location))) {
location = &computed_location;
}
@@ -2351,6 +2442,26 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
stack_trace_object);
}
+Handle<JSMessageObject> Isolate::CreateMessageFromException(
+ Handle<Object> exception) {
+ Handle<FixedArray> stack_trace_object;
+ if (exception->IsJSError()) {
+ stack_trace_object =
+ GetDetailedStackTrace(Handle<JSObject>::cast(exception));
+ }
+
+ MessageLocation* location = nullptr;
+ MessageLocation computed_location;
+ if (ComputeLocationFromException(&computed_location, exception) ||
+ ComputeLocationFromDetailedStackTrace(&computed_location, exception)) {
+ location = &computed_location;
+ }
+
+ return MessageHandler::MakeMessageObject(
+ this, MessageTemplate::kPlaceholderOnly, location, exception,
+ stack_trace_object);
+}
+
Isolate::ExceptionHandlerType Isolate::TopExceptionHandlerType(
Object exception) {
DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
@@ -2495,14 +2606,15 @@ void Isolate::PushPromise(Handle<JSObject> promise) {
tltop->promise_on_stack_ = new PromiseOnStack(global_promise, prev);
}
-void Isolate::PopPromise() {
+bool Isolate::PopPromise() {
ThreadLocalTop* tltop = thread_local_top();
- if (tltop->promise_on_stack_ == nullptr) return;
+ if (tltop->promise_on_stack_ == nullptr) return false;
PromiseOnStack* prev = tltop->promise_on_stack_->prev();
Handle<Object> global_promise = tltop->promise_on_stack_->promise();
delete tltop->promise_on_stack_;
tltop->promise_on_stack_ = prev;
global_handles()->Destroy(global_promise.location());
+ return true;
}
namespace {
@@ -2515,7 +2627,8 @@ bool PromiseIsRejectHandler(Isolate* isolate, Handle<JSReceiver> handler) {
// has a dependency edge to the generated outer Promise.
// Otherwise, this is a real reject handler for the Promise.
Handle<Symbol> key = isolate->factory()->promise_forwarding_handler_symbol();
- Handle<Object> forwarding_handler = JSReceiver::GetDataProperty(handler, key);
+ Handle<Object> forwarding_handler =
+ JSReceiver::GetDataProperty(isolate, handler, key);
return forwarding_handler->IsUndefined(isolate);
}
@@ -2559,7 +2672,8 @@ bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise) {
if (promise->status() == Promise::kPending) {
promises.push(promise);
}
- Handle<Object> outer_promise_obj = JSObject::GetDataProperty(promise, key);
+ Handle<Object> outer_promise_obj =
+ JSObject::GetDataProperty(this, promise, key);
if (!outer_promise_obj->IsJSPromise()) break;
promise = Handle<JSPromise>::cast(outer_promise_obj);
}
@@ -2661,7 +2775,8 @@ void Isolate::InstallConditionalFeatures(Handle<Context> context) {
Handle<JSGlobalObject> global = handle(context->global_object(), this);
Handle<String> sab_name = factory()->SharedArrayBuffer_string();
if (IsSharedArrayBufferConstructorEnabled(context)) {
- if (!JSObject::HasRealNamedProperty(global, sab_name).FromMaybe(true)) {
+ if (!JSObject::HasRealNamedProperty(this, global, sab_name)
+ .FromMaybe(true)) {
JSObject::AddProperty(this, global, factory()->SharedArrayBuffer_string(),
shared_array_buffer_fun(), DONT_ENUM);
}
@@ -2779,7 +2894,7 @@ void Isolate::ReleaseSharedPtrs() {
bool Isolate::IsBuiltinTableHandleLocation(Address* handle_location) {
FullObjectSlot location(handle_location);
FullObjectSlot first_root(builtin_table());
- FullObjectSlot last_root(builtin_table() + Builtins::kBuiltinCount);
+ FullObjectSlot last_root(first_root + Builtins::kBuiltinCount);
if (location >= last_root) return false;
if (location < first_root) return false;
return true;
@@ -3107,17 +3222,19 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
handle_scope_data_.Initialize();
- // When pointer compression is on with a per-Isolate cage, allocation in the
- // shared Isolate can point into the per-Isolate RO heap as the offsets are
- // constant across Isolates.
+ // A shared Isolate is used to support JavaScript shared memory features
+ // across Isolates. These features require all of the following to hold in the
+ // build configuration:
//
- // When pointer compression is on with a shared cage or when pointer
- // compression is off, a shared RO heap is required. Otherwise a shared
- // allocation requested by a client Isolate could point into the client
- // Isolate's RO space (e.g. an RO map) whose pages gets unmapped when it is
- // disposed.
- CHECK_IMPLIES(is_shared_, COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
- V8_SHARED_RO_HEAP_BOOL);
+ // 1. The RO space is shared, so e.g. immortal RO maps can be shared across
+ // Isolates.
+ // 2. HeapObjects are shareable across Isolates, which requires either
+ // pointers to be uncompressed (!COMPRESS_POINTER_BOOL), or that there is a
+ // single virtual memory reservation shared by all Isolates in the process
+ // for compressing pointers (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL).
+ CHECK_IMPLIES(is_shared_, V8_SHARED_RO_HEAP_BOOL &&
+ (!COMPRESS_POINTERS_BOOL ||
+ COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL));
#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
name##_ = (initial_value);
@@ -3167,13 +3284,13 @@ void Isolate::CheckIsolateLayout() {
STATIC_ASSERT(Internals::kBuiltinTier0EntryTableSize ==
Builtins::kBuiltinTier0Count * kSystemPointerSize);
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
Internals::kExternalPointerTableBufferOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, length_)),
- Internals::kExternalPointerTableLengthOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
Internals::kExternalPointerTableCapacityOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, freelist_head_)),
+ Internals::kExternalPointerTableFreelistHeadOffset);
#endif
}
@@ -3255,10 +3372,15 @@ void Isolate::Deinit() {
cancelable_task_manager()->CancelAndWait();
}
- // Cancel all baseline compiler tasks.
+ // Cancel all compiler tasks.
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
+#ifdef V8_ENABLE_MAGLEV
+ delete maglev_concurrent_dispatcher_;
+ maglev_concurrent_dispatcher_ = nullptr;
+#endif // V8_ENABLE_MAGLEV
+
if (lazy_compile_dispatcher_) {
lazy_compile_dispatcher_->AbortAll();
lazy_compile_dispatcher_.reset();
@@ -3279,9 +3401,9 @@ void Isolate::Deinit() {
builtins_.TearDown();
bootstrapper_->TearDown();
- if (runtime_profiler_ != nullptr) {
- delete runtime_profiler_;
- runtime_profiler_ = nullptr;
+ if (tiering_manager_ != nullptr) {
+ delete tiering_manager_;
+ tiering_manager_ = nullptr;
}
delete heap_profiler_;
@@ -3330,6 +3452,10 @@ void Isolate::Deinit() {
ClearSerializerData();
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ external_pointer_table().TearDown();
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+
{
base::MutexGuard lock_guard(&thread_data_table_mutex_);
thread_data_table_.RemoveAllThreads();
@@ -3503,15 +3629,13 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Address instruction_start = d.InstructionStartOfBuiltin(builtin);
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
- builtins->code_handle(builtin), instruction_start);
+ FromCodeT(builtins->code_handle(builtin), isolate), instruction_start);
// From this point onwards, the old builtin code object is unreachable and
// will be collected by the next GC.
- builtins->set_code(builtin, *trampoline);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- builtins->set_codet(builtin, ToCodeT(*trampoline));
- }
+ builtins->set_code(builtin, ToCodeT(*trampoline));
}
}
@@ -3764,6 +3888,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
this, V8::GetCurrentPlatform(), FLAG_stack_size);
}
baseline_batch_compiler_ = new baseline::BaselineBatchCompiler(this);
+#ifdef V8_ENABLE_MAGLEV
+ maglev_concurrent_dispatcher_ = new maglev::MaglevConcurrentDispatcher(this);
+#endif // V8_ENABLE_MAGLEV
#if USE_SIMULATOR
simulator_data_ = new SimulatorData;
@@ -3851,6 +3978,10 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
isolate_data_.external_reference_table()->Init(this);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ external_pointer_table().Init(this);
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+
#if V8_ENABLE_WEBASSEMBLY
wasm::GetWasmEngine()->AddIsolate(this);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -3894,7 +4025,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// this at mksnapshot-time, but not at runtime.
// See also: https://crbug.com/v8/8713.
heap_.SetInterpreterEntryTrampolineForProfiling(
- builtins()->code(Builtin::kInterpreterEntryTrampoline));
+ FromCodeT(builtins()->code(Builtin::kInterpreterEntryTrampoline)));
#endif
builtins_constants_table_builder_->Finalize();
@@ -3921,9 +4052,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
}
- // Initialize runtime profiler before deserialization, because collections may
- // occur, clearing/updating ICs.
- runtime_profiler_ = new RuntimeProfiler(this);
+ // Initialize before deserialization since collections may occur,
+ // clearing/updating ICs (and thus affecting tiering decisions).
+ tiering_manager_ = new TieringManager(this);
// If we are deserializing, read the state into the now-empty heap.
{
@@ -4193,6 +4324,7 @@ CodeTracer* Isolate::GetCodeTracer() {
}
bool Isolate::use_optimizer() {
+ // TODO(v8:7700): Update this predicate for a world with multiple tiers.
return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
!is_precise_count_code_coverage();
}
@@ -4363,16 +4495,16 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
Handle<String> name, bool private_symbol) {
Handle<String> key = factory()->InternalizeString(name);
- Handle<NameDictionary> dictionary =
- Handle<NameDictionary>::cast(root_handle(dictionary_index));
+ Handle<RegisteredSymbolTable> dictionary =
+ Handle<RegisteredSymbolTable>::cast(root_handle(dictionary_index));
InternalIndex entry = dictionary->FindEntry(this, key);
Handle<Symbol> symbol;
if (entry.is_not_found()) {
symbol =
private_symbol ? factory()->NewPrivateSymbol() : factory()->NewSymbol();
symbol->set_description(*key);
- dictionary = NameDictionary::Add(this, dictionary, key, symbol,
- PropertyDetails::Empty(), &entry);
+ dictionary = RegisteredSymbolTable::Add(this, dictionary, key, symbol);
+
switch (dictionary_index) {
case RootIndex::kPublicSymbolTable:
symbol->set_is_in_public_symbol_table(true);
@@ -4661,6 +4793,32 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
host_initialize_import_meta_object_callback_ = callback;
}
+void Isolate::SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallback callback) {
+ host_create_shadow_realm_context_callback_ = callback;
+}
+
+MaybeHandle<NativeContext> Isolate::RunHostCreateShadowRealmContextCallback() {
+ if (host_create_shadow_realm_context_callback_ == nullptr) {
+ Handle<Object> exception =
+ factory()->NewError(error_function(), MessageTemplate::kUnsupported);
+ Throw(*exception);
+ return kNullMaybeHandle;
+ }
+
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(Handle<Context>(native_context()));
+ v8::Local<v8::Context> shadow_realm_context;
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, shadow_realm_context,
+ host_create_shadow_realm_context_callback_(api_context),
+ MaybeHandle<NativeContext>());
+ Handle<Context> shadow_realm_context_handle =
+ v8::Utils::OpenHandle(*shadow_realm_context);
+ DCHECK(shadow_realm_context_handle->IsNativeContext());
+ return Handle<NativeContext>::cast(shadow_realm_context_handle);
+}
+
MaybeHandle<Object> Isolate::RunPrepareStackTraceCallback(
Handle<Context> context, Handle<JSObject> error, Handle<JSArray> sites) {
v8::Local<v8::Context> api_context = Utils::ToLocal(context);
@@ -4775,80 +4933,137 @@ void Isolate::RunAllPromiseHooks(PromiseHookType type,
void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent) {
- RunPromiseHookForAsyncEventDelegate(type, promise);
if (!HasIsolatePromiseHooks()) return;
DCHECK(promise_hook_ != nullptr);
promise_hook_(type, v8::Utils::PromiseToLocal(promise),
v8::Utils::ToLocal(parent));
}
-void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
- Handle<JSPromise> promise) {
+void Isolate::OnAsyncFunctionSuspended(Handle<JSPromise> promise,
+ Handle<JSPromise> parent) {
+ DCHECK_EQ(0, promise->async_task_id());
+ RunPromiseHook(PromiseHookType::kInit, promise, parent);
+ if (HasAsyncEventDelegate()) {
+ DCHECK_NE(nullptr, async_event_delegate_);
+ promise->set_async_task_id(++async_task_count_);
+ async_event_delegate_->AsyncEventOccurred(debug::kDebugAwait,
+ promise->async_task_id(), false);
+ }
+ if (debug()->is_active()) {
+ // We are about to suspend execution of the current async function,
+ // so pop the outer promise from the isolate's promise stack.
+ PopPromise();
+ }
+}
+
+void Isolate::OnPromiseThen(Handle<JSPromise> promise) {
if (!HasAsyncEventDelegate()) return;
- DCHECK(async_event_delegate_ != nullptr);
- switch (type) {
- case PromiseHookType::kResolve:
+ Maybe<debug::DebugAsyncActionType> action_type =
+ Nothing<debug::DebugAsyncActionType>();
+ for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
+ std::vector<Handle<SharedFunctionInfo>> infos;
+ it.frame()->GetFunctions(&infos);
+ for (auto it = infos.rbegin(); it != infos.rend(); ++it) {
+ Handle<SharedFunctionInfo> info = *it;
+ if (info->HasBuiltinId()) {
+ // We should not report PromiseThen and PromiseCatch which is called
+ // indirectly, e.g. Promise.all calls Promise.then internally.
+ switch (info->builtin_id()) {
+ case Builtin::kPromisePrototypeCatch:
+ action_type = Just(debug::kDebugPromiseCatch);
+ continue;
+ case Builtin::kPromisePrototypeFinally:
+ action_type = Just(debug::kDebugPromiseFinally);
+ continue;
+ case Builtin::kPromisePrototypeThen:
+ action_type = Just(debug::kDebugPromiseThen);
+ continue;
+ default:
+ return;
+ }
+ }
+ if (info->IsUserJavaScript() && action_type.IsJust()) {
+ DCHECK_EQ(0, promise->async_task_id());
+ promise->set_async_task_id(++async_task_count_);
+ async_event_delegate_->AsyncEventOccurred(action_type.FromJust(),
+ promise->async_task_id(),
+ debug()->IsBlackboxed(info));
+ }
return;
- case PromiseHookType::kBefore:
- if (!promise->async_task_id()) return;
+ }
+ }
+}
+
+void Isolate::OnPromiseBefore(Handle<JSPromise> promise) {
+ RunPromiseHook(PromiseHookType::kBefore, promise,
+ factory()->undefined_value());
+ if (HasAsyncEventDelegate()) {
+ if (promise->async_task_id()) {
async_event_delegate_->AsyncEventOccurred(
debug::kDebugWillHandle, promise->async_task_id(), false);
- break;
- case PromiseHookType::kAfter:
- if (!promise->async_task_id()) return;
+ }
+ }
+ if (debug()->is_active()) PushPromise(promise);
+}
+
+void Isolate::OnPromiseAfter(Handle<JSPromise> promise) {
+ RunPromiseHook(PromiseHookType::kAfter, promise,
+ factory()->undefined_value());
+ if (HasAsyncEventDelegate()) {
+ if (promise->async_task_id()) {
async_event_delegate_->AsyncEventOccurred(
debug::kDebugDidHandle, promise->async_task_id(), false);
- break;
- case PromiseHookType::kInit:
- debug::DebugAsyncActionType action_type = debug::kDebugPromiseThen;
- bool last_frame_was_promise_builtin = false;
- JavaScriptFrameIterator it(this);
- while (!it.done()) {
- std::vector<Handle<SharedFunctionInfo>> infos;
- it.frame()->GetFunctions(&infos);
- for (size_t i = 1; i <= infos.size(); ++i) {
- Handle<SharedFunctionInfo> info = infos[infos.size() - i];
- if (info->IsUserJavaScript()) {
- // We should not report PromiseThen and PromiseCatch which is called
- // indirectly, e.g. Promise.all calls Promise.then internally.
- if (last_frame_was_promise_builtin) {
- if (!promise->async_task_id()) {
- promise->set_async_task_id(++async_task_count_);
- }
- async_event_delegate_->AsyncEventOccurred(
- action_type, promise->async_task_id(),
- debug()->IsBlackboxed(info));
- }
- return;
- }
- last_frame_was_promise_builtin = false;
- if (info->HasBuiltinId()) {
- if (info->builtin_id() == Builtin::kPromisePrototypeThen) {
- action_type = debug::kDebugPromiseThen;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtin::kPromisePrototypeCatch) {
- action_type = debug::kDebugPromiseCatch;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() ==
- Builtin::kPromisePrototypeFinally) {
- action_type = debug::kDebugPromiseFinally;
- last_frame_was_promise_builtin = true;
- }
- }
- }
- it.Advance();
- }
+ }
}
+ if (debug()->is_active()) PopPromise();
}
-void Isolate::OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
- debug::DebugAsyncActionType event) {
- if (!async_event_delegate_) return;
- if (!promise->async_task_id()) {
- promise->set_async_task_id(++async_task_count_);
+void Isolate::OnTerminationDuringRunMicrotasks() {
+ // This performs cleanup for when RunMicrotasks (in
+ // builtins-microtask-queue-gen.cc) is aborted via a termination exception.
+ // This has to be kept in sync with the code in said file. Currently this
+ // includes:
+ //
+ // (1) Resetting the |current_microtask| slot on the Isolate to avoid leaking
+ // memory (and also to keep |current_microtask| not being undefined as an
+ // indicator that we're currently pumping the microtask queue).
+ // (2) Empty the promise stack to avoid leaking memory.
+ // (3) If the |current_microtask| is a promise reaction or resolve thenable
+ // job task, then signal the async event delegate and debugger that the
+ // microtask finished running.
+ //
+
+ // Reset the |current_microtask| global slot.
+ Handle<Microtask> current_microtask(
+ Microtask::cast(heap()->current_microtask()), this);
+ heap()->set_current_microtask(ReadOnlyRoots(this).undefined_value());
+
+ // Empty the promise stack.
+ while (PopPromise()) {
}
- async_event_delegate_->AsyncEventOccurred(event, promise->async_task_id(),
- false);
+
+ if (current_microtask->IsPromiseReactionJobTask()) {
+ Handle<PromiseReactionJobTask> promise_reaction_job_task =
+ Handle<PromiseReactionJobTask>::cast(current_microtask);
+ Handle<HeapObject> promise_or_capability(
+ promise_reaction_job_task->promise_or_capability(), this);
+ if (promise_or_capability->IsPromiseCapability()) {
+ promise_or_capability = handle(
+ Handle<PromiseCapability>::cast(promise_or_capability)->promise(),
+ this);
+ }
+ if (promise_or_capability->IsJSPromise()) {
+ OnPromiseAfter(Handle<JSPromise>::cast(promise_or_capability));
+ }
+ } else if (current_microtask->IsPromiseResolveThenableJobTask()) {
+ Handle<PromiseResolveThenableJobTask> promise_resolve_thenable_job_task =
+ Handle<PromiseResolveThenableJobTask>::cast(current_microtask);
+ Handle<JSPromise> promise_to_resolve(
+ promise_resolve_thenable_job_task->promise_to_resolve(), this);
+ OnPromiseAfter(promise_to_resolve);
+ }
+
+ SetTerminationOnExternalTryCatch();
}
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 65a85dac9e..a75a78de80 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -37,9 +37,10 @@
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
+#include "src/objects/js-objects.h"
#include "src/runtime/runtime.h"
-#include "src/security/external-pointer-table.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/external-pointer-table.h"
+#include "src/sandbox/sandbox.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"
@@ -86,6 +87,10 @@ namespace heap {
class HeapTester;
} // namespace heap
+namespace maglev {
+class MaglevConcurrentDispatcher;
+} // namespace maglev
+
class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
@@ -102,11 +107,11 @@ class Deoptimizer;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
+class GlobalHandles;
+class GlobalSafepoint;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
-class GlobalHandles;
-class GlobalSafepoint;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
@@ -120,7 +125,6 @@ class PersistentHandlesList;
class ReadOnlyArtifacts;
class RegExpStack;
class RootVisitor;
-class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
class SnapshotData;
@@ -129,6 +133,7 @@ class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
+class TieringManager;
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;
@@ -578,6 +583,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
};
static void InitializeOncePerProcess();
+ static void DisposeOncePerProcess();
// Creates Isolate object. Must be used instead of constructing Isolate with
// new operator.
@@ -744,6 +750,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
+ THREAD_LOCAL_TOP_ADDRESS(uintptr_t, num_frames_above_pending_handler)
THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
@@ -852,7 +859,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
- void PopPromise();
+ bool PopPromise();
// Return the relevant Promise that a throw/rejection pertains to, based
// on the contents of the Promise stack
@@ -894,18 +901,19 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void* ptr4 = nullptr);
// Similar to the above but without collecting the stack trace.
V8_NOINLINE void PushParamsAndDie(void* ptr1 = nullptr, void* ptr2 = nullptr,
- void* ptr3 = nullptr, void* ptr4 = nullptr);
- Handle<FixedArray> CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions options);
- Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
- FrameSkipMode mode,
- Handle<Object> caller);
- MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
- Handle<JSReceiver> error_object);
- MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
- Handle<JSReceiver> error_object, FrameSkipMode mode,
- Handle<Object> caller);
- Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);
+ void* ptr3 = nullptr, void* ptr4 = nullptr,
+ void* ptr5 = nullptr, void* ptr6 = nullptr);
+ Handle<FixedArray> CaptureDetailedStackTrace(
+ int limit, StackTrace::StackTraceOptions options);
+ MaybeHandle<JSObject> CaptureAndSetErrorStack(Handle<JSObject> error_object,
+ FrameSkipMode mode,
+ Handle<Object> caller);
+ Handle<FixedArray> GetDetailedStackTrace(Handle<JSReceiver> error_object);
+ Handle<FixedArray> GetSimpleStackTrace(Handle<JSReceiver> error_object);
+ // Walks the JS stack to find the first frame with a script name or
+ // source URL. The inspected frames are the same as for the detailed stack
+ // trace.
+ Handle<String> CurrentScriptNameOrSourceURL();
Address GetAbstractPC(int* line, int* column);
@@ -949,8 +957,14 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
async_event_delegate_ = delegate;
PromiseHookStateUpdated();
}
- void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
- debug::DebugAsyncActionType);
+
+ // Async function and promise instrumentation support.
+ void OnAsyncFunctionSuspended(Handle<JSPromise> promise,
+ Handle<JSPromise> parent);
+ void OnPromiseThen(Handle<JSPromise> promise);
+ void OnPromiseBefore(Handle<JSPromise> promise);
+ void OnPromiseAfter(Handle<JSPromise> promise);
+ void OnTerminationDuringRunMicrotasks();
// Re-throw an exception. This involves no error reporting since error
// reporting was handled when the exception was thrown originally.
@@ -993,13 +1007,19 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool ComputeLocation(MessageLocation* target);
bool ComputeLocationFromException(MessageLocation* target,
Handle<Object> exception);
- bool ComputeLocationFromStackTrace(MessageLocation* target,
- Handle<Object> exception);
+ bool ComputeLocationFromSimpleStackTrace(MessageLocation* target,
+ Handle<Object> exception);
+ bool ComputeLocationFromDetailedStackTrace(MessageLocation* target,
+ Handle<Object> exception);
Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
MessageLocation* location);
Handle<JSMessageObject> CreateMessageOrAbort(Handle<Object> exception,
MessageLocation* location);
+ // Similar to Isolate::CreateMessage but DOESN'T inspect the JS stack and
+ // only looks at the "detailed stack trace" as the "simple stack trace" might
+ // have already been stringified.
+ Handle<JSMessageObject> CreateMessageFromException(Handle<Object> exception);
// Out of resource exception helpers.
Object StackOverflow();
@@ -1080,7 +1100,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
const std::shared_ptr<metrics::Recorder>& metrics_recorder() {
return metrics_recorder_;
}
- RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
+ TieringManager* tiering_manager() { return tiering_manager_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
// Call InitializeLoggingAndCounters() if logging is needed before
@@ -1173,9 +1193,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
V8_INLINE Address* builtin_tier0_table() {
return isolate_data_.builtin_tier0_table();
}
- V8_INLINE Address* builtin_code_data_container_table() {
- return isolate_data_.builtin_code_data_container_table();
- }
bool IsBuiltinTableHandleLocation(Address* handle_location);
@@ -1450,11 +1467,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsDeferredHandle(Address* location);
#endif // DEBUG
- baseline::BaselineBatchCompiler* baseline_batch_compiler() {
+ baseline::BaselineBatchCompiler* baseline_batch_compiler() const {
DCHECK_NOT_NULL(baseline_batch_compiler_);
return baseline_batch_compiler_;
}
+#ifdef V8_ENABLE_MAGLEV
+ maglev::MaglevConcurrentDispatcher* maglev_concurrent_dispatcher() {
+ DCHECK_NOT_NULL(maglev_concurrent_dispatcher_);
+ return maglev_concurrent_dispatcher_;
+ }
+#endif // V8_ENABLE_MAGLEV
+
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
DCHECK(optimizing_compile_dispatcher_ == nullptr ||
@@ -1472,6 +1496,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
int id() const { return id_; }
+ bool was_locker_ever_used() const {
+ return was_locker_ever_used_.load(std::memory_order_relaxed);
+ }
+ void set_was_locker_ever_used() {
+ was_locker_ever_used_.store(true, std::memory_order_relaxed);
+ }
+
CompilationStatistics* GetTurboStatistics();
CodeTracer* GetCodeTracer();
@@ -1569,11 +1600,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#endif
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
void SetHasContextPromiseHooks(bool context_promise_hook) {
promise_hook_flags_ = PromiseHookFields::HasContextPromiseHook::update(
promise_hook_flags_, context_promise_hook);
PromiseHookStateUpdated();
}
+#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
bool HasContextPromiseHooks() const {
return PromiseHookFields::HasContextPromiseHook::decode(
@@ -1711,10 +1744,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return lazy_compile_dispatcher_.get();
}
- baseline::BaselineBatchCompiler* baseline_batch_compiler() const {
- return baseline_batch_compiler_;
- }
-
bool IsInAnyContext(Object object, uint32_t index);
void ClearKeptObjects();
@@ -1732,6 +1761,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
MaybeHandle<JSObject> RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module);
+ void SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallback callback);
+ MaybeHandle<NativeContext> RunHostCreateShadowRealmContextCallback();
+
void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
embedded_file_writer_ = writer;
}
@@ -1857,7 +1890,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
LocalHeap* main_thread_local_heap();
LocalHeap* CurrentLocalHeap();
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
ExternalPointerTable& external_pointer_table() {
return isolate_data_.external_pointer_table_;
}
@@ -1969,10 +2002,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey isolate_key_;
-
-#ifdef DEBUG
static std::atomic<bool> isolate_key_created_;
-#endif
void Deinit();
@@ -1992,9 +2022,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool PropagatePendingExceptionToExternalTryCatch(
ExceptionHandlerType top_handler);
- void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
- Handle<JSPromise> promise);
-
bool HasIsolatePromiseHooks() const {
return PromiseHookFields::HasIsolatePromiseHook::decode(
promise_hook_flags_);
@@ -2048,10 +2075,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
const int id_;
EntryStackItem* entry_stack_ = nullptr;
int stack_trace_nesting_level_ = 0;
+ std::atomic<bool> was_locker_ever_used_{false};
StringStream* incomplete_message_ = nullptr;
Address isolate_addresses_[kIsolateAddressCount + 1] = {};
Bootstrapper* bootstrapper_ = nullptr;
- RuntimeProfiler* runtime_profiler_ = nullptr;
+ TieringManager* tiering_manager_ = nullptr;
CompilationCache* compilation_cache_ = nullptr;
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
@@ -2121,6 +2149,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
HostInitializeImportMetaObjectCallback
host_initialize_import_meta_object_callback_ = nullptr;
+ HostCreateShadowRealmContextCallback
+ host_create_shadow_realm_context_callback_ = nullptr;
+
base::Mutex rail_mutex_;
double load_start_time_ms_ = 0;
@@ -2194,6 +2225,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
std::unique_ptr<LazyCompileDispatcher> lazy_compile_dispatcher_;
baseline::BaselineBatchCompiler* baseline_batch_compiler_ = nullptr;
+#ifdef V8_ENABLE_MAGLEV
+ maglev::MaglevConcurrentDispatcher* maglev_concurrent_dispatcher_ = nullptr;
+#endif // V8_ENABLE_MAGLEV
using InterruptEntry = std::pair<InterruptCallback, void*>;
std::queue<InterruptEntry> api_interrupts_queue_;
@@ -2503,7 +2537,8 @@ class StackTraceFailureMessage {
explicit StackTraceFailureMessage(Isolate* isolate, StackTraceMode mode,
void* ptr1 = nullptr, void* ptr2 = nullptr,
- void* ptr3 = nullptr, void* ptr4 = nullptr);
+ void* ptr3 = nullptr, void* ptr4 = nullptr,
+ void* ptr5 = nullptr, void* ptr6 = nullptr);
V8_NOINLINE void Print() volatile;
@@ -2517,6 +2552,8 @@ class StackTraceFailureMessage {
void* ptr2_;
void* ptr3_;
void* ptr4_;
+ void* ptr5_;
+ void* ptr6_;
void* code_objects_[4];
char js_stack_trace_[kStacktraceBufferSize];
uintptr_t end_marker_ = kEndMarker;
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index c3dd70718b..be91d39aeb 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -14,8 +14,7 @@
namespace v8 {
namespace internal {
-LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
- RuntimeCallStats* runtime_call_stats)
+LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
: HiddenLocalFactory(isolate),
heap_(isolate->heap(), kind),
isolate_(isolate),
@@ -23,16 +22,20 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
thread_id_(ThreadId::Current()),
stack_limit_(kind == ThreadKind::kMain
? isolate->stack_guard()->real_climit()
- : GetCurrentStackPosition() - FLAG_stack_size * KB),
- runtime_call_stats_(kind == ThreadKind::kMain &&
- runtime_call_stats == nullptr
- ? isolate->counters()->runtime_call_stats()
- : runtime_call_stats)
+ : GetCurrentStackPosition() - FLAG_stack_size * KB)
#ifdef V8_INTL_SUPPORT
,
default_locale_(isolate->DefaultLocale())
#endif
{
+#ifdef V8_RUNTIME_CALL_STATS
+ if (kind == ThreadKind::kMain) {
+ runtime_call_stats_ = isolate->counters()->runtime_call_stats();
+ } else {
+ rcs_scope_.emplace(isolate->counters()->worker_thread_runtime_call_stats());
+ runtime_call_stats_ = rcs_scope_->Get();
+ }
+#endif
}
LocalIsolate::~LocalIsolate() {
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index a7fa429beb..519891cc87 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -13,6 +13,7 @@
#include "src/handles/maybe-handles.h"
#include "src/heap/local-factory.h"
#include "src/heap/local-heap.h"
+#include "src/logging/runtime-call-stats.h"
namespace v8 {
@@ -43,8 +44,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
public:
using HandleScopeType = LocalHandleScope;
- explicit LocalIsolate(Isolate* isolate, ThreadKind kind,
- RuntimeCallStats* runtime_call_stats = nullptr);
+ explicit LocalIsolate(Isolate* isolate, ThreadKind kind);
~LocalIsolate();
// Kinda sketchy.
@@ -110,7 +110,11 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalLogger* logger() const { return logger_.get(); }
ThreadId thread_id() const { return thread_id_; }
Address stack_limit() const { return stack_limit_; }
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
+#else
+ RuntimeCallStats* runtime_call_stats() const { return nullptr; }
+#endif
bigint::Processor* bigint_processor() {
if (!bigint_processor_) InitializeBigIntProcessor();
return bigint_processor_;
@@ -155,8 +159,12 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
ThreadId const thread_id_;
Address const stack_limit_;
- RuntimeCallStats* runtime_call_stats_;
bigint::Processor* bigint_processor_{nullptr};
+
+#ifdef V8_RUNTIME_CALL_STATS
+ base::Optional<WorkerThreadRuntimeCallStatsScope> rcs_scope_;
+ RuntimeCallStats* runtime_call_stats_;
+#endif
#ifdef V8_INTL_SUPPORT
std::string default_locale_;
#endif
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h
index 1395f47a7b..6984c0fd16 100644
--- a/deps/v8/src/execution/loong64/frame-constants-loong64.h
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -40,14 +41,16 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7, s8}
- static constexpr uint32_t kPushedGpRegs = 0b11010011100000111111111111110000;
+ static constexpr RegList kPushedGpRegs = {a0, a1, a2, a3, a4, a5, a6,
+ a7, t0, t1, t2, t3, t4, t5,
+ s0, s1, s2, s5, s7, s8};
// {f0, f1, f2, ... f27, f28}
- static constexpr uint32_t kPushedFpRegs = 0x1fffffff;
+ static constexpr DoubleRegList kPushedFpRegs = {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
+ f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -56,15 +59,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
index 7c9d42719c..a71f39aad3 100644
--- a/deps/v8/src/execution/loong64/simulator-loong64.cc
+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
@@ -3484,13 +3484,13 @@ void Simulator::DecodeTypeOp17() {
printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
rj(), Registers::Name(rk_reg()), rk());
- SetResult(rd_reg(), rk() == 0 ? rj() : 0);
+ SetResult(rd_reg(), rk() == 0 ? 0 : rj());
break;
case MASKNEZ:
printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
rj(), Registers::Name(rk_reg()), rk());
- SetResult(rd_reg(), rk() != 0 ? rj() : 0);
+ SetResult(rd_reg(), rk() != 0 ? 0 : rj());
break;
case NOR:
printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
@@ -4267,12 +4267,28 @@ void Simulator::DecodeTypeOp17() {
case FSCALEB_D:
printf("Sim UNIMPLEMENTED: FSCALEB_D\n");
UNIMPLEMENTED();
- case FCOPYSIGN_S:
- printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n");
- UNIMPLEMENTED();
- case FCOPYSIGN_D:
- printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n");
- UNIMPLEMENTED();
+ case FCOPYSIGN_S: {
+ printf_instr("FCOPYSIGN_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+ [](float lhs, float rhs) {
+ return std::copysign(lhs, rhs);
+ },
+ fj_float(), fk_float()));
+ } break;
+ case FCOPYSIGN_D: {
+ printf_instr("FCOPYSIGN_d\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
+ [](double lhs, double rhs) {
+ return std::copysign(lhs, rhs);
+ },
+ fj_double(), fk_double()));
+ } break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 472e0ebdb0..595c50e911 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -15,9 +15,9 @@
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
@@ -223,18 +223,17 @@ MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
Handle<JSFunction> constructor = isolate->callsite_function();
Handle<FixedArray> sites = isolate->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; ++i) {
- Handle<StackFrameInfo> frame(StackFrameInfo::cast(frames->get(i)), isolate);
+ Handle<CallSiteInfo> frame(CallSiteInfo::cast(frames->get(i)), isolate);
Handle<JSObject> site;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
JSArray);
- RETURN_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- site, isolate->factory()->call_site_frame_info_symbol(), frame,
- DONT_ENUM),
- JSArray);
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ site, isolate->factory()->call_site_info_symbol(),
+ frame, DONT_ENUM),
+ JSArray);
sites->set(i, *site);
}
@@ -372,8 +371,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
for (int i = 0; i < elems->length(); ++i) {
builder.AppendCStringLiteral("\n at ");
- Handle<StackFrameInfo> frame(StackFrameInfo::cast(elems->get(i)), isolate);
- SerializeStackFrameInfo(isolate, frame, &builder);
+ Handle<CallSiteInfo> frame(CallSiteInfo::cast(elems->get(i)), isolate);
+ SerializeCallSiteInfo(isolate, frame, &builder);
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
@@ -501,7 +500,7 @@ MaybeHandle<JSObject> ErrorUtils::Construct(Isolate* isolate,
return ErrorUtils::Construct(isolate, target, new_target, message, options,
mode, caller,
- ErrorUtils::StackTraceCollection::kDetailed);
+ ErrorUtils::StackTraceCollection::kEnabled);
}
MaybeHandle<JSObject> ErrorUtils::Construct(
@@ -558,7 +557,8 @@ MaybeHandle<JSObject> ErrorUtils::Construct(
Handle<Name> cause_string = isolate->factory()->cause_string();
if (options->IsJSReceiver()) {
Handle<JSReceiver> js_options = Handle<JSReceiver>::cast(options);
- Maybe<bool> has_cause = JSObject::HasProperty(js_options, cause_string);
+ Maybe<bool> has_cause =
+ JSObject::HasProperty(isolate, js_options, cause_string);
if (has_cause.IsNothing()) {
DCHECK((isolate)->has_pending_exception());
return MaybeHandle<JSObject>();
@@ -577,16 +577,12 @@ MaybeHandle<JSObject> ErrorUtils::Construct(
}
switch (stack_trace_collection) {
- case StackTraceCollection::kDetailed:
- RETURN_ON_EXCEPTION(
- isolate, isolate->CaptureAndSetDetailedStackTrace(err), JSObject);
- V8_FALLTHROUGH;
- case StackTraceCollection::kSimple:
- RETURN_ON_EXCEPTION(
- isolate, isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
- JSObject);
+ case StackTraceCollection::kEnabled:
+ RETURN_ON_EXCEPTION(isolate,
+ isolate->CaptureAndSetErrorStack(err, mode, caller),
+ JSObject);
break;
- case StackTraceCollection::kNone:
+ case StackTraceCollection::kDisabled:
break;
}
return err;
@@ -712,7 +708,7 @@ Handle<JSObject> ErrorUtils::MakeGenericError(
// The call below can't fail because constructor is a builtin.
DCHECK(constructor->shared().HasBuiltinId());
return ErrorUtils::Construct(isolate, constructor, constructor, msg, options,
- mode, no_caller, StackTraceCollection::kDetailed)
+ mode, no_caller, StackTraceCollection::kEnabled)
.ToHandleChecked();
}
@@ -996,5 +992,69 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
return ReadOnlyRoots(isolate).exception();
}
+// static
+MaybeHandle<Object> ErrorUtils::GetFormattedStack(
+ Isolate* isolate, Handle<JSObject> error_object) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"), __func__);
+
+ Handle<Object> error_stack = JSReceiver::GetDataProperty(
+ isolate, error_object, isolate->factory()->error_stack_symbol());
+ if (error_stack->IsErrorStackData()) {
+ Handle<ErrorStackData> error_stack_data =
+ Handle<ErrorStackData>::cast(error_stack);
+ if (error_stack_data->HasFormattedStack()) {
+ return handle(error_stack_data->formatted_stack(), isolate);
+ }
+ ErrorStackData::EnsureStackFrameInfos(isolate, error_stack_data);
+ Handle<Object> formatted_stack;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, formatted_stack,
+ FormatStackTrace(isolate, error_object,
+ handle(error_stack_data->call_site_infos(), isolate)),
+ Object);
+ error_stack_data->set_formatted_stack(*formatted_stack);
+ return formatted_stack;
+ }
+
+ if (error_stack->IsFixedArray()) {
+ Handle<Object> formatted_stack;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, formatted_stack,
+ FormatStackTrace(isolate, error_object,
+ Handle<FixedArray>::cast(error_stack)),
+ Object);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSObject::SetProperty(isolate, error_object,
+ isolate->factory()->error_stack_symbol(),
+ formatted_stack, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
+ Object);
+ return formatted_stack;
+ }
+
+ return error_stack;
+}
+
+// static
+void ErrorUtils::SetFormattedStack(Isolate* isolate,
+ Handle<JSObject> error_object,
+ Handle<Object> formatted_stack) {
+ Handle<Object> error_stack = JSReceiver::GetDataProperty(
+ isolate, error_object, isolate->factory()->error_stack_symbol());
+ if (error_stack->IsErrorStackData()) {
+ Handle<ErrorStackData> error_stack_data =
+ Handle<ErrorStackData>::cast(error_stack);
+ ErrorStackData::EnsureStackFrameInfos(isolate, error_stack_data);
+ error_stack_data->set_formatted_stack(*formatted_stack);
+ } else {
+ JSObject::SetProperty(isolate, error_object,
+ isolate->factory()->error_stack_symbol(),
+ formatted_stack, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
+ .Check();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 5a54279647..20b737894b 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -72,9 +72,9 @@ enum FrameSkipMode {
class ErrorUtils : public AllStatic {
public:
- // |kNone| is useful when you don't need the stack information at all, for
+ // |kDisabled| is useful when you don't need the stack information at all, for
// example when creating a deserialized error.
- enum class StackTraceCollection { kDetailed, kSimple, kNone };
+ enum class StackTraceCollection { kEnabled, kDisabled };
static MaybeHandle<JSObject> Construct(Isolate* isolate,
Handle<JSFunction> target,
Handle<Object> new_target,
@@ -112,6 +112,11 @@ class ErrorUtils : public AllStatic {
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object,
MaybeHandle<Object> key);
+
+ static MaybeHandle<Object> GetFormattedStack(Isolate* isolate,
+ Handle<JSObject> error_object);
+ static void SetFormattedStack(Isolate* isolate, Handle<JSObject> error_object,
+ Handle<Object> formatted_stack);
};
class MessageFormatter {
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 68c2c4a09e..12a626900d 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -4,8 +4,8 @@
#include "src/execution/microtask-queue.h"
-#include <stddef.h>
#include <algorithm>
+#include <cstddef>
#include "src/api/api-inl.h"
#include "src/base/logging.h"
@@ -187,7 +187,7 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
size_ = 0;
start_ = 0;
DCHECK(isolate->has_scheduled_exception());
- isolate->SetTerminationOnExternalTryCatch();
+ isolate->OnTerminationDuringRunMicrotasks();
OnCompleted(isolate);
return -1;
}
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.h b/deps/v8/src/execution/mips/frame-constants-mips.h
index 48704cf56a..81a85eb75b 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.h
+++ b/deps/v8/src/execution/mips/frame-constants-mips.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -45,14 +46,14 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// {v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7}
- static constexpr uint32_t kPushedGpRegs = 0b111111111111100 + (1 << 23);
+ static constexpr RegList kPushedGpRegs = {v0, v1, a0, a1, a2, a3, t0,
+ t1, t2, t3, t4, t5, t6, s7};
// {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24}
- static constexpr uint32_t kPushedFpRegs = 0b1010101010101010101010101;
+ static constexpr DoubleRegList kPushedFpRegs = {
+ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -61,15 +62,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.h b/deps/v8/src/execution/mips64/frame-constants-mips64.h
index 40349ea8ec..c470fc6d6a 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -41,14 +42,14 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// {v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7}
- static constexpr uint32_t kPushedGpRegs = 0b111111111111100 + (1 << 23);
+ static constexpr RegList kPushedGpRegs = {v0, v1, a0, a1, a2, a3, a4,
+ a5, a6, a7, t0, t1, t2, s7};
// {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26}
- static constexpr uint32_t kPushedFpRegs = 0b101010101010101010101010101;
+ static constexpr DoubleRegList kPushedFpRegs = {
+ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -57,15 +58,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 7b0b4bc00c..076eda3587 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/ppc/register-ppc.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -43,16 +43,17 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- static constexpr RegList kPushedGpRegs =
- Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11, cp);
+ static constexpr RegList kPushedGpRegs = {r3, r4, r5, r6, r7,
+ r8, r9, r10, r11, cp};
- static constexpr RegList kPushedFpRegs = DoubleRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+ static constexpr DoubleRegList kPushedFpRegs = {d0, d1, d2, d3, d4, d5, d6,
+ d7, d8, d9, d10, d11, d12};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr Simd128RegList kPushedSimd128Regs = {
+ v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12};
+
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-TypedFrameConstants::kFixedFrameSizeFromFp -
@@ -62,17 +63,19 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
- base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ base::bits::CountPopulation(lower_regs) * kSimd128Size;
}
};
diff --git a/deps/v8/src/execution/riscv64/frame-constants-riscv64.h b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h
index ab9de0528e..6b70815ea4 100644
--- a/deps/v8/src/execution/riscv64/frame-constants-riscv64.h
+++ b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h
@@ -24,7 +24,7 @@ class EntryFrameConstants : public AllStatic {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
static constexpr int kNumberOfSavedGpParamRegs =
- arraysize(wasm::kGpParamRegisters);
+ arraysize(wasm::kGpParamRegisters) + 1;
static constexpr int kNumberOfSavedFpParamRegs =
arraysize(wasm::kFpParamRegisters);
static constexpr int kNumberOfSavedAllParamRegs =
@@ -45,19 +45,13 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // constexpr RegList kLiftoffAssemblerGpCacheRegs =
- // Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
- static constexpr uint32_t kPushedGpRegs = wasm::kLiftoffAssemblerGpCacheRegs;
+ static constexpr RegList kPushedGpRegs = wasm::kLiftoffAssemblerGpCacheRegs;
- // constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- // ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2, fa3, fa4, fa5,
- // fa6, fa7, ft8, ft9, ft10, ft11);
- static constexpr uint32_t kPushedFpRegs = wasm::kLiftoffAssemblerFpCacheRegs;
+ static constexpr DoubleRegList kPushedFpRegs =
+ wasm::kLiftoffAssemblerFpCacheRegs;
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -66,15 +60,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 5b45f0a36d..7b2aff765d 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -51,6 +51,7 @@
#include <stdlib.h>
#include "src/base/bits.h"
+#include "src/base/overflowing-math.h"
#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -843,6 +844,94 @@ struct type_sew_t<128> {
RVV_VI_VFP_LOOP_END \
rvv_trace_vd();
+#define RVV_VI_VFP_VF_LOOP_WIDEN(BODY32, vs2_is_widen) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: \
+ case E64: { \
+ UNIMPLEMENTED(); \
+ break; \
+ } \
+ case E32: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double fs1 = static_cast<double>(get_fpu_register_float(rs1_reg())); \
+ double vs2 = vs2_is_widen \
+ ? Rvvelt<double>(rvv_vs2_reg(), i) \
+ : static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i)); \
+ double vs3 = static_cast<double>(Rvvelt<float>(rvv_vd_reg(), i)); \
+ BODY32; \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_VV_LOOP_WIDEN(BODY32, vs2_is_widen) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: \
+ case E64: { \
+ UNIMPLEMENTED(); \
+ break; \
+ } \
+ case E32: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double vs2 = vs2_is_widen \
+ ? static_cast<double>(Rvvelt<double>(rvv_vs2_reg(), i)) \
+ : static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i)); \
+ double vs1 = static_cast<double>(Rvvelt<float>(rvv_vs1_reg(), i)); \
+ double vs3 = static_cast<double>(Rvvelt<float>(rvv_vd_reg(), i)); \
+ BODY32; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(type, check_fn, op) \
+ auto fn = [this](type frs1, type frs2) { \
+ if (check_fn(frs1, frs2)) { \
+ this->set_fflags(kInvalidOperation); \
+ return std::numeric_limits<type>::quiet_NaN(); \
+ } else { \
+ return frs2 op frs1; \
+ } \
+ }; \
+ auto alu_out = fn(vs1, vs2); \
+ /** if any input or result is NaN, the result is quiet_NaN*/ \
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) { \
+ /** signaling_nan sets kInvalidOperation bit*/ \
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2)) \
+ set_fflags(kInvalidOperation); \
+ alu_out = std::numeric_limits<type>::quiet_NaN(); \
+ } \
+ vd = alu_out;
+
+#define RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(type, check_fn, op) \
+ auto fn = [this](type frs1, type frs2) { \
+ if (check_fn(frs1, frs2)) { \
+ this->set_fflags(kInvalidOperation); \
+ return std::numeric_limits<type>::quiet_NaN(); \
+ } else { \
+ return frs2 op frs1; \
+ } \
+ }; \
+ auto alu_out = fn(fs1, vs2); \
+ /** if any input or result is NaN, the result is quiet_NaN*/ \
+ if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) { \
+ /** signaling_nan sets kInvalidOperation bit*/ \
+ if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2)) \
+ set_fflags(kInvalidOperation); \
+ alu_out = std::numeric_limits<type>::quiet_NaN(); \
+ } \
+ vd = alu_out;
+
#define RVV_VI_VFP_FMA(type, _f1, _f2, _a) \
auto fn = [](type f1, type f2, type a) { return std::fma(f1, f2, a); }; \
vd = CanonicalizeFPUOpFMA<type>(fn, _f1, _f2, _a);
@@ -2571,14 +2660,14 @@ T Simulator::ReadMem(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
-#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
- // // check for natural alignment
- // if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
- // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
- // addr,
- // reinterpret_cast<intptr_t>(instr));
- // DieOrDebug();
- // }
+#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
+ // check for natural alignment
+ if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
+ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
#endif
T* ptr = reinterpret_cast<T*>(addr);
T value = *ptr;
@@ -2594,7 +2683,7 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
-#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
+#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
// check for natural alignment
if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
@@ -5374,6 +5463,34 @@ void Simulator::DecodeRvvIVI() {
RVV_VI_LOOP_END
rvv_trace_vd();
} break;
+ case RO_V_VSLIDEUP_VI: {
+ RVV_VI_CHECK_SLIDE(true);
+
+ const uint8_t offset = instr_.RvvUimm5();
+ RVV_VI_GENERAL_LOOP_BASE
+ if (rvv_vstart() < offset && i < offset) continue;
+
+ switch (rvv_vsew()) {
+ case E8: {
+ VI_XI_SLIDEUP_PARAMS(8, offset);
+ vd = vs2;
+ } break;
+ case E16: {
+ VI_XI_SLIDEUP_PARAMS(16, offset);
+ vd = vs2;
+ } break;
+ case E32: {
+ VI_XI_SLIDEUP_PARAMS(32, offset);
+ vd = vs2;
+ } break;
+ default: {
+ VI_XI_SLIDEUP_PARAMS(64, offset);
+ vd = vs2;
+ } break;
+ }
+ RVV_VI_LOOP_END
+ rvv_trace_vd();
+ } break;
case RO_V_VSRL_VI:
RVV_VI_VI_ULOOP({ vd = vs2 >> uimm5; })
break;
@@ -5698,7 +5815,32 @@ void Simulator::DecodeRvvMVV() {
UNREACHABLE();
}
set_rvv_vstart(0);
- SNPrintF(trace_buf_, "%lx", get_register(rd_reg()));
+ rvv_trace_vd();
+ } else if (rvv_vs1_reg() == 0b10000) {
+ uint64_t cnt = 0;
+ RVV_VI_GENERAL_LOOP_BASE
+ RVV_VI_LOOP_MASK_SKIP()
+ const uint8_t idx = i / 64;
+ const uint8_t pos = i % 64;
+ bool mask = (Rvvelt<uint64_t>(rvv_vs2_reg(), idx) >> pos) & 0x1;
+ if (mask) cnt++;
+ RVV_VI_LOOP_END
+ set_register(rd_reg(), cnt);
+ rvv_trace_vd();
+ } else if (rvv_vs1_reg() == 0b10001) {
+ int64_t index = -1;
+ RVV_VI_GENERAL_LOOP_BASE
+ RVV_VI_LOOP_MASK_SKIP()
+ const uint8_t idx = i / 64;
+ const uint8_t pos = i % 64;
+ bool mask = (Rvvelt<uint64_t>(rvv_vs2_reg(), idx) >> pos) & 0x1;
+ if (mask) {
+ index = i;
+ break;
+ }
+ RVV_VI_LOOP_END
+ set_register(rd_reg(), index);
+ rvv_trace_vd();
} else {
v8::base::EmbeddedVector<char, 256> buffer;
disasm::NameConverter converter;
@@ -6162,6 +6304,30 @@ void Simulator::DecodeRvvFVV() {
USE(fs1);
})
break;
+ case VFRSQRT7_V:
+ RVV_VI_VFP_VF_LOOP(
+ {},
+ {
+ vd = base::RecipSqrt(vs2);
+ USE(fs1);
+ },
+ {
+ vd = base::RecipSqrt(vs2);
+ USE(fs1);
+ })
+ break;
+ case VFREC7_V:
+ RVV_VI_VFP_VF_LOOP(
+ {},
+ {
+ vd = base::Recip(vs2);
+ USE(fs1);
+ },
+ {
+ vd = base::Recip(vs2);
+ USE(fs1);
+ })
+ break;
default:
break;
}
@@ -6305,6 +6471,87 @@ void Simulator::DecodeRvvFVV() {
vd = alu_out;
})
break;
+ case RO_V_VFWADD_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
+ USE(vs3);
+ },
+ false)
+ break;
+ case RO_V_VFWSUB_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
+ USE(vs3);
+ },
+ false)
+ break;
+ case RO_V_VFWADD_W_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
+ USE(vs3);
+ },
+ true)
+ break;
+ case RO_V_VFWSUB_W_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
+ USE(vs3);
+ },
+ true)
+ break;
+ case RO_V_VFWMUL_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fmul, *);
+ USE(vs3);
+ },
+ false)
+ break;
+ case RO_V_VFWREDUSUM_VV:
+ case RO_V_VFWREDOSUM_VV:
+ RVV_VI_CHECK_DSS(true);
+ switch (rvv_vsew()) {
+ case E16:
+ case E64: {
+ UNIMPLEMENTED();
+ }
+ case E32: {
+ double& vd = Rvvelt<double>(rvv_vd_reg(), 0, true);
+ float vs1 = Rvvelt<float>(rvv_vs1_reg(), 0);
+ double alu_out = vs1;
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) {
+ double vs2 = static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i));
+ if (is_invalid_fadd(alu_out, vs2)) {
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ break;
+ }
+ alu_out = alu_out + vs2;
+ if (std::isnan(alu_out) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs2)) set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ break;
+ }
+ }
+ vd = alu_out;
+ break;
+ }
+ default:
+ require(false);
+ break;
+ }
+
+ break;
case RO_V_VFMADD_VV:
RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, vd, vs1, vs2)},
{RVV_VI_VFP_FMA(double, vd, vs1, vs2)})
@@ -6337,6 +6584,22 @@ void Simulator::DecodeRvvFVV() {
RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, -vs2, vs1, +vd)},
{RVV_VI_VFP_FMA(double, -vs2, vs1, +vd)})
break;
+ case RO_V_VFWMACC_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(float, vs2, vs1, vs3)}, false)
+ break;
+ case RO_V_VFWNMACC_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(float, -vs2, vs1, -vs3)}, false)
+ break;
+ case RO_V_VFWMSAC_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(float, vs2, vs1, -vs3)}, false)
+ break;
+ case RO_V_VFWNMSAC_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(float, -vs2, vs1, +vs3)}, false)
+ break;
case RO_V_VFMV_FS:
switch (rvv_vsew()) {
case E16: {
@@ -6394,6 +6657,51 @@ void Simulator::DecodeRvvFVF() {
USE(vs2);
})
break;
+ case RO_V_VFWADD_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
+ USE(vs3);
+ },
+ false)
+ break;
+ case RO_V_VFWSUB_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
+ USE(vs3);
+ },
+ false)
+ break;
+ case RO_V_VFWADD_W_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
+ USE(vs3);
+ },
+ true)
+ break;
+ case RO_V_VFWSUB_W_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
+ USE(vs3);
+ },
+ true)
+ break;
+ case RO_V_VFWMUL_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN(
+ {
+ RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fmul, *);
+ USE(vs3);
+ },
+ false)
+ break;
case RO_V_VFMADD_VF:
RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, vd, fs1, vs2)},
{RVV_VI_VFP_FMA(double, vd, fs1, vs2)})
@@ -6426,6 +6734,22 @@ void Simulator::DecodeRvvFVF() {
RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, -vs2, fs1, vd)},
{RVV_VI_VFP_FMA(double, -vs2, fs1, vd)})
break;
+ case RO_V_VFWMACC_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(float, vs2, fs1, vs3)}, false)
+ break;
+ case RO_V_VFWNMACC_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(float, -vs2, fs1, -vs3)}, false)
+ break;
+ case RO_V_VFWMSAC_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(float, vs2, fs1, -vs3)}, false)
+ break;
+ case RO_V_VFWNMSAC_VF:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(float, -vs2, fs1, vs3)}, false)
+ break;
default:
UNSUPPORTED_RISCV();
break;
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
deleted file mode 100644
index a586d2d3b6..0000000000
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/execution/runtime-profiler.h"
-
-#include "src/base/platform/platform.h"
-#include "src/codegen/assembler.h"
-#include "src/codegen/compilation-cache.h"
-#include "src/codegen/compiler.h"
-#include "src/codegen/pending-optimization-table.h"
-#include "src/diagnostics/code-tracer.h"
-#include "src/execution/execution.h"
-#include "src/execution/frames-inl.h"
-#include "src/handles/global-handles.h"
-#include "src/init/bootstrapper.h"
-#include "src/interpreter/interpreter.h"
-#include "src/tracing/trace-event.h"
-
-namespace v8 {
-namespace internal {
-
-// Maximum size in bytes of generate code for a function to allow OSR.
-static const int kOSRBytecodeSizeAllowanceBase = 119;
-
-static const int kOSRBytecodeSizeAllowancePerTick = 44;
-
-#define OPTIMIZATION_REASON_LIST(V) \
- V(DoNotOptimize, "do not optimize") \
- V(HotAndStable, "hot and stable") \
- V(SmallFunction, "small function")
-
-enum class OptimizationReason : uint8_t {
-#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
- OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
-#undef OPTIMIZATION_REASON_CONSTANTS
-};
-
-char const* OptimizationReasonToString(OptimizationReason reason) {
- static char const* reasons[] = {
-#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
- OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
-#undef OPTIMIZATION_REASON_TEXTS
- };
- size_t const index = static_cast<size_t>(reason);
- DCHECK_LT(index, arraysize(reasons));
- return reasons[index];
-}
-
-#undef OPTIMIZATION_REASON_LIST
-
-std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
- return os << OptimizationReasonToString(reason);
-}
-
-namespace {
-
-void TraceInOptimizationQueue(JSFunction function) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[function ");
- function.PrintName();
- PrintF(" is already in optimization queue]\n");
- }
-}
-
-void TraceHeuristicOptimizationDisallowed(JSFunction function) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[function ");
- function.PrintName();
- PrintF(" has been marked manually for optimization]\n");
- }
-}
-
-void TraceRecompile(JSFunction function, OptimizationReason reason,
- CodeKind code_kind, Isolate* isolate) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[marking ");
- function.ShortPrint(scope.file());
- PrintF(scope.file(), " for optimized recompilation, reason: %s",
- OptimizationReasonToString(reason));
- PrintF(scope.file(), "]\n");
- }
-}
-
-} // namespace
-
-RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
- : isolate_(isolate), any_ic_changed_(false) {}
-
-void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
- CodeKind code_kind) {
- DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
- TraceRecompile(function, reason, code_kind, isolate_);
- function.MarkForOptimization(ConcurrencyMode::kConcurrent);
-}
-
-void RuntimeProfiler::AttemptOnStackReplacement(UnoptimizedFrame* frame,
- int loop_nesting_levels) {
- JSFunction function = frame->function();
- SharedFunctionInfo shared = function.shared();
- if (!FLAG_use_osr || !shared.IsUserJavaScript()) {
- return;
- }
-
- // If the code is not optimizable, don't try OSR.
- if (shared.optimization_disabled()) return;
-
- // We're using on-stack replacement: Store new loop nesting level in
- // BytecodeArray header so that certain back edges in any interpreter frame
- // for this bytecode will trigger on-stack replacement for that frame.
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate_->GetCodeTracer());
- PrintF(scope.file(), "[OSR - arming back edges in ");
- function.PrintName(scope.file());
- PrintF(scope.file(), "]\n");
- }
-
- DCHECK(frame->is_unoptimized());
- int level = frame->GetBytecodeArray().osr_loop_nesting_level();
- frame->GetBytecodeArray().set_osr_loop_nesting_level(std::min(
- {level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker}));
-}
-
-void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
- JavaScriptFrame* frame,
- CodeKind code_kind) {
- if (function.IsInOptimizationQueue()) {
- TraceInOptimizationQueue(function);
- return;
- }
-
- if (FLAG_testing_d8_test_runner &&
- !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
- function)) {
- TraceHeuristicOptimizationDisallowed(function);
- return;
- }
-
- if (function.shared().optimization_disabled()) return;
-
- // Note: We currently do not trigger OSR compilation from TP code.
- if (frame->is_unoptimized()) {
- if (FLAG_always_osr) {
- AttemptOnStackReplacement(UnoptimizedFrame::cast(frame),
- AbstractCode::kMaxLoopNestingMarker);
- // Fall through and do a normal optimized compile as well.
- } else if (MaybeOSR(function, UnoptimizedFrame::cast(frame))) {
- return;
- }
- }
-
- OptimizationReason reason =
- ShouldOptimize(function, function.shared().GetBytecodeArray(isolate_));
-
- if (reason != OptimizationReason::kDoNotOptimize) {
- Optimize(function, reason, code_kind);
- }
-}
-
-bool RuntimeProfiler::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
- int ticks = function.feedback_vector().profiler_ticks();
- if (function.IsMarkedForOptimization() ||
- function.IsMarkedForConcurrentOptimization() ||
- function.HasAvailableOptimizedCode()) {
- int64_t allowance = kOSRBytecodeSizeAllowanceBase +
- ticks * kOSRBytecodeSizeAllowancePerTick;
- if (function.shared().GetBytecodeArray(isolate_).length() <= allowance) {
- AttemptOnStackReplacement(frame);
- }
- return true;
- }
- return false;
-}
-
-namespace {
-
-bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
- return !any_ic_changed &&
- bytecode_size < FLAG_max_bytecode_size_for_early_opt;
-}
-
-} // namespace
-
-OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
- BytecodeArray bytecode) {
- if (function.ActiveTierIsTurbofan()) {
- return OptimizationReason::kDoNotOptimize;
- }
- if (V8_UNLIKELY(FLAG_turboprop) && function.ActiveTierIsToptierTurboprop()) {
- return OptimizationReason::kDoNotOptimize;
- }
- const int ticks = function.feedback_vector().profiler_ticks();
- const int ticks_for_optimization =
- FLAG_ticks_before_optimization +
- (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
- if (ticks >= ticks_for_optimization) {
- return OptimizationReason::kHotAndStable;
- } else if (ShouldOptimizeAsSmallFunction(bytecode.length(),
- any_ic_changed_)) {
- // If no IC was patched since the last tick and this function is very
- // small, optimistically optimize it now.
- return OptimizationReason::kSmallFunction;
- } else if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function.PrintName();
- PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
- if (any_ic_changed_) {
- PrintF("ICs changed]\n");
- } else {
- PrintF(" too large for small function optimization: %d/%d]\n",
- bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
- }
- }
- return OptimizationReason::kDoNotOptimize;
-}
-
-RuntimeProfiler::MarkCandidatesForOptimizationScope::
- MarkCandidatesForOptimizationScope(RuntimeProfiler* profiler)
- : handle_scope_(profiler->isolate_), profiler_(profiler) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.MarkCandidatesForOptimization");
-}
-
-RuntimeProfiler::MarkCandidatesForOptimizationScope::
- ~MarkCandidatesForOptimizationScope() {
- profiler_->any_ic_changed_ = false;
-}
-
-void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
- if (!isolate_->use_optimizer()) return;
- MarkCandidatesForOptimizationScope scope(this);
-
- JSFunction function = frame->function();
- CodeKind code_kind = function.GetActiveTier().value();
-
- DCHECK(function.shared().is_compiled());
- DCHECK(function.shared().IsInterpreted());
-
- DCHECK_IMPLIES(CodeKindIsOptimizedJSFunction(code_kind),
- function.has_feedback_vector());
- if (!function.has_feedback_vector()) return;
-
- function.feedback_vector().SaturatingIncrementProfilerTicks();
- MaybeOptimizeFrame(function, frame, code_kind);
-}
-
-void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
- JavaScriptFrameIterator it(isolate_);
- DCHECK(it.frame()->is_unoptimized());
- MarkCandidatesForOptimization(it.frame());
-}
-
-void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
- JavaScriptFrameIterator it(isolate_);
- DCHECK(it.frame()->is_optimized());
- MarkCandidatesForOptimization(it.frame());
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index fc47b9e995..88d5ad5676 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/s390/register-s390.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -46,36 +46,35 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- static constexpr RegList kPushedGpRegs =
- Register::ListOf(r2, r3, r4, r5, r6, r7, r8, cp);
+ static constexpr RegList kPushedGpRegs = {r2, r3, r4, r5, r6, r7, r8, cp};
- static constexpr RegList kPushedFpRegs = DoubleRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+ static constexpr DoubleRegList kPushedFpRegs = {d0, d1, d2, d3, d4, d5, d6,
+ d7, d8, d9, d10, d11, d12};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-TypedFrameConstants::kFixedFrameSizeFromFp -
kSystemPointerSize * kNumPushedGpRegisters;
static constexpr int kLastPushedFpRegisterOffset =
- kLastPushedGpRegisterOffset - kDoubleSize * kNumPushedFpRegisters;
+ kLastPushedGpRegisterOffset - kSimd128Size * kNumPushedFpRegisters;
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
- base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ base::bits::CountPopulation(lower_regs) * kSimd128Size;
}
};
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 42d2000bcb..77b6dd22ba 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -8310,9 +8310,16 @@ EVALUATE(SGR) {
}
EVALUATE(ALGR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(ALGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // 64-bit Non-clobbering unsigned arithmetics
+ uint64_t r1_val = get_register(r1);
+ uint64_t r2_val = get_register(r2);
+ bool isOF = CheckOverflowForUIntAdd(r1_val, r2_val);
+ SetS390ConditionCode<uint64_t>(r1_val + r2_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val + r2_val);
+ return length;
}
EVALUATE(SLGR) {
diff --git a/deps/v8/src/execution/simulator-base.cc b/deps/v8/src/execution/simulator-base.cc
index 6bdb8f8a17..32ef8432e2 100644
--- a/deps/v8/src/execution/simulator-base.cc
+++ b/deps/v8/src/execution/simulator-base.cc
@@ -106,11 +106,6 @@ void SimulatorData::RegisterFunctionsAndSignatures(
}
}
-void SimulatorData::AddSignatureForTarget(Address target,
- const EncodedCSignature& signature) {
- target_to_signature_table_[target] = signature;
-}
-
const EncodedCSignature& SimulatorData::GetSignatureForTarget(Address target) {
base::MutexGuard guard(&signature_map_mutex_);
auto entry = target_to_signature_table_.find(target);
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index c97cecfdc1..ab8679aca2 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -222,7 +222,9 @@ class SimulatorData {
private:
void AddSignatureForTarget(Address target,
- const EncodedCSignature& signature);
+ const EncodedCSignature& signature) {
+ target_to_signature_table_[target] = signature;
+ }
v8::base::Mutex signature_map_mutex_;
typedef std::unordered_map<Address, EncodedCSignature> TargetToSignatureTable;
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index 9a7c8cb6eb..ddd6b7f153 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -8,7 +8,6 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
#include "src/objects/backing-store.h"
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 3bdeb227a8..302ad9a7b1 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -19,6 +19,7 @@ void ThreadLocalTop::Clear() {
pending_handler_constant_pool_ = kNullAddress;
pending_handler_fp_ = kNullAddress;
pending_handler_sp_ = kNullAddress;
+ num_frames_above_pending_handler_ = 0;
last_api_entry_ = kNullAddress;
pending_message_ = Object();
rethrowing_message_ = false;
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index b072005d40..98c4f1b60f 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -35,9 +35,9 @@ class ThreadLocalTop {
// refactor this to really consist of just Addresses and 32-bit
// integer fields.
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- static constexpr uint32_t kSizeInBytes = 26 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 27 * kSystemPointerSize;
#else
- static constexpr uint32_t kSizeInBytes = 25 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 26 * kSystemPointerSize;
#endif
// Does early low-level initialization that does not depend on the
@@ -118,6 +118,7 @@ class ThreadLocalTop {
Address pending_handler_constant_pool_;
Address pending_handler_fp_;
Address pending_handler_sp_;
+ uintptr_t num_frames_above_pending_handler_;
Address last_api_entry_;
diff --git a/deps/v8/src/execution/tiering-manager.cc b/deps/v8/src/execution/tiering-manager.cc
new file mode 100644
index 0000000000..e87b170a60
--- /dev/null
+++ b/deps/v8/src/execution/tiering-manager.cc
@@ -0,0 +1,412 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/tiering-manager.h"
+
+#include "src/base/platform/platform.h"
+#include "src/baseline/baseline-batch-compiler.h"
+#include "src/baseline/baseline.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/pending-optimization-table.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/init/bootstrapper.h"
+#include "src/interpreter/interpreter.h"
+#include "src/tracing/trace-event.h"
+
+namespace v8 {
+namespace internal {
+
+// Maximum size in bytes of generate code for a function to allow OSR.
+static const int kOSRBytecodeSizeAllowanceBase = 119;
+static const int kOSRBytecodeSizeAllowancePerTick = 44;
+
+#define OPTIMIZATION_REASON_LIST(V) \
+ V(DoNotOptimize, "do not optimize") \
+ V(HotAndStable, "hot and stable") \
+ V(SmallFunction, "small function")
+
+enum class OptimizationReason : uint8_t {
+#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
+ OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
+#undef OPTIMIZATION_REASON_CONSTANTS
+};
+
+char const* OptimizationReasonToString(OptimizationReason reason) {
+ static char const* reasons[] = {
+#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
+ OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
+#undef OPTIMIZATION_REASON_TEXTS
+ };
+ size_t const index = static_cast<size_t>(reason);
+ DCHECK_LT(index, arraysize(reasons));
+ return reasons[index];
+}
+
+#undef OPTIMIZATION_REASON_LIST
+
+std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
+ return os << OptimizationReasonToString(reason);
+}
+
+class OptimizationDecision {
+ public:
+ static constexpr OptimizationDecision Maglev() {
+ // TODO(v8:7700): Consider using another reason here.
+ // TODO(v8:7700): Support concurrency.
+ return {OptimizationReason::kHotAndStable, CodeKind::MAGLEV,
+ ConcurrencyMode::kNotConcurrent};
+ }
+ static constexpr OptimizationDecision TurbofanHotAndStable() {
+ return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
+ ConcurrencyMode::kConcurrent};
+ }
+ static constexpr OptimizationDecision TurbofanSmallFunction() {
+ return {OptimizationReason::kSmallFunction, CodeKind::TURBOFAN,
+ ConcurrencyMode::kConcurrent};
+ }
+ static constexpr OptimizationDecision DoNotOptimize() {
+ return {OptimizationReason::kDoNotOptimize,
+ // These values don't matter but we have to pass something.
+ CodeKind::TURBOFAN, ConcurrencyMode::kConcurrent};
+ }
+
+ constexpr bool should_optimize() const {
+ return optimization_reason != OptimizationReason::kDoNotOptimize;
+ }
+
+ OptimizationReason optimization_reason;
+ CodeKind code_kind;
+ ConcurrencyMode concurrency_mode;
+
+ private:
+ OptimizationDecision() = default;
+ constexpr OptimizationDecision(OptimizationReason optimization_reason,
+ CodeKind code_kind,
+ ConcurrencyMode concurrency_mode)
+ : optimization_reason(optimization_reason),
+ code_kind(code_kind),
+ concurrency_mode(concurrency_mode) {}
+};
+// Since we pass by value:
+STATIC_ASSERT(sizeof(OptimizationDecision) <= kInt32Size);
+
+namespace {
+
+void TraceInOptimizationQueue(JSFunction function) {
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not marking function ");
+ function.PrintName();
+ PrintF(" for optimization: already queued]\n");
+ }
+}
+
+void TraceHeuristicOptimizationDisallowed(JSFunction function) {
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not marking function ");
+ function.PrintName();
+ PrintF(
+ " for optimization: marked with "
+ "%%PrepareFunctionForOptimization for manual optimization]\n");
+ }
+}
+
+void TraceRecompile(Isolate* isolate, JSFunction function,
+ OptimizationDecision d) {
+ if (FLAG_trace_opt) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[marking ");
+ function.ShortPrint(scope.file());
+ PrintF(scope.file(), " for optimization to %s, %s, reason: %s",
+ CodeKindToString(d.code_kind), ToString(d.concurrency_mode),
+ OptimizationReasonToString(d.optimization_reason));
+ PrintF(scope.file(), "]\n");
+ }
+}
+
+} // namespace
+
+void TraceManualRecompile(JSFunction function, CodeKind code_kind,
+ ConcurrencyMode concurrency_mode) {
+ if (FLAG_trace_opt) {
+ PrintF("[manually marking ");
+ function.ShortPrint();
+ PrintF(" for optimization to %s, %s]\n", CodeKindToString(code_kind),
+ ToString(concurrency_mode));
+ }
+}
+
+void TieringManager::Optimize(JSFunction function, CodeKind code_kind,
+ OptimizationDecision d) {
+ DCHECK(d.should_optimize());
+ TraceRecompile(isolate_, function, d);
+ function.MarkForOptimization(isolate_, d.code_kind, d.concurrency_mode);
+}
+
+void TieringManager::AttemptOnStackReplacement(UnoptimizedFrame* frame,
+ int loop_nesting_levels) {
+ JSFunction function = frame->function();
+ SharedFunctionInfo shared = function.shared();
+ if (!FLAG_use_osr || !shared.IsUserJavaScript()) {
+ return;
+ }
+
+ // If the code is not optimizable, don't try OSR.
+ if (shared.optimization_disabled()) return;
+
+ // We're using on-stack replacement: Store new loop nesting level in
+ // BytecodeArray header so that certain back edges in any interpreter frame
+ // for this bytecode will trigger on-stack replacement for that frame.
+ if (FLAG_trace_osr) {
+ CodeTracer::Scope scope(isolate_->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - arming back edges in ");
+ function.PrintName(scope.file());
+ PrintF(scope.file(), "]\n");
+ }
+
+ DCHECK(frame->is_unoptimized());
+ int level = frame->GetBytecodeArray().osr_loop_nesting_level();
+ frame->GetBytecodeArray().set_osr_loop_nesting_level(std::min(
+ {level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker}));
+}
+
+namespace {
+
+bool TiersUpToMaglev(CodeKind code_kind) {
+ // TODO(v8:7700): Flip the UNLIKELY when appropriate.
+ return V8_UNLIKELY(FLAG_maglev) && CodeKindIsUnoptimizedJSFunction(code_kind);
+}
+
+bool TiersUpToMaglev(base::Optional<CodeKind> code_kind) {
+ return code_kind.has_value() && TiersUpToMaglev(code_kind.value());
+}
+
+} // namespace
+
+// static
+int TieringManager::InterruptBudgetFor(Isolate* isolate, JSFunction function) {
+ if (function.has_feedback_vector()) {
+ return TiersUpToMaglev(function.GetActiveTier())
+ ? FLAG_interrupt_budget_for_maglev
+ : FLAG_interrupt_budget;
+ }
+
+ DCHECK(!function.has_feedback_vector());
+ DCHECK(function.shared().is_compiled());
+ return function.shared().GetBytecodeArray(isolate).length() *
+ FLAG_interrupt_budget_factor_for_feedback_allocation;
+}
+
+// static
+int TieringManager::InitialInterruptBudget() {
+ return V8_LIKELY(FLAG_lazy_feedback_allocation)
+ ? FLAG_interrupt_budget_for_feedback_allocation
+ : FLAG_interrupt_budget;
+}
+
+void TieringManager::MaybeOptimizeFrame(JSFunction function,
+ JavaScriptFrame* frame,
+ CodeKind code_kind) {
+ if (function.IsInOptimizationQueue()) {
+ TraceInOptimizationQueue(function);
+ return;
+ }
+
+ if (FLAG_testing_d8_test_runner &&
+ !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
+ function)) {
+ TraceHeuristicOptimizationDisallowed(function);
+ return;
+ }
+
+ // TODO(v8:7700): Consider splitting this up for Maglev/Turbofan.
+ if (function.shared().optimization_disabled()) return;
+
+ if (frame->is_unoptimized()) {
+ if (V8_UNLIKELY(FLAG_always_osr)) {
+ AttemptOnStackReplacement(UnoptimizedFrame::cast(frame),
+ AbstractCode::kMaxLoopNestingMarker);
+ // Fall through and do a normal optimized compile as well.
+ } else if (MaybeOSR(function, UnoptimizedFrame::cast(frame))) {
+ return;
+ }
+ }
+
+ OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
+ if (d.should_optimize()) Optimize(function, code_kind, d);
+}
+
+bool TieringManager::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
+ int ticks = function.feedback_vector().profiler_ticks();
+ if (function.IsMarkedForOptimization() ||
+ function.IsMarkedForConcurrentOptimization() ||
+ function.HasAvailableOptimizedCode()) {
+ int64_t allowance = kOSRBytecodeSizeAllowanceBase +
+ ticks * kOSRBytecodeSizeAllowancePerTick;
+ if (function.shared().GetBytecodeArray(isolate_).length() <= allowance) {
+ AttemptOnStackReplacement(frame);
+ }
+ return true;
+ }
+ return false;
+}
+
+namespace {
+
+bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
+ return !any_ic_changed &&
+ bytecode_size < FLAG_max_bytecode_size_for_early_opt;
+}
+
+} // namespace
+
+OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
+ CodeKind code_kind,
+ JavaScriptFrame* frame) {
+ DCHECK_EQ(code_kind, function.GetActiveTier().value());
+
+ if (TiersUpToMaglev(code_kind)) {
+ return OptimizationDecision::Maglev();
+ } else if (code_kind == CodeKind::TURBOFAN) {
+ // Already in the top tier.
+ return OptimizationDecision::DoNotOptimize();
+ }
+
+ // If function's SFI has OSR cache, once enter loop range of OSR cache, set
+ // OSR loop nesting level for matching condition of OSR (loop_depth <
+ // osr_level), soon later OSR will be triggered when executing bytecode
+ // JumpLoop which is entry of the OSR cache, then hit the OSR cache.
+ BytecodeArray bytecode = function.shared().GetBytecodeArray(isolate_);
+ if (V8_UNLIKELY(function.shared().osr_code_cache_state() > kNotCached) &&
+ frame->is_unoptimized()) {
+ int current_offset =
+ static_cast<UnoptimizedFrame*>(frame)->GetBytecodeOffset();
+ OSROptimizedCodeCache cache =
+ function.context().native_context().GetOSROptimizedCodeCache();
+ std::vector<int> bytecode_offsets =
+ cache.GetBytecodeOffsetsFromSFI(function.shared());
+ interpreter::BytecodeArrayIterator iterator(
+ Handle<BytecodeArray>(bytecode, isolate_));
+ for (int jump_offset : bytecode_offsets) {
+ iterator.SetOffset(jump_offset);
+ int jump_target_offset = iterator.GetJumpTargetOffset();
+ if (jump_offset >= current_offset &&
+ current_offset >= jump_target_offset) {
+ bytecode.set_osr_loop_nesting_level(iterator.GetImmediateOperand(1) +
+ 1);
+ return OptimizationDecision::TurbofanHotAndStable();
+ }
+ }
+ }
+ const int ticks = function.feedback_vector().profiler_ticks();
+ const int ticks_for_optimization =
+ FLAG_ticks_before_optimization +
+ (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
+ if (ticks >= ticks_for_optimization) {
+ return OptimizationDecision::TurbofanHotAndStable();
+ } else if (ShouldOptimizeAsSmallFunction(bytecode.length(),
+ any_ic_changed_)) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ return OptimizationDecision::TurbofanSmallFunction();
+ } else if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function.PrintName();
+ PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
+ if (any_ic_changed_) {
+ PrintF("ICs changed]\n");
+ } else {
+ PrintF(" too large for small function optimization: %d/%d]\n",
+ bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
+ }
+ }
+ return OptimizationDecision::DoNotOptimize();
+}
+
+TieringManager::OnInterruptTickScope::OnInterruptTickScope(
+ TieringManager* profiler)
+ : profiler_(profiler) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.MarkCandidatesForOptimization");
+}
+
+TieringManager::OnInterruptTickScope::~OnInterruptTickScope() {
+ profiler_->any_ic_changed_ = false;
+}
+
+void TieringManager::OnInterruptTick(Handle<JSFunction> function) {
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate_));
+
+ // Remember whether the function had a vector at this point. This is relevant
+ // later since the configuration 'Ignition without a vector' can be
+ // considered a tier on its own. We begin tiering up to tiers higher than
+ // Sparkplug only when reaching this point *with* a feedback vector.
+ const bool had_feedback_vector = function->has_feedback_vector();
+
+ // Ensure that the feedback vector has been allocated, and reset the
+ // interrupt budget in preparation for the next tick.
+ if (had_feedback_vector) {
+ function->SetInterruptBudget(isolate_);
+ } else {
+ JSFunction::CreateAndAttachFeedbackVector(isolate_, function,
+ &is_compiled_scope);
+ DCHECK(is_compiled_scope.is_compiled());
+ // Also initialize the invocation count here. This is only really needed for
+ // OSR. When we OSR functions with lazy feedback allocation we want to have
+ // a non zero invocation count so we can inline functions.
+ function->feedback_vector().set_invocation_count(1, kRelaxedStore);
+ }
+
+ DCHECK(function->has_feedback_vector());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().HasBytecodeArray());
+
+ // TODO(jgruber): Consider integrating this into a linear tiering system
+ // controlled by OptimizationMarker in which the order is always
+ // Ignition-Sparkplug-Turbofan, and only a single tierup is requested at
+ // once.
+ // It's unclear whether this is possible and/or makes sense - for example,
+ // batching compilation can introduce arbitrary latency between the SP
+ // compile request and fulfillment, which doesn't work with strictly linear
+ // tiering.
+ if (CanCompileWithBaseline(isolate_, function->shared()) &&
+ !function->ActiveTierIsBaseline()) {
+ if (FLAG_baseline_batch_compilation) {
+ isolate_->baseline_batch_compiler()->EnqueueFunction(function);
+ } else {
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate_));
+ Compiler::CompileBaseline(isolate_, function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope);
+ }
+ }
+
+ // We only tier up beyond sparkplug if we already had a feedback vector.
+ if (!had_feedback_vector) return;
+
+ // Don't tier up if Turbofan is disabled.
+ // TODO(jgruber): Update this for a multi-tier world.
+ if (V8_UNLIKELY(!isolate_->use_optimizer())) return;
+
+ // --- We've decided to proceed for now. ---
+
+ DisallowGarbageCollection no_gc;
+ OnInterruptTickScope scope(this);
+ JSFunction function_obj = *function;
+
+ function_obj.feedback_vector().SaturatingIncrementProfilerTicks();
+
+ JavaScriptFrameIterator it(isolate_);
+ DCHECK(it.frame()->is_unoptimized());
+ const CodeKind code_kind = function_obj.GetActiveTier().value();
+ MaybeOptimizeFrame(function_obj, it.frame(), code_kind);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/runtime-profiler.h b/deps/v8/src/execution/tiering-manager.h
index 86cdba4b2a..ce1cde3613 100644
--- a/deps/v8/src/execution/runtime-profiler.h
+++ b/deps/v8/src/execution/tiering-manager.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXECUTION_RUNTIME_PROFILER_H_
-#define V8_EXECUTION_RUNTIME_PROFILER_H_
+#ifndef V8_EXECUTION_TIERING_MANAGER_H_
+#define V8_EXECUTION_TIERING_MANAGER_H_
#include "src/common/assert-scope.h"
#include "src/handles/handles.h"
@@ -17,27 +17,30 @@ class Isolate;
class UnoptimizedFrame;
class JavaScriptFrame;
class JSFunction;
-enum class CodeKind;
+class OptimizationDecision;
+enum class CodeKind : uint8_t;
enum class OptimizationReason : uint8_t;
-class RuntimeProfiler {
+void TraceManualRecompile(JSFunction function, CodeKind code_kind,
+ ConcurrencyMode concurrency_mode);
+
+class TieringManager {
public:
- explicit RuntimeProfiler(Isolate* isolate);
+ explicit TieringManager(Isolate* isolate) : isolate_(isolate) {}
- // Called from the interpreter when the bytecode interrupt has been exhausted.
- void MarkCandidatesForOptimizationFromBytecode();
- // Likewise, from generated code.
- void MarkCandidatesForOptimizationFromCode();
+ void OnInterruptTick(Handle<JSFunction> function);
void NotifyICChanged() { any_ic_changed_ = true; }
void AttemptOnStackReplacement(UnoptimizedFrame* frame,
int nesting_levels = 1);
- private:
- // Helper function called from MarkCandidatesForOptimization*
- void MarkCandidatesForOptimization(JavaScriptFrame* frame);
+ // For use when a JSFunction is available.
+ static int InterruptBudgetFor(Isolate* isolate, JSFunction function);
+ // For use when no JSFunction is available.
+ static int InitialInterruptBudget();
+ private:
// Make the decision whether to optimize the given function, and mark it for
// optimization if the decision was 'yes'.
void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
@@ -46,28 +49,27 @@ class RuntimeProfiler {
// Potentially attempts OSR from and returns whether no other
// optimization attempts should be made.
bool MaybeOSR(JSFunction function, UnoptimizedFrame* frame);
- OptimizationReason ShouldOptimize(JSFunction function,
- BytecodeArray bytecode_array);
- void Optimize(JSFunction function, OptimizationReason reason,
- CodeKind code_kind);
+ OptimizationDecision ShouldOptimize(JSFunction function, CodeKind code_kind,
+ JavaScriptFrame* frame);
+ void Optimize(JSFunction function, CodeKind code_kind,
+ OptimizationDecision decision);
void Baseline(JSFunction function, OptimizationReason reason);
- class V8_NODISCARD MarkCandidatesForOptimizationScope final {
+ class V8_NODISCARD OnInterruptTickScope final {
public:
- explicit MarkCandidatesForOptimizationScope(RuntimeProfiler* profiler);
- ~MarkCandidatesForOptimizationScope();
+ explicit OnInterruptTickScope(TieringManager* profiler);
+ ~OnInterruptTickScope();
private:
- HandleScope handle_scope_;
- RuntimeProfiler* const profiler_;
+ TieringManager* const profiler_;
DisallowGarbageCollection no_gc;
};
- Isolate* isolate_;
- bool any_ic_changed_;
+ Isolate* const isolate_;
+ bool any_ic_changed_ = false;
};
} // namespace internal
} // namespace v8
-#endif // V8_EXECUTION_RUNTIME_PROFILER_H_
+#endif // V8_EXECUTION_TIERING_MANAGER_H_
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 9fb8f1c30c..e06f49c5c0 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -31,8 +31,11 @@ void Locker::Initialize(v8::Isolate* isolate) {
has_lock_ = false;
top_level_ = true;
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
+
// Record that the Locker has been used at least once.
base::Relaxed_Store(&g_locker_was_ever_used_, 1);
+ isolate_->set_was_locker_ever_used();
+
// Get the big lock if necessary.
if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
isolate_->thread_manager()->Lock();
diff --git a/deps/v8/src/execution/v8threads.h b/deps/v8/src/execution/v8threads.h
index ccaa6b1bef..69fb91f91b 100644
--- a/deps/v8/src/execution/v8threads.h
+++ b/deps/v8/src/execution/v8threads.h
@@ -60,7 +60,7 @@ class ThreadVisitor {
class ThreadManager {
public:
void Lock();
- void Unlock();
+ V8_EXPORT_PRIVATE void Unlock();
void InitThread(const ExecutionAccess&);
void ArchiveThread();
diff --git a/deps/v8/src/execution/vm-state-inl.h b/deps/v8/src/execution/vm-state-inl.h
index 91fcbf30c7..d4de5abf8c 100644
--- a/deps/v8/src/execution/vm-state-inl.h
+++ b/deps/v8/src/execution/vm-state-inl.h
@@ -56,7 +56,7 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()),
vm_state_(isolate),
- pause_timed_histogram_scope_(isolate->counters()->execute_precise()) {
+ pause_timed_histogram_scope_(isolate->counters()->execute()) {
#ifdef USE_SIMULATOR
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 6e1522da25..329ba6289a 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/codegen/x64/register-x64.h"
+#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -61,16 +61,13 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- static constexpr RegList kPushedGpRegs =
- Register::ListOf(rax, rcx, rdx, rbx, rsi, rdi, r9);
+ static constexpr RegList kPushedGpRegs = {rax, rcx, rdx, rbx, rsi, rdi, r9};
- static constexpr RegList kPushedFpRegs =
- DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
+ static constexpr DoubleRegList kPushedFpRegs = {xmm0, xmm1, xmm2, xmm3,
+ xmm4, xmm5, xmm6, xmm7};
- static constexpr int kNumPushedGpRegisters =
- base::bits::CountPopulation(kPushedGpRegs);
- static constexpr int kNumPushedFpRegisters =
- base::bits::CountPopulation(kPushedFpRegs);
+ static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
+ static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
@@ -79,15 +76,17 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
- uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
+ uint32_t lower_regs =
+ kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSimd128Size;
}
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index cda90bd507..73d7669f9b 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -9,6 +9,7 @@
#include "include/v8-persistent-handle.h"
#include "include/v8-primitive.h"
#include "include/v8-template.h"
+#include "src/base/optional.h"
#include "src/base/platform/platform.h"
#include "src/execution/isolate.h"
#include "src/heap/heap.h"
@@ -77,8 +78,8 @@ Maybe<GCOptions> Parse(v8::Isolate* isolate,
return Just<GCOptions>(options);
}
-void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type,
- v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
+void InvokeGC(v8::Isolate* isolate, ExecutionType execution_type,
+ v8::Isolate::GarbageCollectionType type) {
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
switch (type) {
case v8::Isolate::GarbageCollectionType::kMinorGarbageCollection:
@@ -86,7 +87,15 @@ void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type,
kGCCallbackFlagForced);
break;
case v8::Isolate::GarbageCollectionType::kFullGarbageCollection:
- heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
+ EmbedderStackStateScope stack_scope(
+ heap,
+ execution_type == ExecutionType::kAsync
+ ? EmbedderStackStateScope::kImplicitThroughTask
+ : EmbedderStackStateScope::kExplicitInvocation,
+ execution_type == ExecutionType::kAsync
+ ? v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers
+ : v8::EmbedderHeapTracer::EmbedderStackState::
+ kMayContainHeapPointers);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
@@ -110,8 +119,7 @@ class AsyncGC final : public CancelableTask {
void RunInternal() final {
v8::HandleScope scope(isolate_);
- InvokeGC(isolate_, type_,
- v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ InvokeGC(isolate_, ExecutionType::kAsync, type_);
auto resolver = v8::Local<v8::Promise::Resolver>::New(isolate_, resolver_);
auto ctx = Local<v8::Context>::New(isolate_, ctx_);
resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked();
@@ -136,9 +144,8 @@ void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Immediate bailout if no arguments are provided.
if (args.Length() == 0) {
- InvokeGC(
- isolate, v8::Isolate::GarbageCollectionType::kFullGarbageCollection,
- v8::EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ InvokeGC(isolate, ExecutionType::kSync,
+ v8::Isolate::GarbageCollectionType::kFullGarbageCollection);
return;
}
@@ -147,9 +154,7 @@ void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
GCOptions options = maybe_options.ToChecked();
switch (options.execution) {
case ExecutionType::kSync:
- InvokeGC(
- isolate, options.type,
- v8::EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ InvokeGC(isolate, ExecutionType::kSync, options.type);
break;
case ExecutionType::kAsync: {
v8::HandleScope scope(isolate);
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 976a97ad73..17cb7a9ef1 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -125,36 +125,41 @@ void StatisticsExtension::GetCounters(
AddNumber64(args.GetIsolate(), result, heap->external_memory(),
"amount_of_external_allocated_memory");
- args.GetReturnValue().Set(result);
- DisallowGarbageCollection no_gc;
- HeapObjectIterator iterator(
- reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
int reloc_info_total = 0;
int source_position_table_total = 0;
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- Object maybe_source_positions;
- if (obj.IsCode()) {
- Code code = Code::cast(obj);
- reloc_info_total += code.relocation_info().Size();
- maybe_source_positions = code.source_position_table();
- } else if (obj.IsBytecodeArray()) {
- maybe_source_positions =
- BytecodeArray::cast(obj).source_position_table(kAcquireLoad);
- } else {
- continue;
+ {
+ HeapObjectIterator iterator(
+ reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
+ DCHECK(!AllowGarbageCollection::IsAllowed());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ Object maybe_source_positions;
+ if (obj.IsCode()) {
+ Code code = Code::cast(obj);
+ reloc_info_total += code.relocation_info().Size();
+ // Baseline code doesn't have source positions since it uses
+ // interpreter code positions.
+ if (code.kind() == CodeKind::BASELINE) continue;
+ maybe_source_positions = code.source_position_table();
+ } else if (obj.IsBytecodeArray()) {
+ maybe_source_positions =
+ BytecodeArray::cast(obj).source_position_table(kAcquireLoad);
+ } else {
+ continue;
+ }
+ if (!maybe_source_positions.IsByteArray()) continue;
+ ByteArray source_positions = ByteArray::cast(maybe_source_positions);
+ if (source_positions.length() == 0) continue;
+ source_position_table_total += source_positions.Size();
}
- if (!maybe_source_positions.IsByteArray()) continue;
- ByteArray source_positions = ByteArray::cast(maybe_source_positions);
- if (source_positions.length() == 0) continue;
- source_position_table_total += source_positions.Size();
}
AddNumber(args.GetIsolate(), result, reloc_info_total,
"reloc_info_total_size");
AddNumber(args.GetIsolate(), result, source_position_table_total,
"source_position_table_total_size");
+ args.GetReturnValue().Set(result);
}
} // namespace internal
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 6a8eb14677..7ed7cccbcc 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -169,21 +169,21 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL false
#endif
-#ifdef V8_HEAP_SANDBOX
-#define V8_HEAP_SANDBOX_BOOL true
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL true
#else
-#define V8_HEAP_SANDBOX_BOOL false
+#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL false
#endif
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-#define V8_VIRTUAL_MEMORY_CAGE_BOOL true
+#ifdef V8_SANDBOX
+#define V8_SANDBOX_BOOL true
#else
-#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
+#define V8_SANDBOX_BOOL false
#endif
-// D8's MultiMappedAllocator is only available on Linux, and only if the virtual
-// memory cage is not enabled.
-#if V8_OS_LINUX && !V8_VIRTUAL_MEMORY_CAGE_BOOL
+// D8's MultiMappedAllocator is only available on Linux, and only if the sandbox
+// is not enabled.
+#if V8_OS_LINUX && !V8_SANDBOX_BOOL
#define MULTI_MAPPED_ALLOCATOR_AVAILABLE true
#else
#define MULTI_MAPPED_ALLOCATOR_AVAILABLE false
@@ -195,13 +195,11 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
-#define ENABLE_SPARKPLUG true
-#else
-// TODO(v8:11421): Enable Sparkplug for other architectures
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// TODO(v8:11421): Enable Sparkplug for these architectures.
#define ENABLE_SPARKPLUG false
+#else
+#define ENABLE_SPARKPLUG true
#endif
#if ENABLE_SPARKPLUG && !defined(ANDROID)
@@ -211,7 +209,7 @@ struct MaybeBoolFlag {
#define ENABLE_SPARKPLUG_BY_DEFAULT false
#endif
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Must be enabled on M1.
#define MUST_WRITE_PROTECT_CODE_MEMORY true
#else
@@ -292,6 +290,8 @@ DEFINE_BOOL(allow_overwriting_for_next_flag, false,
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
+DEFINE_BOOL(trace_temporal, false, "trace temporal code")
+
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
@@ -304,23 +304,26 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_import_assertions, "harmony import assertions") \
V(harmony_rab_gsab, \
"harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \
- V(harmony_temporal, "Temporal")
+ V(harmony_temporal, "Temporal") \
+ V(harmony_shadow_realm, "harmony ShadowRealm") \
+ V(harmony_struct, "harmony structs and shared structs")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_intl_number_format_v3, "Intl.NumberFormat v3")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
-// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V)
+// Features that are complete (but still behind the --harmony flag).
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_array_grouping, "harmony array grouping")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
- V(harmony_intl_enumeration, "Intl Enumberation API") \
- V(harmony_intl_locale_info, "Intl locale info")
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
@@ -431,7 +434,6 @@ DEFINE_NEG_IMPLICATION(enable_third_party_heap, inline_new)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_inlining)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, script_streaming)
DEFINE_NEG_IMPLICATION(enable_third_party_heap,
parallel_compile_tasks_for_eager_toplevel)
@@ -475,6 +477,9 @@ DEFINE_BOOL_READONLY(enable_unconditional_write_barriers,
V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL,
"always use full write barriers")
+DEFINE_BOOL(use_full_record_write_builtin, true,
+ "Force use of full version of RecordWrite builtin.")
+
#ifdef V8_ENABLE_SINGLE_GENERATION
#define V8_SINGLE_GENERATION_BOOL true
#else
@@ -503,6 +508,20 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
+#ifdef V8_ENABLE_MAGLEV
+#define V8_ENABLE_MAGLEV_BOOL true
+DEFINE_BOOL(maglev, false, "enable the maglev optimizing compiler")
+#else
+#define V8_ENABLE_MAGLEV_BOOL false
+DEFINE_BOOL_READONLY(maglev, false, "enable the maglev optimizing compiler")
+#endif // V8_ENABLE_MAGLEV
+
+DEFINE_STRING(maglev_filter, "*", "optimization filter for the maglev compiler")
+DEFINE_BOOL(maglev_break_on_entry, false, "insert an int3 on maglev entries")
+DEFINE_BOOL(print_maglev_graph, false, "print maglev graph")
+DEFINE_BOOL(print_maglev_code, false, "print maglev code")
+DEFINE_BOOL(trace_maglev_regalloc, false, "trace maglev register allocation")
+
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
@@ -513,12 +532,11 @@ DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
#if !MUST_WRITE_PROTECT_CODE_MEMORY
DEFINE_WEAK_VALUE_IMPLICATION(future, write_protect_code_memory, false)
#endif
+DEFINE_WEAK_IMPLICATION(future, compact_maps)
DEFINE_BOOL_READONLY(dict_property_const_tracking,
V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
"Use const tracking on dictionary properties")
-DEFINE_NEG_IMPLICATION(dict_property_const_tracking, concurrent_inlining)
-DEFINE_NEG_IMPLICATION(dict_property_const_tracking, turboprop)
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -534,7 +552,11 @@ DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// No Sparkplug compilation.
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
-#endif
+#endif // ENABLE_SPARKPLUG
+#ifdef V8_ENABLE_MAGLEV
+// No Maglev compilation.
+DEFINE_NEG_IMPLICATION(jitless, maglev)
+#endif // V8_ENABLE_MAGLEV
#ifndef V8_TARGET_ARCH_ARM
// Unsupported on arm. See https://crbug.com/v8/8713.
@@ -545,7 +567,9 @@ DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
// TODO(tebbi): Support allocating types from background thread.
DEFINE_NEG_IMPLICATION(assert_types, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(assert_types, concurrent_inlining)
+
+DEFINE_BOOL(verify_simplified_lowering, false,
+ "verify graph generated by simplified lowering")
DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
@@ -593,29 +617,35 @@ DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+// Tiering: Sparkplug / feedback vector allocation.
+DEFINE_INT(interrupt_budget_for_feedback_allocation, 940,
+ "The fixed interrupt budget (in bytecode size) for allocating "
+ "feedback vectors")
+DEFINE_INT(interrupt_budget_factor_for_feedback_allocation, 8,
+ "The interrupt budget factor (applied to bytecode size) for "
+ "allocating feedback vectors, used when bytecode size is known")
+
+// Tiering: Maglev.
+// The Maglev interrupt budget is chosen to be roughly 1/10th of Turbofan's
+// overall budget (including the multiple required ticks).
+DEFINE_INT(interrupt_budget_for_maglev, 40 * KB,
+ "interrupt budget which should be used for the profiler counter")
+
+// Tiering: Turbofan.
+DEFINE_INT(interrupt_budget, 132 * KB,
+ "interrupt budget which should be used for the profiler counter")
DEFINE_INT(ticks_before_optimization, 3,
"the number of times we have to go through the interrupt budget "
"before considering this function for optimization")
DEFINE_INT(bytecode_size_allowance_per_tick, 1100,
"increases the number of ticks required for optimization by "
"bytecode.length/X")
-DEFINE_INT(interrupt_budget, 132 * KB,
- "interrupt budget which should be used for the profiler counter")
DEFINE_INT(
max_bytecode_size_for_early_opt, 81,
"Maximum bytecode length for a function to be optimized on the first tick")
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
-DEFINE_INT(budget_for_feedback_vector_allocation, 940,
- "The budget in amount of bytecode executed by a function before we "
- "decide to allocate feedback vectors")
-DEFINE_INT(scale_factor_for_feedback_allocation, 8,
- "scale bytecode size for feedback vector allocation.")
-DEFINE_BOOL(feedback_allocation_on_bytecode_size, true,
- "Instead of a fixed budget for lazy feedback vector allocation, "
- "scale it based in the bytecode size.")
-DEFINE_IMPLICATION(sparkplug, feedback_allocation_on_bytecode_size)
DEFINE_BOOL(lazy_feedback_allocation, true, "Allocate feedback vectors lazily")
// Flags for Ignition.
@@ -663,25 +693,6 @@ DEFINE_BOOL(trace_track_allocation_sites, false,
DEFINE_BOOL(trace_migration, false, "trace object migration")
DEFINE_BOOL(trace_generalization, false, "trace map generalization")
-// Flags for TurboProp.
-DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
-DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
- "enable mid-tier register allocator for turboprop")
-DEFINE_BOOL(
- turboprop_as_toptier, false,
- "enable experimental turboprop compiler without further tierup to turbofan")
-DEFINE_IMPLICATION(turboprop_as_toptier, turboprop)
-DEFINE_WEAK_VALUE_IMPLICATION(turboprop, interrupt_budget, 115 * KB)
-DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
- "max number of map checks to perform in minimorphic state")
-DEFINE_INT(turboprop_inline_scaling_factor, 4,
- "scale factor for reduction in bytecode that can be inline for "
- "TurboProp compared to TurboFan")
-// The scale factor determines the interrupt budget when tiering up from
-// Turboprop to TurboFan.
-DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 20,
- "scale factor for profiler ticks when tiering up from midtier")
-
// Flags for Sparkplug
#undef FLAG
#if ENABLE_SPARKPLUG
@@ -695,14 +706,13 @@ DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
#if ENABLE_SPARKPLUG
DEFINE_IMPLICATION(always_sparkplug, sparkplug)
DEFINE_BOOL(baseline_batch_compilation, true, "batch compile Sparkplug code")
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// M1 requires W^X.
DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
"compile Sparkplug code in a background thread")
#else
DEFINE_BOOL(concurrent_sparkplug, false,
"compile Sparkplug code in a background thread")
-DEFINE_IMPLICATION(concurrent_sparkplug, sparkplug)
DEFINE_WEAK_IMPLICATION(future, concurrent_sparkplug)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sparkplug)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sparkplug)
@@ -732,8 +742,9 @@ DEFINE_BOOL(trace_baseline_concurrent_compilation, false,
// Internalize into a shared string table in the shared isolate
DEFINE_BOOL(shared_string_table, false, "internalize strings into shared table")
+DEFINE_IMPLICATION(harmony_struct, shared_string_table)
-#if !defined(V8_OS_MACOSX) || !defined(V8_HOST_ARCH_ARM64)
+#if !defined(V8_OS_DARWIN) || !defined(V8_HOST_ARCH_ARM64)
DEFINE_BOOL(write_code_using_rwx, true,
"flip permissions to rwx to write page instead of rw")
DEFINE_NEG_IMPLICATION(jitless, write_code_using_rwx)
@@ -751,12 +762,10 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_BOOL(concurrent_inlining, true,
- "run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(
stress_concurrent_inlining, false,
"create additional concurrent optimization jobs but throw away result")
-DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
+DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
@@ -912,18 +921,12 @@ DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")
DEFINE_BOOL(turbo_fast_api_calls, false, "enable fast API calls from TurboFan")
-DEFINE_INT(reuse_opt_code_count, 0,
- "don't discard optimized code for the specified number of deopts.")
-DEFINE_BOOL(turbo_dynamic_map_checks, false,
- "use dynamic map checks when generating code for property accesses "
- "if all handlers in an IC are the same for turboprop")
DEFINE_BOOL(turbo_compress_translation_arrays, false,
"compress translation arrays (experimental)")
DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
DEFINE_BOOL(turbo_inline_js_wasm_calls, false, "inline JS->Wasm calls")
-DEFINE_BOOL(turbo_use_mid_tier_regalloc_for_huge_functions, false,
- "fall back to the mid-tier register allocator for huge functions "
- "(experimental)")
+DEFINE_BOOL(turbo_use_mid_tier_regalloc_for_huge_functions, true,
+ "fall back to the mid-tier register allocator for huge functions")
DEFINE_BOOL(turbo_force_mid_tier_regalloc, false,
"always use the mid-tier register allocator (for testing)")
@@ -958,14 +961,11 @@ DEFINE_INT(wasm_num_compilation_tasks, 128,
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
-DEFINE_BOOL(wasm_write_protect_code_memory, false,
+DEFINE_BOOL(wasm_write_protect_code_memory, true,
"write protect code memory on the wasm native heap with mprotect")
-DEFINE_WEAK_IMPLICATION(future, wasm_write_protect_code_memory)
-DEFINE_BOOL(wasm_memory_protection_keys, false,
- "protect wasm code memory with PKU if available, no protection "
- "without support; fallback to mprotect by adding "
- "--wasm-write-protect-code-memory")
-DEFINE_WEAK_IMPLICATION(future, wasm_memory_protection_keys)
+DEFINE_BOOL(wasm_memory_protection_keys, true,
+ "protect wasm code memory with PKU if available (takes precedence "
+ "over --wasm-write-protect-code-memory)")
DEFINE_DEBUG_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
@@ -984,6 +984,8 @@ DEFINE_BOOL(wasm_tier_up, true,
"have an effect)")
DEFINE_BOOL(wasm_dynamic_tiering, false,
"enable dynamic tier up to the optimizing compiler")
+DEFINE_NEG_NEG_IMPLICATION(liftoff, wasm_dynamic_tiering)
+DEFINE_WEAK_IMPLICATION(future, wasm_dynamic_tiering)
DEFINE_INT(wasm_tiering_budget, 1800000,
"budget for dynamic tiering (rough approximation of bytes executed")
DEFINE_INT(
@@ -1054,7 +1056,6 @@ FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
#undef DECL_WASM_FLAG
DEFINE_IMPLICATION(experimental_wasm_gc, experimental_wasm_typed_funcref)
-DEFINE_IMPLICATION(experimental_wasm_typed_funcref, experimental_wasm_reftypes)
DEFINE_BOOL(wasm_gc_js_interop, false, "experimental WasmGC-JS interop")
@@ -1083,9 +1084,9 @@ DEFINE_BOOL(
wasm_inlining, false,
"enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_SIZE_T(
- wasm_inlining_budget_factor, 100000,
+ wasm_inlining_budget_factor, 75000,
"maximum allowed size to inline a function is given by {n / caller size}")
-DEFINE_SIZE_T(wasm_inlining_max_size, 1250,
+DEFINE_SIZE_T(wasm_inlining_max_size, 1000,
"maximum size of a function that can be inlined, in TF nodes")
DEFINE_BOOL(wasm_speculative_inlining, false,
"enable speculative inlining of call_ref targets (experimental)")
@@ -1103,6 +1104,7 @@ DEFINE_NEG_IMPLICATION(liftoff_only, wasm_speculative_inlining)
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
+DEFINE_BOOL(wasm_loop_peeling, false, "enable loop peeling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
@@ -1339,6 +1341,11 @@ DEFINE_BOOL(compact, true,
"Perform compaction on full GCs based on V8's default heuristics")
DEFINE_BOOL(compact_code_space, true,
"Perform code space compaction on full collections.")
+DEFINE_BOOL(compact_maps, false,
+ "Perform compaction on maps on full collections.")
+DEFINE_BOOL(use_map_space, true, "Use separate space for maps.")
+// Without a map space we have to compact maps.
+DEFINE_NEG_VALUE_IMPLICATION(use_map_space, compact_maps, true)
DEFINE_BOOL(compact_on_every_full_gc, false,
"Perform compaction on every full GC")
DEFINE_BOOL(compact_with_stack, true,
@@ -1405,11 +1412,6 @@ DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
DEFINE_BOOL(crash_on_aborted_evacuation, false,
"crash when evacuation of page fails")
-DEFINE_BOOL_READONLY(
- young_generation_large_objects, true,
- "allocates large objects by default in the young generation large "
- "object space")
-
// assembler-ia32.cc / assembler-arm.cc / assembler-arm64.cc / assembler-x64.cc
#ifdef V8_ENABLE_DEBUG_CODE
DEFINE_BOOL(debug_code, DEBUG_BOOL,
@@ -1500,6 +1502,7 @@ DEFINE_STRING(expose_gc_as, nullptr,
DEFINE_IMPLICATION(expose_gc_as, expose_gc)
DEFINE_BOOL(expose_externalize_string, false,
"expose externalize string extension")
+DEFINE_BOOL(expose_statistics, false, "expose statistics extension")
DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_BOOL(expose_ignition_statistics, false,
"expose ignition-statistics extension (requires building with "
@@ -1636,6 +1639,11 @@ DEFINE_INT(heap_snapshot_string_limit, 1024,
"truncate strings to this length in the heap snapshot")
DEFINE_BOOL(heap_profiler_show_hidden_objects, false,
"use 'native' rather than 'hidden' node type in snapshot")
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+DEFINE_BOOL(heap_snapshot_verify, false,
+ "verify that heap snapshot matches marking visitor behavior")
+DEFINE_IMPLICATION(enable_slow_asserts, heap_snapshot_verify)
+#endif
// sampling-heap-profiler.cc
DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
@@ -1659,7 +1667,7 @@ DEFINE_INT(max_valid_polymorphic_map_count, 4,
DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
"generate extra code for manipulating stats counters")
-DEFINE_BOOL(super_ic, true, "use an IC for super property loads")
+DEFINE_BOOL(super_ic, false, "use an IC for super property loads")
DEFINE_BOOL(enable_mega_dom_ic, false, "use MegaDOM IC state for API objects")
@@ -1868,14 +1876,11 @@ DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
//
// Minor mark compact collector flags.
//
-#ifdef ENABLE_MINOR_MC
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
-#else
-DEFINE_BOOL_READONLY(minor_mc, false,
- "perform young generation mark compact GCs")
-#endif // ENABLE_MINOR_MC
+DEFINE_BOOL(minor_mc_sweeping, false,
+ "perform sweeping in young generation mark compact GCs")
//
// Dev shell flags
diff --git a/deps/v8/src/flags/flags.h b/deps/v8/src/flags/flags.h
index 07a29af5d4..c13cfcf184 100644
--- a/deps/v8/src/flags/flags.h
+++ b/deps/v8/src/flags/flags.h
@@ -5,8 +5,6 @@
#ifndef V8_FLAGS_FLAGS_H_
#define V8_FLAGS_FLAGS_H_
-#include <vector>
-
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index a5cc8672b6..fb4a2b4bea 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -12,7 +12,9 @@
#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
+#include "src/common/allow-deprecated.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/base/stack.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -385,7 +387,7 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
- Isolate* isolate = GetIsolateForHeapSandbox(jsobject);
+ Isolate* isolate = GetIsolateForSandbox(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
@@ -515,9 +517,11 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
case v8::WeakCallbackType::kInternalFields:
set_weakness_type(PHANTOM_WEAK_2_EMBEDDER_FIELDS);
break;
+ START_ALLOW_USE_DEPRECATED()
case v8::WeakCallbackType::kFinalizer:
set_weakness_type(FINALIZER_WEAK);
break;
+ END_ALLOW_USE_DEPRECATED()
}
set_parameter(parameter);
weak_callback_ = phantom_callback;
@@ -667,6 +671,8 @@ class GlobalHandles::TracedNode final
bool is_on_stack() const { return IsOnStack::decode(flags_); }
void set_is_on_stack(bool v) { flags_ = IsOnStack::update(flags_, v); }
+ void clear_object() { object_ = kNullAddress; }
+
void SetFinalizationCallback(void* parameter,
WeakCallbackInfo<void>::Callback callback) {
set_parameter(parameter);
@@ -697,7 +703,11 @@ class GlobalHandles::TracedNode final
void ResetPhantomHandle(HandleHolder handle_holder) {
DCHECK(IsInUse());
- if (handle_holder == HandleHolder::kLive) {
+ // Even if the handle holder should be alive, the back reference may have
+ // been cleared which prevents the handle from being reclaimed at this
+ // point. This can happen for explicitly reset handles during incremental
+ // marking that then cannot be reclaimed during Scavenge.
+ if (handle_holder == HandleHolder::kLive && data_.parameter) {
Address** handle = reinterpret_cast<Address**>(data_.parameter);
*handle = nullptr;
}
@@ -753,7 +763,7 @@ class GlobalHandles::OnStackTracedNodeSpace final {
void SetStackStart(void* stack_start) {
CHECK(on_stack_nodes_.empty());
- stack_start_ = base::Stack::GetRealStackAddressForSlot(stack_start);
+ stack_.SetStackStart(base::Stack::GetRealStackAddressForSlot(stack_start));
}
V8_INLINE bool IsOnStack(uintptr_t slot) const;
@@ -789,28 +799,17 @@ class GlobalHandles::OnStackTracedNodeSpace final {
std::map<uintptr_t, NodeEntry> on_stack_nodes_;
#endif // !V8_USE_ADDRESS_SANITIZER
- uintptr_t stack_start_ = 0;
+ ::heap::base::Stack stack_;
GlobalHandles* global_handles_ = nullptr;
size_t acquire_count_ = 0;
};
bool GlobalHandles::OnStackTracedNodeSpace::IsOnStack(uintptr_t slot) const {
-#ifdef V8_USE_ADDRESS_SANITIZER
- if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(),
- reinterpret_cast<void*>(slot), nullptr,
- nullptr)) {
- return true;
- }
-#endif // V8_USE_ADDRESS_SANITIZER
-#if defined(__has_feature)
-#if __has_feature(safe_stack)
- if (reinterpret_cast<uintptr_t>(__builtin___get_unsafe_stack_top()) >= slot &&
- slot > reinterpret_cast<uintptr_t>(__builtin___get_unsafe_stack_ptr())) {
- return true;
- }
-#endif // __has_feature(safe_stack)
-#endif // defined(__has_feature)
- return stack_start_ >= slot && slot > base::Stack::GetCurrentStackPosition();
+ // By the time this function is called, the stack start may not be set (i.e.
+ // SetStackStart() was not called). In that case, assume the slot is not on
+ // stack.
+ if (!stack_.stack_start()) return false;
+ return stack_.IsOnStack(reinterpret_cast<void*>(slot));
}
void GlobalHandles::OnStackTracedNodeSpace::NotifyEmptyEmbedderStack() {
@@ -877,12 +876,27 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
void GlobalHandles::OnStackTracedNodeSpace::CleanupBelowCurrentStackPosition() {
if (on_stack_nodes_.empty()) return;
- const auto it =
- on_stack_nodes_.upper_bound(base::Stack::GetCurrentStackPosition());
+ const uintptr_t stack_ptr = reinterpret_cast<uintptr_t>(
+ ::heap::base::Stack::GetCurrentStackPointerForLocalVariables());
+ const auto it = on_stack_nodes_.upper_bound(stack_ptr);
on_stack_nodes_.erase(on_stack_nodes_.begin(), it);
}
// static
+void GlobalHandles::EnableMarkingBarrier(Isolate* isolate) {
+ auto* global_handles = isolate->global_handles();
+ DCHECK(!global_handles->is_marking_);
+ global_handles->is_marking_ = true;
+}
+
+// static
+void GlobalHandles::DisableMarkingBarrier(Isolate* isolate) {
+ auto* global_handles = isolate->global_handles();
+ DCHECK(global_handles->is_marking_);
+ global_handles->is_marking_ = false;
+}
+
+// static
void GlobalHandles::TracedNode::Verify(GlobalHandles* global_handles,
const Address* const* slot) {
#ifdef DEBUG
@@ -1160,14 +1174,45 @@ void GlobalHandles::Destroy(Address* location) {
}
}
+// static
void GlobalHandles::DestroyTraced(Address* location) {
if (location != nullptr) {
TracedNode* node = TracedNode::FromLocation(location);
if (node->is_on_stack()) {
node->Release(nullptr);
- } else {
+ return;
+ }
+ DCHECK(!node->is_on_stack());
+
+ auto* global_handles = GlobalHandles::From(node);
+ // When marking is off the handle may be freed immediately. Note that this
+ // includes also the case when invoking the first pass callbacks during the
+ // atomic pause which requires releasing a node fully.
+ if (!global_handles->is_marking_) {
NodeSpace<TracedNode>::Release(node);
+ return;
}
+
+ // Incremental marking is on. This also covers the scavenge case which
+ // prohibits eagerly reclaiming nodes when marking is on during a scavenge.
+ //
+ // On-heap traced nodes are released in the atomic pause in
+ // `IterateWeakRootsForPhantomHandles()` when they are discovered as not
+ // marked.
+ //
+ // Eagerly clear out the object here to avoid needlessly marking it from
+ // this point on. Also clear out callback and backreference for the version
+ // with callbacks to avoid calling into possibly dead memory later.
+ //
+ // In the case this happens during incremental marking, the node may
+ // still be spuriously marked as live and is then only reclaimed on the
+ // next cycle.
+ node->clear_object();
+ node->set_parameter(nullptr);
+ node->SetFinalizationCallback(nullptr, nullptr);
+ // The destructor setting is left untouched to avoid casting a
+ // v8::TracedGlobal to a v8::TracedReference for the EmbedderRootsHandler
+ // which would be UB.
}
}
@@ -1291,8 +1336,10 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
if (is_unmodified(node->location())) {
v8::Value* value = ToApi<v8::Value>(node->handle());
if (node->has_destructor()) {
+ START_ALLOW_USE_DEPRECATED()
node->set_root(handler->IsRoot(
*reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
+ END_ALLOW_USE_DEPRECATED()
} else {
node->set_root(handler->IsRoot(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)));
@@ -1385,7 +1432,9 @@ void GlobalHandles::IterateYoungWeakObjectsForPhantomHandles(
v8::Value* value = ToApi<v8::Value>(node->handle());
handler->ResetRoot(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
- DCHECK(!node->IsInUse());
+ // We cannot check whether a node is in use here as the reset behavior
+ // depends on whether incremental marking is running when reclaiming
+ // young objects.
}
++number_of_phantom_handle_resets_;
@@ -1676,8 +1725,10 @@ void GlobalHandles::IterateTracedNodes(
if (node->IsInUse()) {
v8::Value* value = ToApi<v8::Value>(node->handle());
if (node->has_destructor()) {
+ START_ALLOW_USE_DEPRECATED()
visitor->VisitTracedGlobalHandle(
*reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+ END_ALLOW_USE_DEPRECATED()
} else {
visitor->VisitTracedReference(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index 86b276c2df..058af91069 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -45,6 +45,9 @@ enum WeaknessType {
// callbacks and finalizers attached to them.
class V8_EXPORT_PRIVATE GlobalHandles final {
public:
+ static void EnableMarkingBarrier(Isolate*);
+ static void DisableMarkingBarrier(Isolate*);
+
GlobalHandles(const GlobalHandles&) = delete;
GlobalHandles& operator=(const GlobalHandles&) = delete;
@@ -236,6 +239,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
Node* node);
Isolate* const isolate_;
+ bool is_marking_ = false;
std::unique_ptr<NodeSpace<Node>> regular_nodes_;
// Contains all nodes holding young objects. Note: when the list
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index c0dab51de8..43c2ef807e 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -178,6 +178,7 @@ Address* HandleScope::CreateHandle(Isolate* isolate, Address value) {
Address* HandleScope::GetHandle(Isolate* isolate, Address value) {
DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(isolate->main_thread_local_heap()->IsRunning());
DCHECK_WITH_MSG(isolate->thread_id() == ThreadId::Current(),
"main-thread handle can only be created on the main thread.");
HandleScopeData* data = isolate->handle_scope_data();
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 68d50c7ab3..8fdf858c50 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -10,6 +10,7 @@
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
+#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
@@ -149,11 +150,9 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
-CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate,
- OptimizedCompilationInfo* info)
- : isolate_(isolate),
- info_(info),
- zone_(info ? info->zone() : new Zone(isolate->allocator(), ZONE_NAME)) {
+CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate, Zone* zone)
+ : zone_(zone == nullptr ? new Zone(isolate->allocator(), ZONE_NAME) : zone),
+ isolate_(isolate) {
HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
@@ -165,18 +164,12 @@ CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate,
CanonicalHandleScope::~CanonicalHandleScope() {
delete root_index_map_;
- if (info_) {
- // If we passed a compilation info as parameter, we created the identity map
- // on its zone(). Then, we pass it to the compilation info which is
- // responsible for the disposal.
- info_->set_canonical_handles(DetachCanonicalHandles());
- } else {
- // If we don't have a compilation info, we created the zone manually. To
- // properly dispose of said zone, we need to first free the identity_map_.
- // Then we do so manually even though identity_map_ is a unique_ptr.
- identity_map_.reset();
- delete zone_;
- }
+ // Note: both the identity_map_ (zone-allocated) and the zone_ itself may
+ // have custom ownership semantics, controlled by subclasses. For example, in
+ // case of external ownership, the subclass destructor may 'steal' both by
+ // resetting the identity map pointer and nulling the zone.
+ identity_map_.reset();
+ delete zone_;
isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}
@@ -206,5 +199,26 @@ CanonicalHandleScope::DetachCanonicalHandles() {
return std::move(identity_map_);
}
+template <class CompilationInfoT>
+CanonicalHandleScopeForOptimization<CompilationInfoT>::
+ CanonicalHandleScopeForOptimization(Isolate* isolate,
+ CompilationInfoT* info)
+ : CanonicalHandleScope(isolate, info->zone()), info_(info) {}
+
+template <class CompilationInfoT>
+CanonicalHandleScopeForOptimization<
+ CompilationInfoT>::~CanonicalHandleScopeForOptimization() {
+ // We created the identity map on the compilation info's zone(). Pass
+ // ownership to the compilation info which is responsible for the disposal.
+ info_->set_canonical_handles(DetachCanonicalHandles());
+ zone_ = nullptr; // We don't own the zone, null it.
+}
+
+template class CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
+#ifdef V8_ENABLE_MAGLEV
+template class CanonicalHandleScopeForOptimization<
+ maglev::ExportedMaglevCompilationInfo>;
+#endif // V8_ENABLE_MAGLEV
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 3bde90f81f..8d4399477e 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -182,7 +182,7 @@ class Handle final : public HandleBase {
};
template <typename T>
-inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
+std::ostream& operator<<(std::ostream& os, Handle<T> handle);
// ----------------------------------------------------------------------------
// A stack-allocated class that governs a number of local handles.
@@ -278,6 +278,10 @@ class IdentityMap;
class RootIndexMap;
class OptimizedCompilationInfo;
+namespace maglev {
+class ExportedMaglevCompilationInfo;
+} // namespace maglev
+
using CanonicalHandlesMap = IdentityMap<Address*, ZoneAllocationPolicy>;
// A CanonicalHandleScope does not open a new HandleScope. It changes the
@@ -285,27 +289,23 @@ using CanonicalHandlesMap = IdentityMap<Address*, ZoneAllocationPolicy>;
// This does not apply to nested inner HandleScopes unless a nested
// CanonicalHandleScope is introduced. Handles are only canonicalized within
// the same CanonicalHandleScope, but not across nested ones.
-class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope final {
+class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope {
public:
- // If we passed a compilation info as parameter, we created the
- // CanonicalHandlesMap on said compilation info's zone(). If so, in the
- // CanonicalHandleScope destructor we hand off the canonical handle map to the
- // compilation info. The compilation info is responsible for the disposal. If
- // we don't have a compilation info, we create a zone in this constructor. To
- // properly dispose of said zone, we need to first free the identity_map_
+ // If no Zone is passed to this constructor, we create (and own) a new zone.
+ // To properly dispose of said zone, we need to first free the identity_map_
// which is done manually even though identity_map_ is a unique_ptr.
- explicit CanonicalHandleScope(Isolate* isolate,
- OptimizedCompilationInfo* info = nullptr);
+ explicit CanonicalHandleScope(Isolate* isolate, Zone* zone = nullptr);
~CanonicalHandleScope();
+ protected:
+ std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
+
+ Zone* zone_; // *Not* const, may be mutated by subclasses.
+
private:
Address* Lookup(Address object);
- std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
-
- Isolate* isolate_;
- OptimizedCompilationInfo* info_;
- Zone* zone_;
+ Isolate* const isolate_;
RootIndexMap* root_index_map_;
std::unique_ptr<CanonicalHandlesMap> identity_map_;
// Ordinary nested handle scopes within the current one are not canonical.
@@ -316,6 +316,27 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope final {
friend class HandleScope;
};
+template <class CompilationInfoT>
+class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScopeForOptimization final
+ : public CanonicalHandleScope {
+ public:
+ // We created the
+ // CanonicalHandlesMap on the compilation info's zone(). In the
+ // CanonicalHandleScope destructor we hand off the canonical handle map to the
+ // compilation info. The compilation info is responsible for the disposal.
+ explicit CanonicalHandleScopeForOptimization(Isolate* isolate,
+ CompilationInfoT* info);
+ ~CanonicalHandleScopeForOptimization();
+
+ private:
+ CompilationInfoT* const info_;
+};
+
+using CanonicalHandleScopeForTurbofan =
+ CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
+using CanonicalHandleScopeForMaglev =
+ CanonicalHandleScopeForOptimization<maglev::ExportedMaglevCompilationInfo>;
+
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
class V8_NODISCARD SealHandleScope final {
diff --git a/deps/v8/src/handles/local-handles-inl.h b/deps/v8/src/handles/local-handles-inl.h
index f9f63175cf..404a922ae0 100644
--- a/deps/v8/src/handles/local-handles-inl.h
+++ b/deps/v8/src/handles/local-handles-inl.h
@@ -16,6 +16,7 @@ namespace internal {
// static
V8_INLINE Address* LocalHandleScope::GetHandle(LocalHeap* local_heap,
Address value) {
+ DCHECK(local_heap->IsRunning());
if (local_heap->is_main_thread())
return LocalHandleScope::GetMainThreadHandle(local_heap, value);
@@ -57,10 +58,16 @@ LocalHandleScope::~LocalHandleScope() {
template <typename T>
Handle<T> LocalHandleScope::CloseAndEscape(Handle<T> handle_value) {
- HandleScopeData* current = &local_heap_->handles()->scope_;
+ HandleScopeData* current;
T value = *handle_value;
// Throw away all handles in the current scope.
- CloseScope(local_heap_, prev_next_, prev_limit_);
+ if (local_heap_->is_main_thread()) {
+ current = local_heap_->heap()->isolate()->handle_scope_data();
+ CloseMainThreadScope(local_heap_, prev_next_, prev_limit_);
+ } else {
+ current = &local_heap_->handles()->scope_;
+ CloseScope(local_heap_, prev_next_, prev_limit_);
+ }
// Allocate one handle in the parent scope.
DCHECK(current->level > current->sealed_level);
Handle<T> result(value, local_heap_);
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index e0f0a37128..857988c90a 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -4,5 +4,4 @@ hpayer@chromium.org
mlippautz@chromium.org
omerkatz@chromium.org
-per-file *factory*=leszeks@chromium.org
-per-file read-only-*=delphick@chromium.org
+per-file *factory*=file:../objects/OWNERS
diff --git a/deps/v8/src/heap/allocation-observer.h b/deps/v8/src/heap/allocation-observer.h
index 6a3826bf16..26559ed16a 100644
--- a/deps/v8/src/heap/allocation-observer.h
+++ b/deps/v8/src/heap/allocation-observer.h
@@ -14,48 +14,84 @@
namespace v8 {
namespace internal {
-class AllocationObserver;
-
-class AllocationCounter {
+// Observer for allocations that is aware of LAB-based allocation.
+class AllocationObserver {
public:
- AllocationCounter()
- : paused_(false),
- current_counter_(0),
- next_counter_(0),
- step_in_progress_(false) {}
- V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
+ explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
+ DCHECK_LE(kTaggedSize, step_size);
+ }
+ virtual ~AllocationObserver() = default;
+ AllocationObserver(const AllocationObserver&) = delete;
+ AllocationObserver& operator=(const AllocationObserver&) = delete;
- bool IsActive() { return !IsPaused() && observers_.size() > 0; }
+ protected:
+ // Called when at least `step_size_` bytes have been allocated. `soon_object`
+ // points to the uninitialized memory that has just been allocated and is the
+ // result for a request of `size` bytes.
+ //
+ // Some caveats:
+ // 1. `soon_object` will be nullptr in cases where the allocation returns a
+ // filler object, which is e.g. needed at page boundaries.
+ // 2. `soon_object` may actually be the first object in an
+ // allocation-folding group. In such a case size is the size of the group
+ // rather than the first object.
+ // 3. `size` is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
- void Pause() {
- DCHECK(!paused_);
- DCHECK(!step_in_progress_);
- paused_ = true;
- }
+ // Subclasses can override this method to make step size dynamic.
+ virtual intptr_t GetNextStepSize() { return step_size_; }
- void Resume() {
- DCHECK(paused_);
- DCHECK(!step_in_progress_);
- paused_ = false;
- }
+ private:
+ const intptr_t step_size_;
+
+ friend class AllocationCounter;
+};
+
+// A global allocation counter observers can be added to.
+class AllocationCounter final {
+ public:
+ AllocationCounter() = default;
+
+ // Adds an observer. May be called from `AllocationObserver::Step()`.
+ V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
+
+ // Removes an observer. May be called from `AllocationObserver::Step()`.
+ V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
+ // Advances forward by `allocated` bytes. Does not invoke any observers.
V8_EXPORT_PRIVATE void AdvanceAllocationObservers(size_t allocated);
+
+ // Invokes observers via `AllocationObserver::Step()` and computes new step
+ // sizes. Does not advance the current allocation counter.
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size);
- size_t NextBytes() {
+ bool IsActive() const { return !IsPaused() && observers_.size() > 0; }
+
+ bool IsStepInProgress() const { return step_in_progress_; }
+
+ size_t NextBytes() const {
DCHECK(IsActive());
return next_counter_ - current_counter_;
}
- bool IsStepInProgress() { return step_in_progress_; }
+ void Pause() {
+ DCHECK(!step_in_progress_);
+ paused_++;
+ }
+
+ void Resume() {
+ DCHECK_NE(0, paused_);
+ DCHECK(!step_in_progress_);
+ paused_--;
+ }
private:
- bool IsPaused() { return paused_; }
+ bool IsPaused() const { return paused_; }
- struct AllocationObserverCounter {
+ struct AllocationObserverCounter final {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer),
@@ -71,47 +107,10 @@ class AllocationCounter {
std::vector<AllocationObserverCounter> pending_added_;
std::unordered_set<AllocationObserver*> pending_removed_;
- bool paused_;
-
- size_t current_counter_;
- size_t next_counter_;
-
- bool step_in_progress_;
-};
-
-// -----------------------------------------------------------------------------
-// Allows observation of allocations.
-class AllocationObserver {
- public:
- explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
- DCHECK_LE(kTaggedSize, step_size);
- }
- virtual ~AllocationObserver() = default;
- AllocationObserver(const AllocationObserver&) = delete;
- AllocationObserver& operator=(const AllocationObserver&) = delete;
-
- protected:
- // Pure virtual method provided by the subclasses that gets called when at
- // least step_size bytes have been allocated. soon_object is the address just
- // allocated (but not yet initialized.) size is the size of the object as
- // requested (i.e. w/o the alignment fillers). Some complexities to be aware
- // of:
- // 1) soon_object will be nullptr in cases where we end up observing an
- // allocation that happens to be a filler space (e.g. page boundaries.)
- // 2) size is the requested size at the time of allocation. Right-trimming
- // may change the object size dynamically.
- // 3) soon_object may actually be the first object in an allocation-folding
- // group. In such a case size is the size of the group rather than the
- // first object.
- virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
-
- // Subclasses can override this method to make step size dynamic.
- virtual intptr_t GetNextStepSize() { return step_size_; }
-
- private:
- intptr_t step_size_;
-
- friend class AllocationCounter;
+ size_t current_counter_ = 0;
+ size_t next_counter_ = 0;
+ bool step_in_progress_ = false;
+ int paused_ = 0;
};
class V8_EXPORT_PRIVATE V8_NODISCARD PauseAllocationObserversScope {
diff --git a/deps/v8/src/heap/allocation-result.h b/deps/v8/src/heap/allocation-result.h
new file mode 100644
index 0000000000..04a618995b
--- /dev/null
+++ b/deps/v8/src/heap/allocation-result.h
@@ -0,0 +1,74 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ALLOCATION_RESULT_H_
+#define V8_HEAP_ALLOCATION_RESULT_H_
+
+#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+enum class AllocationOrigin {
+ kGeneratedCode = 0,
+ kRuntime = 1,
+ kGC = 2,
+ kFirstAllocationOrigin = kGeneratedCode,
+ kLastAllocationOrigin = kGC,
+ kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
+// The result of an allocation attempt. Either represents a successful
+// allocation that can be turned into an object or a failed attempt.
+class AllocationResult final {
+ public:
+ static AllocationResult Failure() { return AllocationResult(); }
+
+ static AllocationResult FromObject(HeapObject heap_object) {
+ return AllocationResult(heap_object);
+ }
+
+ // Empty constructor creates a failed result. The callsite determines which
+ // GC to invoke based on the requested allocation.
+ AllocationResult() = default;
+
+ bool IsFailure() const { return object_.is_null(); }
+
+ template <typename T>
+ bool To(T* obj) const {
+ if (IsFailure()) return false;
+ *obj = T::cast(object_);
+ return true;
+ }
+
+ HeapObject ToObjectChecked() const {
+ CHECK(!IsFailure());
+ return HeapObject::cast(object_);
+ }
+
+ HeapObject ToObject() const {
+ DCHECK(!IsFailure());
+ return HeapObject::cast(object_);
+ }
+
+ Address ToAddress() const {
+ DCHECK(!IsFailure());
+ return HeapObject::cast(object_).address();
+ }
+
+ private:
+ explicit AllocationResult(HeapObject heap_object) : object_(heap_object) {}
+
+ HeapObject object_;
+};
+
+STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ALLOCATION_RESULT_H_
diff --git a/deps/v8/src/heap/base/active-system-pages.cc b/deps/v8/src/heap/base/active-system-pages.cc
new file mode 100644
index 0000000000..8ad225461e
--- /dev/null
+++ b/deps/v8/src/heap/base/active-system-pages.cc
@@ -0,0 +1,71 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/base/active-system-pages.h"
+
+#include <climits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+
+namespace heap {
+namespace base {
+
+size_t ActiveSystemPages::Init(size_t header_size, size_t page_size_bits,
+ size_t user_page_size) {
+#if DEBUG
+ size_t page_size = 1 << page_size_bits;
+ DCHECK_LE(RoundUp(user_page_size, page_size) >> page_size_bits,
+ ActiveSystemPages::kMaxPages);
+#endif // DEBUG
+ Clear();
+ return Add(0, header_size, page_size_bits);
+}
+
+size_t ActiveSystemPages::Add(uintptr_t start, uintptr_t end,
+ size_t page_size_bits) {
+ const size_t page_size = 1 << page_size_bits;
+
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, kMaxPages * page_size);
+
+ // Make sure we actually get the bitcount as argument.
+ DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);
+
+ const uintptr_t start_page_bit =
+ RoundDown(start, page_size) >> page_size_bits;
+ const uintptr_t end_page_bit = RoundUp(end, page_size) >> page_size_bits;
+ DCHECK_LE(start_page_bit, end_page_bit);
+
+ const uintptr_t bits = end_page_bit - start_page_bit;
+ DCHECK_LE(bits, kMaxPages);
+ const bitset_t mask = bits == kMaxPages
+ ? int64_t{-1}
+ : ((uint64_t{1} << bits) - 1) << start_page_bit;
+ const bitset_t added_pages = ~value_ & mask;
+ value_ |= mask;
+ return added_pages.count();
+}
+
+size_t ActiveSystemPages::Reduce(ActiveSystemPages updated_value) {
+ DCHECK_EQ(~value_ & updated_value.value_, 0);
+ const bitset_t removed_pages(value_ & ~updated_value.value_);
+ value_ = updated_value.value_;
+ return removed_pages.count();
+}
+
+size_t ActiveSystemPages::Clear() {
+ const size_t removed_pages = value_.count();
+ value_ = 0;
+ return removed_pages;
+}
+
+size_t ActiveSystemPages::Size(size_t page_size_bits) const {
+ // Make sure we don't get the full page size as argument.
+ DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);
+ return value_.count() * (size_t{1} << page_size_bits);
+}
+
+} // namespace base
+} // namespace heap
diff --git a/deps/v8/src/heap/base/active-system-pages.h b/deps/v8/src/heap/base/active-system-pages.h
new file mode 100644
index 0000000000..0c30cb928f
--- /dev/null
+++ b/deps/v8/src/heap/base/active-system-pages.h
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
+#define V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
+
+#include <bitset>
+#include <cstdint>
+
+#include "src/base/macros.h"
+
+namespace heap {
+namespace base {
+
+// Class implements a bitset of system pages on a heap page.
+class ActiveSystemPages final {
+ public:
+ // Defines the maximum number of system pages that can be tracked in one
+ // instance.
+ static constexpr size_t kMaxPages = 64;
+
+ // Initializes the set of active pages to the system pages for the header.
+ V8_EXPORT_PRIVATE size_t Init(size_t header_size, size_t page_size_bits,
+ size_t user_page_size);
+
+ // Adds the pages for this memory range. Returns the number of freshly added
+ // pages.
+ V8_EXPORT_PRIVATE size_t Add(size_t start, size_t end, size_t page_size_bits);
+
+ // Replaces the current bitset with the given argument. The new bitset needs
+ // to be a proper subset of the current pages, which means this operation
+ // can't add pages. Returns the number of removed pages.
+ V8_EXPORT_PRIVATE size_t Reduce(ActiveSystemPages updated_value);
+
+ // Removes all pages. Returns the number of removed pages.
+ V8_EXPORT_PRIVATE size_t Clear();
+
+ // Returns the memory used with the given page size.
+ V8_EXPORT_PRIVATE size_t Size(size_t page_size_bits) const;
+
+ private:
+ using bitset_t = std::bitset<kMaxPages>;
+
+ bitset_t value_;
+};
+
+} // namespace base
+} // namespace heap
+
+#endif // V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
diff --git a/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
index 6befa3bcc0..47779e0736 100644
--- a/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
+++ b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
@@ -10,7 +10,8 @@
//
// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
// GN toolchain (e.g. ChromeOS) and not provide them.
-asm(".set noreorder \n"
+asm(".text \n"
+ ".set noreorder \n"
".global PushAllRegistersAndIterateStack \n"
".type PushAllRegistersAndIterateStack, %function \n"
".hidden PushAllRegistersAndIterateStack \n"
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index 299d2dd1c1..7ae7e1380a 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -10,7 +10,6 @@
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
#include "src/base/sanitizer/tsan.h"
-#include "src/heap/cppgc/globals.h"
namespace heap {
namespace base {
@@ -21,7 +20,12 @@ extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
+void Stack::SetStackStart(const void* stack_start) {
+ stack_start_ = stack_start;
+}
+
bool Stack::IsOnStack(void* slot) const {
+ DCHECK_NOT_NULL(stack_start_);
#ifdef V8_USE_ADDRESS_SANITIZER
// If the slot is part of a fake frame, then it is definitely on the stack.
if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(),
@@ -35,7 +39,7 @@ bool Stack::IsOnStack(void* slot) const {
#if defined(__has_feature)
#if __has_feature(safe_stack)
if (__builtin___get_unsafe_stack_top() >= slot &&
- slot > __builtin___get_unsafe_stack_ptr()) {
+ slot >= __builtin___get_unsafe_stack_ptr()) {
return true;
}
#endif // __has_feature(safe_stack)
@@ -86,7 +90,7 @@ void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
#endif // V8_USE_ADDRESS_SANITIZER
-void IterateSafeStackIfNecessary(StackVisitor* visitor) {
+void IterateUnsafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
// Source:
@@ -146,11 +150,12 @@ void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
} // namespace
void Stack::IteratePointers(StackVisitor* visitor) const {
+ DCHECK_NOT_NULL(stack_start_);
PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
// TODO(chromium:1056170): Add support for SIMD and/or filtering.
- IterateSafeStackIfNecessary(visitor);
+ IterateUnsafeStackIfNecessary(visitor);
}
void Stack::IteratePointersUnsafe(StackVisitor* visitor,
@@ -158,5 +163,17 @@ void Stack::IteratePointersUnsafe(StackVisitor* visitor,
IteratePointersImpl(this, visitor, reinterpret_cast<intptr_t*>(stack_end));
}
+const void* Stack::GetCurrentStackPointerForLocalVariables() {
+#if defined(__has_feature)
+#if __has_feature(safe_stack)
+ return __builtin___get_unsafe_stack_ptr();
+#else // __has_feature(safe_stack)
+ return v8::base::Stack::GetCurrentStackPosition();
+#endif // __has_feature(safe_stack)
+#else // defined(__has_feature)
+ return v8::base::Stack::GetCurrentStackPosition();
+#endif // defined(__has_feature)
+}
+
} // namespace base
} // namespace heap
diff --git a/deps/v8/src/heap/base/stack.h b/deps/v8/src/heap/base/stack.h
index d7267deee7..59411d786e 100644
--- a/deps/v8/src/heap/base/stack.h
+++ b/deps/v8/src/heap/base/stack.h
@@ -21,7 +21,10 @@ class StackVisitor {
// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
class V8_EXPORT_PRIVATE Stack final {
public:
- explicit Stack(const void* stack_start);
+ explicit Stack(const void* stack_start = nullptr);
+
+ // Sets the start of the stack.
+ void SetStackStart(const void* stack_start);
// Returns true if |slot| is part of the stack and false otherwise.
bool IsOnStack(void* slot) const;
@@ -43,6 +46,12 @@ class V8_EXPORT_PRIVATE Stack final {
// Returns the start of the stack.
const void* stack_start() const { return stack_start_; }
+ // Get the current stack pointer for the stack, on which local variables are
+ // stored. In case the safe-stack is enabled (-fsanitize=safe-stack), this
+ // will return the stack pointer for the unsafe-stack. Otherwise, the function
+ // returns the stack pointer for the native stack.
+ static const void* GetCurrentStackPointerForLocalVariables();
+
private:
const void* stack_start_;
};
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index de91e6ea9f..98a7109f97 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -79,31 +79,27 @@ class BasicMemoryChunk {
// triggering on the same page.
COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
- // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
- // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17,
-
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
- INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+ INCREMENTAL_MARKING = 1u << 17,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 18,
// The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed.
- UNREGISTERED = 1u << 20,
+ UNREGISTERED = 1u << 19,
// The memory chunk belongs to the read-only heap and does not participate
// in garbage collection. This is used instead of owner for identity
// checking since read-only chunks have no owner once they are detached.
- READ_ONLY_HEAP = 1u << 21,
+ READ_ONLY_HEAP = 1u << 20,
// The memory chunk is pinned in memory and can't be moved. This is likely
// because there exists a potential pointer to somewhere in the chunk which
// can't be updated.
- PINNED = 1u << 22,
+ PINNED = 1u << 21,
// This page belongs to a shared heap.
- IN_SHARED_HEAP = 1u << 23,
+ IN_SHARED_HEAP = 1u << 22,
};
using MainThreadFlags = base::Flags<Flag, uintptr_t>;
diff --git a/deps/v8/src/heap/code-object-registry.h b/deps/v8/src/heap/code-object-registry.h
index f0ae334d99..b0a2dbd4cf 100644
--- a/deps/v8/src/heap/code-object-registry.h
+++ b/deps/v8/src/heap/code-object-registry.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
-#include <set>
#include <vector>
#include "src/base/macros.h"
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index 5c5911d676..08b3c15148 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -53,6 +53,11 @@ Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
CHECK(IsAligned(result, alignment));
return result;
}
+ // The empty memory_ranges means that GetFreeMemoryRangesWithin() API
+ // is not supported, so use the lowest address from the preferred region
+ // as a hint because it'll be at least as good as the fallback hint but
+ // with a higher chances to point to the free address space range.
+ return RoundUp(preferred_region.begin(), alignment);
}
return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
alignment);
@@ -124,16 +129,8 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
params.page_size = MemoryChunk::kPageSize;
- // V8_EXTERNAL_CODE_SPACE imposes additional alignment requirement for the
- // base address, so make sure the hint calculation function takes that into
- // account. Otherwise the allocated reservation might be outside of the
- // preferred region (see Isolate::GetShortBuiltinsCallRegion()).
- const size_t hint_alignment =
- V8_EXTERNAL_CODE_SPACE_BOOL
- ? RoundUp(params.base_alignment, allocate_page_size)
- : allocate_page_size;
params.requested_start_hint =
- GetCodeRangeAddressHint()->GetAddressHint(requested, hint_alignment);
+ GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
if (!VirtualMemoryCage::InitReservation(params)) return false;
@@ -175,7 +172,10 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
size_t embedded_blob_code_size) {
base::MutexGuard guard(&remap_embedded_builtins_mutex_);
- const base::AddressRegion& code_region = reservation()->region();
+ // Remap embedded builtins into the end of the address range controlled by
+ // the BoundedPageAllocator.
+ const base::AddressRegion code_region(page_allocator()->begin(),
+ page_allocator()->size());
CHECK_NE(code_region.begin(), kNullAddress);
CHECK(!code_region.is_empty());
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
index b6ef858e12..a76d5db050 100644
--- a/deps/v8/src/heap/concurrent-allocator-inl.h
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -24,7 +24,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
- local_heap_->VerifyCurrent();
+ if (local_heap_) local_heap_->VerifyCurrent();
#endif
if (object_size > kMaxLabObjectSize) {
@@ -37,11 +37,9 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationResult ConcurrentAllocator::AllocateInLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
- return AllocateInLabSlow(object_size, alignment, origin);
- } else {
- return allocation;
- }
+ return allocation.IsFailure()
+ ? AllocateInLabSlow(object_size, alignment, origin)
+ : allocation;
}
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index bfdfaea7fe..b4dfddbb4e 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -37,7 +37,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kSmallObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -48,7 +48,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -59,7 +59,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -122,11 +122,11 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
- return AllocationResult::Retry(space_->identity());
+ return AllocationResult::Failure();
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
- DCHECK(!allocation.IsRetry());
+ DCHECK(!allocation.IsFailure());
return allocation;
}
@@ -145,7 +145,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
- local_heap_->heap(), AllocationResult(object), result->second);
+ space_->heap(), AllocationResult::FromObject(object), result->second);
DCHECK(lab_.IsValid());
if (!lab_.TryMerge(&saved_lab)) {
saved_lab.CloseAndMakeIterable();
@@ -157,7 +157,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(local_heap_, object_size,
object_size, alignment, origin);
- if (!result) return AllocationResult::Retry(space_->identity());
+ if (!result) return AllocationResult::Failure();
HeapObject object = HeapObject::FromAddress(result->first);
@@ -166,7 +166,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
object_size);
}
- return AllocationResult(object);
+ return AllocationResult::FromObject(object);
}
bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f806c4eca6..1863eb5a22 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -116,6 +116,10 @@ class ConcurrentMarkingVisitor final
return VisitJSObjectSubclassFast(map, object);
}
+ int VisitJSExternalObject(Map map, JSExternalObject object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
#if V8_ENABLE_WEBASSEMBLY
int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
@@ -318,15 +322,17 @@ class ConcurrentMarkingVisitor final
}
void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target))
+ return;
+
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
- if (!data.typed_slots) {
- data.typed_slots.reset(new TypedSlots());
- }
- data.typed_slots->Insert(info.slot_type, info.offset);
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+
+ MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
+ if (!data.typed_slots) {
+ data.typed_slots.reset(new TypedSlots());
}
+ data.typed_slots->Insert(info.slot_type, info.offset);
}
void SynchronizePageAccess(HeapObject heap_object) {
@@ -451,7 +457,11 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
int kObjectsUntilInterrupCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = &task_state_[task_id];
- MarkingWorklists::Local local_marking_worklists(marking_worklists_);
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ MarkingWorklists::Local local_marking_worklists(
+ marking_worklists_, cpp_heap
+ ? cpp_heap->CreateCppMarkingState()
+ : MarkingWorklists::Local::kNoCppMarkingState);
WeakObjects::Local local_weak_objects(weak_objects_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, &local_weak_objects, heap_,
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 15737881ef..7c6d7fdda6 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -22,6 +22,7 @@
#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/cppgc-js/cpp-snapshot.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
@@ -40,6 +41,7 @@
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/cppgc/unmarker.h"
+#include "src/heap/embedder-tracing-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/marking-worklist.h"
@@ -49,6 +51,62 @@
namespace v8 {
+namespace {
+
+class V8ToCppGCReferencesVisitor final
+ : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
+ public:
+ V8ToCppGCReferencesVisitor(
+ cppgc::internal::MutatorMarkingState& marking_state,
+ v8::internal::Isolate* isolate,
+ const v8::WrapperDescriptor& wrapper_descriptor)
+ : marking_state_(marking_state),
+ isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor) {}
+
+ void VisitTracedGlobalHandle(const v8::TracedGlobal<v8::Value>&) final {
+ UNREACHABLE();
+ }
+
+ void VisitTracedReference(const v8::TracedReference<v8::Value>& value) final {
+ VisitHandle(value, value.WrapperClassId());
+ }
+
+ private:
+ void VisitHandle(const v8::TracedReference<v8::Value>& value,
+ uint16_t class_id) {
+ DCHECK(!value.IsEmpty());
+
+ const internal::JSObject js_object =
+ *reinterpret_cast<const internal::JSObject* const&>(value);
+ if (!js_object.ptr() || !js_object.MayHaveEmbedderFields()) return;
+
+ internal::LocalEmbedderHeapTracer::WrapperInfo info;
+ if (!internal::LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ isolate_, js_object, wrapper_descriptor_, &info))
+ return;
+
+ marking_state_.MarkAndPush(
+ cppgc::internal::HeapObjectHeader::FromObject(info.second));
+ }
+
+ cppgc::internal::MutatorMarkingState& marking_state_;
+ v8::internal::Isolate* isolate_;
+ const v8::WrapperDescriptor& wrapper_descriptor_;
+};
+
+void TraceV8ToCppGCReferences(
+ v8::internal::Isolate* isolate,
+ cppgc::internal::MutatorMarkingState& marking_state,
+ const v8::WrapperDescriptor& wrapper_descriptor) {
+ DCHECK(isolate);
+ V8ToCppGCReferencesVisitor forwarding_visitor(marking_state, isolate,
+ wrapper_descriptor);
+ isolate->global_handles()->IterateTracedNodes(&forwarding_visitor);
+}
+
+} // namespace
+
// static
constexpr uint16_t WrapperDescriptor::kUnknownEmbedderId;
@@ -88,19 +146,16 @@ void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
}
void CppHeap::CollectGarbageForTesting(cppgc::EmbedderStackState stack_state) {
- return internal::CppHeap::From(this)->CollectGarbageForTesting(stack_state);
-}
-
-void JSHeapConsistency::DijkstraMarkingBarrierSlow(
- cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) {
- auto& heap_base = cppgc::internal::HeapBase::From(heap_handle);
- static_cast<JSVisitor*>(&heap_base.marker()->Visitor())->Trace(ref);
+ return internal::CppHeap::From(this)->CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state);
}
-void JSHeapConsistency::CheckWrapper(v8::Local<v8::Object>& wrapper,
- int wrapper_index, const void* wrappable) {
- CHECK_EQ(wrappable,
- wrapper->GetAlignedPointerFromInternalField(wrapper_index));
+void CppHeap::CollectGarbageInYoungGenerationForTesting(
+ cppgc::EmbedderStackState stack_state) {
+ return internal::CppHeap::From(this)->CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
+ stack_state);
}
namespace internal {
@@ -176,17 +231,34 @@ UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
heap(), marking_state, unified_heap_marking_state_);
}
+void FatalOutOfMemoryHandlerImpl(const std::string& reason,
+ const SourceLocation&, HeapBase* heap) {
+ FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
+ reason.c_str());
+}
+
+} // namespace
+
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
- UnifiedHeapMarker(Key, Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
+ UnifiedHeapMarker(Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
cppgc::Platform* platform, MarkingConfig config);
~UnifiedHeapMarker() final = default;
void AddObject(void*);
+ cppgc::internal::MarkingWorklists& GetMarkingWorklists() {
+ return marking_worklists_;
+ }
+
+ cppgc::internal::MutatorMarkingState& GetMutatorMarkingState() {
+ return static_cast<cppgc::internal::MutatorMarkingState&>(
+ marking_visitor_->marking_state_);
+ }
+
protected:
- cppgc::Visitor& visitor() final { return marking_visitor_; }
+ cppgc::Visitor& visitor() final { return *marking_visitor_; }
cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final {
return conservative_marking_visitor_;
}
@@ -196,20 +268,25 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
private:
UnifiedHeapMarkingState unified_heap_marking_state_;
- MutatorUnifiedHeapMarkingVisitor marking_visitor_;
+ std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
-UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap* v8_heap,
+UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::internal::HeapBase& heap,
cppgc::Platform* platform,
MarkingConfig config)
- : cppgc::internal::MarkerBase(key, heap, platform, config),
+ : cppgc::internal::MarkerBase(heap, platform, config),
unified_heap_marking_state_(v8_heap),
- marking_visitor_(heap, mutator_marking_state_,
- unified_heap_marking_state_),
+ marking_visitor_(
+ config.collection_type == cppgc::internal::GarbageCollector::Config::
+ CollectionType::kMajor
+ ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
+ heap, mutator_marking_state_, unified_heap_marking_state_)
+ : std::make_unique<MutatorMinorGCMarkingVisitor>(
+ heap, mutator_marking_state_, unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {
+ *marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_,
unified_heap_marking_state_);
@@ -220,18 +297,11 @@ void UnifiedHeapMarker::AddObject(void* object) {
cppgc::internal::HeapObjectHeader::FromObject(object));
}
-void FatalOutOfMemoryHandlerImpl(const std::string& reason,
- const SourceLocation&, HeapBase* heap) {
- FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
- reason.c_str());
-}
-
-} // namespace
-
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
const FullCycle& cppgc_event) {
+ DCHECK(!last_full_gc_event_.has_value());
last_full_gc_event_ = cppgc_event;
- GetIsolate()->heap()->tracer()->NotifyGCCompleted();
+ GetIsolate()->heap()->tracer()->NotifyCppGCCompleted();
}
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -300,12 +370,23 @@ bool CppHeap::MetricRecorderAdapter::MetricsReportPending() const {
const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
CppHeap::MetricRecorderAdapter::ExtractLastFullGcEvent() {
- return std::move(last_full_gc_event_);
+ auto res = std::move(last_full_gc_event_);
+ last_full_gc_event_.reset();
+ return res;
}
const base::Optional<cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
CppHeap::MetricRecorderAdapter::ExtractLastIncrementalMarkEvent() {
- return std::move(last_incremental_mark_event_);
+ auto res = std::move(last_incremental_mark_event_);
+ last_incremental_mark_event_.reset();
+ return res;
+}
+
+void CppHeap::MetricRecorderAdapter::ClearCachedEvents() {
+ incremental_mark_batched_events_.events.clear();
+ incremental_sweep_batched_events_.events.clear();
+ last_incremental_mark_event_.reset();
+ last_full_gc_event_.reset();
}
Isolate* CppHeap::MetricRecorderAdapter::GetIsolate() const {
@@ -331,8 +412,10 @@ CppHeap::CppHeap(
std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan,
- cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
- cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
+ FLAG_single_threaded_gc ? MarkingType::kIncremental
+ : MarkingType::kIncrementalAndConcurrent,
+ FLAG_single_threaded_gc ? SweepingType::kIncremental
+ : SweepingType::kIncrementalAndConcurrent),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -397,17 +480,6 @@ void CppHeap::DetachIsolate() {
no_gc_scope_++;
}
-void CppHeap::RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) {
- DCHECK(marker_);
- for (auto& tuple : embedder_fields) {
- // First field points to type.
- // Second field points to object.
- static_cast<UnifiedHeapMarker*>(marker_.get())->AddObject(tuple.second);
- }
- marking_done_ = false;
-}
-
namespace {
bool IsMemoryReducingGC(CppHeap::GarbageCollectionFlags flags) {
@@ -423,38 +495,62 @@ bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
}
} // namespace
-void CppHeap::TracePrologue(GarbageCollectionFlags gc_flags) {
+
+CppHeap::MarkingType CppHeap::SelectMarkingType() const {
+ if (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
+ return MarkingType::kAtomic;
+
+ return marking_support();
+}
+
+CppHeap::SweepingType CppHeap::SelectSweepingType() const {
+ if (IsForceGC(current_gc_flags_)) return SweepingType::kAtomic;
+
+ return sweeping_support();
+}
+
+void CppHeap::InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
+ GarbageCollectionFlags gc_flags) {
CHECK(!sweeper_.IsSweepingInProgress());
+ // Check that previous cycle metrics have been reported.
+ DCHECK_IMPLIES(GetMetricRecorder(),
+ !GetMetricRecorder()->MetricsReportPending());
+
+ DCHECK(!collection_type_);
+ collection_type_ = collection_type;
+
#if defined(CPPGC_YOUNG_GENERATION)
- cppgc::internal::SequentialUnmarker unmarker(raw_heap());
+ if (*collection_type_ ==
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor)
+ cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
current_gc_flags_ = gc_flags;
const UnifiedHeapMarker::MarkingConfig marking_config{
- UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
- cppgc::Heap::StackState::kNoHeapPointers,
- (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
- ? UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic
- : UnifiedHeapMarker::MarkingConfig::MarkingType::
- kIncrementalAndConcurrent,
+ *collection_type_, cppgc::Heap::StackState::kNoHeapPointers,
+ SelectMarkingType(),
IsForceGC(current_gc_flags_)
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
- DCHECK_IMPLIES(!isolate_, (cppgc::Heap::MarkingType::kAtomic ==
- marking_config.marking_type) ||
- force_incremental_marking_for_testing_);
+ DCHECK_IMPLIES(!isolate_,
+ (MarkingType::kAtomic == marking_config.marking_type) ||
+ force_incremental_marking_for_testing_);
if (ShouldReduceMemory(current_gc_flags_)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
compactor_.InitializeIfShouldCompact(marking_config.marking_type,
marking_config.stack_state);
}
- marker_ =
- cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
- isolate_ ? isolate_->heap() : nullptr, AsBase(), platform_.get(),
- marking_config);
+ marker_ = std::make_unique<UnifiedHeapMarker>(
+ isolate_ ? isolate()->heap() : nullptr, AsBase(), platform_.get(),
+ marking_config);
+}
+
+void CppHeap::StartTracing() {
+ marker_->StartMarking();
marking_done_ = false;
}
@@ -483,12 +579,17 @@ bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
in_atomic_pause_ = true;
- if (override_stack_state_) {
- stack_state = *override_stack_state_;
- }
marker_->EnterAtomicPause(stack_state);
- compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
- stack_state);
+ if (isolate_ &&
+ *collection_type_ ==
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor) {
+ // Visit V8 -> cppgc references.
+ TraceV8ToCppGCReferences(isolate_,
+ static_cast<UnifiedHeapMarker*>(marker_.get())
+ ->GetMutatorMarkingState(),
+ wrapper_descriptor_);
+ }
+ compactor_.CancelIfShouldNotCompact(MarkingType::kAtomic, stack_state);
}
void CppHeap::TraceEpilogue() {
@@ -511,8 +612,7 @@ void CppHeap::TraceEpilogue() {
buffered_allocated_bytes_ = 0;
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
- UnifiedHeapMarkingVerifier verifier(
- *this, cppgc::internal::Heap::Config::CollectionType::kMajor);
+ UnifiedHeapMarkingVerifier verifier(*this, *collection_type_);
verifier.Run(
stack_state_of_prev_gc(), stack_end_of_current_gc(),
stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
@@ -528,27 +628,43 @@ void CppHeap::TraceEpilogue() {
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
- // In case the GC was forced, also finalize sweeping right away.
- IsForceGC(current_gc_flags_)
- ? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
- : cppgc::internal::Sweeper::SweepingConfig::SweepingType::
- kIncrementalAndConcurrent,
- compactable_space_handling,
+ SelectSweepingType(), compactable_space_handling,
ShouldReduceMemory(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDiscardWherePossible
: cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDoNotDiscard};
- DCHECK_IMPLIES(
- !isolate_,
- cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic ==
- sweeping_config.sweeping_type);
+ DCHECK_IMPLIES(!isolate_,
+ SweepingType::kAtomic == sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
in_atomic_pause_ = false;
+ collection_type_.reset();
sweeper().NotifyDoneIfNeeded();
}
+void CppHeap::RunMinorGC() {
+#if defined(CPPGC_YOUNG_GENERATION)
+ if (in_no_gc_scope()) return;
+ // Minor GC does not support nesting in full GCs.
+ if (IsMarking()) return;
+ // Finish sweeping in case it is still running.
+ sweeper().FinishIfRunning();
+
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+
+ // Perform an atomic GC, with starting incremental/concurrent marking and
+ // immediately finalizing the garbage collection.
+ InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
+ GarbageCollectionFlagValues::kForced);
+ StartTracing();
+ EnterFinalPause(cppgc::EmbedderStackState::kMayContainHeapPointers);
+ AdvanceTracing(std::numeric_limits<double>::infinity());
+ TraceEpilogue();
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
void CppHeap::AllocatedObjectSizeIncreased(size_t bytes) {
buffered_allocated_bytes_ += static_cast<int64_t>(bytes);
ReportBufferedAllocationSizeIfPossible();
@@ -584,6 +700,7 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
}
void CppHeap::CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
cppgc::internal::GarbageCollector::Config::StackState stack_state) {
if (in_no_gc_scope()) return;
@@ -599,7 +716,10 @@ void CppHeap::CollectGarbageForTesting(
} else {
// Perform an atomic GC, with starting incremental/concurrent marking and
// immediately finalizing the garbage collection.
- if (!IsMarking()) TracePrologue(GarbageCollectionFlagValues::kForced);
+ if (!IsMarking()) {
+ InitializeTracing(collection_type, GarbageCollectionFlagValues::kForced);
+ StartTracing();
+ }
EnterFinalPause(stack_state);
AdvanceTracing(std::numeric_limits<double>::infinity());
TraceEpilogue();
@@ -620,7 +740,10 @@ void CppHeap::StartIncrementalGarbageCollectionForTesting() {
DCHECK_NULL(isolate_);
if (IsMarking()) return;
force_incremental_marking_for_testing_ = true;
- TracePrologue(GarbageCollectionFlagValues::kForced);
+ InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ GarbageCollectionFlagValues::kForced);
+ StartTracing();
force_incremental_marking_for_testing_ = false;
}
@@ -630,7 +753,9 @@ void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
DCHECK_NULL(isolate_);
DCHECK(IsMarking());
if (IsMarking()) {
- CollectGarbageForTesting(stack_state);
+ CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state);
}
sweeper_.FinishIfRunning();
}
@@ -719,5 +844,24 @@ CppHeap::MetricRecorderAdapter* CppHeap::GetMetricRecorder() const {
void CppHeap::FinishSweepingIfRunning() { sweeper_.FinishIfRunning(); }
+void CppHeap::FinishSweepingIfOutOfWork() { sweeper_.FinishIfOutOfWork(); }
+
+std::unique_ptr<CppMarkingState> CppHeap::CreateCppMarkingState() {
+ DCHECK(IsMarking());
+ return std::make_unique<CppMarkingState>(
+ isolate(), wrapper_descriptor_,
+ std::make_unique<cppgc::internal::MarkingStateBase>(
+ AsBase(),
+ static_cast<UnifiedHeapMarker*>(marker())->GetMarkingWorklists()));
+}
+
+std::unique_ptr<CppMarkingState>
+CppHeap::CreateCppMarkingStateForMutatorThread() {
+ DCHECK(IsMarking());
+ return std::make_unique<CppMarkingState>(
+ isolate(), wrapper_descriptor_,
+ static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 3f9e8d9ec7..70958b2b6d 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -15,6 +15,7 @@ static_assert(
#include "include/v8-metrics.h"
#include "src/base/flags.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h"
@@ -25,6 +26,8 @@ class Isolate;
namespace internal {
+class CppMarkingState;
+
// A C++ heap implementation used with V8 to implement unified heap.
class V8_EXPORT_PRIVATE CppHeap final
: public cppgc::internal::HeapBase,
@@ -62,6 +65,8 @@ class V8_EXPORT_PRIVATE CppHeap final
cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
ExtractLastIncrementalMarkEvent();
+ void ClearCachedEvents();
+
private:
Isolate* GetIsolate() const;
@@ -105,6 +110,7 @@ class V8_EXPORT_PRIVATE CppHeap final
void EnableDetachedGarbageCollectionsForTesting();
void CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType,
cppgc::internal::GarbageCollector::Config::StackState);
void CollectCustomSpaceStatisticsAtLastGC(
@@ -112,15 +118,19 @@ class V8_EXPORT_PRIVATE CppHeap final
std::unique_ptr<CustomSpaceStatisticsReceiver>);
void FinishSweepingIfRunning();
+ void FinishSweepingIfOutOfWork();
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields);
- void TracePrologue(GarbageCollectionFlags);
+ void InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType,
+ GarbageCollectionFlags);
+ void StartTracing();
bool AdvanceTracing(double max_duration);
bool IsTracingDone();
void TraceEpilogue();
void EnterFinalPause(cppgc::EmbedderStackState stack_state);
+ void RunMinorGC();
+
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
void AllocatedObjectSizeDecreased(size_t) final;
@@ -134,6 +144,9 @@ class V8_EXPORT_PRIVATE CppHeap final
Isolate* isolate() const { return isolate_; }
+ std::unique_ptr<CppMarkingState> CreateCppMarkingState();
+ std::unique_ptr<CppMarkingState> CreateCppMarkingStateForMutatorThread();
+
private:
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -147,8 +160,14 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinalizeIncrementalGarbageCollectionForTesting(
cppgc::EmbedderStackState) final;
+ MarkingType SelectMarkingType() const;
+ SweepingType SelectSweepingType() const;
+
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
+ // |collection_type_| is initialized when marking is in progress.
+ base::Optional<cppgc::internal::GarbageCollector::Config::CollectionType>
+ collection_type_;
GarbageCollectionFlags current_gc_flags_;
// Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
new file mode 100644
index 0000000000..23294b4dca
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
@@ -0,0 +1,47 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
+#define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
+
+#include "src/heap/cppgc-js/cpp-marking-state.h"
+#include "src/heap/embedder-tracing-inl.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects.h"
+
+namespace v8 {
+namespace internal {
+
+bool CppMarkingState::ExtractEmbedderDataSnapshot(
+ Map map, JSObject object, EmbedderDataSnapshot& snapshot) {
+ if (JSObject::GetEmbedderFieldCount(map) < 2) return false;
+
+ EmbedderDataSlot::PopulateEmbedderDataSnapshot(
+ map, object, wrapper_descriptor_.wrappable_type_index, snapshot.first);
+ EmbedderDataSlot::PopulateEmbedderDataSnapshot(
+ map, object, wrapper_descriptor_.wrappable_instance_index,
+ snapshot.second);
+ return true;
+}
+
+void CppMarkingState::MarkAndPush(const EmbedderDataSnapshot& snapshot) {
+ const EmbedderDataSlot type_slot(snapshot.first);
+ const EmbedderDataSlot instance_slot(snapshot.second);
+ MarkAndPush(type_slot, instance_slot);
+}
+
+void CppMarkingState::MarkAndPush(const EmbedderDataSlot type_slot,
+ const EmbedderDataSlot instance_slot) {
+ LocalEmbedderHeapTracer::WrapperInfo info;
+ if (LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ isolate_, wrapper_descriptor_, type_slot, instance_slot, &info)) {
+ marking_state_.MarkAndPush(
+ cppgc::internal::HeapObjectHeader::FromObject(info.second));
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
new file mode 100644
index 0000000000..ad8ef3b680
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
@@ -0,0 +1,67 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
+#define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
+
+#include <memory>
+
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-worklists.h"
+#include "src/objects/embedder-data-slot.h"
+
+namespace v8 {
+namespace internal {
+
+class JSObject;
+class EmbedderDataSlot;
+
+class CppMarkingState {
+ public:
+ using EmbedderDataSnapshot =
+ std::pair<EmbedderDataSlot::EmbedderDataSlotSnapshot,
+ EmbedderDataSlot::EmbedderDataSlotSnapshot>;
+
+ CppMarkingState(Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ cppgc::internal::MarkingStateBase& main_thread_marking_state)
+ : isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor),
+ owned_marking_state_(nullptr),
+ marking_state_(main_thread_marking_state) {}
+
+ CppMarkingState(Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ std::unique_ptr<cppgc::internal::MarkingStateBase>
+ concurrent_marking_state)
+ : isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor),
+ owned_marking_state_(std::move(concurrent_marking_state)),
+ marking_state_(*owned_marking_state_) {}
+ CppMarkingState(const CppMarkingState&) = delete;
+ CppMarkingState& operator=(const CppMarkingState&) = delete;
+
+ void Publish() { marking_state_.Publish(); }
+
+ inline bool ExtractEmbedderDataSnapshot(Map, JSObject, EmbedderDataSnapshot&);
+
+ inline void MarkAndPush(const EmbedderDataSnapshot&);
+ inline void MarkAndPush(const EmbedderDataSlot type_slot,
+ const EmbedderDataSlot instance_slot);
+
+ bool IsLocalEmpty() {
+ return marking_state_.marking_worklist().IsLocalEmpty();
+ }
+
+ private:
+ Isolate* const isolate_;
+ const WrapperDescriptor& wrapper_descriptor_;
+
+ std::unique_ptr<cppgc::internal::MarkingStateBase> owned_marking_state_;
+ cppgc::internal::MarkingStateBase& marking_state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index e1065376ea..69ab62f086 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -358,7 +358,8 @@ void* ExtractEmbedderDataBackref(Isolate* isolate,
if (!v8_value->IsObject()) return nullptr;
Handle<Object> v8_object = Utils::OpenHandle(*v8_value);
- if (!v8_object->IsJSObject() || !JSObject::cast(*v8_object).IsApiWrapper())
+ if (!v8_object->IsJSObject() ||
+ !JSObject::cast(*v8_object).MayHaveEmbedderFields())
return nullptr;
JSObject js_object = JSObject::cast(*v8_object);
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index 09564055dc..f884b1d9fe 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
- HeapBase& heap, cppgc::internal::MarkingStateBase& marking_state,
+ HeapBase& heap, cppgc::internal::BasicMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
marking_state_(marking_state),
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index 721dbe5d98..abff33cd5a 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -17,7 +17,7 @@ class SourceLocation;
namespace internal {
class ConcurrentMarkingState;
-class MarkingStateBase;
+class BasicMarkingState;
class MutatorMarkingState;
} // namespace internal
} // namespace cppgc
@@ -31,9 +31,11 @@ using cppgc::WeakCallback;
using cppgc::internal::HeapBase;
using cppgc::internal::MutatorMarkingState;
+class UnifiedHeapMarker;
+
class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
public:
- UnifiedHeapMarkingVisitorBase(HeapBase&, cppgc::internal::MarkingStateBase&,
+ UnifiedHeapMarkingVisitorBase(HeapBase&, cppgc::internal::BasicMarkingState&,
UnifiedHeapMarkingState&);
~UnifiedHeapMarkingVisitorBase() override = default;
@@ -49,13 +51,15 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
void HandleMovableReference(const void**) final;
// JS handling.
- void Visit(const TracedReferenceBase& ref) final;
+ void Visit(const TracedReferenceBase& ref) override;
- cppgc::internal::MarkingStateBase& marking_state_;
+ cppgc::internal::BasicMarkingState& marking_state_;
UnifiedHeapMarkingState& unified_heap_marking_state_;
+
+ friend class UnifiedHeapMarker;
};
-class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
: public UnifiedHeapMarkingVisitorBase {
public:
MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
@@ -68,6 +72,18 @@ class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
const SourceLocation&) final;
};
+class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
+ : public MutatorUnifiedHeapMarkingVisitor {
+ public:
+ using MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor;
+ ~MutatorMinorGCMarkingVisitor() override = default;
+
+ protected:
+ // Override and make the function empty, since we don't want to trace V8
+ // reference during cppgc's minor GC.
+ void Visit(const TracedReferenceBase&) final {}
+};
+
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
index 46884d42df..1899557134 100644
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ b/deps/v8/src/heap/cppgc/default-platform.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <include/cppgc/default-platform.h>
+#include "include/cppgc/default-platform.h"
namespace cppgc {
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
index ccc5840af4..568ff2a5ce 100644
--- a/deps/v8/src/heap/cppgc/explicit-management.cc
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -4,6 +4,7 @@
#include "include/cppgc/explicit-management.h"
+#include <algorithm>
#include <tuple>
#include "src/heap/cppgc/heap-base.h"
@@ -26,7 +27,8 @@ bool InGC(HeapHandle& heap_handle) {
} // namespace
-void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
+void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
+ void* object) {
if (InGC(heap_handle)) {
return;
}
@@ -34,16 +36,21 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
+ size_t object_size = 0;
+ USE(object_size);
+
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
if (base_page->is_large()) { // Large object.
+ object_size = LargePage::From(base_page)->ObjectSize();
base_page->space().RemovePage(base_page);
base_page->heap().stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
const size_t header_size = header.AllocatedSize();
+ object_size = header.ObjectSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
@@ -59,6 +66,13 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
// list entry.
}
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& heap_base = HeapBase::From(heap_handle);
+ heap_base.remembered_set().InvalidateRememberedSlotsInRange(
+ object, reinterpret_cast<uint8_t*>(object) + object_size);
+ // If this object was registered as remembered, remove it.
+ heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
namespace {
@@ -98,17 +112,19 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
lab.Set(free_start, lab.size() + size_delta);
SetMemoryInaccessible(lab.start(), size_delta);
header.SetAllocatedSize(new_size);
- return true;
- }
- // Heuristic: Only return memory to the free list if the block is larger than
- // the smallest size class.
- if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
+ } else if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
+ // Heuristic: Only return memory to the free list if the block is larger
+ // than the smallest size class.
SetMemoryInaccessible(free_start, size_delta);
base_page.heap().stats_collector()->NotifyExplicitFree(size_delta);
normal_space.free_list().Add({free_start, size_delta});
NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
header.SetAllocatedSize(new_size);
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ base_page.heap().remembered_set().InvalidateRememberedSlotsInRange(
+ free_start, free_start + size_delta);
+#endif // defined(CPPGC_YOUNG_GENERATION)
// Return success in any case, as we want to avoid that embedders start
// copying memory because of small deltas.
return true;
@@ -116,7 +132,7 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
} // namespace
-bool Resize(void* object, size_t new_object_size) {
+bool ExplicitManagementImpl::Resize(void* object, size_t new_object_size) {
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index b8e52452ee..a49a7a1bad 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+#include "include/cppgc/common.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/sweeper.h"
@@ -55,6 +56,11 @@ class GarbageCollector {
MarkingType::kAtomic, SweepingType::kAtomic};
}
+ static constexpr Config MinorConservativeAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
CollectionType collection_type = CollectionType::kMajor;
StackState stack_state = StackState::kMayContainHeapPointers;
MarkingType marking_type = MarkingType::kAtomic;
@@ -70,6 +76,9 @@ class GarbageCollector {
// The current epoch that the GC maintains. The epoch is increased on every
// GC invocation.
virtual size_t epoch() const = 0;
+
+ // Returns a non-null state if the stack state if overriden.
+ virtual const EmbedderStackState* override_stack_state() const = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.cc b/deps/v8/src/heap/cppgc/gc-invoker.cc
index 9537f0c2a4..1bddad7a7e 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.cc
+++ b/deps/v8/src/heap/cppgc/gc-invoker.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "include/cppgc/common.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/task-handle.h"
@@ -24,6 +25,9 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
void CollectGarbage(GarbageCollector::Config) final;
void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final { return collector_->epoch(); }
+ const EmbedderStackState* override_stack_state() const final {
+ return collector_->override_stack_state();
+ }
private:
class GCTask final : public cppgc::Task {
@@ -48,6 +52,8 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
private:
void Run() final {
+ CHECK_NULL(collector_->override_stack_state());
+
if (handle_.IsCanceled() || (collector_->epoch() != saved_epoch_)) return;
collector_->CollectGarbage(config_);
@@ -94,6 +100,8 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
// Force a precise GC since it will run in a non-nestable task.
config.stack_state =
GarbageCollector::Config::StackState::kNoHeapPointers;
+ DCHECK_NE(cppgc::Heap::StackSupport::kSupportsConservativeStackScan,
+ stack_support_);
gc_task_handle_ = GCTask::Post(
collector_, platform_->GetForegroundTaskRunner().get(), config);
}
@@ -137,5 +145,9 @@ void GCInvoker::StartIncrementalGarbageCollection(
size_t GCInvoker::epoch() const { return impl_->epoch(); }
+const EmbedderStackState* GCInvoker::override_stack_state() const {
+ return impl_->override_stack_state();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.h b/deps/v8/src/heap/cppgc/gc-invoker.h
index fa5e7e5435..ceebca139c 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.h
+++ b/deps/v8/src/heap/cppgc/gc-invoker.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_GC_INVOKER_H_
#define V8_HEAP_CPPGC_GC_INVOKER_H_
+#include "include/cppgc/common.h"
#include "include/cppgc/heap.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/garbage-collector.h"
@@ -36,6 +37,7 @@ class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
void CollectGarbage(GarbageCollector::Config) final;
void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final;
+ const EmbedderStackState* override_stack_state() const final;
private:
class GCInvokerImpl;
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index bf1e215c22..e04df872b2 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -43,9 +43,30 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+// No guard pages on ARM64 macOS. This target has 16 kiB pages, meaning that
+// the guard pages do not protect anything, since there is no inaccessible
+// region surrounding the allocation.
+//
+// However, with a 4k guard page size (as below), we avoid putting any data
+// inside the "guard pages" region. Effectively, this wastes 2 * 4kiB of memory
+// for each 128kiB page, since this is memory we pay for (since accounting as at
+// the OS page level), but never use.
+//
+// The layout of pages is broadly:
+// | guard page | header | payload | guard page |
+// <--- 4k ---> <--- 4k --->
+// <------------------ 128k -------------------->
+//
+// Since this is aligned on an OS page boundary (16k), the guard pages are part
+// of the first and last OS page, respectively. So they are really private dirty
+// memory which we never use.
+constexpr size_t kGuardPageSize = 0;
+#else
// Guard pages are always put into memory. Whether they are actually protected
// depends on the allocator provided to the garbage collector.
constexpr size_t kGuardPageSize = 4096;
+#endif
constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index a5c89b6218..14b0d2ad19 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -82,6 +82,9 @@ HeapBase::HeapBase(
weak_persistent_region_(*oom_handler_.get()),
strong_cross_thread_persistent_region_(*oom_handler_.get()),
weak_cross_thread_persistent_region_(*oom_handler_.get()),
+#if defined(CPPGC_YOUNG_GENERATION)
+ remembered_set_(*this),
+#endif // defined(CPPGC_YOUNG_GENERATION)
stack_support_(stack_support),
marking_support_(marking_support),
sweeping_support_(sweeping_support) {
@@ -136,7 +139,7 @@ void HeapBase::ResetRememberedSet() {
};
DCHECK(AllLABsAreEmpty(raw_heap()).value());
caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
- remembered_slots().clear();
+ remembered_set_.Reset();
}
#endif // defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 041f4cf3bd..0c6fe757f8 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -15,6 +15,7 @@
#include "src/base/macros.h"
#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
@@ -29,6 +30,10 @@
#include "src/heap/cppgc/caged-heap.h"
#endif
+#if defined(CPPGC_YOUNG_GENERATION)
+#include "src/heap/cppgc/remembered-set.h"
+#endif
+
namespace v8 {
namespace base {
class LsanPageAllocator;
@@ -162,7 +167,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
}
#if defined(CPPGC_YOUNG_GENERATION)
- std::set<void*>& remembered_slots() { return remembered_slots_; }
+ OldToNewRememberedSet& remembered_set() { return remembered_set_; }
#endif // defined(CPPGC_YOUNG_GENERATION)
size_t ObjectPayloadSize() const;
@@ -207,6 +212,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int GetCreationThreadId() const { return creation_thread_id_; }
MarkingType marking_support() const { return marking_support_; }
+ SweepingType sweeping_support() const { return sweeping_support_; }
protected:
// Used by the incremental scheduler to finalize a GC if supported.
@@ -259,8 +265,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
ProcessHeapStatisticsUpdater::AllocationObserverImpl
allocation_observer_for_PROCESS_HEAP_STATISTICS_;
#if defined(CPPGC_YOUNG_GENERATION)
- std::set<void*> remembered_slots_;
-#endif
+ OldToNewRememberedSet remembered_set_;
+#endif // defined(CPPGC_YOUNG_GENERATION)
size_t no_gc_scope_ = 0;
size_t disallow_gc_scope_ = 0;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index e5a428a5a9..a6efb8defd 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -125,18 +125,17 @@ class HeapObjectHeader {
using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
// Used in |encoded_low_|.
using MarkBitField = v8::base::BitField16<bool, 0, 1>;
- using SizeField = void; // Use EncodeSize/DecodeSize instead.
+ using SizeField =
+ MarkBitField::Next<size_t, 15>; // Use EncodeSize/DecodeSize instead.
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
- using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
- return SizeFieldImpl::decode(encoded) * kAllocationGranularity;
+ return SizeField::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
- using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
- return SizeFieldImpl::encode(size / kAllocationGranularity);
+ return SizeField::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
@@ -230,8 +229,16 @@ size_t HeapObjectHeader::AllocatedSize() const {
}
void HeapObjectHeader::SetAllocatedSize(size_t size) {
+#if !defined(CPPGC_YOUNG_GENERATION)
+ // With sticky bits, marked objects correspond to old objects.
+ // TODO(bikineev:1029379): Consider disallowing old/marked objects to be
+ // resized.
DCHECK(!IsMarked());
- encoded_low_ = EncodeSize(size);
+#endif
+ // The object may be marked (i.e. old, in case young generation is enabled).
+ // Make sure to not overwrite the mark bit.
+ encoded_low_ &= ~SizeField::encode(SizeField::kMax);
+ encoded_low_ |= EncodeSize(size);
}
template <AccessMode mode>
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
index 9a78b44433..96e57d233f 100644
--- a/deps/v8/src/heap/cppgc/heap-space.cc
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -20,6 +20,8 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type,
USE(is_compactable_);
}
+BaseSpace::~BaseSpace() = default;
+
void BaseSpace::AddPage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index 18fe7ba225..39be232079 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -28,6 +28,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
BaseSpace(const BaseSpace&) = delete;
BaseSpace& operator=(const BaseSpace&) = delete;
+ virtual ~BaseSpace();
iterator begin() { return pages_.begin(); }
const_iterator begin() const { return pages_.begin(); }
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 26500a9ca8..beaa089206 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -159,8 +159,8 @@ void Heap::StartGarbageCollection(Config config) {
const Marker::MarkingConfig marking_config{
config.collection_type, config.stack_state, config.marking_type,
config.is_forced_gc};
- marker_ = MarkerFactory::CreateAndStartMarking<Marker>(
- AsBase(), platform_.get(), marking_config);
+ marker_ = std::make_unique<Marker>(AsBase(), platform_.get(), marking_config);
+ marker_->StartMarking();
}
void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
@@ -168,9 +168,6 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
config_.stack_state = stack_state;
- if (override_stack_state_) {
- config_.stack_state = *override_stack_state_;
- }
SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
in_atomic_pause_ = true;
{
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index c3504073bc..e47d203327 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -37,6 +37,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void FinalizeIncrementalGarbageCollectionIfRunning(Config);
size_t epoch() const final { return epoch_; }
+ const EmbedderStackState* override_stack_state() const final {
+ return HeapBase::override_stack_state();
+ }
void DisableHeapGrowingForTesting();
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index e792c4c844..fa8732fde7 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -60,33 +60,11 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
return false;
}
-// Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(HeapBase& heap,
- MutatorMarkingState& mutator_marking_state) {
-#if defined(CPPGC_YOUNG_GENERATION)
- StatsCollector::EnabledScope stats_scope(
- heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
- for (void* slot : heap.remembered_slots()) {
- auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
- ->ObjectHeaderFromInnerAddress(slot);
- if (slot_header.IsYoung()) continue;
- // The design of young generation requires collections to be executed at the
- // top level (with the guarantee that no objects are currently being in
- // construction). This can be ensured by running young GCs from safe points
- // or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
-
- void* value = *reinterpret_cast<void**>(slot);
- mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
- }
-#endif
-}
-
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
-bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
+bool DrainWorklistWithBytesAndTimeDeadline(BasicMarkingState& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
WorklistLocal& worklist_local,
@@ -153,7 +131,7 @@ void MarkerBase::IncrementalMarkingTask::Run() {
}
}
-MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
+MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: heap_(heap),
config_(config),
@@ -248,6 +226,13 @@ void MarkerBase::StartMarking() {
incremental_marking_allocation_observer_.get());
}
}
+void MarkerBase::HandleNotFullyConstructedObjects() {
+ if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+ mutator_marking_state_.FlushNotFullyConstructedObjects();
+ } else {
+ MarkNotFullyConstructedObjects();
+ }
+}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
@@ -271,12 +256,7 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
{
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
- if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
- mutator_marking_state_.FlushNotFullyConstructedObjects();
- DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
- } else {
- MarkNotFullyConstructedObjects();
- }
+ HandleNotFullyConstructedObjects();
}
if (heap().marking_support() ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
@@ -339,12 +319,32 @@ void MarkerBase::ProcessWeakness() {
heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
- MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& remembered_set = heap().remembered_set();
+ if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ // Custom callbacks assume that untraced pointers point to not yet freed
+ // objects. They must make sure that upon callback completion no
+ // UntracedMember points to a freed object. This may not hold true if a
+ // custom callback for an old object operates with a reference to a young
+ // object that was freed on a minor collection cycle. To maintain the
+ // invariant that UntracedMembers always point to valid objects, execute
+ // custom callbacks for old objects on each minor collection cycle.
+ remembered_set.ExecuteCustomCallbacks(broker);
+ } else {
+ // For major GCs, just release all the remembered weak callbacks.
+ remembered_set.ReleaseCustomCallbacks();
+ }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
+ MarkingWorklists::WeakCallbackItem item;
MarkingWorklists::WeakCallbackWorklist::Local& local =
mutator_marking_state_.weak_callback_worklist();
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
+#if defined(CPPGC_YOUNG_GENERATION)
+ heap().remembered_set().AddWeakCallback(item);
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
// Weak callbacks should not add any new objects for marking.
@@ -372,9 +372,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().stats_collector(), StatsCollector::kMarkVisitStack);
heap().stack()->IteratePointers(&stack_visitor());
}
+#if defined(CPPGC_YOUNG_GENERATION)
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
- VisitRememberedSlots(heap(), mutator_marking_state_);
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
+ heap().remembered_set().Visit(visitor(), mutator_marking_state_);
}
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
@@ -434,6 +438,10 @@ bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
concurrent_marker_->Cancel();
concurrent_marking_active_ = false;
+ // Concurrent markers may have pushed some "leftover" in-construction objects
+ // after flushing in EnterAtomicPause.
+ HandleNotFullyConstructedObjects();
+ DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
return true;
}
@@ -618,9 +626,8 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
-Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
- MarkingConfig config)
- : MarkerBase(key, heap, platform, config),
+Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
+ : MarkerBase(heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
marking_visitor_) {
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 1f76583177..d990dcaed0 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -24,12 +24,10 @@ namespace cppgc {
namespace internal {
class HeapBase;
-class MarkerFactory;
// Marking algorithm. Example for a valid call sequence creating the marking
// phase:
-// 1. StartMarking() [Called implicitly when creating a Marker using
-// MarkerFactory]
+// 1. StartMarking()
// 2. AdvanceMarkingWithLimits() [Optional, depending on environment.]
// 3. EnterAtomicPause()
// 4. AdvanceMarkingWithLimits()
@@ -87,6 +85,10 @@ class V8_EXPORT_PRIVATE MarkerBase {
// objects to be marked and merely updates marking states if needed.
void LeaveAtomicPause();
+ // Initialize marking according to the given config. This method will
+ // trigger incremental/concurrent marking if needed.
+ void StartMarking();
+
// Combines:
// - EnterAtomicPause()
// - AdvanceMarkingWithLimits()
@@ -141,17 +143,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
- class Key {
- private:
- Key() = default;
- friend class MarkerFactory;
- };
-
- MarkerBase(Key, HeapBase&, cppgc::Platform*, MarkingConfig);
-
- // Initialize marking according to the given config. This method will
- // trigger incremental/concurrent marking if needed.
- void StartMarking();
+ MarkerBase(HeapBase&, cppgc::Platform*, MarkingConfig);
virtual cppgc::Visitor& visitor() = 0;
virtual ConservativeTracingVisitor& conservative_visitor() = 0;
@@ -173,6 +165,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool CancelConcurrentMarkingIfNeeded();
+ void HandleNotFullyConstructedObjects();
+
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
@@ -193,27 +187,11 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
-
- friend class MarkerFactory;
-};
-
-class V8_EXPORT_PRIVATE MarkerFactory {
- public:
- template <typename T, typename... Args>
- static std::unique_ptr<T> CreateAndStartMarking(Args&&... args) {
- static_assert(std::is_base_of<MarkerBase, T>::value,
- "MarkerFactory can only create subclasses of MarkerBase");
- std::unique_ptr<T> marker =
- std::make_unique<T>(MarkerBase::Key(), std::forward<Args>(args)...);
- marker->StartMarking();
- return marker;
- }
};
class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
public:
- Marker(Key, HeapBase&, cppgc::Platform*,
- MarkingConfig = MarkingConfig::Default());
+ Marker(HeapBase&, cppgc::Platform*, MarkingConfig = MarkingConfig::Default());
protected:
cppgc::Visitor& visitor() final { return marking_visitor_; }
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 513c781b96..b550d4e354 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -23,8 +23,7 @@ namespace internal {
// C++ marking implementation.
class MarkingStateBase {
public:
- inline MarkingStateBase(HeapBase& heap, MarkingWorklists&,
- CompactionWorklists*);
+ inline MarkingStateBase(HeapBase&, MarkingWorklists&);
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
@@ -34,6 +33,86 @@ class MarkingStateBase {
inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
+ void Publish() { marking_worklist_.Publish(); }
+
+ MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
+ return marking_worklist_;
+ }
+ MarkingWorklists::NotFullyConstructedWorklist&
+ not_fully_constructed_worklist() {
+ return not_fully_constructed_worklist_;
+ }
+
+ protected:
+ inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
+
+ inline bool MarkNoPush(HeapObjectHeader&);
+
+ HeapBase& heap_;
+
+ MarkingWorklists::MarkingWorklist::Local marking_worklist_;
+ MarkingWorklists::NotFullyConstructedWorklist&
+ not_fully_constructed_worklist_;
+};
+
+MarkingStateBase::MarkingStateBase(HeapBase& heap,
+ MarkingWorklists& marking_worklists)
+ : heap_(heap),
+ marking_worklist_(marking_worklists.marking_worklist()),
+ not_fully_constructed_worklist_(
+ *marking_worklists.not_fully_constructed_worklist()) {}
+
+void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
+ DCHECK_NOT_NULL(object);
+ MarkAndPush(
+ HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
+ desc);
+}
+
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK_NOT_NULL(desc.callback);
+
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ } else if (MarkNoPush(header)) {
+ PushMarked(header, desc);
+ }
+}
+
+bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
+ // A GC should only mark the objects that belong in its heap.
+ DCHECK_EQ(&heap_, &BasePage::FromPayload(&header)->heap());
+ // Never mark free space objects. This would e.g. hint to marking a promptly
+ // freed backing store.
+ DCHECK(!header.IsFree<AccessMode::kAtomic>());
+ return header.TryMarkAtomic();
+}
+
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
+ MarkAndPush(
+ header,
+ {header.ObjectStart(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
+void MarkingStateBase::PushMarked(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK_NOT_NULL(desc.callback);
+
+ marking_worklist_.Push(desc);
+}
+
+class BasicMarkingState : public MarkingStateBase {
+ public:
+ inline BasicMarkingState(HeapBase& heap, MarkingWorklists&,
+ CompactionWorklists*);
+
+ BasicMarkingState(const BasicMarkingState&) = delete;
+ BasicMarkingState& operator=(const BasicMarkingState&) = delete;
+
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void RegisterWeakCallback(WeakCallback, const void*);
@@ -58,7 +137,7 @@ class MarkingStateBase {
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
- marking_worklist_.Publish();
+ MarkingStateBase::Publish();
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
@@ -68,13 +147,6 @@ class MarkingStateBase {
if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
}
- MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
- return marking_worklist_;
- }
- MarkingWorklists::NotFullyConstructedWorklist&
- not_fully_constructed_worklist() {
- return not_fully_constructed_worklist_;
- }
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
@@ -121,21 +193,12 @@ class MarkingStateBase {
void set_in_atomic_pause() { in_atomic_pause_ = true; }
protected:
- inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
-
- inline bool MarkNoPush(HeapObjectHeader&);
-
inline void RegisterWeakContainer(HeapObjectHeader&);
inline bool IsCompactionEnabled() const {
return movable_slots_worklist_.get();
}
- HeapBase& heap_;
-
- MarkingWorklists::MarkingWorklist::Local marking_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist&
- not_fully_constructed_worklist_;
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
@@ -160,13 +223,10 @@ class MarkingStateBase {
bool in_atomic_pause_ = false;
};
-MarkingStateBase::MarkingStateBase(HeapBase& heap,
- MarkingWorklists& marking_worklists,
- CompactionWorklists* compaction_worklists)
- : heap_(heap),
- marking_worklist_(marking_worklists.marking_worklist()),
- not_fully_constructed_worklist_(
- *marking_worklists.not_fully_constructed_worklist()),
+BasicMarkingState::BasicMarkingState(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists),
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
@@ -187,53 +247,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
}
}
-void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
- DCHECK_NOT_NULL(object);
- MarkAndPush(
- HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
- desc);
-}
-
-void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
- TraceDescriptor desc) {
- DCHECK_NOT_NULL(desc.callback);
-
- if (header.IsInConstruction<AccessMode::kAtomic>()) {
- not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
- } else if (MarkNoPush(header)) {
- PushMarked(header, desc);
- }
-}
-
-bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
- // A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(&heap_, &BasePage::FromPayload(&header)->heap());
- // Never mark free space objects. This would e.g. hint to marking a promptly
- // freed backing store.
- DCHECK(!header.IsFree<AccessMode::kAtomic>());
- return header.TryMarkAtomic();
-}
-
-void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
- MarkAndPush(
- header,
- {header.ObjectStart(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
-}
-
-void MarkingStateBase::PushMarked(HeapObjectHeader& header,
- TraceDescriptor desc) {
- DCHECK(header.IsMarked<AccessMode::kAtomic>());
- DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
- DCHECK_NOT_NULL(desc.callback);
-
- marking_worklist_.Push(desc);
-}
-
-void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void BasicMarkingState::RegisterWeakReferenceIfNeeded(
+ const void* object, TraceDescriptor desc, WeakCallback weak_callback,
+ const void* parameter) {
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
@@ -245,20 +261,20 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
RegisterWeakCallback(weak_callback, parameter);
}
-void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void BasicMarkingState::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
DCHECK_NOT_NULL(callback);
weak_callback_worklist_.Push({callback, object});
}
-void MarkingStateBase::RegisterWeakContainer(HeapObjectHeader& header) {
+void BasicMarkingState::RegisterWeakContainer(HeapObjectHeader& header) {
weak_containers_worklist_.Push<AccessMode::kAtomic>(&header);
}
-void MarkingStateBase::ProcessWeakContainer(const void* object,
- TraceDescriptor desc,
- WeakCallback callback,
- const void* data) {
+void BasicMarkingState::ProcessWeakContainer(const void* object,
+ TraceDescriptor desc,
+ WeakCallback callback,
+ const void* data) {
DCHECK_NOT_NULL(object);
HeapObjectHeader& header =
@@ -291,9 +307,9 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
}
}
-void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
- TraceDescriptor value_desc,
- Visitor& visitor) {
+void BasicMarkingState::ProcessEphemeron(const void* key, const void* value,
+ TraceDescriptor value_desc,
+ Visitor& visitor) {
// ProcessEphemeron is not expected to find new ephemerons recursively, which
// would break the main marking loop.
DCHECK(!in_ephemeron_processing_);
@@ -325,7 +341,7 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
in_ephemeron_processing_ = false;
}
-void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
+void BasicMarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
AccountMarkedBytes(
header.IsLargeObject<AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
@@ -333,18 +349,18 @@ void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
: header.AllocatedSize<AccessMode::kAtomic>());
}
-void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
+void BasicMarkingState::AccountMarkedBytes(size_t marked_bytes) {
marked_bytes_ += marked_bytes;
}
-class MutatorMarkingState : public MarkingStateBase {
+class MutatorMarkingState : public BasicMarkingState {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
inline bool MarkNoPush(HeapObjectHeader& header) {
- return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
+ return MutatorMarkingState::BasicMarkingState::MarkNoPush(header);
}
inline void ReTraceMarkedWeakContainer(cppgc::Visitor&, HeapObjectHeader&);
@@ -440,11 +456,11 @@ void MutatorMarkingState::RecentlyRetracedWeakContainers::Insert(
recently_retraced_cache_[last_used_index_] = header;
}
-class ConcurrentMarkingState : public MarkingStateBase {
+class ConcurrentMarkingState : public BasicMarkingState {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index f2dff286cd..1dff652bd0 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -12,7 +12,7 @@ namespace cppgc {
namespace internal {
MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
- MarkingStateBase& marking_state)
+ BasicMarkingState& marking_state)
: marking_state_(marking_state) {}
void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 4692b32025..302c0d262b 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -16,13 +16,13 @@ namespace internal {
class HeapBase;
class HeapObjectHeader;
class Marker;
-class MarkingStateBase;
+class BasicMarkingState;
class MutatorMarkingState;
class ConcurrentMarkingState;
class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
public:
- MarkingVisitorBase(HeapBase&, MarkingStateBase&);
+ MarkingVisitorBase(HeapBase&, BasicMarkingState&);
~MarkingVisitorBase() override = default;
protected:
@@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
void RegisterWeakCallback(WeakCallback, const void*) final;
void HandleMovableReference(const void**) final;
- MarkingStateBase& marking_state_;
+ BasicMarkingState& marking_state_;
};
class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
index e5b73318f7..5006b9121b 100644
--- a/deps/v8/src/heap/cppgc/page-memory.h
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -242,7 +242,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
- return kGuardPageSize % allocator.CommitPageSize() == 0;
+ return kGuardPageSize != 0 &&
+ kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
index fd769ae469..ba5d2a18d0 100644
--- a/deps/v8/src/heap/cppgc/platform.cc
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -16,7 +16,13 @@
namespace cppgc {
namespace internal {
-void Abort() { v8::base::OS::Abort(); }
+void Fatal(const std::string& reason, const SourceLocation& loc) {
+#ifdef DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()), "%s", reason.c_str());
+#else // !DEBUG
+ V8_Fatal("%s", reason.c_str());
+#endif // !DEBUG
+}
void FatalOutOfMemoryHandler::operator()(const std::string& reason,
const SourceLocation& loc) const {
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 1a4c60e3a2..5cf6435390 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -16,17 +16,14 @@
namespace cppgc {
namespace internal {
-// static
-void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- PreFinalizer pre_finalizer) {
- BasePage::FromPayload(pre_finalizer.object)
- ->heap()
- .prefinalizer_handler()
- ->RegisterPrefinalizer(pre_finalizer);
+PrefinalizerRegistration::PrefinalizerRegistration(void* object,
+ Callback callback) {
+ auto* page = BasePage::FromPayload(object);
+ DCHECK(!page->space().is_compactable());
+ page->heap().prefinalizer_handler()->RegisterPrefinalizer({object, callback});
}
-bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
- const PreFinalizer& other) const {
+bool PreFinalizer::operator==(const PreFinalizer& other) const {
return (object == other.object) && (callback == other.callback);
}
@@ -36,7 +33,7 @@ PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
-#endif
+#endif // DEBUG
{
}
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
index bc17c99b18..e3850174db 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
#define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+#include <utility>
#include <vector>
#include "include/cppgc/prefinalizer.h"
@@ -14,11 +15,17 @@ namespace internal {
class HeapBase;
+struct PreFinalizer final {
+ using Callback = PrefinalizerRegistration::Callback;
+
+ void* object;
+ Callback callback;
+
+ bool operator==(const PreFinalizer& other) const;
+};
+
class PreFinalizerHandler final {
public:
- using PreFinalizer =
- cppgc::internal::PreFinalizerRegistrationDispatcher::PreFinalizer;
-
explicit PreFinalizerHandler(HeapBase& heap);
void RegisterPrefinalizer(PreFinalizer pre_finalizer);
diff --git a/deps/v8/src/heap/cppgc/remembered-set.cc b/deps/v8/src/heap/cppgc/remembered-set.cc
new file mode 100644
index 0000000000..8843219745
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/remembered-set.cc
@@ -0,0 +1,135 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/remembered-set.h"
+
+#include <algorithm>
+
+#include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/marking-state.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+// Visit remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(std::set<void*> slots, const HeapBase& heap,
+ MutatorMarkingState& mutator_marking_state) {
+ for (void* slot : slots) {
+ // Slot must always point to a valid, not freed object.
+ auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
+ ->ObjectHeaderFromInnerAddress(slot);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (slot_header.IsYoung()) continue;
+ // The design of young generation requires collections to be executed at the
+ // top level (with the guarantee that no objects are currently being in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
+
+ void* value = *reinterpret_cast<void**>(slot);
+ // Slot could be updated to nullptr or kSentinelPointer by the mutator.
+ if (value == kSentinelPointer || value == nullptr) continue;
+
+#if DEBUG
+ // Check that the slot can not point to a freed object.
+ HeapObjectHeader& header =
+ BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
+ DCHECK(!header.IsFree());
+#endif
+
+ mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ }
+}
+
+// Visits source objects that were recorded in the generational barrier for
+// slots.
+void VisitRememberedSourceObjects(
+ std::set<HeapObjectHeader*> remembered_source_objects, Visitor& visitor) {
+ for (HeapObjectHeader* source_hoh : remembered_source_objects) {
+ DCHECK(source_hoh);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (source_hoh->IsYoung()) continue;
+ // The design of young generation requires collections to be executed at the
+ // top level (with the guarantee that no objects are currently being in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!source_hoh->template IsInConstruction<AccessMode::kNonAtomic>());
+
+ const TraceCallback trace_callback =
+ GlobalGCInfoTable::GCInfoFromIndex(source_hoh->GetGCInfoIndex()).trace;
+
+ // Process eagerly to avoid reaccounting.
+ trace_callback(&visitor, source_hoh->ObjectStart());
+ }
+}
+
+} // namespace
+
+void OldToNewRememberedSet::AddSlot(void* slot) {
+ remembered_slots_.insert(slot);
+}
+
+void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
+ remembered_source_objects_.insert(&hoh);
+}
+
+void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
+ // TODO(1029379): WeakCallbacks are also executed for weak collections.
+ // Consider splitting weak-callbacks in custom weak callbacks and ones for
+ // collections.
+ remembered_weak_callbacks_.insert(item);
+}
+
+void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
+ void* end) {
+ // TODO(1029379): The 2 binary walks can be optimized with a custom algorithm.
+ auto from = remembered_slots_.lower_bound(begin),
+ to = remembered_slots_.lower_bound(end);
+ remembered_slots_.erase(from, to);
+#if defined(ENABLE_SLOW_DCHECKS)
+ // Check that no remembered slots are referring to the freed area.
+ DCHECK(std::none_of(remembered_slots_.begin(), remembered_slots_.end(),
+ [begin, end](void* slot) {
+ void* value = *reinterpret_cast<void**>(slot);
+ return begin <= value && value < end;
+ }));
+#endif // defined(ENABLE_SLOW_DCHECKS)
+}
+
+void OldToNewRememberedSet::InvalidateRememberedSourceObject(
+ HeapObjectHeader& header) {
+ remembered_source_objects_.erase(&header);
+}
+
+void OldToNewRememberedSet::Visit(Visitor& visitor,
+ MutatorMarkingState& marking_state) {
+ VisitRememberedSlots(remembered_slots_, heap_, marking_state);
+ VisitRememberedSourceObjects(remembered_source_objects_, visitor);
+}
+
+void OldToNewRememberedSet::ExecuteCustomCallbacks(LivenessBroker broker) {
+ for (const auto& callback : remembered_weak_callbacks_) {
+ callback.callback(broker, callback.parameter);
+ }
+}
+
+void OldToNewRememberedSet::ReleaseCustomCallbacks() {
+ remembered_weak_callbacks_.clear();
+}
+
+void OldToNewRememberedSet::Reset() {
+ remembered_slots_.clear();
+ remembered_source_objects_.clear();
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/remembered-set.h b/deps/v8/src/heap/cppgc/remembered-set.h
new file mode 100644
index 0000000000..eb8de6da8e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/remembered-set.h
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_REMEMBERED_SET_H_
+#define V8_HEAP_CPPGC_REMEMBERED_SET_H_
+
+#include <set>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/marking-worklists.h"
+
+namespace cppgc {
+
+class Visitor;
+class LivenessBroker;
+
+namespace internal {
+
+class HeapBase;
+class HeapObjectHeader;
+class MutatorMarkingState;
+
+class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
+ public:
+ using WeakCallbackItem = MarkingWorklists::WeakCallbackItem;
+
+ explicit OldToNewRememberedSet(const HeapBase& heap)
+ : heap_(heap), remembered_weak_callbacks_(compare_parameter) {}
+
+ OldToNewRememberedSet(const OldToNewRememberedSet&) = delete;
+ OldToNewRememberedSet& operator=(const OldToNewRememberedSet&) = delete;
+
+ void AddSlot(void* slot);
+ void AddSourceObject(HeapObjectHeader& source_hoh);
+ void AddWeakCallback(WeakCallbackItem);
+
+ void InvalidateRememberedSlotsInRange(void* begin, void* end);
+ void InvalidateRememberedSourceObject(HeapObjectHeader& source_hoh);
+
+ void Visit(Visitor&, MutatorMarkingState&);
+
+ void ExecuteCustomCallbacks(LivenessBroker);
+ void ReleaseCustomCallbacks();
+
+ void Reset();
+
+ private:
+ friend class MinorGCTest;
+
+ static constexpr struct {
+ bool operator()(const WeakCallbackItem& lhs,
+ const WeakCallbackItem& rhs) const {
+ return lhs.parameter < rhs.parameter;
+ }
+ } compare_parameter{};
+
+ const HeapBase& heap_;
+ std::set<void*> remembered_slots_;
+ std::set<HeapObjectHeader*> remembered_source_objects_;
+ std::set<WeakCallbackItem, decltype(compare_parameter)>
+ remembered_weak_callbacks_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_REMEMBERED_SET_H_
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index b063b26f04..0aa12a614a 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -737,8 +737,6 @@ class Sweeper::SweeperImpl final {
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
} else {
- DCHECK_EQ(SweepingConfig::SweepingType::kIncrementalAndConcurrent,
- config.sweeping_type);
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
@@ -811,10 +809,25 @@ class Sweeper::SweeperImpl final {
NotifyDone();
}
+ void FinishIfOutOfWork() {
+ if (is_in_progress_ && !is_sweeping_on_mutator_thread_ &&
+ concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
+ !concurrent_sweeper_handle_->IsActive()) {
+ // At this point we know that the concurrent sweeping task has run
+ // out-of-work: all pages are swept. The main thread still needs to finish
+ // sweeping though.
+ DCHECK(std::all_of(space_states_.begin(), space_states_.end(),
+ [](const SpaceState& state) {
+ return state.unswept_pages.IsEmpty();
+ }));
+ FinishIfRunning();
+ }
+ }
+
void Finish() {
DCHECK(is_in_progress_);
- MutatorThreadSweepingScope sweeping_in_progresss(*this);
+ MutatorThreadSweepingScope sweeping_in_progress(*this);
// First, call finalizers on the mutator thread.
SweepFinalizer finalizer(platform_, config_.free_memory_handling);
@@ -953,6 +966,10 @@ class Sweeper::SweeperImpl final {
void ScheduleConcurrentSweeping() {
DCHECK(platform_);
+ if (config_.sweeping_type !=
+ SweepingConfig::SweepingType::kIncrementalAndConcurrent)
+ return;
+
concurrent_sweeper_handle_ =
platform_->PostJob(cppgc::TaskPriority::kUserVisible,
std::make_unique<ConcurrentSweepTask>(
@@ -999,6 +1016,7 @@ void Sweeper::Start(SweepingConfig config) {
impl_->Start(config, heap_.platform());
}
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 2254453d7a..845dfbbfc1 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(SweepingConfig);
void FinishIfRunning();
+ void FinishIfOutOfWork();
void NotifyDoneIfNeeded();
// SweepForAllocationIfRunning sweeps the given |space| until a slot that can
// fit an allocation of size |size| is found. Returns true if a slot was
diff --git a/deps/v8/src/heap/cppgc/testing.cc b/deps/v8/src/heap/cppgc/testing.cc
index 0c81d7003b..38e96abf78 100644
--- a/deps/v8/src/heap/cppgc/testing.cc
+++ b/deps/v8/src/heap/cppgc/testing.cc
@@ -54,5 +54,13 @@ void StandaloneTestingHeap::ForceCompactionForNextGarbageCollection() {
.EnableForNextGCForTesting();
}
+bool IsHeapObjectOld(void* object) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ return internal::HeapObjectHeader::FromObject(object).IsMarked();
+#else
+ return true;
+#endif
+}
+
} // namespace testing
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 007abe3005..c533c353c3 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -129,6 +129,7 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& age_table,
const void* slot,
uintptr_t value_offset) {
+ DCHECK(slot);
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
@@ -136,8 +137,23 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
+
// Record slot.
- local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
+ local_data.heap_base.remembered_set().AddSlot((const_cast<void*>(slot)));
+}
+
+// static
+void WriteBarrier::GenerationalBarrierForSourceObjectSlow(
+ const CagedHeapLocalData& local_data, const void* inner_pointer) {
+ DCHECK(inner_pointer);
+
+ auto& object_header =
+ BasePage::FromInnerAddress(&local_data.heap_base, inner_pointer)
+ ->ObjectHeaderFromInnerAddress(inner_pointer);
+
+ // Record the source object.
+ local_data.heap_base.remembered_set().AddSourceObject(
+ const_cast<HeapObjectHeader&>(object_header));
}
#endif // CPPGC_YOUNG_GENERATION
diff --git a/deps/v8/src/heap/embedder-tracing-inl.h b/deps/v8/src/heap/embedder-tracing-inl.h
new file mode 100644
index 0000000000..9a1c201f41
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_HEAP_EMBEDDER_TRACING_INL_H_
+#define V8_HEAP_EMBEDDER_TRACING_INL_H_
+
+#include "src/heap/embedder-tracing.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ Isolate* isolate, JSObject js_object,
+ const WrapperDescriptor& wrapper_descriptor, WrapperInfo* info) {
+ DCHECK(js_object.MayHaveEmbedderFields());
+ if (js_object.GetEmbedderFieldCount() < 2) return false;
+
+ return ExtractWrappableInfo(
+ isolate, wrapper_descriptor,
+ EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index),
+ EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index),
+ info);
+}
+
+// static
+bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ const EmbedderDataSlot& type_slot, const EmbedderDataSlot& instance_slot,
+ WrapperInfo* info) {
+ if (type_slot.ToAlignedPointer(isolate, &info->first) && info->first &&
+ instance_slot.ToAlignedPointer(isolate, &info->second) && info->second) {
+ return (wrapper_descriptor.embedder_id_for_garbage_collected ==
+ WrapperDescriptor::kUnknownEmbedderId) ||
+ (*static_cast<uint16_t*>(info->first) ==
+ wrapper_descriptor.embedder_id_for_garbage_collected);
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EMBEDDER_TRACING_INL_H_
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 08738af3f0..a61b89c5dc 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -6,10 +6,11 @@
#include "include/v8-cppgc.h"
#include "src/base/logging.h"
+#include "src/common/allow-deprecated.h"
#include "src/handles/global-handles.h"
+#include "src/heap/embedder-tracing-inl.h"
#include "src/heap/gc-tracer.h"
-#include "src/objects/embedder-data-slot.h"
-#include "src/objects/js-objects-inl.h"
+#include "src/heap/marking-worklist-inl.h"
namespace v8 {
namespace internal {
@@ -41,13 +42,21 @@ CppHeap::GarbageCollectionFlags ConvertTraceFlags(
}
} // namespace
+void LocalEmbedderHeapTracer::PrepareForTrace(
+ EmbedderHeapTracer::TraceFlags flags) {
+ if (cpp_heap_)
+ cpp_heap()->InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ ConvertTraceFlags(flags));
+}
+
void LocalEmbedderHeapTracer::TracePrologue(
EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
embedder_worklist_empty_ = false;
if (cpp_heap_)
- cpp_heap()->TracePrologue(ConvertTraceFlags(flags));
+ cpp_heap()->StartTracing();
else
remote_tracer_->TracePrologue(flags);
}
@@ -104,51 +113,17 @@ bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
: remote_tracer_->IsTracingDone());
}
-void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- if (!InUse()) return;
-
- embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
- NotifyEmptyEmbedderStack();
-}
-
-namespace {
-
-bool ExtractWrappableInfo(Isolate* isolate, JSObject js_object,
- const WrapperDescriptor& wrapper_descriptor,
- LocalEmbedderHeapTracer::WrapperInfo* info) {
- DCHECK(js_object.IsApiWrapper());
- if (js_object.GetEmbedderFieldCount() < 2) return false;
-
- if (EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index)
- .ToAlignedPointerSafe(isolate, &info->first) &&
- info->first &&
- EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index)
- .ToAlignedPointerSafe(isolate, &info->second) &&
- info->second) {
- return (wrapper_descriptor.embedder_id_for_garbage_collected ==
- WrapperDescriptor::kUnknownEmbedderId) ||
- (*static_cast<uint16_t*>(info->first) ==
- wrapper_descriptor.embedder_id_for_garbage_collected);
- }
- return false;
-}
-
-} // namespace
-
LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor()) {
+ : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
+ DCHECK(!tracer_->cpp_heap_);
wrapper_cache_.reserve(kWrapperCacheSize);
}
LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
+ DCHECK(!tracer_->cpp_heap_);
if (!wrapper_cache_.empty()) {
- if (tracer_->cpp_heap_)
- tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
- else
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
}
}
@@ -164,7 +139,7 @@ LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
- DCHECK(js_object.IsApiWrapper());
+ DCHECK(js_object.MayHaveEmbedderFields());
WrapperInfo info;
if (ExtractWrappableInfo(tracer_->isolate_, js_object, wrapper_descriptor_,
&info)) {
@@ -174,11 +149,9 @@ void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
}
void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
+ DCHECK(!tracer_->cpp_heap_);
if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
- if (tracer_->cpp_heap_)
- tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
- else
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
wrapper_cache_.clear();
wrapper_cache_.reserve(kWrapperCacheSize);
}
@@ -213,15 +186,37 @@ void LocalEmbedderHeapTracer::NotifyEmptyEmbedderStack() {
isolate_->global_handles()->NotifyEmptyEmbedderStack();
}
+void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap,
+ JSObject js_object) {
+ DCHECK(InUse());
+ DCHECK(js_object.MayHaveEmbedderFields());
+ if (cpp_heap_) {
+ DCHECK_NOT_NULL(heap->mark_compact_collector());
+ const EmbedderDataSlot type_slot(js_object,
+ wrapper_descriptor_.wrappable_type_index);
+ const EmbedderDataSlot instance_slot(
+ js_object, wrapper_descriptor_.wrappable_instance_index);
+ heap->mark_compact_collector()
+ ->local_marking_worklists()
+ ->cpp_marking_state()
+ ->MarkAndPush(type_slot, instance_slot);
+ return;
+ }
+ LocalEmbedderHeapTracer::ProcessingScope scope(this);
+ scope.TracePossibleWrapper(js_object);
+}
+
bool DefaultEmbedderRootsHandler::IsRoot(
const v8::TracedReference<v8::Value>& handle) {
return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
}
+START_ALLOW_USE_DEPRECATED()
bool DefaultEmbedderRootsHandler::IsRoot(
const v8::TracedGlobal<v8::Value>& handle) {
return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
}
+END_ALLOW_USE_DEPRECATED()
void DefaultEmbedderRootsHandler::ResetRoot(
const v8::TracedReference<v8::Value>& handle) {
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 6b08488aa6..72b1fd90e3 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -5,9 +5,12 @@
#ifndef V8_HEAP_EMBEDDER_TRACING_H_
#define V8_HEAP_EMBEDDER_TRACING_H_
+#include <atomic>
+
#include "include/v8-cppgc.h"
#include "include/v8-embedder-heap.h"
#include "include/v8-traced-handle.h"
+#include "src/common/allow-deprecated.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
@@ -23,7 +26,11 @@ class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
: public EmbedderRootsHandler {
public:
bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
+
+ START_ALLOW_USE_DEPRECATED()
bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) final;
+ END_ALLOW_USE_DEPRECATED()
+
void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
@@ -74,6 +81,13 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperCache wrapper_cache_;
};
+ static V8_INLINE bool ExtractWrappableInfo(Isolate*, JSObject,
+ const WrapperDescriptor&,
+ WrapperInfo*);
+ static V8_INLINE bool ExtractWrappableInfo(
+ Isolate*, const WrapperDescriptor&, const EmbedderDataSlot& type_slot,
+ const EmbedderDataSlot& instance_slot, WrapperInfo*);
+
explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
@@ -91,6 +105,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void SetRemoteTracer(EmbedderHeapTracer* tracer);
void SetCppHeap(CppHeap* cpp_heap);
+ void PrepareForTrace(EmbedderHeapTracer::TraceFlags flags);
void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
void TraceEpilogue();
void EnterFinalPause();
@@ -102,15 +117,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
(IsRemoteTracingDone() && embedder_worklist_empty_);
}
- void SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state);
-
void SetEmbedderWorklistEmpty(bool is_empty) {
embedder_worklist_empty_ = is_empty;
}
void IncreaseAllocatedSize(size_t bytes) {
- remote_stats_.used_size += bytes;
+ remote_stats_.used_size.fetch_add(bytes, std::memory_order_relaxed);
remote_stats_.allocated_size += bytes;
if (remote_stats_.allocated_size >
remote_stats_.allocated_size_limit_for_check) {
@@ -121,13 +133,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
void DecreaseAllocatedSize(size_t bytes) {
- DCHECK_GE(remote_stats_.used_size, bytes);
- remote_stats_.used_size -= bytes;
+ DCHECK_GE(remote_stats_.used_size.load(std::memory_order_relaxed), bytes);
+ remote_stats_.used_size.fetch_sub(bytes, std::memory_order_relaxed);
}
void StartIncrementalMarkingIfNeeded();
- size_t used_size() const { return remote_stats_.used_size; }
+ size_t used_size() const {
+ return remote_stats_.used_size.load(std::memory_order_relaxed);
+ }
size_t allocated_size() const { return remote_stats_.allocated_size; }
WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
@@ -149,6 +163,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
return embedder_stack_state_;
}
+ void EmbedderWriteBarrier(Heap*, JSObject);
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -194,7 +210,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// Used size of objects in bytes reported by the embedder. Updated via
// TraceSummary at the end of tracing and incrementally when the GC is not
// in progress.
- size_t used_size = 0;
+ std::atomic<size_t> used_size{0};
// Totally bytes allocated by the embedder. Monotonically
// increasing value. Used to approximate allocation rate.
size_t allocated_size = 0;
@@ -211,26 +227,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
friend class EmbedderStackStateScope;
};
-class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
- public:
- EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
- EmbedderHeapTracer::EmbedderStackState stack_state)
- : local_tracer_(local_tracer),
- old_stack_state_(local_tracer_->embedder_stack_state_) {
- local_tracer_->embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
- local_tracer_->NotifyEmptyEmbedderStack();
- }
-
- ~EmbedderStackStateScope() {
- local_tracer_->embedder_stack_state_ = old_stack_state_;
- }
-
- private:
- LocalEmbedderHeapTracer* const local_tracer_;
- const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/evacuation-allocator-inl.h
index 3d769906a6..1afb240cad 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/evacuation-allocator-inl.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_LOCAL_ALLOCATOR_INL_H_
-#define V8_HEAP_LOCAL_ALLOCATOR_INL_H_
-
-#include "src/heap/local-allocator.h"
+#ifndef V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
+#define V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
+#include "src/common/globals.h"
+#include "src/heap/evacuation-allocator.h"
#include "src/heap/spaces-inl.h"
namespace v8 {
@@ -22,6 +22,9 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
case OLD_SPACE:
return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
alignment, origin);
+ case MAP_SPACE:
+ return compaction_spaces_.Get(MAP_SPACE)->AllocateRaw(object_size,
+ alignment, origin);
case CODE_SPACE:
return compaction_spaces_.Get(CODE_SPACE)
->AllocateRaw(object_size, alignment, origin);
@@ -39,6 +42,9 @@ void EvacuationAllocator::FreeLast(AllocationSpace space, HeapObject object,
case OLD_SPACE:
FreeLastInOldSpace(object, object_size);
return;
+ case MAP_SPACE:
+ FreeLastInMapSpace(object, object_size);
+ return;
default:
// Only new and old space supported.
UNREACHABLE();
@@ -64,19 +70,29 @@ void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
}
}
+void EvacuationAllocator::FreeLastInMapSpace(HeapObject object,
+ int object_size) {
+ if (!compaction_spaces_.Get(MAP_SPACE)->TryFreeLast(object.address(),
+ object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object.address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
AllocationResult EvacuationAllocator::AllocateInLAB(
int object_size, AllocationAlignment alignment) {
AllocationResult allocation;
if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Failure();
}
allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
if (!NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Failure();
} else {
allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- CHECK(!allocation.IsRetry());
+ CHECK(!allocation.IsFailure());
}
}
return allocation;
@@ -86,7 +102,7 @@ bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
AllocationResult result =
new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
- if (result.IsRetry()) {
+ if (result.IsFailure()) {
lab_allocation_will_fail_ = true;
return false;
}
@@ -110,4 +126,4 @@ AllocationResult EvacuationAllocator::AllocateInNewSpace(
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+#endif // V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/evacuation-allocator.h
index 2b6841ecd6..6dbeab1b29 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/evacuation-allocator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_LOCAL_ALLOCATOR_H_
-#define V8_HEAP_LOCAL_ALLOCATOR_H_
+#ifndef V8_HEAP_EVACUATION_ALLOCATOR_H_
+#define V8_HEAP_EVACUATION_ALLOCATOR_H_
#include "src/common/globals.h"
#include "src/heap/heap.h"
@@ -35,6 +35,11 @@ class EvacuationAllocator {
heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap_->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
+ if (heap_->map_space()) {
+ heap_->map_space()->MergeCompactionSpace(
+ compaction_spaces_.Get(MAP_SPACE));
+ }
+
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
@@ -56,6 +61,7 @@ class EvacuationAllocator {
AllocationAlignment alignment);
inline void FreeLastInNewSpace(HeapObject object, int object_size);
inline void FreeLastInOldSpace(HeapObject object, int object_size);
+ inline void FreeLastInMapSpace(HeapObject object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
@@ -67,4 +73,4 @@ class EvacuationAllocator {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_LOCAL_ALLOCATOR_H_
+#endif // V8_HEAP_EVACUATION_ALLOCATOR_H_
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index ff1056ee57..5c31a72186 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -16,6 +16,7 @@
#include "src/heap/read-only-heap.h"
#include "src/logging/local-logger.h"
#include "src/logging/log.h"
+#include "src/objects/instance-type.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/oddball.h"
@@ -46,6 +47,8 @@ template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<Factory>::NewHeapNumber<AllocationType::kOld>();
template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<Factory>::NewHeapNumber<AllocationType::kReadOnly>();
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<Factory>::NewHeapNumber<AllocationType::kSharedOld>();
template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<LocalFactory>::NewHeapNumber<AllocationType::kOld>();
@@ -81,10 +84,12 @@ Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Isolate* isolate_for_heap_sandbox = impl()->isolate_for_heap_sandbox();
- data_container.AllocateExternalPointerEntries(isolate_for_heap_sandbox);
+ data_container.set_code_cage_base(impl()->isolate()->code_cage_base(),
+ kRelaxedStore);
+ Isolate* isolate_for_sandbox = impl()->isolate_for_sandbox();
+ data_container.AllocateExternalPointerEntries(isolate_for_sandbox);
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
- data_container.set_code_entry_point(isolate_for_heap_sandbox, kNullAddress);
+ data_container.set_code_entry_point(isolate_for_sandbox, kNullAddress);
}
data_container.clear_padding();
return handle(data_container, isolate());
@@ -249,9 +254,6 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
DCHECK(source->IsString() || source->IsUndefined());
// Create and initialize script object.
ReadOnlyRoots roots = read_only_roots();
-#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
- Handle<ArrayList> list = NewArrayList(0);
-#endif
Handle<Script> script = handle(
NewStructInternal<Script>(SCRIPT_TYPE, AllocationType::kOld), isolate());
{
@@ -273,7 +275,7 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
raw.set_flags(0);
raw.set_host_defined_options(roots.empty_fixed_array(), SKIP_WRITE_BARRIER);
#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
- raw.set_script_or_modules(*list);
+ raw.set_script_or_modules(roots.empty_array_list());
#endif
}
@@ -286,12 +288,18 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
}
template <typename Impl>
-Handle<ArrayList> FactoryBase<Impl>::NewArrayList(int size) {
- Handle<FixedArray> fixed_array = NewFixedArray(size + ArrayList::kFirstIndex);
- fixed_array->set_map_no_write_barrier(read_only_roots().array_list_map());
- Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
- result->SetLength(0);
- return result;
+Handle<ArrayList> FactoryBase<Impl>::NewArrayList(int size,
+ AllocationType allocation) {
+ if (size == 0) return impl()->empty_array_list();
+ Handle<FixedArray> fixed_array =
+ NewFixedArray(size + ArrayList::kFirstIndex, allocation);
+ {
+ DisallowGarbageCollection no_gc;
+ FixedArray raw = *fixed_array;
+ raw.set_map_no_write_barrier(read_only_roots().array_list_map());
+ ArrayList::cast(raw).SetLength(0);
+ }
+ return Handle<ArrayList>::cast(fixed_array);
}
template <typename Impl>
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index c3aa816d0b..2a8eae50c9 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -157,7 +157,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<Script> NewScriptWithId(Handle<PrimitiveHeapObject> source,
int script_id);
- Handle<ArrayList> NewArrayList(int size);
+ Handle<ArrayList> NewArrayList(
+ int size, AllocationType allocation = AllocationType::kYoung);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 696f5355e5..c022f12450 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -76,6 +76,10 @@ ReadOnlyRoots Factory::read_only_roots() const {
return ReadOnlyRoots(isolate());
}
+HeapAllocator* Factory::allocator() const {
+ return isolate()->heap()->allocator();
+}
+
Factory::CodeBuilder& Factory::CodeBuilder::set_interpreter_data(
Handle<HeapObject> interpreter_data) {
// This DCHECK requires this function to be in -inl.h.
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 9e05c52472..d41521cdba 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -21,6 +21,7 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -38,6 +39,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -59,7 +61,6 @@
#include "src/objects/promise-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/scope-info.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/synthetic-module-inl.h"
@@ -127,6 +128,9 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ data_container->initialize_flags(kind_, builtin_);
+ }
data_container->set_kind_specific_flags(kind_specific_flags_,
kRelaxedStore);
}
@@ -227,7 +231,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
raw_code.clear_padding();
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- raw_code.set_main_cage_base(isolate_->cage_base());
+ raw_code.set_main_cage_base(isolate_->cage_base(), kRelaxedStore);
data_container->SetCodeAndEntryPoint(isolate_, raw_code);
}
#ifdef VERIFY_HEAP
@@ -264,16 +268,17 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
bool retry_allocation_or_fail) {
Heap* heap = isolate_->heap();
+ HeapAllocator* allocator = heap->allocator();
HeapObject result;
AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
? AllocationType::kCode
: AllocationType::kReadOnly;
const int object_size = Code::SizeFor(code_desc_.body_size());
if (retry_allocation_or_fail) {
- result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ result = allocator->AllocateRawWith<HeapAllocator::kRetryOrFail>(
object_size, allocation_type, AllocationOrigin::kRuntime);
} else {
- result = heap->AllocateRawWith<Heap::kLightRetry>(
+ result = allocator->AllocateRawWith<HeapAllocator::kLightRetry>(
object_size, allocation_type, AllocationOrigin::kRuntime);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
@@ -327,7 +332,7 @@ Handle<Code> Factory::CodeBuilder::Build() {
HeapObject Factory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
- return isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ return allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
size, allocation, AllocationOrigin::kRuntime, alignment);
}
@@ -340,8 +345,8 @@ HeapObject Factory::AllocateRawWithAllocationSite(
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
size += AllocationMemento::kSize;
}
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -368,8 +373,8 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
@@ -383,7 +388,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size,
AllocationType allocation,
AllocationOrigin origin) {
Heap* heap = isolate()->heap();
- HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
size, allocation, origin, alignment);
heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
@@ -441,10 +446,11 @@ Handle<Oddball> Factory::NewBasicBlockCountersMarker() {
Oddball::kBasicBlockCountersMarker);
}
-Handle<PropertyArray> Factory::NewPropertyArray(int length) {
+Handle<PropertyArray> Factory::NewPropertyArray(int length,
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung);
+ HeapObject result = AllocateRawFixedArray(length, allocation);
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
PropertyArray array = PropertyArray::cast(result);
@@ -520,13 +526,9 @@ Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
array.set_length(length);
if (length > 0) {
- ObjectSlot start(array.slots_start());
- ObjectSlot end(array.slots_end());
- size_t slot_count = end - start;
- MemsetTagged(start, *undefined_value(), slot_count);
for (int i = 0; i < length; i++) {
- // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
- EmbedderDataSlot(array, i).AllocateExternalPointerEntry(isolate());
+ // TODO(v8): consider initializing embedded data array with Smi::zero().
+ EmbedderDataSlot(array, i).Initialize(*undefined_value());
}
}
return handle(array, isolate());
@@ -846,9 +848,8 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
String result = String::cast(AllocateRawWithImmortalMap(
size,
RefineAllocationTypeForInPlaceInternalizableString(
- isolate()->heap()->CanAllocateInReadOnlySpace()
- ? AllocationType::kReadOnly
- : AllocationType::kOld,
+ CanAllocateInReadOnlySpace() ? AllocationType::kReadOnly
+ : AllocationType::kOld,
map),
map));
DisallowGarbageCollection no_gc;
@@ -873,10 +874,6 @@ Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
return AllocateInternalizedStringImpl<false>(string, chars, hash_field);
}
-namespace {
-
-} // namespace
-
StringTransitionStrategy Factory::ComputeInternalizationStrategyForString(
Handle<String> string, MaybeHandle<Map>* internalized_map) {
// Do not internalize young strings in-place: This allows us to ignore both
@@ -1064,6 +1061,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
external_string.set_length(static_cast<int>(length));
external_string.set_raw_hash_field(String::kEmptyHashField);
external_string.SetResource(isolate(), resource);
+
isolate()->heap()->RegisterExternalString(external_string);
return Handle<String>(external_string, isolate());
@@ -1086,7 +1084,9 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
string.set_length(static_cast<int>(length));
string.set_raw_hash_field(String::kEmptyHashField);
string.SetResource(isolate(), resource);
+
isolate()->heap()->RegisterExternalString(string);
+
return Handle<ExternalTwoByteString>(string, isolate());
}
@@ -1113,9 +1113,9 @@ Symbol Factory::NewSymbolInternal(AllocationType allocation) {
Symbol::kSize, allocation, read_only_roots().symbol_map()));
DisallowGarbageCollection no_gc;
// Generate a random hash value.
- int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
- symbol.set_raw_hash_field(Name::kIsNotIntegerIndexMask |
- (hash << Name::kHashShift));
+ int hash = isolate()->GenerateIdentityHash(Name::HashBits::kMax);
+ symbol.set_raw_hash_field(
+ Name::CreateHashFieldValue(hash, Name::HashFieldType::kHash));
symbol.set_description(read_only_roots().undefined_value(),
SKIP_WRITE_BARRIER);
symbol.set_flags(0);
@@ -1151,8 +1151,8 @@ Context Factory::NewContextInternal(Handle<Map> map, int size,
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
DCHECK_LE(Context::SizeFor(variadic_part_length), size);
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
result.set_map_after_allocation(*map);
DisallowGarbageCollection no_gc;
Context context = Context::cast(result);
@@ -1208,7 +1208,9 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
Handle<ScriptContextTable> context_table = Handle<ScriptContextTable>::cast(
NewFixedArrayWithMap(read_only_roots().script_context_table_map_handle(),
ScriptContextTable::kMinLength));
+ Handle<NameToIndexHashTable> names = NameToIndexHashTable::New(isolate(), 16);
context_table->set_used(0, kReleaseStore);
+ context_table->set_names_to_context_index(*names);
return context_table;
}
@@ -1364,6 +1366,19 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return handle(info, isolate());
}
+Handle<ErrorStackData> Factory::NewErrorStackData(
+ Handle<Object> call_site_infos_or_formatted_stack,
+ Handle<Object> limit_or_stack_frame_infos) {
+ ErrorStackData error_stack_data = NewStructInternal<ErrorStackData>(
+ ERROR_STACK_DATA_TYPE, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ error_stack_data.set_call_site_infos_or_formatted_stack(
+ *call_site_infos_or_formatted_stack, SKIP_WRITE_BARRIER);
+ error_stack_data.set_limit_or_stack_frame_infos(*limit_or_stack_frame_infos,
+ SKIP_WRITE_BARRIER);
+ return handle(error_stack_data, isolate());
+}
+
void Factory::AddToScriptList(Handle<Script> script) {
Handle<WeakArrayList> scripts = script_list();
scripts = WeakArrayList::Append(isolate(), scripts,
@@ -1471,7 +1486,6 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
// The supertypes list is constant after initialization, so we pretenure
// that too. The subtypes list, however, is expected to grow (and hence be
// replaced), so we don't pretenure it.
- Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
supertypes = NewFixedArray(wasm::kMinimumSupertypeArraySize);
@@ -1500,14 +1514,14 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
result.set_supertypes(*supertypes);
- result.set_subtypes(*subtypes);
+ result.set_subtypes(ReadOnlyRoots(isolate()).empty_array_list());
result.set_instance_size(instance_size_bytes);
result.set_instance(*instance);
return handle(result, isolate());
}
Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
- Handle<JSReceiver> callable) {
+ Handle<JSReceiver> callable, Handle<HeapObject> suspender) {
Map map = *wasm_api_function_ref_map();
auto result = WasmApiFunctionRef::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
@@ -1519,6 +1533,11 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
} else {
result.set_callable(*undefined_value());
}
+ if (!suspender.is_null()) {
+ result.set_suspender(*suspender);
+ } else {
+ result.set_suspender(*undefined_value());
+ }
return handle(result, isolate());
}
@@ -1532,7 +1551,7 @@ Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
result.set_foreign_address(isolate(), opt_call_target);
result.set_ref(*ref);
// Default values, will be overwritten by the caller.
- result.set_code(isolate()->builtins()->code(Builtin::kAbort));
+ result.set_code(*BUILTIN_CODE(isolate(), Abort));
result.set_external(*undefined_value());
return handle(result, isolate());
}
@@ -1540,8 +1559,8 @@ Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code, Handle<Map> rtt) {
- Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable);
+ Handle<CodeT> wrapper_code, Handle<Map> rtt, Handle<HeapObject> suspender) {
+ Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable, suspender);
Handle<WasmInternalFunction> internal =
NewWasmInternalFunction(opt_call_target, ref, rtt);
Map map = *wasm_js_function_data_map();
@@ -1557,8 +1576,19 @@ Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
return handle(result, isolate());
}
+Handle<WasmOnFulfilledData> Factory::NewWasmOnFulfilledData(
+ Handle<WasmSuspenderObject> suspender) {
+ Map map = *wasm_onfulfilled_data_map();
+ WasmOnFulfilledData result =
+ WasmOnFulfilledData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.set_suspender(*suspender);
+ return handle(result, isolate());
+}
+
Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
- Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
Address sig_address, int wrapper_budget, Handle<Map> rtt) {
Handle<Foreign> sig_foreign = NewForeign(sig_address);
@@ -1576,17 +1606,23 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
result.set_function_index(func_index);
result.set_signature(*sig_foreign);
result.set_wrapper_budget(wrapper_budget);
- result.set_c_wrapper_code(ToCodeT(*BUILTIN_CODE(isolate(), Illegal)),
- SKIP_WRITE_BARRIER);
+ // We can't skip the write barrier when V8_EXTERNAL_CODE_SPACE is enabled
+ // because in this case the CodeT (CodeDataContainer) objects are not
+ // immovable.
+ result.set_c_wrapper_code(
+ *BUILTIN_CODE(isolate(), Illegal),
+ V8_EXTERNAL_CODE_SPACE_BOOL ? UPDATE_WRITE_BARRIER : SKIP_WRITE_BARRIER);
result.set_packed_args_size(0);
+ result.set_suspender(*undefined_value());
return handle(result, isolate());
}
Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code, Handle<Map> rtt,
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig) {
- Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(Handle<JSReceiver>());
+ Handle<WasmApiFunctionRef> ref =
+ NewWasmApiFunctionRef(Handle<JSReceiver>(), Handle<HeapObject>());
Handle<WasmInternalFunction> internal =
NewWasmInternalFunction(call_target, ref, rtt);
Map map = *wasm_capi_function_data_map();
@@ -1601,12 +1637,13 @@ Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
return handle(result, isolate());
}
-Handle<WasmArray> Factory::NewWasmArray(
+Handle<WasmArray> Factory::NewWasmArrayFromElements(
const wasm::ArrayType* type, const std::vector<wasm::WasmValue>& elements,
Handle<Map> map) {
uint32_t length = static_cast<uint32_t>(elements.size());
HeapObject raw =
AllocateRaw(WasmArray::SizeFor(*map, length), AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
raw.set_map_after_allocation(*map);
WasmArray result = WasmArray::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
@@ -1627,6 +1664,27 @@ Handle<WasmArray> Factory::NewWasmArray(
return handle(result, isolate());
}
+Handle<WasmArray> Factory::NewWasmArrayFromMemory(uint32_t length,
+ Handle<Map> map,
+ Address source) {
+ wasm::ValueType element_type = reinterpret_cast<wasm::ArrayType*>(
+ map->wasm_type_info().foreign_address())
+ ->element_type();
+ DCHECK(element_type.is_numeric());
+ HeapObject raw =
+ AllocateRaw(WasmArray::SizeFor(*map, length), AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ raw.set_map_after_allocation(*map);
+ WasmArray result = WasmArray::cast(raw);
+ result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
+ result.set_length(length);
+ MemCopy(reinterpret_cast<void*>(result.ElementAddress(0)),
+ reinterpret_cast<void*>(source),
+ length * element_type.element_size_bytes());
+
+ return handle(result, isolate());
+}
+
Handle<WasmStruct> Factory::NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args,
Handle<Map> map) {
@@ -1659,6 +1717,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
return NewSharedFunctionInfo(name, data, Builtin::kNoBuiltinId);
}
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmOnFulfilled(
+ Handle<WasmOnFulfilledData> data) {
+ return NewSharedFunctionInfo({}, data, Builtin::kNoBuiltinId);
+}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data) {
return NewSharedFunctionInfo(MaybeHandle<String>(), data,
@@ -1782,21 +1845,34 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
IsTerminalElementsKind(elements_kind));
DCHECK(allocation_type == AllocationType::kMap ||
allocation_type == AllocationType::kSharedMap);
- HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
Map::kSize, allocation_type);
DisallowGarbageCollection no_gc;
- result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
+ Heap* roots = allocation_type == AllocationType::kMap
+ ? isolate()->heap()
+ : isolate()->shared_isolate()->heap();
+ result.set_map_after_allocation(ReadOnlyRoots(roots).meta_map(),
+ SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
- elements_kind, inobject_properties),
+ elements_kind, inobject_properties, roots),
isolate());
}
Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
+ ElementsKind elements_kind, int inobject_properties,
+ Heap* roots) {
DisallowGarbageCollection no_gc;
+ map.set_bit_field(0);
+ map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
+ int bit_field3 =
+ Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::Bits3::OwnsDescriptorsBit::encode(true) |
+ Map::Bits3::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
+ Map::Bits3::IsExtensibleBit::encode(true);
+ map.set_bit_field3(bit_field3);
map.set_instance_type(type);
- HeapObject raw_null_value = *null_value();
+ ReadOnlyRoots ro_roots(roots);
+ HeapObject raw_null_value = ro_roots.null_value();
map.set_prototype(raw_null_value, SKIP_WRITE_BARRIER);
map.set_constructor_or_back_pointer(raw_null_value, SKIP_WRITE_BARRIER);
map.set_instance_size(instance_size);
@@ -1805,30 +1881,21 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
inobject_properties);
DCHECK_EQ(map.GetInObjectProperties(), inobject_properties);
- map.set_prototype_validity_cell(*invalid_prototype_validity_cell());
+ map.set_prototype_validity_cell(roots->invalid_prototype_validity_cell());
} else {
DCHECK_EQ(inobject_properties, 0);
map.set_inobject_properties_start_or_constructor_function_index(0);
map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid),
SKIP_WRITE_BARRIER);
}
- map.set_dependent_code(
- DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
- SKIP_WRITE_BARRIER);
+ map.set_dependent_code(DependentCode::empty_dependent_code(ro_roots),
+ SKIP_WRITE_BARRIER);
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()),
SKIP_WRITE_BARRIER);
map.SetInObjectUnusedPropertyFields(inobject_properties);
- map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
+ map.SetInstanceDescriptors(isolate(), ro_roots.empty_descriptor_array(), 0);
// Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- map.set_bit_field(0);
- map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
- int bit_field3 =
- Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::Bits3::OwnsDescriptorsBit::encode(true) |
- Map::Bits3::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
- Map::Bits3::IsExtensibleBit::encode(true);
- map.set_bit_field3(bit_field3);
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(elements_kind);
@@ -1871,8 +1938,9 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
adjusted_object_size += AllocationMemento::kSize;
}
- HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
- adjusted_object_size, AllocationType::kYoung);
+ HeapObject raw_clone =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ adjusted_object_size, AllocationType::kYoung);
DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
@@ -1942,10 +2010,9 @@ void initialize_length<PropertyArray>(PropertyArray array, int length) {
array.initialize_length(length);
}
-inline void ZeroEmbedderFields(i::JSObject obj) {
- int count = obj.GetEmbedderFieldCount();
- for (int i = 0; i < count; i++) {
- obj.SetEmbedderField(i, Smi::zero());
+inline void InitEmbedderFields(i::JSObject obj, i::Object initial_value) {
+ for (int i = 0; i < obj.GetEmbedderFieldCount(); i++) {
+ EmbedderDataSlot(obj, i).Initialize(initial_value);
}
}
@@ -2109,7 +2176,7 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
- return isolate()->heap()->CanAllocateInReadOnlySpace()
+ return CanAllocateInReadOnlySpace()
? NewHeapNumber<AllocationType::kReadOnly>(value)
: NewHeapNumber<AllocationType::kOld>(value);
}
@@ -2136,7 +2203,7 @@ Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate(), constructor, constructor, message,
undefined_value(), SKIP_NONE, no_caller,
- ErrorUtils::StackTraceCollection::kDetailed)
+ ErrorUtils::StackTraceCollection::kEnabled)
.ToHandleChecked();
}
@@ -2205,9 +2272,10 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
Handle<JSObject> Factory::NewExternal(void* value) {
- Handle<Foreign> foreign = NewForeign(reinterpret_cast<Address>(value));
- Handle<JSObject> external = NewJSObjectFromMap(external_map());
- external->SetEmbedderField(0, *foreign);
+ auto external =
+ Handle<JSExternalObject>::cast(NewJSObjectFromMap(external_map()));
+ external->AllocateExternalPointerEntries(isolate());
+ external->set_value(isolate(), value);
return external;
}
@@ -2246,10 +2314,9 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Code raw_result = *result;
const bool set_is_off_heap_trampoline = true;
- const int stack_slots =
- raw_code.has_safepoint_info() ? raw_code.stack_slots() : 0;
raw_result.initialize_flags(raw_code.kind(), raw_code.is_turbofanned(),
- stack_slots, set_is_off_heap_trampoline);
+ raw_code.stack_slots(),
+ set_is_off_heap_trampoline);
raw_result.set_builtin_id(raw_code.builtin_id());
raw_result.set_handler_table_offset(raw_code.handler_table_offset());
raw_result.set_constant_pool_offset(raw_code.constant_pool_offset());
@@ -2273,10 +2340,14 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
#endif
raw_result.set_relocation_info(canonical_reloc_info);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ CodeDataContainer code_data_container =
+ raw_result.code_data_container(kAcquireLoad);
// Updating flags (in particular is_off_heap_trampoline one) might change
// the value of the instruction start, so update it here.
- raw_result.code_data_container(kAcquireLoad)
- .UpdateCodeEntryPoint(isolate(), raw_result);
+ code_data_container.UpdateCodeEntryPoint(isolate(), raw_result);
+ // Also update flag values cached on the code data container.
+ code_data_container.initialize_flags(raw_code.kind(),
+ raw_code.builtin_id());
}
}
@@ -2293,8 +2364,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
{
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
+ HeapObject result =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
// Copy code object.
Address old_addr = code->address();
@@ -2315,6 +2387,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
}
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ data_container->initialize_flags(code->kind(), code->builtin_id());
data_container->SetCodeAndEntryPoint(isolate(), *new_code);
}
@@ -2649,6 +2722,27 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
return module_namespace;
}
+Handle<JSWrappedFunction> Factory::NewJSWrappedFunction(
+ Handle<NativeContext> creation_context, Handle<Object> target) {
+ DCHECK(target->IsCallable());
+ Handle<Map> map(
+ Map::cast(creation_context->get(Context::WRAPPED_FUNCTION_MAP_INDEX)),
+ isolate());
+ // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
+ // 3. Set wrapped.[[Prototype]] to
+ // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
+ // 4. Set wrapped.[[Call]] as described in 2.1.
+ Handle<JSWrappedFunction> wrapped = Handle<JSWrappedFunction>::cast(
+ isolate()->factory()->NewJSObjectFromMap(map));
+ // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
+ wrapped->set_wrapped_target_function(JSReceiver::cast(*target));
+ // 6. Set wrapped.[[Realm]] to callerRealm.
+ wrapped->set_context(*creation_context);
+ // TODO(v8:11989): https://github.com/tc39/proposal-shadowrealm/pull/348
+
+ return wrapped;
+}
+
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
DCHECK(IsResumableFunction(function->shared().kind()));
@@ -2675,7 +2769,6 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
- Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0);
ReadOnlyRoots roots(isolate());
SourceTextModule module = SourceTextModule::cast(
@@ -2699,7 +2792,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module.set_async(IsAsyncModule(sfi->kind()));
module.set_async_evaluating_ordinal(SourceTextModule::kNotAsyncEvaluated);
module.set_cycle_root(roots.the_hole_value(), SKIP_WRITE_BARRIER);
- module.set_async_parent_modules(*async_parent_modules);
+ module.set_async_parent_modules(roots.empty_array_list());
module.set_pending_async_dependencies(0);
return handle(module, isolate());
}
@@ -2867,7 +2960,8 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
raw.set_byte_offset(byte_offset);
raw.set_byte_length(byte_length);
raw.set_bit_field(0);
- ZeroEmbedderFields(raw);
+ // TODO(v8) remove once embedder data slots are always zero-initialized.
+ InitEmbedderFields(raw, Smi::zero());
DCHECK_EQ(raw.GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
@@ -3171,6 +3265,7 @@ Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
Handle<String> Factory::NumberToString(Handle<Object> number,
NumberCacheMode mode) {
+ SLOW_DCHECK(number->IsNumber());
if (number->IsSmi()) return SmiToString(Smi::cast(*number), mode);
double double_value = Handle<HeapNumber>::cast(number)->value();
@@ -3333,12 +3428,12 @@ Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
return handle(new_break_point, isolate());
}
-Handle<StackFrameInfo> Factory::NewStackFrameInfo(
+Handle<CallSiteInfo> Factory::NewCallSiteInfo(
Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code_object, int code_offset_or_source_position,
int flags, Handle<FixedArray> parameters) {
- auto info = NewStructInternal<StackFrameInfo>(STACK_FRAME_INFO_TYPE,
- AllocationType::kYoung);
+ auto info = NewStructInternal<CallSiteInfo>(CALL_SITE_INFO_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
info.set_receiver_or_instance(*receiver_or_instance, SKIP_WRITE_BARRIER);
info.set_function(*function, SKIP_WRITE_BARRIER);
@@ -3349,6 +3444,22 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
return handle(info, isolate());
}
+Handle<StackFrameInfo> Factory::NewStackFrameInfo(
+ Handle<HeapObject> shared_or_script, int bytecode_offset_or_source_position,
+ Handle<String> function_name, bool is_constructor) {
+ DCHECK_GE(bytecode_offset_or_source_position, 0);
+ StackFrameInfo info = NewStructInternal<StackFrameInfo>(
+ STACK_FRAME_INFO_TYPE, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ info.set_flags(0);
+ info.set_shared_or_script(*shared_or_script, SKIP_WRITE_BARRIER);
+ info.set_bytecode_offset_or_source_position(
+ bytecode_offset_or_source_position);
+ info.set_function_name(*function_name, SKIP_WRITE_BARRIER);
+ info.set_is_constructor(is_constructor);
+ return handle(info, isolate());
+}
+
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared().language_mode()) ||
@@ -3591,14 +3702,16 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
int field_index = 0;
- STATIC_ASSERT(JSFunctionOrBoundFunction::kLengthDescriptorIndex == 0);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex == 0);
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
length_string(), function_length_accessor(), roc_attribs);
map->AppendDescriptor(isolate(), &d);
}
- STATIC_ASSERT(JSFunctionOrBoundFunction::kNameDescriptorIndex == 1);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex == 1);
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
@@ -3763,7 +3876,8 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
JSPromise raw = *promise;
raw.set_reactions_or_result(Smi::zero(), SKIP_WRITE_BARRIER);
raw.set_flags(0);
- ZeroEmbedderFields(*promise);
+ // TODO(v8) remove once embedder data slots are always zero-initialized.
+ InitEmbedderFields(*promise, Smi::zero());
DCHECK_EQ(raw.GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
return promise;
}
@@ -3789,7 +3903,7 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
}
bool Factory::CanAllocateInReadOnlySpace() {
- return isolate()->heap()->CanAllocateInReadOnlySpace();
+ return allocator()->CanAllocateInReadOnlySpace();
}
bool Factory::EmptyStringRootIsInitialized() {
@@ -3824,7 +3938,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
if (code->kind() == CodeKind::BASELINE) {
IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
- JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate_, result, &is_compiled_scope);
}
Compiler::PostInstantiation(result);
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index a5dd9ce5a9..0387482010 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -36,6 +36,7 @@ class BreakPointInfo;
class CallableTask;
class CallbackTask;
class CallHandlerInfo;
+class CallSiteInfo;
class Expression;
class EmbedderDataArray;
class ArrayBoilerplateDescription;
@@ -110,11 +111,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
inline ReadOnlyRoots read_only_roots() const;
- template <typename T>
- Handle<T> MakeHandle(T obj) {
- return handle(obj, isolate());
- }
-
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -127,7 +123,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Oddball> NewBasicBlockCountersMarker();
// Allocates a property array initialized with undefined values.
- Handle<PropertyArray> NewPropertyArray(int length);
+ Handle<PropertyArray> NewPropertyArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -390,17 +387,25 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<AccessorInfo> NewAccessorInfo();
+ Handle<ErrorStackData> NewErrorStackData(
+ Handle<Object> call_site_infos_or_formatted_stack,
+ Handle<Object> limit_or_stack_frame_infos);
+
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
- Handle<StackFrameInfo> NewStackFrameInfo(Handle<Object> receiver_or_instance,
- Handle<Object> function,
- Handle<HeapObject> code_object,
- int code_offset_or_source_position,
- int flags,
- Handle<FixedArray> parameters);
+ Handle<CallSiteInfo> NewCallSiteInfo(Handle<Object> receiver_or_instance,
+ Handle<Object> function,
+ Handle<HeapObject> code_object,
+ int code_offset_or_source_position,
+ int flags,
+ Handle<FixedArray> parameters);
+ Handle<StackFrameInfo> NewStackFrameInfo(
+ Handle<HeapObject> shared_or_script,
+ int bytecode_offset_or_source_position, Handle<String> function_name,
+ bool is_constructor);
// Allocate various microtasks.
Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
@@ -436,10 +441,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0,
AllocationType allocation_type = AllocationType::kMap);
- // Initializes the fields of a newly created Map. Exposed for tests and
- // heap setup; other code should just call NewMap which takes care of it.
+ // Initializes the fields of a newly created Map using roots from the
+ // passed-in Heap. Exposed for tests and heap setup; other code should just
+ // call NewMap which takes care of it.
Map InitializeMap(Map map, InstanceType type, int instance_size,
- ElementsKind elements_kind, int inobject_properties);
+ ElementsKind elements_kind, int inobject_properties,
+ Heap* roots);
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
@@ -579,6 +586,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSModuleNamespace> NewJSModuleNamespace();
+ Handle<JSWrappedFunction> NewJSWrappedFunction(
+ Handle<NativeContext> creation_context, Handle<Object> target);
+
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
Handle<Map> opt_parent,
@@ -589,29 +599,37 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> rtt);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code, Handle<Map> rtt,
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig);
Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
- Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
Address sig_address, int wrapper_budget, Handle<Map> rtt);
- Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(Handle<JSReceiver> callable);
+ Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(
+ Handle<JSReceiver> callable, Handle<HeapObject> suspender);
// {opt_call_target} is kNullAddress for JavaScript functions, and
// non-null for exported Wasm functions.
Handle<WasmJSFunctionData> NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code, Handle<Map> rtt);
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
+ Handle<HeapObject> suspender);
+ Handle<WasmOnFulfilledData> NewWasmOnFulfilledData(
+ Handle<WasmSuspenderObject> suspender);
Handle<WasmStruct> NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args, Handle<Map> map);
- Handle<WasmArray> NewWasmArray(const wasm::ArrayType* type,
- const std::vector<wasm::WasmValue>& elements,
- Handle<Map> map);
+ Handle<WasmArray> NewWasmArrayFromElements(
+ const wasm::ArrayType* type, const std::vector<wasm::WasmValue>& elements,
+ Handle<Map> map);
+ Handle<WasmArray> NewWasmArrayFromMemory(uint32_t length, Handle<Map> map,
+ Address source);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
Handle<String> name, Handle<WasmExportedFunctionData> data);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmJSFunction(
Handle<String> name, Handle<WasmJSFunctionData> data);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmOnFulfilled(
+ Handle<WasmOnFulfilledData> data);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1003,15 +1021,17 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
}
// This is the real Isolate that will be used for allocating and accessing
- // external pointer entries when V8_HEAP_SANDBOX is enabled.
- Isolate* isolate_for_heap_sandbox() const {
-#ifdef V8_HEAP_SANDBOX
+ // external pointer entries when V8_SANDBOXED_EXTERNAL_POINTERS is enabled.
+ Isolate* isolate_for_sandbox() const {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
return isolate();
#else
return nullptr;
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
+ V8_INLINE HeapAllocator* allocator() const;
+
bool CanAllocateInReadOnlySpace();
bool EmptyStringRootIsInitialized();
AllocationType AllocationTypeForInPlaceInternalizableString();
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 655930859a..9dbaa9717d 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -58,13 +58,13 @@ double GCTracer::MonotonicallyIncreasingTimeInMs() {
}
}
-CollectionEpoch GCTracer::CurrentEpoch(Scope::ScopeId scope_id) {
- if (Scope::NeedsYoungEpoch(scope_id)) {
- return heap_->epoch_young();
- } else {
- return heap_->epoch_full();
- }
+namespace {
+std::atomic<CollectionEpoch> global_epoch{0};
+
+CollectionEpoch next_epoch() {
+ return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
}
+} // namespace
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
: tracer_(tracer), scope_(scope), thread_kind_(thread_kind) {
@@ -72,7 +72,9 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
#ifdef V8_RUNTIME_CALL_STATS
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
if (thread_kind_ == ThreadKind::kMain) {
- DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
runtime_stats_ =
tracer_->heap_->isolate()->counters()->runtime_call_stats();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
@@ -89,7 +91,10 @@ GCTracer::Scope::~Scope() {
double duration_ms = tracer_->MonotonicallyIncreasingTimeInMs() - start_time_;
if (thread_kind_ == ThreadKind::kMain) {
- DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
+
tracer_->AddScopeSample(scope_, duration_ms);
if (scope_ == ScopeId::MC_INCREMENTAL ||
scope_ == ScopeId::MC_INCREMENTAL_START ||
@@ -110,6 +115,19 @@ GCTracer::Scope::~Scope() {
#endif // defined(V8_RUNTIME_CALL_STATS)
}
+#if DEBUG
+void GCTracer::Scope::AssertMainThread() {
+ Isolate* isolate = tracer_->heap_->isolate();
+ Isolate* shared_isolate = isolate->shared_isolate();
+ ThreadId thread_id = ThreadId::Current();
+
+ // Either run on isolate's main thread or on the current main thread of the
+ // shared isolate during shared GCs.
+ DCHECK(isolate->thread_id() == thread_id ||
+ (shared_isolate && shared_isolate->thread_id() == thread_id));
+}
+#endif // DEBUG
+
const char* GCTracer::Scope::Name(ScopeId id) {
#define CASE(scope) \
case Scope::scope: \
@@ -137,9 +155,11 @@ bool GCTracer::Scope::NeedsYoungEpoch(ScopeId id) {
UNREACHABLE();
}
-GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
+GCTracer::Event::Event(Type type, State state,
+ GarbageCollectionReason gc_reason,
const char* collector_reason)
: type(type),
+ state(state),
gc_reason(gc_reason),
collector_reason(collector_reason),
start_time(0.0),
@@ -175,9 +195,47 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
return "Unknown Event Type";
}
+GCTracer::RecordGCPhasesInfo::RecordGCPhasesInfo(Heap* heap,
+ GarbageCollector collector) {
+ Counters* counters = heap->isolate()->counters();
+ const bool in_background = heap->isolate()->IsIsolateInBackground();
+ if (Heap::IsYoungGenerationCollector(collector)) {
+ mode = Mode::Scavenger;
+ type_timer = type_priority_timer = nullptr;
+ } else {
+ DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
+ if (heap->incremental_marking()->IsStopped()) {
+ mode = Mode::None;
+ type_timer = counters->gc_compactor();
+ type_priority_timer = in_background ? counters->gc_compactor_background()
+ : counters->gc_compactor_foreground();
+ } else if (heap->ShouldReduceMemory()) {
+ mode = Mode::None;
+ type_timer = counters->gc_finalize_reduce_memory();
+ type_priority_timer =
+ in_background ? counters->gc_finalize_reduce_memory_background()
+ : counters->gc_finalize_reduce_memory_foreground();
+ } else {
+ if (heap->incremental_marking()->IsMarking() &&
+ heap->incremental_marking()
+ ->local_marking_worklists()
+ ->IsPerContextMode()) {
+ mode = Mode::None;
+ type_timer = counters->gc_finalize_measure_memory();
+ } else {
+ mode = Mode::Finalize;
+ type_timer = counters->gc_finalize();
+ }
+ type_priority_timer = in_background ? counters->gc_finalize_background()
+ : counters->gc_finalize_foreground();
+ }
+ }
+}
+
GCTracer::GCTracer(Heap* heap)
: heap_(heap),
- current_(Event::START, GarbageCollectionReason::kUnknown, nullptr),
+ current_(Event::START, Event::State::NOT_RUNNING,
+ GarbageCollectionReason::kUnknown, nullptr),
previous_(current_),
incremental_marking_bytes_(0),
incremental_marking_duration_(0.0),
@@ -210,9 +268,14 @@ GCTracer::GCTracer(Heap* heap)
}
void GCTracer::ResetForTesting() {
- current_ = Event(Event::START, GarbageCollectionReason::kTesting, nullptr);
+ current_ = Event(Event::START, Event::State::NOT_RUNNING,
+ GarbageCollectionReason::kTesting, nullptr);
current_.end_time = MonotonicallyIncreasingTimeInMs();
previous_ = current_;
+ start_of_observable_pause_ = 0.0;
+ notified_sweeping_completed_ = false;
+ notified_cppgc_completed_ = false;
+ young_gc_while_full_gc_ = false;
ResetIncrementalMarkingCounters();
allocation_time_ms_ = 0.0;
new_space_allocation_counter_bytes_ = 0.0;
@@ -243,66 +306,101 @@ void GCTracer::ResetForTesting() {
void GCTracer::NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling) {
- DCHECK(current_.type == Event::SCAVENGER || start_counter_ > 1);
+ DCHECK_GE(1, start_counter_);
+ DCHECK_EQ(Event::SCAVENGER, current_.type);
heap_->isolate()->counters()->young_generation_handling()->AddSample(
static_cast<int>(young_generation_handling));
}
-void GCTracer::Start(GarbageCollector collector,
- GarbageCollectionReason gc_reason,
- const char* collector_reason) {
+void GCTracer::StartObservablePause() {
+ DCHECK_EQ(0, start_counter_);
start_counter_++;
- if (start_counter_ != 1) return;
- previous_ = current_;
+ DCHECK(!IsInObservablePause());
+ start_of_observable_pause_ = MonotonicallyIncreasingTimeInMs();
+}
+
+void GCTracer::UpdateCurrentEvent(GarbageCollectionReason gc_reason,
+ const char* collector_reason) {
+ // For incremental marking, the event has already been created and we just
+ // need to update a few fields.
+ DCHECK_EQ(Event::INCREMENTAL_MARK_COMPACTOR, current_.type);
+ DCHECK_EQ(Event::State::ATOMIC, current_.state);
+ DCHECK(IsInObservablePause());
+ current_.gc_reason = gc_reason;
+ current_.collector_reason = collector_reason;
+ // TODO(chromium:1154636): The start_time of the current event contains
+ // currently the start time of the observable pause. This should be
+ // reconsidered.
+ current_.start_time = start_of_observable_pause_;
+ current_.reduce_memory = heap_->ShouldReduceMemory();
+}
+
+void GCTracer::StartCycle(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
+ const char* collector_reason, MarkingType marking) {
+ // We cannot start a new cycle while there's another one in its atomic pause.
+ DCHECK_NE(Event::State::ATOMIC, current_.state);
+ // We cannot start a new cycle while a young generation GC cycle has
+ // already interrupted a full GC cycle.
+ DCHECK(!young_gc_while_full_gc_);
+ young_gc_while_full_gc_ = current_.state != Event::State::NOT_RUNNING;
+
+ DCHECK_IMPLIES(young_gc_while_full_gc_,
+ Heap::IsYoungGenerationCollector(collector) &&
+ !Event::IsYoungGenerationEvent(current_.type));
+
+ Event::Type type;
switch (collector) {
case GarbageCollector::SCAVENGER:
- current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+ type = Event::SCAVENGER;
break;
case GarbageCollector::MINOR_MARK_COMPACTOR:
- current_ =
- Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
+ type = Event::MINOR_MARK_COMPACTOR;
break;
case GarbageCollector::MARK_COMPACTOR:
- if (heap_->incremental_marking()->WasActivated()) {
- current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
- collector_reason);
- } else {
- current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
- }
+ type = marking == MarkingType::kIncremental
+ ? Event::INCREMENTAL_MARK_COMPACTOR
+ : Event::MARK_COMPACTOR;
break;
}
- current_.reduce_memory = heap_->ShouldReduceMemory();
- current_.start_time = MonotonicallyIncreasingTimeInMs();
- current_.start_object_size = 0;
- current_.start_memory_size = 0;
- current_.start_holes_size = 0;
- current_.young_object_size = 0;
-
- current_.incremental_marking_bytes = 0;
- current_.incremental_marking_duration = 0;
+ DCHECK_IMPLIES(!young_gc_while_full_gc_,
+ current_.state == Event::State::NOT_RUNNING);
+ DCHECK_EQ(Event::State::NOT_RUNNING, previous_.state);
- for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
- current_.scopes[i] = 0;
+ previous_ = current_;
+ current_ = Event(type, Event::State::MARKING, gc_reason, collector_reason);
+
+ switch (marking) {
+ case MarkingType::kAtomic:
+ DCHECK(IsInObservablePause());
+ // TODO(chromium:1154636): The start_time of the current event contains
+ // currently the start time of the observable pause. This should be
+ // reconsidered.
+ current_.start_time = start_of_observable_pause_;
+ current_.reduce_memory = heap_->ShouldReduceMemory();
+ break;
+ case MarkingType::kIncremental:
+ // The current event will be updated later.
+ DCHECK(!Heap::IsYoungGenerationCollector(collector));
+ DCHECK(!IsInObservablePause());
+ break;
}
- Counters* counters = heap_->isolate()->counters();
-
if (Heap::IsYoungGenerationCollector(collector)) {
- counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
+ epoch_young_ = next_epoch();
} else {
- counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
-
- if (FLAG_trace_gc_freelists) {
- PrintIsolate(heap_->isolate(),
- "FreeLists statistics before collection:\n");
- heap_->PrintFreeListsStats();
- }
+ epoch_full_ = next_epoch();
}
}
+void GCTracer::StartAtomicPause() {
+ DCHECK_EQ(Event::State::MARKING, current_.state);
+ current_.state = Event::State::ATOMIC;
+}
+
void GCTracer::StartInSafepoint() {
SampleAllocation(current_.start_time, heap_->NewSpaceAllocationCounter(),
heap_->OldGenerationAllocationCounter(),
@@ -332,27 +430,22 @@ void GCTracer::StopInSafepoint() {
current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
}
-void GCTracer::Stop(GarbageCollector collector) {
+void GCTracer::StopObservablePause() {
start_counter_--;
- if (start_counter_ != 0) {
- if (FLAG_trace_gc_verbose) {
- heap_->isolate()->PrintWithTimestamp(
- "[Finished reentrant %s during %s.]\n",
- Heap::CollectorName(collector), current_.TypeName(false));
- }
- return;
- }
+ DCHECK_EQ(0, start_counter_);
- DCHECK_LE(0, start_counter_);
- DCHECK((collector == GarbageCollector::SCAVENGER &&
- current_.type == Event::SCAVENGER) ||
- (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
- current_.type == Event::MINOR_MARK_COMPACTOR) ||
- (collector == GarbageCollector::MARK_COMPACTOR &&
- (current_.type == Event::MARK_COMPACTOR ||
- current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
+ DCHECK(IsInObservablePause());
+ start_of_observable_pause_ = 0.0;
+ // TODO(chromium:1154636): The end_time of the current event contains
+ // currently the end time of the observable pause. This should be
+ // reconsidered.
current_.end_time = MonotonicallyIncreasingTimeInMs();
+}
+
+void GCTracer::UpdateStatistics(GarbageCollector collector) {
+ const bool is_young = Heap::IsYoungGenerationCollector(collector);
+ DCHECK(IsConsistentWithCollector(collector));
AddAllocation(current_.end_time);
@@ -361,61 +454,44 @@ void GCTracer::Stop(GarbageCollector collector) {
static_cast<int64_t>(duration * base::Time::kMicrosecondsPerMillisecond);
auto* long_task_stats = heap_->isolate()->GetCurrentLongTaskStats();
- switch (current_.type) {
- case Event::SCAVENGER:
- case Event::MINOR_MARK_COMPACTOR:
- recorded_minor_gcs_total_.Push(
- MakeBytesAndDuration(current_.young_object_size, duration));
- recorded_minor_gcs_survived_.Push(
- MakeBytesAndDuration(current_.survived_young_object_size, duration));
- FetchBackgroundMinorGCCounters();
- long_task_stats->gc_young_wall_clock_duration_us += duration_us;
- break;
- case Event::INCREMENTAL_MARK_COMPACTOR:
+ if (is_young) {
+ recorded_minor_gcs_total_.Push(
+ MakeBytesAndDuration(current_.young_object_size, duration));
+ recorded_minor_gcs_survived_.Push(
+ MakeBytesAndDuration(current_.survived_young_object_size, duration));
+ FetchBackgroundMinorGCCounters();
+ long_task_stats->gc_young_wall_clock_duration_us += duration_us;
+ } else {
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
current_.incremental_marking_bytes = incremental_marking_bytes_;
current_.incremental_marking_duration = incremental_marking_duration_;
for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
current_.scopes[i] = incremental_marking_scopes_[i].duration;
}
-
- RecordMutatorUtilization(
- current_.end_time, duration + current_.incremental_marking_duration);
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
- RecordGCSumCounters(duration);
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- FetchBackgroundMarkCompactCounters();
- long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
- break;
- case Event::MARK_COMPACTOR:
+ } else {
DCHECK_EQ(0u, current_.incremental_marking_bytes);
DCHECK_EQ(0, current_.incremental_marking_duration);
- RecordMutatorUtilization(
- current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
- RecordGCSumCounters(duration);
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- FetchBackgroundMarkCompactCounters();
- long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
- break;
- case Event::START:
- UNREACHABLE();
+ }
+ RecordMutatorUtilization(current_.end_time,
+ duration + current_.incremental_marking_duration);
+ RecordGCSumCounters();
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
}
FetchBackgroundGeneralCounters();
heap_->UpdateTotalGCTime(duration);
- if (current_.type == Event::SCAVENGER ||
- current_.type == Event::MINOR_MARK_COMPACTOR) {
- ReportYoungCycleToRecorder();
- if (FLAG_trace_gc_ignore_scavenger) return;
- }
+ if (FLAG_trace_gc_ignore_scavenger && is_young) return;
if (FLAG_trace_gc_nvp) {
PrintNVP();
@@ -438,7 +514,65 @@ void GCTracer::Stop(GarbageCollector collector) {
}
}
+void GCTracer::StopAtomicPause() {
+ DCHECK_EQ(Event::State::ATOMIC, current_.state);
+ current_.state = Event::State::SWEEPING;
+}
+
+void GCTracer::StopCycle(GarbageCollector collector) {
+ DCHECK_EQ(Event::State::SWEEPING, current_.state);
+ current_.state = Event::State::NOT_RUNNING;
+
+ DCHECK(IsConsistentWithCollector(collector));
+
+ Counters* counters = heap_->isolate()->counters();
+ GarbageCollectionReason gc_reason = current_.gc_reason;
+
+ if (Heap::IsYoungGenerationCollector(collector)) {
+ ReportYoungCycleToRecorder();
+
+ // If a young generation GC interrupted an unfinished full GC cycle, restore
+ // the event corresponding to the full GC cycle.
+ if (young_gc_while_full_gc_) {
+ std::swap(current_, previous_);
+ young_gc_while_full_gc_ = false;
+ }
+ } else {
+ ReportFullCycleToRecorder();
+
+ counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics before collection:\n");
+ heap_->PrintFreeListsStats();
+ }
+ }
+}
+
+void GCTracer::StopCycleIfNeeded() {
+ if (current_.state != Event::State::SWEEPING) return;
+ if (!notified_sweeping_completed_) return;
+ if (heap_->cpp_heap() && !notified_cppgc_completed_) return;
+ StopCycle(GarbageCollector::MARK_COMPACTOR);
+ notified_sweeping_completed_ = false;
+ notified_cppgc_completed_ = false;
+}
+
void GCTracer::NotifySweepingCompleted() {
+#ifdef VERIFY_HEAP
+ // If heap verification is enabled, sweeping finalization can also be
+ // triggered from inside a full GC cycle's atomic pause.
+ DCHECK((current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
+ (current_.state == Event::State::SWEEPING ||
+ (FLAG_verify_heap && current_.state == Event::State::ATOMIC)));
+#else
+ DCHECK(IsSweepingInProgress());
+#endif
+
+ // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
+ // finished sweeping. This method is invoked by v8.
if (FLAG_trace_gc_freelists) {
PrintIsolate(heap_->isolate(),
"FreeLists statistics after sweeping completed:\n");
@@ -450,8 +584,21 @@ void GCTracer::NotifySweepingCompleted() {
heap_->code_space()->PrintAllocationsOrigins();
heap_->map_space()->PrintAllocationsOrigins();
}
- metrics_report_pending_ = true;
- NotifyGCCompleted();
+ DCHECK(!notified_sweeping_completed_);
+ notified_sweeping_completed_ = true;
+ StopCycleIfNeeded();
+}
+
+void GCTracer::NotifyCppGCCompleted() {
+ // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
+ // finished sweeping. This method is invoked by cppgc.
+ DCHECK(heap_->cpp_heap());
+ DCHECK(CppHeap::From(heap_->cpp_heap())
+ ->GetMetricRecorder()
+ ->MetricsReportPending());
+ DCHECK(!notified_cppgc_completed_);
+ notified_cppgc_completed_ = true;
+ StopCycleIfNeeded();
}
void GCTracer::SampleAllocation(double current_ms,
@@ -510,7 +657,6 @@ void GCTracer::AddCompactionEvent(double duration,
MakeBytesAndDuration(live_bytes_compacted, duration));
}
-
void GCTracer::AddSurvivalRatio(double promotion_ratio) {
recorded_survival_ratios_.Push(promotion_ratio);
}
@@ -564,7 +710,7 @@ void GCTracer::Print() const {
"[%d:%p] "
"%8.0f ms: "
"%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
- "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
+ "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s; %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(),
@@ -581,7 +727,6 @@ void GCTracer::Print() const {
current_.collector_reason != nullptr ? current_.collector_reason : "");
}
-
void GCTracer::PrintNVP() const {
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
@@ -1172,9 +1317,6 @@ void GCTracer::FetchBackgroundMarkCompactCounters() {
void GCTracer::FetchBackgroundMinorGCCounters() {
FetchBackgroundCounters(Scope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
Scope::LAST_MINOR_GC_BACKGROUND_SCOPE);
- heap_->isolate()->counters()->background_scavenger()->AddSample(
- static_cast<int>(
- current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]));
}
void GCTracer::FetchBackgroundGeneralCounters() {
@@ -1196,9 +1338,9 @@ void GCTracer::AddScopeSampleBackground(Scope::ScopeId scope, double duration) {
counter.total_duration_ms += duration;
}
-void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
+void GCTracer::RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode) {
Counters* counters = heap_->isolate()->counters();
- if (gc_timer == counters->gc_finalize()) {
+ if (mode == RecordGCPhasesInfo::Mode::Finalize) {
DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
counters->gc_finalize_clear()->AddSample(
static_cast<int>(current_.scopes[Scope::MC_CLEAR]));
@@ -1247,7 +1389,7 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
}
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
- } else if (gc_timer == counters->gc_scavenger()) {
+ } else if (mode == RecordGCPhasesInfo::Mode::Scavenger) {
counters->gc_scavenger_scavenge_main()->AddSample(
static_cast<int>(current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL]));
counters->gc_scavenger_scavenge_roots()->AddSample(
@@ -1255,20 +1397,23 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
}
}
-void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
+void GCTracer::RecordGCSumCounters() {
base::MutexGuard guard(&background_counter_mutex_);
- const double overall_duration =
+ const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
+ const double incremental_marking =
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
.duration +
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
.duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
- .duration +
incremental_marking_duration_ +
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration +
- atomic_pause_duration;
+ .duration;
+ const double incremental_sweeping =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration;
+ const double overall_duration =
+ atomic_pause_duration + incremental_marking + incremental_sweeping;
const double background_duration =
background_counter_[Scope::MC_BACKGROUND_EVACUATE_COPY]
.total_duration_ms +
@@ -1276,23 +1421,12 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
.total_duration_ms +
background_counter_[Scope::MC_BACKGROUND_MARKING].total_duration_ms +
background_counter_[Scope::MC_BACKGROUND_SWEEPING].total_duration_ms;
-
- const double marking_duration =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
- .duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
- .duration +
- incremental_marking_duration_ +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration +
- current_.scopes[Scope::MC_MARK];
+ const double atomic_marking_duration =
+ current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
+ const double marking_duration = atomic_marking_duration + incremental_marking;
const double marking_background_duration =
background_counter_[Scope::MC_BACKGROUND_MARKING].total_duration_ms;
- // UMA.
- heap_->isolate()->counters()->gc_mark_compactor()->AddSample(
- static_cast<int>(overall_duration));
-
// Emit trace event counters.
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCMarkCompactorSummary", TRACE_EVENT_SCOPE_THREAD,
@@ -1304,22 +1438,6 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
"background_duration", marking_background_duration);
}
-void GCTracer::NotifyGCCompleted() {
- // Report full GC cycle metric to recorder only when both v8 and cppgc (if
- // available) GCs have finished. This method is invoked by both v8 and cppgc.
- if (!metrics_report_pending_) {
- // V8 sweeping is not done yet.
- return;
- }
- const auto* cpp_heap = heap_->cpp_heap();
- if (cpp_heap &&
- !CppHeap::From(cpp_heap)->GetMetricRecorder()->MetricsReportPending()) {
- // Cppgc sweeping is not done yet.
- return;
- }
- ReportFullCycleToRecorder();
-}
-
namespace {
void CopyTimeMetrics(
@@ -1330,6 +1448,9 @@ void CopyTimeMetrics(
metrics.mark_wall_clock_duration_in_us = cppgc_metrics.mark_duration_us;
DCHECK_NE(-1, cppgc_metrics.sweep_duration_us);
metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
+ metrics.total_wall_clock_duration_in_us =
+ metrics.mark_wall_clock_duration_in_us +
+ metrics.sweep_wall_clock_duration_in_us;
}
void CopyTimeMetrics(
@@ -1343,6 +1464,11 @@ void CopyTimeMetrics(
metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
DCHECK_NE(-1, cppgc_metrics.weak_duration_us);
metrics.weak_wall_clock_duration_in_us = cppgc_metrics.weak_duration_us;
+ metrics.total_wall_clock_duration_in_us =
+ metrics.compact_wall_clock_duration_in_us +
+ metrics.mark_wall_clock_duration_in_us +
+ metrics.sweep_wall_clock_duration_in_us +
+ metrics.weak_wall_clock_duration_in_us;
}
void CopySizeMetrics(
@@ -1373,27 +1499,43 @@ void FlushBatchedIncrementalEvents(
DCHECK(!batched_events.events.empty());
isolate->metrics_recorder()->AddMainThreadEvent(std::move(batched_events),
GetContextId(isolate));
+ batched_events = {};
}
} // namespace
void GCTracer::ReportFullCycleToRecorder() {
+ DCHECK(!Event::IsYoungGenerationEvent(current_.type));
+ DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
+ auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
+ DCHECK_IMPLIES(cpp_heap,
+ cpp_heap->GetMetricRecorder()->MetricsReportPending());
const std::shared_ptr<metrics::Recorder>& recorder =
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
- if (!recorder->HasEmbedderRecorder()) return;
+ if (!recorder->HasEmbedderRecorder()) {
+ incremental_mark_batched_events_ = {};
+ if (cpp_heap) {
+ cpp_heap->GetMetricRecorder()->ClearCachedEvents();
+ }
+ return;
+ }
if (!incremental_mark_batched_events_.events.empty()) {
FlushBatchedIncrementalEvents(incremental_mark_batched_events_,
heap_->isolate());
}
+
v8::metrics::GarbageCollectionFullCycle event;
- if (heap_->cpp_heap()) {
- auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
+ event.reason = static_cast<int>(current_.gc_reason);
+
+ // Managed C++ heap statistics:
+ if (cpp_heap) {
cpp_heap->GetMetricRecorder()->FlushBatchedIncrementalEvents();
const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
optional_cppgc_event =
cpp_heap->GetMetricRecorder()->ExtractLastFullGcEvent();
DCHECK(optional_cppgc_event.has_value());
+ DCHECK(!cpp_heap->GetMetricRecorder()->MetricsReportPending());
const cppgc::internal::MetricRecorder::FullCycle& cppgc_event =
optional_cppgc_event.value();
CopyTimeMetrics(event.total_cpp, cppgc_event.total);
@@ -1414,12 +1556,92 @@ void GCTracer::ReportFullCycleToRecorder() {
event.main_thread_efficiency_cpp_in_bytes_per_us =
cppgc_event.main_thread_efficiency_in_bytes_per_us;
}
- // TODO(chromium:1154636): Populate v8 metrics.
+
+ // Unified heap statistics:
+ const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
+ const double incremental_marking =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ current_.incremental_marking_duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration;
+ const double incremental_sweeping =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration;
+ const double overall_duration =
+ atomic_pause_duration + incremental_marking + incremental_sweeping;
+ const double marking_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_MARKING];
+ const double sweeping_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_SWEEPING];
+ const double compact_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY] +
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS];
+ const double background_duration = marking_background_duration +
+ sweeping_background_duration +
+ compact_background_duration;
+ const double atomic_marking_duration =
+ current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
+ const double marking_duration = atomic_marking_duration + incremental_marking;
+ const double weak_duration = current_.scopes[Scope::MC_CLEAR];
+ const double compact_duration = current_.scopes[Scope::MC_EVACUATE] +
+ current_.scopes[Scope::MC_FINISH] +
+ current_.scopes[Scope::MC_EPILOGUE];
+ const double atomic_sweeping_duration = current_.scopes[Scope::MC_SWEEP];
+ const double sweeping_duration =
+ atomic_sweeping_duration + incremental_sweeping;
+
+ event.main_thread_atomic.total_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_pause_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.total_wall_clock_duration_in_us = static_cast<int64_t>(
+ overall_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.total_wall_clock_duration_in_us =
+ static_cast<int64_t>((overall_duration + background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.mark_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_marking_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.mark_wall_clock_duration_in_us = static_cast<int64_t>(
+ marking_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.mark_wall_clock_duration_in_us =
+ static_cast<int64_t>((marking_duration + marking_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.weak_wall_clock_duration_in_us =
+ event.main_thread.weak_wall_clock_duration_in_us =
+ event.total.weak_wall_clock_duration_in_us = static_cast<int64_t>(
+ weak_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.compact_wall_clock_duration_in_us =
+ event.main_thread.compact_wall_clock_duration_in_us =
+ static_cast<int64_t>(compact_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.total.compact_wall_clock_duration_in_us =
+ static_cast<int64_t>((compact_duration + compact_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.sweep_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_sweeping_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.sweep_wall_clock_duration_in_us = static_cast<int64_t>(
+ sweeping_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.sweep_wall_clock_duration_in_us =
+ static_cast<int64_t>((sweeping_duration + sweeping_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+
+ // TODO(chromium:1154636): Populate the following:
+ // - event.main_thread_incremental
+ // - event.objects
+ // - event.memory
+ // - event.collection_rate_in_percent
+ // - event.efficiency_in_bytes_per_us
+ // - event.main_thread_efficiency_in_bytes_per_us
+
recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
- metrics_report_pending_ = false;
}
void GCTracer::ReportIncrementalMarkingStepToRecorder() {
+ DCHECK_EQ(Event::Type::INCREMENTAL_MARK_COMPACTOR, current_.type);
static constexpr int kMaxBatchedEvents =
CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
const std::shared_ptr<metrics::Recorder>& recorder =
@@ -1447,21 +1669,33 @@ void GCTracer::ReportIncrementalMarkingStepToRecorder() {
}
void GCTracer::ReportYoungCycleToRecorder() {
+ DCHECK(Event::IsYoungGenerationEvent(current_.type));
+ DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
const std::shared_ptr<metrics::Recorder>& recorder =
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
if (!recorder->HasEmbedderRecorder()) return;
v8::metrics::GarbageCollectionYoungCycle event;
+ // Reason:
+ event.reason = static_cast<int>(current_.gc_reason);
// Total:
const double total_wall_clock_duration_in_us =
(current_.scopes[Scope::SCAVENGER] +
- current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]) *
+ current_.scopes[Scope::MINOR_MARK_COMPACTOR] +
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]) *
base::Time::kMicrosecondsPerMillisecond;
+ // TODO(chromium:1154636): Consider adding BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
+ // (both for the case of the scavenger and the minor mark-compactor), and
+ // BACKGROUND_UNMAPPER (for the case of the minor mark-compactor).
event.total_wall_clock_duration_in_us =
static_cast<int64_t>(total_wall_clock_duration_in_us);
// MainThread:
const double main_thread_wall_clock_duration_in_us =
- current_.scopes[Scope::SCAVENGER] *
+ (current_.scopes[Scope::SCAVENGER] +
+ current_.scopes[Scope::MINOR_MARK_COMPACTOR]) *
base::Time::kMicrosecondsPerMillisecond;
event.main_thread_wall_clock_duration_in_us =
static_cast<int64_t>(main_thread_wall_clock_duration_in_us);
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 2c9b7b01ec..c008b1da06 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -51,6 +51,8 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)), \
"epoch", tracer->CurrentEpoch(scope_id))
+using CollectionEpoch = uint32_t;
+
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class V8_EXPORT_PRIVATE GCTracer {
@@ -112,6 +114,10 @@ class V8_EXPORT_PRIVATE GCTracer {
static bool NeedsYoungEpoch(ScopeId id);
private:
+#if DEBUG
+ void AssertMainThread();
+#endif // DEBUG
+
GCTracer* tracer_;
ScopeId scope_;
ThreadKind thread_kind_;
@@ -133,15 +139,33 @@ class V8_EXPORT_PRIVATE GCTracer {
START = 4
};
- Event(Type type, GarbageCollectionReason gc_reason,
+#ifdef DEBUG
+ // Returns true if the event corresponds to a young generation GC.
+ static constexpr bool IsYoungGenerationEvent(Type type) {
+ DCHECK_NE(START, type);
+ return type == SCAVENGER || type == MINOR_MARK_COMPACTOR;
+ }
+#endif
+
+ // The state diagram for a GC cycle:
+ // (NOT_RUNNING) -----(StartCycle)----->
+ // MARKING --(StartAtomicPause)-->
+ // ATOMIC ---(StopAtomicPause)-->
+ // SWEEPING ------(StopCycle)-----> NOT_RUNNING
+ enum class State { NOT_RUNNING, MARKING, ATOMIC, SWEEPING };
+
+ Event(Type type, State state, GarbageCollectionReason gc_reason,
const char* collector_reason);
// Returns a string describing the event type.
const char* TypeName(bool short_name) const;
- // Type of event
+ // Type of the event.
Type type;
+ // State of the cycle corresponding to the event.
+ State state;
+
GarbageCollectionReason gc_reason;
const char* collector_reason;
@@ -195,6 +219,24 @@ class V8_EXPORT_PRIVATE GCTracer {
incremental_marking_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
};
+ class RecordGCPhasesInfo {
+ public:
+ RecordGCPhasesInfo(Heap* heap, GarbageCollector collector);
+
+ enum class Mode { None, Scavenger, Finalize };
+
+ Mode mode;
+
+ // The timer used for a given GC type:
+ // - GCScavenger: young generation GC
+ // - GCCompactor: full GC
+ // - GCFinalizeMC: finalization of incremental full GC
+ // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
+ // memory reduction.
+ TimedHistogram* type_timer;
+ TimedHistogram* type_priority_timer;
+ };
+
static const int kThroughputTimeFrameMs = 5000;
static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;
@@ -207,22 +249,64 @@ class V8_EXPORT_PRIVATE GCTracer {
explicit GCTracer(Heap* heap);
- // Start collecting data.
- void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
- const char* collector_reason);
- void StartInSafepoint();
+ CollectionEpoch CurrentEpoch(Scope::ScopeId id) const {
+ return Scope::NeedsYoungEpoch(id) ? epoch_young_ : epoch_full_;
+ }
+
+ // Start and stop an observable pause.
+ void StartObservablePause();
+ void StopObservablePause();
+
+ // Update the current event if it precedes the start of the observable pause.
+ void UpdateCurrentEvent(GarbageCollectionReason gc_reason,
+ const char* collector_reason);
- // Stop collecting data and print results.
- void Stop(GarbageCollector collector);
+ void UpdateStatistics(GarbageCollector collector);
+
+ enum class MarkingType { kAtomic, kIncremental };
+
+ // Start and stop a GC cycle (collecting data and reporting results).
+ void StartCycle(GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason, MarkingType marking);
+ void StopCycle(GarbageCollector collector);
+ void StopCycleIfNeeded();
+
+ // Start and stop a cycle's atomic pause.
+ void StartAtomicPause();
+ void StopAtomicPause();
+
+ void StartInSafepoint();
void StopInSafepoint();
void NotifySweepingCompleted();
-
- void NotifyGCCompleted();
+ void NotifyCppGCCompleted();
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
+#ifdef DEBUG
+ bool IsInObservablePause() const { return 0.0 < start_of_observable_pause_; }
+
+ // Checks if the current event is consistent with a collector.
+ bool IsConsistentWithCollector(GarbageCollector collector) const {
+ return (collector == GarbageCollector::SCAVENGER &&
+ current_.type == Event::SCAVENGER) ||
+ (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
+ current_.type == Event::MINOR_MARK_COMPACTOR) ||
+ (collector == GarbageCollector::MARK_COMPACTOR &&
+ (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR));
+ }
+
+ // Checks if the current event corresponds to a full GC cycle whose sweeping
+ // has not finalized yet.
+ bool IsSweepingInProgress() const {
+ return (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
+ current_.state == Event::State::SWEEPING;
+ }
+#endif
+
// Sample and accumulate bytes allocated since the last GC.
void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
size_t old_generation_counter_bytes,
@@ -336,7 +420,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddScopeSampleBackground(Scope::ScopeId scope, double duration);
- void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
+ void RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode);
void RecordEmbedderSpeed(size_t bytes, double duration);
@@ -349,8 +433,6 @@ class V8_EXPORT_PRIVATE GCTracer {
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
#endif // defined(V8_RUNTIME_CALL_STATS)
- CollectionEpoch CurrentEpoch(Scope::ScopeId id);
-
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -367,7 +449,6 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
- FRIEND_TEST(GCTracerTest, RecordGCSumHistograms);
FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);
@@ -388,10 +469,10 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordMutatorUtilization(double mark_compactor_end_time,
double mark_compactor_duration);
- // Overall time spent in mark compact within a given GC cycle. Exact
- // accounting of events within a GC is not necessary which is why the
- // recording takes place at the end of the atomic pause.
- void RecordGCSumCounters(double atomic_pause_duration);
+ // Update counters for an entire full GC cycle. Exact accounting of events
+ // within a GC is not necessary which is why the recording takes place at the
+ // end of the atomic pause.
+ void RecordGCSumCounters();
double MonotonicallyIncreasingTimeInMs();
@@ -434,6 +515,14 @@ class V8_EXPORT_PRIVATE GCTracer {
// Previous tracer event.
Event previous_;
+ // The starting time of the observable pause or 0.0 if we're not inside it.
+ double start_of_observable_pause_ = 0.0;
+
+ // We need two epochs, since there can be scavenges during incremental
+ // marking.
+ CollectionEpoch epoch_young_ = 0;
+ CollectionEpoch epoch_full_ = 0;
+
// Size of incremental marking steps (in bytes) accumulated since the end of
// the last mark compact GC.
size_t incremental_marking_bytes_;
@@ -455,7 +544,6 @@ class V8_EXPORT_PRIVATE GCTracer {
IncrementalMarkingInfos
incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
-
// Timestamp and allocation counter at the last sampled allocation event.
double allocation_time_ms_;
size_t new_space_allocation_counter_bytes_;
@@ -489,7 +577,15 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
base::RingBuffer<double> recorded_survival_ratios_;
- bool metrics_report_pending_ = false;
+ // A full GC cycle stops only when both v8 and cppgc (if available) GCs have
+ // finished sweeping.
+ bool notified_sweeping_completed_ = false;
+ bool notified_cppgc_completed_ = false;
+
+ // When a full GC cycle is interrupted by a young generation GC cycle, the
+ // |previous_| event is used as temporary storage for the |current_| event
+ // that corresponded to the full GC cycle, and this field is set to true.
+ bool young_gc_while_full_gc_ = false;
v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark
incremental_mark_batched_events_;
diff --git a/deps/v8/src/heap/heap-allocator-inl.h b/deps/v8/src/heap/heap-allocator-inl.h
new file mode 100644
index 0000000000..043f4c629b
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator-inl.h
@@ -0,0 +1,250 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_ALLOCATOR_INL_H_
+#define V8_HEAP_HEAP_ALLOCATOR_INL_H_
+
+#include "src/base/logging.h"
+#include "src/common/globals.h"
+#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/heap-allocator.h"
+#include "src/heap/large-spaces.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/heap/third-party/heap-api.h"
+
+namespace v8 {
+namespace internal {
+
+PagedSpace* HeapAllocator::code_space() const {
+ return static_cast<PagedSpace*>(spaces_[CODE_SPACE]);
+}
+
+CodeLargeObjectSpace* HeapAllocator::code_lo_space() const {
+ return static_cast<CodeLargeObjectSpace*>(spaces_[CODE_LO_SPACE]);
+}
+
+OldLargeObjectSpace* HeapAllocator::lo_space() const {
+ return static_cast<OldLargeObjectSpace*>(spaces_[LO_SPACE]);
+}
+
+PagedSpace* HeapAllocator::map_space() const {
+ return static_cast<PagedSpace*>(spaces_[MAP_SPACE]);
+}
+
+NewSpace* HeapAllocator::new_space() const {
+ return static_cast<NewSpace*>(spaces_[NEW_SPACE]);
+}
+
+NewLargeObjectSpace* HeapAllocator::new_lo_space() const {
+ return static_cast<NewLargeObjectSpace*>(spaces_[NEW_LO_SPACE]);
+}
+
+PagedSpace* HeapAllocator::old_space() const {
+ return static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
+}
+
+ReadOnlySpace* HeapAllocator::read_only_space() const {
+ return read_only_space_;
+}
+
+bool HeapAllocator::CanAllocateInReadOnlySpace() const {
+ return read_only_space()->writable();
+}
+
+template <AllocationType type>
+V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
+ int size_in_bytes, AllocationOrigin origin, AllocationAlignment alignment) {
+ DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ if (FLAG_single_generation && type == AllocationType::kYoung) {
+ return AllocateRaw(size_in_bytes, AllocationType::kOld, origin, alignment);
+ }
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ if (!heap_->always_allocate() && allocation_timeout_-- <= 0) {
+ return AllocationResult::Failure();
+ }
+ }
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+#ifdef DEBUG
+ IncrementObjectCounters();
+#endif // DEBUG
+
+ if (heap_->CanSafepoint()) {
+ heap_->main_thread_local_heap()->Safepoint();
+ }
+
+ const size_t large_object_threshold = heap_->MaxRegularHeapObjectSize(type);
+ const bool large_object =
+ static_cast<size_t>(size_in_bytes) > large_object_threshold;
+
+ HeapObject object;
+ AllocationResult allocation;
+
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ allocation = heap_->tp_heap_->Allocate(size_in_bytes, type, alignment);
+ } else {
+ if (V8_UNLIKELY(large_object)) {
+ allocation =
+ AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
+ } else {
+ switch (type) {
+ case AllocationType::kYoung:
+ allocation =
+ new_space()->AllocateRaw(size_in_bytes, alignment, origin);
+ break;
+ case AllocationType::kOld:
+ allocation =
+ old_space()->AllocateRaw(size_in_bytes, alignment, origin);
+ break;
+ case AllocationType::kCode:
+ DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+ DCHECK(AllowCodeAllocation::IsAllowed());
+ allocation = code_space()->AllocateRawUnaligned(size_in_bytes);
+ break;
+ case AllocationType::kMap:
+ DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+ allocation = map_space()->AllocateRawUnaligned(size_in_bytes);
+ break;
+ case AllocationType::kReadOnly:
+ DCHECK(read_only_space()->writable());
+ DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+ allocation = read_only_space()->AllocateRaw(size_in_bytes, alignment);
+ break;
+ case AllocationType::kSharedMap:
+ allocation = shared_map_allocator_->AllocateRaw(size_in_bytes,
+ alignment, origin);
+ break;
+ case AllocationType::kSharedOld:
+ allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
+ alignment, origin);
+ break;
+ }
+ }
+ }
+
+ if (allocation.To(&object)) {
+ if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ heap_->UnprotectAndRegisterMemoryChunk(
+ object, UnprotectMemoryOrigin::kMainThread);
+ heap_->ZapCodeObject(object.address(), size_in_bytes);
+ if (!large_object) {
+ MemoryChunk::FromHeapObject(object)
+ ->GetCodeObjectRegistry()
+ ->RegisterNewlyAllocatedCodeObject(object.address());
+ }
+ }
+
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ if (AllocationType::kReadOnly != type) {
+ DCHECK_TAG_ALIGNED(object.address());
+ Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
+ object.address());
+ }
+#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+
+ for (auto& tracker : heap_->allocation_trackers_) {
+ tracker->AllocationEvent(object.address(), size_in_bytes);
+ }
+ }
+
+ return allocation;
+}
+
+AllocationResult HeapAllocator::AllocateRaw(int size_in_bytes,
+ AllocationType type,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kOld:
+ return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kCode:
+ return AllocateRaw<AllocationType::kCode>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kMap:
+ return AllocateRaw<AllocationType::kMap>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kReadOnly:
+ return AllocateRaw<AllocationType::kReadOnly>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kSharedMap:
+ return AllocateRaw<AllocationType::kSharedMap>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kSharedOld:
+ return AllocateRaw<AllocationType::kSharedOld>(size_in_bytes, origin,
+ alignment);
+ }
+ UNREACHABLE();
+}
+
+AllocationResult HeapAllocator::AllocateRawData(int size_in_bytes,
+ AllocationType type,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kOld:
+ return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kCode:
+ case AllocationType::kMap:
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+template <HeapAllocator::AllocationRetryMode mode>
+V8_WARN_UNUSED_RESULT V8_INLINE HeapObject HeapAllocator::AllocateRawWith(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result;
+ HeapObject object;
+ if (allocation == AllocationType::kYoung) {
+ result = AllocateRaw<AllocationType::kYoung>(size, origin, alignment);
+ if (result.To(&object)) {
+ return object;
+ }
+ } else if (allocation == AllocationType::kOld) {
+ result = AllocateRaw<AllocationType::kOld>(size, origin, alignment);
+ if (result.To(&object)) {
+ return object;
+ }
+ }
+ switch (mode) {
+ case kLightRetry:
+ result = AllocateRawWithLightRetrySlowPath(size, allocation, origin,
+ alignment);
+ break;
+ case kRetryOrFail:
+ result = AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
+ alignment);
+ break;
+ }
+ if (result.To(&object)) {
+ return object;
+ }
+ return HeapObject();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
new file mode 100644
index 0000000000..580f56c9e0
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -0,0 +1,163 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/heap-allocator.h"
+
+#include "src/base/logging.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap-allocator-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+HeapAllocator::HeapAllocator(Heap* heap) : heap_(heap) {}
+
+void HeapAllocator::Setup() {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; ++i) {
+ spaces_[i] = heap_->space(i);
+ }
+ shared_old_allocator_ = heap_->shared_old_allocator_.get();
+ shared_map_allocator_ = heap_->shared_map_allocator_.get();
+}
+
+void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
+ read_only_space_ = read_only_space;
+}
+
+AllocationResult HeapAllocator::AllocateRawLargeInternal(
+ int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
+ switch (allocation) {
+ case AllocationType::kYoung:
+ return new_lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kOld:
+ return lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kCode:
+ return code_lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kMap:
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+}
+
+namespace {
+
+constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return NEW_SPACE;
+ case AllocationType::kOld:
+ case AllocationType::kCode:
+ case AllocationType::kMap:
+ // OLD_SPACE indicates full GC.
+ return OLD_SPACE;
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
+AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
+ if (!result.IsFailure()) {
+ return result;
+ }
+
+ // Two GCs before returning failure.
+ for (int i = 0; i < 2; i++) {
+ if (IsSharedAllocationType(allocation)) {
+ heap_->CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
+ } else {
+ heap_->CollectGarbage(AllocationTypeToGCSpace(allocation),
+ GarbageCollectionReason::kAllocationFailure);
+ }
+ result = AllocateRaw(size, allocation, origin, alignment);
+ if (!result.IsFailure()) {
+ return result;
+ }
+ }
+ return result;
+}
+
+AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result =
+ AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
+ if (!result.IsFailure()) return result;
+
+ heap_->isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ if (IsSharedAllocationType(allocation)) {
+ heap_->CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+
+ // We need always_allocate() to be true both on the client- and
+ // server-isolate. It is used in both code paths.
+ AlwaysAllocateScope shared_scope(
+ heap_->isolate()->shared_isolate()->heap());
+ AlwaysAllocateScope client_scope(heap_);
+ result = AllocateRaw(size, allocation, origin, alignment);
+ } else {
+ heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+
+ AlwaysAllocateScope scope(heap_);
+ result = AllocateRaw(size, allocation, origin, alignment);
+ }
+
+ if (!result.IsFailure()) {
+ return result;
+ }
+
+ v8::internal::V8::FatalProcessOutOfMemory(heap_->isolate(),
+ "CALL_AND_RETRY_LAST", true);
+}
+
+#ifdef DEBUG
+
+void HeapAllocator::IncrementObjectCounters() {
+ heap_->isolate()->counters()->objs_since_last_full()->Increment();
+ heap_->isolate()->counters()->objs_since_last_young()->Increment();
+}
+
+#endif // DEBUG
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+
+void HeapAllocator::SetAllocationTimeout(int allocation_timeout) {
+ allocation_timeout_ = allocation_timeout;
+}
+
+void HeapAllocator::UpdateAllocationTimeout() {
+ if (FLAG_random_gc_interval > 0) {
+ const int new_timeout = allocation_timeout_ <= 0
+ ? heap_->isolate()->fuzzer_rng()->NextInt(
+ FLAG_random_gc_interval + 1)
+ : allocation_timeout_;
+ // Reset the allocation timeout, but make sure to allow at least a few
+ // allocations after a collection. The reason for this is that we have a lot
+ // of allocation sequences and we assume that a garbage collection will
+ // allow the subsequent allocation attempts to go through.
+ constexpr int kFewAllocationsHeadroom = 6;
+ allocation_timeout_ = std::max(kFewAllocationsHeadroom, new_timeout);
+ } else if (FLAG_gc_interval >= 0) {
+ allocation_timeout_ = FLAG_gc_interval;
+ }
+}
+
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/heap-allocator.h b/deps/v8/src/heap/heap-allocator.h
new file mode 100644
index 0000000000..9de82295f2
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator.h
@@ -0,0 +1,119 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_ALLOCATOR_H_
+#define V8_HEAP_HEAP_ALLOCATOR_H_
+
+#include "include/v8config.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/allocation-result.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeLargeObjectSpace;
+class ConcurrentAllocator;
+class Heap;
+class NewSpace;
+class NewLargeObjectSpace;
+class OldLargeObjectSpace;
+class PagedSpace;
+class ReadOnlySpace;
+class Space;
+
+// Allocator for the main thread. All exposed functions internally call the
+// right bottleneck.
+class V8_EXPORT_PRIVATE HeapAllocator final {
+ public:
+ explicit HeapAllocator(Heap*);
+
+ void Setup();
+ void SetReadOnlySpace(ReadOnlySpace*);
+
+ // Supports all `AllocationType` types.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ // Supports all `AllocationType` types. Use when type is statically known.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ template <AllocationType type>
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ // Supports only `AllocationType::kYoung` and `AllocationType::kOld`.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawData(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ enum AllocationRetryMode { kLightRetry, kRetryOrFail };
+
+ // Supports all `AllocationType` types and allows specifying retry handling.
+ template <AllocationRetryMode mode>
+ V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+ AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ V8_INLINE bool CanAllocateInReadOnlySpace() const;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ void UpdateAllocationTimeout();
+ void SetAllocationTimeout(int allocation_timeout);
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+ private:
+ V8_INLINE PagedSpace* code_space() const;
+ V8_INLINE CodeLargeObjectSpace* code_lo_space() const;
+ V8_INLINE PagedSpace* map_space() const;
+ V8_INLINE NewSpace* new_space() const;
+ V8_INLINE NewLargeObjectSpace* new_lo_space() const;
+ V8_INLINE OldLargeObjectSpace* lo_space() const;
+ V8_INLINE PagedSpace* old_space() const;
+ V8_INLINE ReadOnlySpace* read_only_space() const;
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawLargeInternal(
+ int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+#ifdef DEBUG
+ void IncrementObjectCounters();
+#endif // DEBUG
+
+ Heap* const heap_;
+ Space* spaces_[LAST_SPACE + 1];
+ ReadOnlySpace* read_only_space_;
+
+ ConcurrentAllocator* shared_old_allocator_;
+ ConcurrentAllocator* shared_map_allocator_;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // If the --gc-interval flag is set to a positive value, this variable
+ // holds the value indicating the number of allocations remain until the
+ // next failure and garbage collection.
+ int allocation_timeout_ = 0;
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_ALLOCATOR_H_
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 68abf816b0..d14ba247ca 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -20,6 +20,8 @@
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/concurrent-allocator.h"
+#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
@@ -66,26 +68,6 @@ T ForwardingAddress(T heap_obj) {
}
}
-AllocationSpace AllocationResult::RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::ToInt(object_));
-}
-
-HeapObject AllocationResult::ToObjectChecked() {
- CHECK(!IsRetry());
- return HeapObject::cast(object_);
-}
-
-HeapObject AllocationResult::ToObject() {
- DCHECK(!IsRetry());
- return HeapObject::cast(object_);
-}
-
-Address AllocationResult::ToAddress() {
- DCHECK(!IsRetry());
- return HeapObject::cast(object_).address();
-}
-
// static
base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
if (isolate->disable_bytecode_flushing()) {
@@ -120,6 +102,16 @@ int64_t Heap::update_external_memory(int64_t delta) {
return external_memory_.Update(delta);
}
+PagedSpace* Heap::space_for_maps() {
+ return V8_LIKELY(map_space_) ? static_cast<PagedSpace*>(map_space_)
+ : static_cast<PagedSpace*>(old_space_);
+}
+
+ConcurrentAllocator* Heap::concurrent_allocator_for_maps() {
+ return V8_LIKELY(shared_map_allocator_) ? shared_map_allocator_.get()
+ : shared_old_allocator_.get();
+}
+
RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
#define ROOT_ACCESSOR(Type, name, CamelName) \
@@ -191,6 +183,10 @@ inline const base::AddressRegion& Heap::code_region() {
#endif
}
+Address Heap::code_range_base() {
+ return code_range_ ? code_range_->base() : kNullAddress;
+}
+
int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
(allocation == AllocationType::kCode)) {
@@ -204,208 +200,18 @@ int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kTaggedAligned);
- DCHECK_EQ(gc_state(), NOT_IN_GC);
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
- if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
- AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
- return AllocationResult::Retry(space);
- }
- }
-#endif
-#ifdef DEBUG
- IncrementObjectCounters();
-#endif
-
- if (CanSafepoint()) {
- main_thread_local_heap()->Safepoint();
- }
-
- size_t large_object_threshold = MaxRegularHeapObjectSize(type);
- bool large_object =
- static_cast<size_t>(size_in_bytes) > large_object_threshold;
-
- HeapObject object;
- AllocationResult allocation;
-
- if (FLAG_single_generation && type == AllocationType::kYoung) {
- type = AllocationType::kOld;
- }
-
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
- } else {
- if (AllocationType::kYoung == type) {
- if (large_object) {
- if (FLAG_young_generation_large_objects) {
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- // If young generation large objects are disalbed we have to tenure
- // the allocation and violate the given allocation type. This could be
- // dangerous. We may want to remove
- // FLAG_young_generation_large_objects and avoid patching.
- allocation = lo_space_->AllocateRaw(size_in_bytes);
- }
- } else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
- }
- } else if (AllocationType::kOld == type) {
- if (large_object) {
- allocation = lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
- }
- } else if (AllocationType::kCode == type) {
- DCHECK(AllowCodeAllocation::IsAllowed());
- if (large_object) {
- allocation = code_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
- }
- } else if (AllocationType::kMap == type) {
- allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
- } else if (AllocationType::kReadOnly == type) {
- DCHECK(!large_object);
- DCHECK(CanAllocateInReadOnlySpace());
- DCHECK_EQ(AllocationOrigin::kRuntime, origin);
- allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
- } else if (AllocationType::kSharedOld == type) {
- allocation =
- shared_old_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
- } else if (AllocationType::kSharedMap == type) {
- allocation =
- shared_map_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
- } else {
- UNREACHABLE();
- }
- }
-
- if (allocation.To(&object)) {
- if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- // Unprotect the memory chunk of the object if it was not unprotected
- // already.
- UnprotectAndRegisterMemoryChunk(object,
- UnprotectMemoryOrigin::kMainThread);
- ZapCodeObject(object.address(), size_in_bytes);
- if (!large_object) {
- MemoryChunk::FromHeapObject(object)
- ->GetCodeObjectRegistry()
- ->RegisterNewlyAllocatedCodeObject(object.address());
- }
- }
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- if (AllocationType::kReadOnly != type) {
- DCHECK_TAG_ALIGNED(object.address());
- Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
- object.address());
- }
-#endif
-
- OnAllocationEvent(object, size_in_bytes);
- }
-
- return allocation;
-}
-
-template <Heap::AllocationRetryMode mode>
-HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK_EQ(gc_state(), NOT_IN_GC);
- Heap* heap = isolate()->heap();
- if (allocation == AllocationType::kYoung &&
- alignment == AllocationAlignment::kTaggedAligned &&
- size <= MaxRegularHeapObjectSize(allocation) &&
- V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
- FLAG_gc_interval == -1)) {
- Address* top = heap->NewSpaceAllocationTopAddress();
- Address* limit = heap->NewSpaceAllocationLimitAddress();
- if (*limit - *top >= static_cast<unsigned>(size)) {
- DCHECK(IsAligned(size, kTaggedSize));
- HeapObject obj = HeapObject::FromAddress(*top);
- *top += size;
- heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
- return obj;
- }
- }
- switch (mode) {
- case kLightRetry:
- return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
- alignment);
- case kRetryOrFail:
- return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
- alignment);
- }
- UNREACHABLE();
+ return heap_allocator_.AllocateRaw(size_in_bytes, type, origin, alignment);
}
Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
AllocationOrigin origin,
AllocationAlignment alignment) {
- return AllocateRawWith<kRetryOrFail>(size, allocation, origin, alignment)
+ return heap_allocator_
+ .AllocateRawWith<HeapAllocator::kRetryOrFail>(size, allocation, origin,
+ alignment)
.address();
}
-void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
- for (auto& tracker : allocation_trackers_) {
- tracker->AllocationEvent(object.address(), size_in_bytes);
- }
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(object);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- } else if (FLAG_trace_allocation_stack_interval > 0) {
- ++allocations_count_;
- if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
- isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
- }
- }
-}
-
-bool Heap::CanAllocateInReadOnlySpace() {
- return read_only_space()->writable();
-}
-
-void Heap::UpdateAllocationsHash(HeapObject object) {
- Address object_address = object.address();
- MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
- AllocationSpace allocation_space = memory_chunk->owner_identity();
-
- STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
- uint32_t value =
- static_cast<uint32_t>(object_address - memory_chunk->address()) |
- (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
-
- UpdateAllocationsHash(value);
-}
-
-void Heap::UpdateAllocationsHash(uint32_t value) {
- uint16_t c1 = static_cast<uint16_t>(value);
- uint16_t c2 = static_cast<uint16_t>(value >> 16);
- raw_allocations_hash_ =
- StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
- raw_allocations_hash_ =
- StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
-}
-
void Heap::RegisterExternalString(String string) {
DCHECK(string.IsExternalString());
DCHECK(!string.IsThinString());
diff --git a/deps/v8/src/heap/heap-layout-tracer.cc b/deps/v8/src/heap/heap-layout-tracer.cc
index 53ac5726a7..0e984ce761 100644
--- a/deps/v8/src/heap/heap-layout-tracer.cc
+++ b/deps/v8/src/heap/heap-layout-tracer.cc
@@ -20,7 +20,8 @@ void HeapLayoutTracer::GCProloguePrintHeapLayout(v8::Isolate* isolate,
v8::GCCallbackFlags flags,
void* data) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- PrintF("Before GC:%d,", heap->gc_count());
+ // gc_count_ will increase after this callback, manually add 1.
+ PrintF("Before GC:%d,", heap->gc_count() + 1);
PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
PrintHeapLayout(std::cout, heap);
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index a1b03256af..4d48679dfa 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -45,10 +45,10 @@ struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0;
- static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+ static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 17;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
- static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
+ static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 20;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -283,6 +283,19 @@ void WriteBarrier::MarkingFromInternalFields(JSObject host) {
MarkingSlowFromInternalFields(*heap, host);
}
+#ifdef ENABLE_SLOW_DCHECKS
+// static
+template <typename T>
+bool WriteBarrier::IsRequired(HeapObject host, T value) {
+ if (BasicMemoryChunk::FromHeapObject(host)->InYoungGeneration()) return false;
+ if (value.IsSmi()) return false;
+ if (value.IsCleared()) return false;
+ HeapObject target = value.GetHeapObject();
+ if (ReadOnlyHeap::Contains(target)) return false;
+ return !IsImmortalImmovableHeapObject(target);
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index dce052f00e..461af50f98 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -7,6 +7,7 @@
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/marking-barrier.h"
+#include "src/objects/code-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/maybe-object.h"
@@ -49,13 +50,10 @@ void WriteBarrier::MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value) {
// static
void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) {
- // We are not checking the mark bits of host here as (a) there's no
- // synchronization with the marker and (b) we are writing into a live object
- // (independent of the mark bits).
- if (!heap->local_embedder_heap_tracer()->InUse()) return;
- LocalEmbedderHeapTracer::ProcessingScope scope(
- heap->local_embedder_heap_tracer());
- scope.TracePossibleWrapper(host);
+ auto* local_embedder_heap_tracer = heap->local_embedder_heap_tracer();
+ if (!local_embedder_heap_tracer->InUse()) return;
+
+ local_embedder_heap_tracer->EmbedderWriteBarrier(heap, host);
}
void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
@@ -99,5 +97,21 @@ int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
return 0;
}
+#ifdef ENABLE_SLOW_DCHECKS
+bool WriteBarrier::IsImmortalImmovableHeapObject(HeapObject object) {
+ BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
+ // All objects in readonly space are immortal and immovable.
+ if (basic_chunk->InReadOnlySpace()) return true;
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ // There are also objects in "regular" spaces which are immortal and
+ // immovable. Objects on a page that can get compacted are movable and can be
+ // filtered out.
+ if (!chunk->IsFlagSet(MemoryChunk::NEVER_EVACUATE)) return false;
+ // Now we know the object is immovable, check whether it is also immortal.
+ // Builtins are roots and therefore always kept alive by the GC.
+ return object.IsCode() && Code::cast(object).is_builtin();
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index b221fae2ed..9e2cf8652a 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -65,6 +65,12 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static MarkingBarrier* CurrentMarkingBarrier(Heap* heap);
+#ifdef ENABLE_SLOW_DCHECKS
+ template <typename T>
+ static inline bool IsRequired(HeapObject host, T value);
+ static bool IsImmortalImmovableHeapObject(HeapObject object);
+#endif
+
private:
static inline base::Optional<Heap*> GetHeapIfMarking(HeapObject object);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5f80f2fd4f..81255e531c 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -30,7 +30,6 @@
#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
@@ -88,6 +87,7 @@
#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
@@ -114,14 +114,6 @@
namespace v8 {
namespace internal {
-namespace {
-std::atomic<CollectionEpoch> global_epoch{0};
-
-CollectionEpoch next_epoch() {
- return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
-}
-} // namespace
-
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
return reinterpret_cast<Isolate*>(
@@ -212,6 +204,7 @@ class ScavengeTaskObserver : public AllocationObserver {
Heap::Heap()
: isolate_(isolate()),
+ heap_allocator_(this),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(std::make_unique<IsolateSafepoint>(this)),
@@ -488,6 +481,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return GarbageCollector::MARK_COMPACTOR;
}
+ DCHECK(!FLAG_single_generation);
+ DCHECK(!FLAG_gc_global);
// Default
*reason = nullptr;
return YoungGenerationCollector();
@@ -540,12 +535,14 @@ void Heap::PrintShortHeapStatistics() {
", committed: %6zu KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_,
- "Map space, used: %6zu KB"
- ", available: %6zu KB"
- ", committed: %6zu KB\n",
- map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
- map_space_->CommittedMemory() / KB);
+ if (map_space()) {
+ PrintIsolate(isolate_,
+ "Map space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
+ map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
+ map_space_->CommittedMemory() / KB);
+ }
PrintIsolate(isolate_,
"Large object space, used: %6zu KB"
", available: %6zu KB"
@@ -723,6 +720,106 @@ void Heap::ReportStatisticsAfterGC() {
}
}
+class Heap::AllocationTrackerForDebugging final
+ : public HeapObjectAllocationTracker {
+ public:
+ static bool IsNeeded() {
+ return FLAG_verify_predictable || FLAG_fuzzer_gc_analysis ||
+ (FLAG_trace_allocation_stack_interval > 0);
+ }
+
+ explicit AllocationTrackerForDebugging(Heap* heap) : heap_(heap) {
+ CHECK(IsNeeded());
+ heap_->AddHeapObjectAllocationTracker(this);
+ }
+
+ ~AllocationTrackerForDebugging() final {
+ heap_->RemoveHeapObjectAllocationTracker(this);
+ if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
+ PrintAllocationsHash();
+ }
+ }
+
+ void AllocationEvent(Address addr, int size) final {
+ if (FLAG_verify_predictable) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ // Advance synthetic time by making a time request.
+ heap_->MonotonicallyIncreasingTimeInMs();
+
+ UpdateAllocationsHash(HeapObject::FromAddress(addr));
+ UpdateAllocationsHash(size);
+
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
+ PrintAllocationsHash();
+ }
+ } else if (FLAG_fuzzer_gc_analysis) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ } else if (FLAG_trace_allocation_stack_interval > 0) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
+ heap_->isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
+ }
+ }
+ }
+
+ void MoveEvent(Address source, Address target, int size) final {
+ if (FLAG_verify_predictable) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ // Advance synthetic time by making a time request.
+ heap_->MonotonicallyIncreasingTimeInMs();
+
+ UpdateAllocationsHash(HeapObject::FromAddress(source));
+ UpdateAllocationsHash(HeapObject::FromAddress(target));
+ UpdateAllocationsHash(size);
+
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
+ PrintAllocationsHash();
+ }
+ } else if (FLAG_fuzzer_gc_analysis) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+
+ void UpdateObjectSizeEvent(Address, int) final {}
+
+ private:
+ void UpdateAllocationsHash(HeapObject object) {
+ Address object_address = object.address();
+ MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+ AllocationSpace allocation_space = memory_chunk->owner_identity();
+
+ STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+ uint32_t value =
+ static_cast<uint32_t>(object_address - memory_chunk->address()) |
+ (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+ UpdateAllocationsHash(value);
+ }
+
+ void UpdateAllocationsHash(uint32_t value) {
+ const uint16_t c1 = static_cast<uint16_t>(value);
+ const uint16_t c2 = static_cast<uint16_t>(value >> 16);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+ }
+
+ void PrintAllocationsHash() {
+ uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+ PrintF("\n### Allocations = %zu, hash = 0x%08x\n",
+ allocations_count_.load(std::memory_order_relaxed), hash);
+ }
+
+ Heap* const heap_;
+ // Count of all allocations performed through C++ bottlenecks. This needs to
+ // be atomic as objects are moved in parallel in the GC which counts as
+ // allocations.
+ std::atomic<size_t> allocations_count_{0};
+ // Running hash over allocations performed.
+ uint32_t raw_allocations_hash_ = 0;
+};
+
void Heap::AddHeapObjectAllocationTracker(
HeapObjectAllocationTracker* tracker) {
if (allocation_trackers_.empty() && FLAG_inline_new) {
@@ -913,9 +1010,28 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
-void Heap::GarbageCollectionPrologue() {
+void Heap::GarbageCollectionPrologue(
+ GarbageCollectionReason gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
+ is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
+ current_gc_flags_ & kForcedGC ||
+ force_gc_on_next_allocation_;
+ is_current_gc_for_heap_profiler_ =
+ gc_reason == GarbageCollectionReason::kHeapProfiler;
+ if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ heap_allocator_.UpdateAllocationTimeout();
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+ // There may be an allocation memento behind objects in new space. Upon
+ // evacuation of a non-full new space (or if we are on the last page) there
+ // may be uninitialized memory behind top. We fill the remainder of the page
+ // with a filler.
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+
// Reset GC statistics.
promoted_objects_size_ = 0;
previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
@@ -1289,7 +1405,11 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+
+ if (map_space()) {
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ }
+
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -1358,8 +1478,10 @@ void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
static_cast<int>(SizeOfObjects() / KB));
- isolate_->counters()->heap_sample_map_space_committed()->AddSample(
- static_cast<int>(map_space()->CommittedMemory() / KB));
+ if (map_space()) {
+ isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+ static_cast<int>(map_space()->CommittedMemory() / KB));
+ }
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
@@ -1388,7 +1510,7 @@ class V8_NODISCARD GCCallbacksScope {
};
void Heap::HandleGCRequest() {
- if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
+ if (IsStressingScavenge() && stress_scavenge_observer_->HasRequestedGC()) {
CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
stress_scavenge_observer_->RequestedGCDone();
} else if (HighMemoryPressure()) {
@@ -1417,51 +1539,6 @@ void Heap::ScheduleScavengeTaskIfNeeded() {
scavenge_job_->ScheduleTaskIfNeeded(this);
}
-TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_scavenger_background();
- }
- return isolate_->counters()->gc_scavenger_foreground();
- } else {
- if (!incremental_marking()->IsStopped()) {
- if (ShouldReduceMemory()) {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_finalize_reduce_memory_background();
- }
- return isolate_->counters()->gc_finalize_reduce_memory_foreground();
- } else {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_finalize_background();
- }
- return isolate_->counters()->gc_finalize_foreground();
- }
- } else {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_compactor_background();
- }
- return isolate_->counters()->gc_compactor_foreground();
- }
- }
-}
-
-TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- return isolate_->counters()->gc_scavenger();
- }
- if (incremental_marking()->IsStopped()) {
- return isolate_->counters()->gc_compactor();
- }
- if (ShouldReduceMemory()) {
- return isolate_->counters()->gc_finalize_reduce_memory();
- }
- if (incremental_marking()->IsMarking() &&
- incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
- return isolate_->counters()->gc_finalize_measure_memory();
- }
- return isolate_->counters()->gc_finalize();
-}
-
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -1662,6 +1739,19 @@ Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
heap_->SizeOfObjects());
}
+static GCType GetGCTypeFromGarbageCollector(GarbageCollector collector) {
+ switch (collector) {
+ case GarbageCollector::MARK_COMPACTOR:
+ return kGCTypeMarkSweepCompact;
+ case GarbageCollector::SCAVENGER:
+ return kGCTypeScavenge;
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
+ return kGCTypeMinorMarkCompact;
+ default:
+ UNREACHABLE();
+ }
+}
+
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
@@ -1673,62 +1763,64 @@ bool Heap::CollectGarbage(AllocationSpace space,
CHECK(always_allocate());
FatalProcessOutOfMemory("GC during deserialization");
}
- const char* collector_reason = nullptr;
- GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
- current_gc_flags_ & kForcedGC ||
- force_gc_on_next_allocation_;
- is_current_gc_for_heap_profiler_ =
- gc_reason == GarbageCollectionReason::kHeapProfiler;
- if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
- DevToolsTraceEventScope devtools_trace_event_scope(
- this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
- GarbageCollectionReasonToString(gc_reason));
+ // CollectGarbage consists of three parts:
+ // 1. The prologue part which may execute callbacks. These callbacks may
+ // allocate and trigger another garbage collection.
+ // 2. The main garbage collection phase.
+ // 3. The epilogue part which may execute callbacks. These callbacks may
+ // allocate and trigger another garbage collection
+
+ // Part 1: Invoke all callbacks which should happen before the actual garbage
+ // collection is triggered. Note that these callbacks may trigger another
+ // garbage collection since they may allocate.
- // Filter on-stack reference below this method.
- isolate()
- ->global_handles()
- ->CleanupOnStackReferencesBelowCurrentStackPosition();
+ DCHECK(AllowGarbageCollection::IsAllowed());
// Ensure that all pending phantom callbacks are invoked.
isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
- // The VM is in the GC state until exiting this function.
- VMState<GC> state(isolate());
-
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- // Reset the allocation timeout, but make sure to allow at least a few
- // allocations after a collection. The reason for this is that we have a lot
- // of allocation sequences and we assume that a garbage collection will allow
- // the subsequent allocation attempts to go through.
- if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
- allocation_timeout_ =
- std::max(6, NextAllocationTimeout(allocation_timeout_));
- }
-#endif
-
- // There may be an allocation memento behind objects in new space. Upon
- // evacuation of a non-full new space (or if we are on the last page) there
- // may be uninitialized memory behind top. We fill the remainder of the page
- // with a filler.
- if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+ const char* collector_reason = nullptr;
+ GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ GCType gc_type = GetGCTypeFromGarbageCollector(collector);
- if (IsYoungGenerationCollector(collector) &&
- !incremental_marking()->IsStopped()) {
- if (FLAG_trace_incremental_marking) {
- isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Scavenge during marking.\n");
+ {
+ GCCallbacksScope scope(this);
+ // Temporary override any embedder stack state as callbacks may create
+ // their own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ this, EmbedderStackStateScope::kExplicitInvocation,
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ if (scope.CheckReenter()) {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> callback_state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
}
- size_t freed_global_handles = 0;
-
- size_t committed_memory_before = 0;
+ // Part 2: The main garbage collection phase.
+ DisallowGarbageCollection no_gc_during_gc;
- if (collector == GarbageCollector::MARK_COMPACTOR) {
- committed_memory_before = CommittedOldGenerationMemory();
- if (cpp_heap()) {
+ size_t freed_global_handles = 0;
+ size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
+ ? CommittedOldGenerationMemory()
+ : 0;
+ {
+ tracer()->StartObservablePause();
+ VMState<GC> state(isolate());
+ DevToolsTraceEventScope devtools_trace_event_scope(
+ this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
+ GarbageCollectionReasonToString(gc_reason));
+
+ // Filter on-stack reference below this method.
+ isolate()
+ ->global_handles()
+ ->CleanupOnStackReferencesBelowCurrentStackPosition();
+
+ if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
// CppHeap needs a stack marker at the top of all entry points to allow
// deterministic passes over the stack. E.g., a verifier that should only
// find a subset of references of the marker.
@@ -1738,69 +1830,32 @@ bool Heap::CollectGarbage(AllocationSpace space,
static_cast<v8::internal::CppHeap*>(cpp_heap())
->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
}
- }
-
- {
- tracer()->Start(collector, gc_reason, collector_reason);
- DCHECK(AllowGarbageCollection::IsAllowed());
- DisallowGarbageCollection no_gc_during_gc;
- GarbageCollectionPrologue();
+ GarbageCollectionPrologue(gc_reason, gc_callback_flags);
{
- TimedHistogram* gc_type_timer = GCTypeTimer(collector);
- TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
- TRACE_EVENT0("v8", gc_type_timer->name());
-
- TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
- OptionalTimedHistogramScopeMode mode =
- isolate_->IsMemorySavingsModeActive()
- ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
- : OptionalTimedHistogramScopeMode::TAKE_TIME;
- OptionalTimedHistogramScope histogram_timer_priority_scope(
- gc_type_priority_timer, isolate_, mode);
-
- if (!IsYoungGenerationCollector(collector)) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
- GCType gc_type;
-
- switch (collector) {
- case GarbageCollector::MARK_COMPACTOR:
- gc_type = kGCTypeMarkSweepCompact;
- break;
- case GarbageCollector::SCAVENGER:
- gc_type = kGCTypeScavenge;
- break;
- case GarbageCollector::MINOR_MARK_COMPACTOR:
- gc_type = kGCTypeMinorMarkCompact;
- break;
- default:
- UNREACHABLE();
+ GCTracer::RecordGCPhasesInfo record_gc_phases_info(this, collector);
+ base::Optional<TimedHistogramScope> histogram_timer_scope;
+ base::Optional<OptionalTimedHistogramScope>
+ histogram_timer_priority_scope;
+ if (record_gc_phases_info.type_timer) {
+ histogram_timer_scope.emplace(record_gc_phases_info.type_timer,
+ isolate_);
+ TRACE_EVENT0("v8", record_gc_phases_info.type_timer->name());
}
-
- {
- GCCallbacksScope scope(this);
- // Temporary override any embedder stack state as callbacks may create
- // their own state on the stack and recursively trigger GC.
- EmbedderStackStateScope embedder_scope(
- local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
+ if (record_gc_phases_info.type_priority_timer) {
+ OptionalTimedHistogramScopeMode mode =
+ isolate_->IsMemorySavingsModeActive()
+ ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
+ : OptionalTimedHistogramScopeMode::TAKE_TIME;
+ histogram_timer_priority_scope.emplace(
+ record_gc_phases_info.type_priority_timer, isolate_, mode);
}
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
tp_heap_->CollectGarbage();
} else {
- freed_global_handles +=
- PerformGarbageCollection(collector, gc_callback_flags);
+ freed_global_handles += PerformGarbageCollection(
+ collector, gc_reason, collector_reason, gc_callback_flags);
}
// Clear flags describing the current GC now that the current GC is
// complete. Do this before GarbageCollectionEpilogue() since that could
@@ -1808,33 +1863,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
is_current_gc_forced_ = false;
is_current_gc_for_heap_profiler_ = false;
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- gc_post_processing_depth_++;
- {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- freed_global_handles +=
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
- }
- gc_post_processing_depth_--;
- }
-
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
- }
if (collector == GarbageCollector::MARK_COMPACTOR ||
collector == GarbageCollector::SCAVENGER) {
- tracer()->RecordGCPhasesHistograms(gc_type_timer);
+ tracer()->RecordGCPhasesHistograms(record_gc_phases_info.mode);
}
}
@@ -1870,7 +1901,47 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- tracer()->Stop(collector);
+ tracer()->StopAtomicPause();
+ tracer()->StopObservablePause();
+ tracer()->UpdateStatistics(collector);
+ // Young generation cycles finish atomically. It is important that
+ // StopObservablePause, UpdateStatistics and StopCycle are called in this
+ // order; the latter may replace the current event with that of an
+ // interrupted full cycle.
+ if (IsYoungGenerationCollector(collector)) {
+ tracer()->StopCycle(collector);
+ } else {
+ tracer()->StopCycleIfNeeded();
+ }
+ }
+
+ // Part 3: Invoke all callbacks which should happen after the actual garbage
+ // collection is triggered. Note that these callbacks may trigger another
+ // garbage collection since they may allocate.
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
+ }
+
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> callback_state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
}
if (collector == GarbageCollector::MARK_COMPACTOR &&
@@ -1924,9 +1995,6 @@ void Heap::StartIncrementalMarking(int gc_flags,
// Sweeping needs to be completed such that markbits are all cleared before
// starting marking again.
CompleteSweepingFull();
- if (cpp_heap()) {
- CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
- }
base::Optional<SafepointScope> safepoint_scope;
@@ -1940,9 +2008,9 @@ void Heap::StartIncrementalMarking(int gc_flags,
VerifyCountersAfterSweeping();
#endif
- // Now that sweeping is completed, we can update the current epoch for the new
- // full collection.
- UpdateEpochFull();
+ // Now that sweeping is completed, we can start the next full GC cycle.
+ tracer()->StartCycle(GarbageCollector::MARK_COMPACTOR, gc_reason, nullptr,
+ GCTracer::MarkingType::kIncremental);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
@@ -1951,8 +2019,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
void Heap::CompleteSweepingFull() {
array_buffer_sweeper()->EnsureFinished();
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kUnifiedHeap);
+
DCHECK(!mark_compact_collector()->sweeping_in_progress());
+ DCHECK_IMPLIES(cpp_heap(),
+ !CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
+ DCHECK(!tracer()->IsSweepingInProgress());
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
@@ -2148,23 +2221,41 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
} // namespace
size_t Heap::PerformGarbageCollection(
- GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+ GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
+ Verify();
+ }
+#endif // VERIFY_HEAP
+
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
+ tracer()->StartCycle(collector, gc_reason, collector_reason,
+ GCTracer::MarkingType::kAtomic);
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
- if (cpp_heap()) {
- CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ // If incremental marking has been activated, the full GC cycle has already
+ // started, so don't start a new one.
+ if (!incremental_marking_->WasActivated()) {
+ tracer()->StartCycle(collector, gc_reason, collector_reason,
+ GCTracer::MarkingType::kAtomic);
}
}
- // The last GC cycle is done after completing sweeping. Start the next GC
- // cycle.
- UpdateCurrentEpoch(collector);
+ tracer()->StartAtomicPause();
+ if (!Heap::IsYoungGenerationCollector(collector) &&
+ incremental_marking_->WasActivated()) {
+ tracer()->UpdateCurrentEvent(gc_reason, collector_reason);
+ }
+ DCHECK(tracer()->IsConsistentWithCollector(collector));
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
base::Optional<SafepointScope> safepoint_scope;
@@ -2177,15 +2268,6 @@ size_t Heap::PerformGarbageCollection(
collection_barrier_->StopTimeToCollectionTimer();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // We don't really perform a GC here but need this scope for the nested
- // SafepointScope inside Verify().
- AllowGarbageCollection allow_gc;
- Verify();
- }
-#endif
-
tracer()->StartInSafepoint();
GarbageCollectionPrologueInSafepoint();
@@ -2250,6 +2332,10 @@ size_t Heap::PerformGarbageCollection(
local_embedder_heap_tracer()->TraceEpilogue();
}
+ if (collector == GarbageCollector::SCAVENGER && cpp_heap()) {
+ CppHeap::From(cpp_heap())->RunMinorGC();
+ }
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// We don't really perform a GC here but need this scope for the nested
@@ -2257,7 +2343,7 @@ size_t Heap::PerformGarbageCollection(
AllowGarbageCollection allow_gc;
Verify();
}
-#endif
+#endif // VERIFY_HEAP
RecomputeLimits(collector);
@@ -2288,20 +2374,36 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
- const char* collector_reason = nullptr;
- GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
-
- tracer()->Start(collector, gc_reason, collector_reason);
-
+ tracer()->StartObservablePause();
+ DCHECK(!incremental_marking_->WasActivated());
DCHECK_NOT_NULL(isolate()->global_safepoint());
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
client->heap()->FreeSharedLinearAllocationAreas();
+
+ // As long as we need to iterate the client heap to find references into the
+ // shared heap, all client heaps need to be iterable.
+ client->heap()->MakeHeapIterable();
+
+ if (FLAG_concurrent_marking) {
+ client->heap()->concurrent_marking()->Pause();
+ }
});
- PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
+ const GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
+ PerformGarbageCollection(collector, gc_reason, nullptr);
+
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ if (FLAG_concurrent_marking &&
+ client->heap()->incremental_marking()->IsMarking()) {
+ client->heap()->concurrent_marking()->RescheduleJobIfNeeded();
+ }
+ });
- tracer()->Stop(collector);
+ tracer()->StopAtomicPause();
+ tracer()->StopObservablePause();
+ tracer()->UpdateStatistics(collector);
+ tracer()->StopCycleIfNeeded();
}
void Heap::CompleteSweepingYoung(GarbageCollector collector) {
@@ -2318,8 +2420,15 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
UNREACHABLE();
}
- TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
- array_buffer_sweeper()->EnsureFinished();
+ {
+ TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
+ array_buffer_sweeper()->EnsureFinished();
+ }
+
+ // If sweeping is in progress and there are no sweeper tasks running, finish
+ // the sweeping here, to avoid having to pause and resume during the young
+ // generation GC.
+ mark_compact_collector()->FinishSweepingIfOutOfWork();
}
void Heap::EnsureSweepingCompleted(HeapObject object) {
@@ -2338,16 +2447,6 @@ void Heap::EnsureSweepingCompleted(HeapObject object) {
mark_compact_collector()->EnsurePageIsSwept(page);
}
-void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- epoch_young_ = next_epoch();
- } else if (incremental_marking()->IsStopped()) {
- epoch_full_ = next_epoch();
- }
-}
-
-void Heap::UpdateEpochFull() { epoch_full_ = next_epoch(); }
-
void Heap::RecomputeLimits(GarbageCollector collector) {
if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
(HasLowYoungGenerationAllocationRate() &&
@@ -2448,9 +2547,8 @@ void Heap::MarkCompact() {
SetGCState(MARK_COMPACT);
- LOG(isolate_, ResourceEvent("markcompact", "begin"));
-
- CodeSpaceMemoryModificationScope code_modifcation(this);
+ PROFILE(isolate_, CodeMovingGCEvent());
+ CodeSpaceMemoryModificationScope code_modification(this);
// Disable soft allocation limits in the shared heap, if one exists, as
// promotions into the shared heap should always succeed.
@@ -2470,8 +2568,6 @@ void Heap::MarkCompact() {
mark_compact_collector()->CollectGarbage();
- LOG(isolate_, ResourceEvent("markcompact", "end"));
-
MarkCompactEpilogue();
if (FLAG_allocation_site_pretenuring) {
@@ -2488,13 +2584,16 @@ void Heap::MarkCompact() {
}
void Heap::MinorMarkCompact() {
-#ifdef ENABLE_MINOR_MC
DCHECK(FLAG_minor_mc);
DCHECK(new_space());
+ if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] MinorMarkCompact during marking.\n");
+ }
+
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
- LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(this);
@@ -2507,13 +2606,9 @@ void Heap::MinorMarkCompact() {
incremental_marking());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
- minor_mark_compact_collector()->CollectGarbage();
+ minor_mark_compact_collector_->CollectGarbage();
- LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
-#else
- UNREACHABLE();
-#endif // ENABLE_MINOR_MC
}
void Heap::MarkCompactEpilogue() {
@@ -2560,9 +2655,6 @@ void Heap::EvacuateYoungGeneration() {
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- SetGCState(SCAVENGE);
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
// Move pages from new->old generation.
PageRange range(new_space()->first_allocatable_address(), new_space()->top());
for (auto it = range.begin(); it != range.end();) {
@@ -2596,14 +2688,16 @@ void Heap::EvacuateYoungGeneration() {
IncrementYoungSurvivorsCounter(promoted);
IncrementPromotedObjectsSize(promoted);
IncrementSemiSpaceCopiedObjectSize(0);
-
- LOG(isolate_, ResourceEvent("scavenge", "end"));
- SetGCState(NOT_IN_GC);
}
void Heap::Scavenge() {
DCHECK_NOT_NULL(new_space());
+ if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Scavenge during marking.\n");
+ }
+
if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
@@ -2648,12 +2742,8 @@ void Heap::Scavenge() {
new_lo_space()->ResetPendingObject();
// Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
scavenger_collector_->CollectGarbage();
- LOG(isolate_, ResourceEvent("scavenge", "end"));
-
SetGCState(NOT_IN_GC);
}
@@ -3345,22 +3435,6 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(source);
- UpdateAllocationsHash(target);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- }
}
FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
@@ -3551,7 +3625,8 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
}
void Heap::MakeHeapIterable() {
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
@@ -3590,7 +3665,7 @@ void Heap::FreeSharedLinearAllocationAreas() {
void Heap::FreeMainThreadSharedLinearAllocationAreas() {
if (!isolate()->shared_isolate()) return;
shared_old_allocator_->FreeLinearAllocationArea();
- shared_map_allocator_->FreeLinearAllocationArea();
+ if (shared_map_allocator_) shared_map_allocator_->FreeLinearAllocationArea();
main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
@@ -3805,7 +3880,9 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
NestedTimedHistogramScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarkingFinalize", "epoch", epoch_full());
+ TRACE_EVENT1(
+ "v8", "V8.GCIncrementalMarkingFinalize", "epoch",
+ tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_FINALIZE));
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
ThreadKind::kMain);
@@ -3887,6 +3964,9 @@ class SlotCollectingVisitor final : public ObjectVisitor {
};
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
+ // Object layout changes are currently not supported on background threads.
+ DCHECK_NULL(LocalHeap::Current());
+
if (!FLAG_verify_heap) return;
PtrComprCageBase cage_base(isolate());
@@ -3896,54 +3976,60 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
- if (object.IsJSObject(cage_base)) {
- // Without double unboxing all in-object fields of a JSObject are tagged.
- return;
- }
- if (object.IsString(cage_base) &&
- (new_map == ReadOnlyRoots(this).thin_string_map() ||
- new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
- // When transitioning a string to ThinString,
- // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
- // tagged fields are introduced.
- return;
- }
- if (FLAG_shared_string_table && object.IsString(cage_base) &&
- InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
- // In-place internalization does not change a string's fields.
- //
- // When sharing the string table, the setting and re-setting of maps below
- // can race when there are parallel internalization operations, causing
- // DCHECKs to fail.
- return;
- }
- // Check that the set of slots before and after the transition match.
- SlotCollectingVisitor old_visitor;
- object.IterateFast(cage_base, &old_visitor);
- MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
- // Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
- SlotCollectingVisitor new_visitor;
- object.IterateFast(cage_base, &new_visitor);
- // Restore the old map.
- object.set_map_word(old_map_word, kRelaxedStore);
- DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
- for (int i = 0; i < new_visitor.number_of_slots(); i++) {
- DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
- }
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK_EQ(new_visitor.number_of_code_slots(),
- old_visitor.number_of_code_slots());
- for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
- DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
- }
-#endif // V8_EXTERNAL_CODE_SPACE
+ VerifySafeMapTransition(object, new_map);
} else {
DCHECK_EQ(pending_layout_change_object_, object);
pending_layout_change_object_ = HeapObject();
}
}
-#endif
+
+void Heap::VerifySafeMapTransition(HeapObject object, Map new_map) {
+ PtrComprCageBase cage_base(isolate());
+
+ if (object.IsJSObject(cage_base)) {
+ // Without double unboxing all in-object fields of a JSObject are tagged.
+ return;
+ }
+ if (object.IsString(cage_base) &&
+ (new_map == ReadOnlyRoots(this).thin_string_map() ||
+ new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
+ // When transitioning a string to ThinString,
+ // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
+ // tagged fields are introduced.
+ return;
+ }
+ if (FLAG_shared_string_table && object.IsString(cage_base) &&
+ InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
+ // In-place internalization does not change a string's fields.
+ //
+ // When sharing the string table, the setting and re-setting of maps below
+ // can race when there are parallel internalization operations, causing
+ // DCHECKs to fail.
+ return;
+ }
+ // Check that the set of slots before and after the transition match.
+ SlotCollectingVisitor old_visitor;
+ object.IterateFast(cage_base, &old_visitor);
+ MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
+ // Temporarily set the new map to iterate new slots.
+ object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
+ SlotCollectingVisitor new_visitor;
+ object.IterateFast(cage_base, &new_visitor);
+ // Restore the old map.
+ object.set_map_word(old_map_word, kRelaxedStore);
+ DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
+ for (int i = 0; i < new_visitor.number_of_slots(); i++) {
+ DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
+ }
+#ifdef V8_EXTERNAL_CODE_SPACE
+ DCHECK_EQ(new_visitor.number_of_code_slots(),
+ old_visitor.number_of_code_slots());
+ for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
+ DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
+ }
+#endif // V8_EXTERNAL_CODE_SPACE
+}
+#endif // VERIFY_HEAP
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
@@ -4338,8 +4424,8 @@ bool Heap::Contains(HeapObject value) const {
return HasBeenSetUp() &&
((new_space_ && new_space_->ToSpaceContains(value)) ||
old_space_->Contains(value) || code_space_->Contains(value) ||
- map_space_->Contains(value) || lo_space_->Contains(value) ||
- code_lo_space_->Contains(value) ||
+ (map_space_ && map_space_->Contains(value)) ||
+ lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
(new_lo_space_ && new_lo_space_->Contains(value)));
}
@@ -4358,7 +4444,7 @@ bool Heap::ContainsCode(HeapObject value) const {
bool Heap::SharedHeapContains(HeapObject value) const {
if (shared_old_space_)
return shared_old_space_->Contains(value) ||
- shared_map_space_->Contains(value);
+ (shared_map_space_ && shared_map_space_->Contains(value));
return false;
}
@@ -4366,6 +4452,7 @@ bool Heap::ShouldBeInSharedOldSpace(HeapObject value) {
if (isolate()->OwnsStringTable()) return false;
if (ReadOnlyHeap::Contains(value)) return false;
if (Heap::InYoungGeneration(value)) return false;
+ if (value.IsExternalString()) return false;
if (value.IsString()) {
return value.IsInternalizedString() ||
String::IsInPlaceInternalizable(String::cast(value));
@@ -4389,6 +4476,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
case CODE_SPACE:
return code_space_->Contains(value);
case MAP_SPACE:
+ DCHECK(map_space_);
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
@@ -4418,6 +4506,7 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
case CODE_SPACE:
return code_space_->ContainsSlow(addr);
case MAP_SPACE:
+ DCHECK(map_space_);
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
@@ -4479,7 +4568,9 @@ void Heap::Verify() {
if (new_space_) new_space_->Verify(isolate());
old_space_->Verify(isolate(), &visitor);
- map_space_->Verify(isolate(), &visitor);
+ if (map_space_) {
+ map_space_->Verify(isolate(), &visitor);
+ }
VerifyPointersVisitor no_dirty_regions_visitor(this);
code_space_->Verify(isolate(), &no_dirty_regions_visitor);
@@ -4488,6 +4579,10 @@ void Heap::Verify() {
code_lo_space_->Verify(isolate());
if (new_lo_space_) new_lo_space_->Verify(isolate());
isolate()->string_table()->VerifyIfOwnedBy(isolate());
+
+#if DEBUG
+ VerifyCommittedPhysicalMemory();
+#endif // DEBUG
}
void Heap::VerifyReadOnlyHeap() {
@@ -4534,25 +4629,25 @@ class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(
- InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(InTypedSet(SlotType::kCodeEntry, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolCodeEntry,
+ rinfo->constant_pool_entry_address())));
}
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object(cage_base());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(
- InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(DATA_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(COMPRESSED_OBJECT_SLOT,
- rinfo->constant_pool_entry_address())) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(InTypedSet(SlotType::kEmbeddedObjectFull, rinfo->pc()) ||
+ InTypedSet(SlotType::kEmbeddedObjectCompressed, rinfo->pc()) ||
+ InTypedSet(SlotType::kEmbeddedObjectData, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolEmbeddedObjectCompressed,
+ rinfo->constant_pool_entry_address())) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolEmbeddedObjectFull,
+ rinfo->constant_pool_entry_address())));
}
}
@@ -4587,9 +4682,7 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot target) override {
VisitPointer(host, target);
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) return VisitPointer(host, target);
-#endif
+ if (FLAG_minor_mc) return;
// Keys are handled separately and should never appear in this set.
CHECK(!InUntypedSet(key));
Object k = *key;
@@ -4681,7 +4774,15 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
-#endif
+
+void Heap::VerifyCommittedPhysicalMemory() {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->VerifyCommittedPhysicalMemory();
+ }
+}
+#endif // DEBUG
void Heap::ZapFromSpace() {
if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
@@ -4979,10 +5080,6 @@ void Heap::IterateBuiltins(RootVisitor* v) {
++builtin) {
const char* name = Builtins::name(builtin);
v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- v->VisitRootPointer(Root::kBuiltins, name,
- builtins->builtin_code_data_container_slot(builtin));
- }
}
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
@@ -5207,8 +5304,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
*stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->SizeOfObjects();
- *stats->map_space_capacity = map_space_->Capacity();
+ *stats->map_space_size = map_space_ ? map_space_->SizeOfObjects() : 0;
+ *stats->map_space_capacity = map_space_ ? map_space_->Capacity() : 0;
*stats->lo_space_size = lo_space_->Size();
*stats->code_lo_space_size = code_lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
@@ -5434,8 +5531,10 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
max_marking_limit_reached_ =
std::max<double>(max_marking_limit_reached_, current_percent);
}
- } else if (current_percent >= stress_marking_percentage_) {
- stress_marking_percentage_ = NextStressMarkingLimit();
+ } else if (current_percent >=
+ stress_marking_percentage_.load(std::memory_order_relaxed)) {
+ stress_marking_percentage_.store(NextStressMarkingLimit(),
+ std::memory_order_relaxed);
return IncrementalMarkingLimit::kHardLimit;
}
}
@@ -5494,95 +5593,32 @@ bool Heap::ShouldStressCompaction() const {
}
void Heap::EnableInlineAllocation() {
- if (!inline_allocation_disabled_) return;
- inline_allocation_disabled_ = false;
-
// Update inline allocation limit for new space.
if (new_space()) {
- new_space()->AdvanceAllocationObservers();
- new_space()->UpdateInlineAllocationLimit(0);
+ new_space()->EnableInlineAllocation();
+ }
+ // Update inline allocation limit for old spaces.
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ base::MutexGuard guard(space->mutex());
+ space->EnableInlineAllocation();
}
}
void Heap::DisableInlineAllocation() {
- if (inline_allocation_disabled_) return;
- inline_allocation_disabled_ = true;
-
// Update inline allocation limit for new space.
if (new_space()) {
- new_space()->UpdateInlineAllocationLimit(0);
+ new_space()->DisableInlineAllocation();
}
-
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
CodePageCollectionMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
base::MutexGuard guard(space->mutex());
- space->FreeLinearAllocationArea();
- }
-}
-
-HeapObject Heap::AllocateRawWithLightRetrySlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment) {
- HeapObject result;
- AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
- if (alloc.To(&result)) {
- // DCHECK that the successful allocation is not "exception". The one
- // exception to this is when allocating the "exception" object itself, in
- // which case this must be an ROSpace allocation and the exception object
- // in the roots has to be unset.
- DCHECK((CanAllocateInReadOnlySpace() &&
- allocation == AllocationType::kReadOnly &&
- ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
- result != ReadOnlyRoots(this).exception());
- return result;
- }
- // Two GCs before panicking. In newspace will almost always succeed.
- for (int i = 0; i < 2; i++) {
- if (IsSharedAllocationType(allocation)) {
- CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
- } else {
- CollectGarbage(alloc.RetrySpace(),
- GarbageCollectionReason::kAllocationFailure);
- }
- alloc = AllocateRaw(size, allocation, origin, alignment);
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- }
- return HeapObject();
-}
-
-HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment) {
- AllocationResult alloc;
- HeapObject result =
- AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
- if (!result.is_null()) return result;
-
- isolate()->counters()->gc_last_resort_from_handles()->Increment();
- if (IsSharedAllocationType(allocation)) {
- CollectSharedGarbage(GarbageCollectionReason::kLastResort);
-
- AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
- alloc = AllocateRaw(size, allocation, origin, alignment);
- } else {
- CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
-
- AlwaysAllocateScope scope(this);
- alloc = AllocateRaw(size, allocation, origin, alignment);
- }
-
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
+ space->DisableInlineAllocation();
}
- // TODO(1181417): Fix this.
- FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
}
void Heap::SetUp(LocalHeap* main_thread_local_heap) {
@@ -5590,8 +5626,8 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
main_thread_local_heap_ = main_thread_local_heap;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- allocation_timeout_ = NextAllocationTimeout();
-#endif
+ heap_allocator_.UpdateAllocationTimeout();
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
tp_heap_ = third_party_heap::Heap::New(isolate());
@@ -5650,6 +5686,7 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
+ minor_mark_compact_collector_.reset(new MinorMarkCompactCollector(this));
incremental_marking_.reset(
new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
@@ -5688,6 +5725,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
read_only_space_ == ro_heap->read_only_space());
space_[RO_SPACE] = nullptr;
read_only_space_ = ro_heap->read_only_space();
+ heap_allocator_.SetReadOnlySpace(read_only_space_);
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
@@ -5698,6 +5736,7 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
}
read_only_space_ = space;
+ heap_allocator_.SetReadOnlySpace(read_only_space_);
}
class StressConcurrentAllocationObserver : public AllocationObserver {
@@ -5729,15 +5768,15 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
space_[NEW_SPACE] = new_space_ = new NewSpace(
this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
+ space_[NEW_LO_SPACE] = new_lo_space_ =
+ new NewLargeObjectSpace(this, NewSpaceCapacity());
}
space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
- space_[MAP_SPACE] = map_space_ = new MapSpace(this);
- space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
- if (has_young_gen) {
- space_[NEW_LO_SPACE] = new_lo_space_ =
- new NewLargeObjectSpace(this, NewSpaceCapacity());
+ if (FLAG_use_map_space) {
+ space_[MAP_SPACE] = map_space_ = new MapSpace(this);
}
+ space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5746,11 +5785,6 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
}
tracer_.reset(new GCTracer(this));
-#ifdef ENABLE_MINOR_MC
- minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
-#else
- minor_mark_compact_collector_ = nullptr;
-#endif // ENABLE_MINOR_MC
array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
memory_measurement_.reset(new MemoryMeasurement(isolate()));
@@ -5762,16 +5796,18 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
embedder_roots_handler_ =
&local_embedder_heap_tracer()->default_embedder_roots_handler();
+ if (Heap::AllocationTrackerForDebugging::IsNeeded()) {
+ allocation_tracker_for_debugging_ =
+ std::make_unique<Heap::AllocationTrackerForDebugging>(this);
+ }
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
mark_compact_collector()->SetUp();
-#ifdef ENABLE_MINOR_MC
- if (minor_mark_compact_collector() != nullptr) {
- minor_mark_compact_collector()->SetUp();
+ if (minor_mark_compact_collector_) {
+ minor_mark_compact_collector_->SetUp();
}
-#endif // ENABLE_MINOR_MC
if (new_space()) {
scavenge_job_.reset(new ScavengeJob());
@@ -5789,7 +5825,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
- if (FLAG_stress_scavenge > 0 && new_space()) {
+ if (IsStressingScavenge()) {
stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -5797,16 +5833,21 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
write_protect_code_memory_ = FLAG_write_protect_code_memory;
if (isolate()->shared_isolate()) {
- shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
+ Heap* shared_heap = isolate()->shared_isolate()->heap();
+
+ shared_old_space_ = shared_heap->old_space();
shared_old_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
- shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
- shared_map_allocator_.reset(
- new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+ if (shared_heap->map_space()) {
+ shared_map_space_ = shared_heap->map_space();
+ shared_map_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+ }
}
main_thread_local_heap()->SetUpMainThread();
+ heap_allocator_.Setup();
}
void Heap::InitializeHashSeed() {
@@ -5822,22 +5863,9 @@ void Heap::InitializeHashSeed() {
0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
-int Heap::NextAllocationTimeout(int current_timeout) {
- if (FLAG_random_gc_interval > 0) {
- // If current timeout hasn't reached 0 the GC was caused by something
- // different than --stress-atomic-gc flag and we don't update the timeout.
- if (current_timeout <= 0) {
- return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
- } else {
- return current_timeout;
- }
- }
- return FLAG_gc_interval;
-}
-
-void Heap::PrintAllocationsHash() {
- uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
- PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
+// static
+void Heap::InitializeOncePerProcess() {
+ MemoryAllocator::InitializeOncePerProcess();
}
void Heap::PrintMaxMarkingLimitReached() {
@@ -6015,15 +6043,11 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
- PrintAllocationsHash();
- }
-
if (FLAG_fuzzer_gc_analysis) {
if (FLAG_stress_marking > 0) {
PrintMaxMarkingLimitReached();
}
- if (FLAG_stress_scavenge > 0) {
+ if (IsStressingScavenge()) {
PrintMaxNewSpaceSizeReached();
}
}
@@ -6048,7 +6072,7 @@ void Heap::TearDown() {
delete stress_marking_observer_;
stress_marking_observer_ = nullptr;
}
- if (FLAG_stress_scavenge > 0 && new_space()) {
+ if (IsStressingScavenge()) {
new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
delete stress_scavenge_observer_;
stress_scavenge_observer_ = nullptr;
@@ -6059,13 +6083,10 @@ void Heap::TearDown() {
mark_compact_collector_.reset();
}
-#ifdef ENABLE_MINOR_MC
- if (minor_mark_compact_collector_ != nullptr) {
+ if (minor_mark_compact_collector_) {
minor_mark_compact_collector_->TearDown();
- delete minor_mark_compact_collector_;
- minor_mark_compact_collector_ = nullptr;
+ minor_mark_compact_collector_.reset();
}
-#endif // ENABLE_MINOR_MC
scavenger_collector_.reset();
array_buffer_sweeper_.reset();
@@ -6073,8 +6094,8 @@ void Heap::TearDown() {
concurrent_marking_.reset();
gc_idle_time_handler_.reset();
-
memory_measurement_.reset();
+ allocation_tracker_for_debugging_.reset();
if (memory_reducer_ != nullptr) {
memory_reducer_->TearDown();
@@ -6235,7 +6256,7 @@ void Heap::CompactWeakArrayLists() {
}
void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
- if (map->is_in_retained_map_list()) {
+ if (map->is_in_retained_map_list() || map->InSharedWritableHeap()) {
return;
}
@@ -6386,20 +6407,9 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
}
PagedSpace* PagedSpaceIterator::Next() {
- int space = counter_++;
- switch (space) {
- case RO_SPACE:
- UNREACHABLE();
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- default:
- DCHECK_GT(space, LAST_GROWABLE_PAGED_SPACE);
- return nullptr;
- }
+ DCHECK_GE(counter_, FIRST_GROWABLE_PAGED_SPACE);
+ if (counter_ > LAST_GROWABLE_PAGED_SPACE) return nullptr;
+ return heap_->paged_space(counter_++);
}
SpaceIterator::SpaceIterator(Heap* heap)
@@ -6419,7 +6429,7 @@ bool SpaceIterator::HasNext() {
}
Space* SpaceIterator::Next() {
- DCHECK(HasNext());
+ DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
Space* space = heap_->space(current_space_++);
DCHECK_NOT_NULL(space);
return space;
@@ -6577,6 +6587,8 @@ HeapObjectIterator::HeapObjectIterator(
default:
break;
}
+ // By not calling |space_iterator_->HasNext()|, we assume that the old
+ // space is first returned and that it has been set up.
object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
}
@@ -7069,6 +7081,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
case CODE_SPACE:
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
+ return dst == MAP_SPACE && type == MAP_TYPE;
case LO_SPACE:
case CODE_LO_SPACE:
case NEW_LO_SPACE:
@@ -7133,7 +7146,7 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
Builtin maybe_builtin =
OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
if (Builtins::IsBuiltinId(maybe_builtin)) {
- return isolate()->builtins()->code(maybe_builtin);
+ return FromCodeT(isolate()->builtins()->code(maybe_builtin));
}
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
@@ -7178,11 +7191,20 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
}
}
// TODO(1241665): Remove once the issue is solved.
+ std::shared_ptr<CodeRange> code_range = CodeRange::GetProcessWideCodeRange();
+ void* code_range_embedded_blob_code_copy =
+ code_range ? code_range->embedded_blob_code_copy() : nullptr;
+ Address flags = (isolate()->is_short_builtin_calls_enabled() ? 1 : 0) |
+ (code_range ? 2 : 0) |
+ static_cast<Address>(max_old_generation_size());
+
isolate()->PushParamsAndDie(
reinterpret_cast<void*>(inner_pointer),
const_cast<uint8_t*>(isolate()->embedded_blob_code()),
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
- reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()));
+ code_range_embedded_blob_code_copy,
+ reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()),
+ reinterpret_cast<void*>(flags));
UNREACHABLE();
}
@@ -7334,25 +7356,11 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
DCHECK(InYoungGeneration(object));
- Page* source_page = Page::FromHeapObject(host);
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- slot_type = COMPRESSED_OBJECT_SLOT;
- } else {
- DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = FULL_OBJECT_SLOT;
- }
- }
- uintptr_t offset = addr - source_page->address();
- DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
- static_cast<uint32_t>(offset));
+ const MarkCompactCollector::RecordRelocSlotInfo info =
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, object);
+
+ RememberedSet<OLD_TO_NEW>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
bool Heap::PageFlagsAreConsistent(HeapObject object) {
@@ -7390,12 +7398,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-void Heap::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
- stack_state);
-}
-
#ifdef DEBUG
void Heap::IncrementObjectCounters() {
isolate_->counters()->objs_since_last_full()->Increment();
@@ -7403,6 +7405,10 @@ void Heap::IncrementObjectCounters() {
}
#endif // DEBUG
+bool Heap::IsStressingScavenge() {
+ return FLAG_stress_scavenge > 0 && new_space();
+}
+
// StrongRootBlocks are allocated as a block of addresses, prefixed with a
// StrongRootsEntry pointer:
//
@@ -7439,5 +7445,46 @@ void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
base::Free(block);
}
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+void Heap::set_allocation_timeout(int allocation_timeout) {
+ heap_allocator_.SetAllocationTimeout(allocation_timeout);
+}
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+EmbedderStackStateScope::EmbedderStackStateScope(
+ Heap* heap, Origin origin,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(heap->local_embedder_heap_tracer()),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ if (origin == kImplicitThroughTask && heap->overriden_stack_state()) {
+ stack_state = *heap->overriden_stack_state();
+ }
+
+ local_tracer_->embedder_stack_state_ = stack_state;
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
+}
+
+// static
+EmbedderStackStateScope EmbedderStackStateScope::ExplicitScopeForTesting(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ return EmbedderStackStateScope(local_tracer, stack_state);
+}
+
+EmbedderStackStateScope::EmbedderStackStateScope(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(local_tracer),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ local_tracer_->embedder_stack_state_ = stack_state;
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
+}
+
+EmbedderStackStateScope::~EmbedderStackStateScope() {
+ local_tracer_->embedder_stack_state_ = old_stack_state_;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ef8d912bfb..29aa5aad76 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -7,7 +7,6 @@
#include <atomic>
#include <cmath>
-#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -27,6 +26,8 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
+#include "src/heap/allocation-result.h"
+#include "src/heap/heap-allocator.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
@@ -126,16 +127,10 @@ enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
-enum class AllocationOrigin {
- kGeneratedCode = 0,
- kRuntime = 1,
- kGC = 2,
- kFirstAllocationOrigin = kGeneratedCode,
- kLastAllocationOrigin = kGC,
- kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
-};
-
-enum class GarbageCollectionReason {
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused. If you add new items here, update
+// src/tools/metrics/histograms/enums.xml in chromium.
+enum class GarbageCollectionReason : int {
kUnknown = 0,
kAllocationFailure = 1,
kAllocationLimit = 2,
@@ -162,11 +157,14 @@ enum class GarbageCollectionReason {
kGlobalAllocationLimit = 23,
kMeasureMemory = 24,
kBackgroundAllocationFailure = 25,
- // If you add new items here, then update the incremental_marking_reason,
- // mark_compact_reason, and scavenge_reason counters in counters.h.
- // Also update src/tools/metrics/histograms/enums.xml in chromium.
+
+ kLastReason = kBackgroundAllocationFailure,
};
+static_assert(kGarbageCollectionReasonMaxValue ==
+ static_cast<int>(GarbageCollectionReason::kLastReason),
+ "The value of kGarbageCollectionReasonMaxValue is inconsistent.");
+
enum class YoungGenerationHandling {
kRegularScavenge = 0,
kFastPromotionDuringScavenge = 1,
@@ -208,44 +206,6 @@ class StrongRootsEntry final {
friend class Heap;
};
-class AllocationResult {
- public:
- static inline AllocationResult Retry(AllocationSpace space) {
- return AllocationResult(space);
- }
-
- // Implicit constructor from Object.
- AllocationResult(Object object) // NOLINT
- : object_(object) {
- // AllocationResults can't return Smis, which are used to represent
- // failure and the space to retry in.
- CHECK(!object.IsSmi());
- }
-
- AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
- inline bool IsRetry() { return object_.IsSmi(); }
- inline HeapObject ToObjectChecked();
- inline HeapObject ToObject();
- inline Address ToAddress();
- inline AllocationSpace RetrySpace();
-
- template <typename T>
- bool To(T* obj) {
- if (IsRetry()) return false;
- *obj = T::cast(object_);
- return true;
- }
-
- private:
- explicit AllocationResult(AllocationSpace space)
- : object_(Smi::FromInt(static_cast<int>(space))) {}
-
- Object object_;
-};
-
-STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
-
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
@@ -265,8 +225,6 @@ using EphemeronRememberedSet =
std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
Object::Hasher>;
-using CollectionEpoch = uint32_t;
-
class Heap {
public:
// Stores ephemeron entries where the EphemeronHashTable is in old-space,
@@ -480,12 +438,8 @@ class Heap {
}
static inline GarbageCollector YoungGenerationCollector() {
-#if ENABLE_MINOR_MC
return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
: GarbageCollector::SCAVENGER;
-#else
- return GarbageCollector::SCAVENGER;
-#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
@@ -545,8 +499,6 @@ class Heap {
void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
- void UpdateCurrentEpoch(GarbageCollector collector);
-
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
@@ -647,9 +599,6 @@ class Heap {
void CheckHandleCount();
- // Number of "runtime allocations" done so far.
- uint32_t allocations_count() { return allocations_count_; }
-
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -800,16 +749,9 @@ class Heap {
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
- // This event is triggered after successful allocation of a new object made
- // by runtime. Allocations of target space for object evacuation do not
- // trigger the event. In order to track ALL allocations one must turn off
- // FLAG_inline_new.
- inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
-
// This event is triggered after object is moved to a new place.
void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
- inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
// We can only invoke Safepoint() on the main thread local heap after
@@ -866,6 +808,9 @@ class Heap {
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
+ // Invoked once for the process from V8::Initialize.
+ static void InitializeOncePerProcess();
+
// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
bool CreateHeapObjects();
@@ -893,6 +838,7 @@ class Heap {
OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
+ inline PagedSpace* space_for_maps();
OldLargeObjectSpace* lo_space() { return lo_space_; }
CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
@@ -912,6 +858,8 @@ class Heap {
return memory_allocator_.get();
}
+ inline ConcurrentAllocator* concurrent_allocator_for_maps();
+
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
@@ -919,17 +867,22 @@ class Heap {
}
MinorMarkCompactCollector* minor_mark_compact_collector() {
- return minor_mark_compact_collector_;
+ return minor_mark_compact_collector_.get();
}
ArrayBufferSweeper* array_buffer_sweeper() {
return array_buffer_sweeper_.get();
}
+ // The potentially overreserved address space region reserved by the code
+ // range if it exists or empty region otherwise.
const base::AddressRegion& code_region();
CodeRange* code_range() { return code_range_.get(); }
+ // The base of the code range if it exists or null address.
+ inline Address code_range_base();
+
LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
Heap* AsHeap() { return this; }
@@ -1001,9 +954,6 @@ class Heap {
// Inline allocation. ========================================================
// ===========================================================================
- // Indicates whether inline bump-pointer allocation has been disabled.
- bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
// Switch whether inline bump-pointer allocation should be used.
V8_EXPORT_PRIVATE void EnableInlineAllocation();
V8_EXPORT_PRIVATE void DisableInlineAllocation();
@@ -1165,6 +1115,9 @@ class Heap {
// - or it was communicated to GC using NotifyObjectLayoutChange.
V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
Map new_map);
+ // Checks that this is a safe map transition.
+ V8_EXPORT_PRIVATE void VerifySafeMapTransition(HeapObject object,
+ Map new_map);
#endif
// ===========================================================================
@@ -1196,8 +1149,6 @@ class Heap {
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
- V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state);
EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
@@ -1626,23 +1577,24 @@ class Heap {
#endif
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-#endif
+ void V8_EXPORT_PRIVATE set_allocation_timeout(int allocation_timeout);
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
+ void VerifyCommittedPhysicalMemory();
void Print();
void PrintHandles();
// Report code statistics.
void ReportCodeStatistics(const char* title);
-#endif
+#endif // DEBUG
void* GetRandomMmapAddr() {
void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
// directory entries [PDE] created from mmap or mach_vm_allocate, even
// after the region is destroyed. Using a virtual address space that is
@@ -1652,7 +1604,7 @@ class Heap {
// space. See crbug.com/700928.
uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
#endif // V8_TARGET_ARCH_X64
return result;
}
@@ -1667,16 +1619,13 @@ class Heap {
static Isolate* GetIsolateFromWritableObject(HeapObject object);
- CollectionEpoch epoch_young() { return epoch_young_; }
- CollectionEpoch epoch_full() { return epoch_full_; }
-
- void UpdateEpochFull();
-
// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects.
void MakeHeapIterable();
private:
+ class AllocationTrackerForDebugging;
+
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1811,7 +1760,8 @@ class Heap {
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
- GarbageCollector collector,
+ GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs garbage collection in the shared heap.
@@ -1887,11 +1837,6 @@ class Heap {
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
- int NextAllocationTimeout(int current_timeout = 0);
- inline void UpdateAllocationsHash(HeapObject object);
- inline void UpdateAllocationsHash(uint32_t value);
- void PrintAllocationsHash();
-
void PrintMaxMarkingLimitReached();
void PrintMaxNewSpaceSizeReached();
@@ -1920,15 +1865,6 @@ class Heap {
void InvokeIncrementalMarkingPrologueCallbacks();
void InvokeIncrementalMarkingEpilogueCallbacks();
- // Returns the timer used for a given GC type.
- // - GCScavenger: young generation GC
- // - GCCompactor: full GC
- // - GCFinalzeMC: finalization of incremental full GC
- // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
- // memory reduction
- TimedHistogram* GCTypeTimer(GarbageCollector collector);
- TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
-
// ===========================================================================
// Pretenuring. ==============================================================
// ===========================================================================
@@ -1947,7 +1883,8 @@ class Heap {
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
+ void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags);
void GarbageCollectionPrologueInSafepoint();
void GarbageCollectionEpilogue(GarbageCollector collector);
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
@@ -2087,6 +2024,8 @@ class Heap {
// Allocation methods. =======================================================
// ===========================================================================
+ HeapAllocator* allocator() { return &heap_allocator_; }
+
// Allocates a JS Map in the heap.
V8_WARN_UNUSED_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
@@ -2097,19 +2036,19 @@ class Heap {
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationType allocation,
- AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kTaggedAligned);
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to allocate objects quickly (AllocationType::kYoung)
// otherwise it falls back to a slower path indicated by the mode.
enum AllocationRetryMode { kLightRetry, kRetryOrFail };
template <AllocationRetryMode mode>
- V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
- int size, AllocationType allocation,
- AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kTaggedAligned);
+ V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+ AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
// Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
@@ -2117,25 +2056,6 @@ class Heap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kTaggedAligned);
- // This method will try to perform an allocation of a given size of a given
- // AllocationType. If the allocation fails, a regular full garbage collection
- // is triggered and the allocation is retried. This is performed multiple
- // times. If after that retry procedure the allocation still fails nullptr is
- // returned.
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kTaggedAligned);
-
- // This method will try to perform an allocation of a given size of a given
- // AllocationType. If the allocation fails, a regular full garbage collection
- // is triggered and the allocation is retried. This is performed multiple
- // times. If after that retry procedure the allocation still fails a "hammer"
- // garbage collection is triggered which tries to significantly reduce memory.
- // If the allocation still fails after that a fatal error is thrown.
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kTaggedAligned);
-
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
AllocationType allocation);
@@ -2179,12 +2099,16 @@ class Heap {
return allocation_type_for_in_place_internalizable_strings_;
}
+ bool IsStressingScavenge();
+
ExternalMemoryAccounting external_memory_;
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
+ HeapAllocator heap_allocator_;
+
// These limits are initialized in Heap::ConfigureHeap based on the resource
// constraints and flags.
size_t code_range_size_ = 0;
@@ -2276,18 +2200,12 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
- // How many "runtime allocations" happened.
- uint32_t allocations_count_ = 0;
-
- // Running hash over allocations performed.
- uint32_t raw_allocations_hash_ = 0;
-
// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
- int stress_marking_percentage_ = 0;
+ std::atomic<int> stress_marking_percentage_{0};
- // Observer that causes more frequent checks for reached incremental marking
- // limit.
+ // Observer that causes more frequent checks for reached incremental
+ // marking limit.
AllocationObserver* stress_marking_observer_ = nullptr;
// Observer that can cause early scavenge start.
@@ -2321,10 +2239,6 @@ class Heap {
std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;
- // Indicates that inline bump-pointer allocation has been globally disabled
- // for all spaces. This is used to disable allocations in generated code.
- bool inline_allocation_disabled_ = false;
-
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
// {native_contexts_list_} is an Address instead of an Object to allow the use
@@ -2369,7 +2283,7 @@ class Heap {
std::unique_ptr<GCTracer> tracer_;
std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
- MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
+ std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferSweeper> array_buffer_sweeper_;
@@ -2386,6 +2300,8 @@ class Heap {
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
+ std::unique_ptr<AllocationTrackerForDebugging>
+ allocation_tracker_for_debugging_;
// This object controls virtual space reserved for code on the V8 heap. This
// is only valid for 64-bit architectures where kRequiresCodeRange.
@@ -2486,13 +2402,6 @@ class Heap {
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_ = 0;
-#endif // V8_ENABLE_ALLOCATION_TIMEOUT
-
std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
@@ -2509,11 +2418,6 @@ class Heap {
std::unique_ptr<third_party_heap::Heap> tp_heap_;
- // We need two epochs, since there can be scavenges during incremental
- // marking.
- CollectionEpoch epoch_young_ = 0;
- CollectionEpoch epoch_full_ = 0;
-
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ArrayBufferCollector;
@@ -2522,6 +2426,7 @@ class Heap {
friend class EvacuateVisitorBase;
friend class GCCallbacksScope;
friend class GCTracer;
+ friend class HeapAllocator;
friend class HeapObjectIterator;
friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
@@ -2611,6 +2516,7 @@ class V8_NODISCARD AlwaysAllocateScope {
friend class AlwaysAllocateScopeForTesting;
friend class Evacuator;
friend class Heap;
+ friend class HeapAllocator;
friend class Isolate;
explicit inline AlwaysAllocateScope(Heap* heap);
@@ -2653,9 +2559,8 @@ class V8_NODISCARD CodeSpaceMemoryModificationScope {
Heap* heap_;
};
-// The CodePageCollectionMemoryModificationScope can only be used by the main
-// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
-// already active.
+// The CodePageCollectionMemoryModificationScope can be used by any thread. It
+// will not be enabled if a CodeSpaceMemoryModificationScope is already active.
class V8_NODISCARD CodePageCollectionMemoryModificationScope {
public:
explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
@@ -2867,6 +2772,30 @@ struct StrongRootBlockAllocator::rebind {
};
};
+class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
+ public:
+ enum Origin {
+ kImplicitThroughTask,
+ kExplicitInvocation,
+ };
+
+ // Only used for testing where the Origin is always an explicit invocation.
+ static EmbedderStackStateScope ExplicitScopeForTesting(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ EmbedderStackStateScope(Heap* heap, Origin origin,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+ ~EmbedderStackStateScope();
+
+ private:
+ EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ LocalEmbedderHeapTracer* const local_tracer_;
+ const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 08a34991db..aad12728d7 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -56,6 +56,7 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+
const EmbedderHeapTracer::EmbedderStackState stack_state =
taskrunner->NonNestableTasksEnabled()
? EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers
@@ -97,8 +98,8 @@ void IncrementalMarkingJob::Task::RunInternal() {
TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
Heap* heap = isolate()->heap();
- EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
- stack_state_);
+ EmbedderStackStateScope scope(
+ heap, EmbedderStackStateScope::kImplicitThroughTask, stack_state_);
if (task_type_ == TaskType::kNormal) {
heap->tracer()->RecordTimeToIncrementalMarkingTask(
heap->MonotonicallyIncreasingTimeInMs() - job_->scheduled_time_);
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a653877f40..5d7dd4a1dd 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -6,6 +6,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
@@ -191,8 +192,9 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
static_cast<int>(gc_reason));
NestedTimedHistogramScope incremental_marking_scope(
counters->gc_incremental_marking_start());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarkingStart", "epoch",
- heap_->epoch_full());
+ TRACE_EVENT1(
+ "v8", "V8.GCIncrementalMarkingStart", "epoch",
+ heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_START));
TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START,
ThreadKind::kMain);
heap_->tracer()->NotifyIncrementalMarkingStart();
@@ -235,11 +237,22 @@ void IncrementalMarking::StartMarking() {
is_compacting_ = collector_->StartCompaction(
MarkCompactCollector::StartCompactionMode::kIncremental);
+
+ auto embedder_flags = heap_->flags_for_embedder_tracer();
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
+ // PrepareForTrace should be called before visitor initialization in
+ // StartMarking. It is only used with CppHeap.
+ heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ }
+
collector_->StartMarking();
SetState(MARKING);
MarkingBarrier::ActivateAll(heap(), is_compacting_);
+ GlobalHandles::EnableMarkingBarrier(heap()->isolate());
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
@@ -261,8 +274,7 @@ void IncrementalMarking::StartMarking() {
// marking (including write barriers) is fully set up.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue(
- heap_->flags_for_embedder_tracer());
+ heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
heap_->InvokeIncrementalMarkingEpilogueCallbacks();
@@ -273,7 +285,7 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(IsMarking());
black_allocation_ = true;
heap()->old_space()->MarkLinearAllocationAreaBlack();
- heap()->map_space()->MarkLinearAllocationAreaBlack();
+ if (heap()->map_space()) heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
@@ -287,7 +299,7 @@ void IncrementalMarking::StartBlackAllocation() {
void IncrementalMarking::PauseBlackAllocation() {
DCHECK(IsMarking());
heap()->old_space()->UnmarkLinearAllocationArea();
- heap()->map_space()->UnmarkLinearAllocationArea();
+ if (heap()->map_space()) heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
heap()->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
@@ -410,7 +422,7 @@ void IncrementalMarking::FinalizeIncrementally() {
// 2) Age and retain maps embedded in optimized code.
MarkRoots();
- // Map retaining is needed for perfromance, not correctness,
+ // Map retaining is needed for performance, not correctness,
// so we can do it only once at the beginning of the finalization.
RetainMaps();
@@ -426,83 +438,83 @@ void IncrementalMarking::FinalizeIncrementally() {
}
}
-void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
+void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
if (!IsMarking()) return;
Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
-#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
-#endif // ENABLE_MINOR_MC
collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
- collector_->marking_worklists()->Update(
- [
+ collector_->marking_worklists()->Update([
#ifdef DEBUG
- // this is referred inside DCHECK.
- this,
-#endif
-#ifdef ENABLE_MINOR_MC
- minor_marking_state,
+ // this is referred inside DCHECK.
+ this,
#endif
- cage_base, filler_map](HeapObject obj, HeapObject* out) -> bool {
- DCHECK(obj.IsHeapObject());
- // Only pointers to from space have to be updated.
- if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
- if (!map_word.IsForwardingAddress()) {
- // There may be objects on the marking deque that do not exist
- // anymore, e.g. left trimmed objects or objects from the root set
- // (frames). If these object are dead at scavenging time, their
- // marking deque entries will not point to forwarding addresses.
- // Hence, we can discard them.
- return false;
- }
- HeapObject dest = map_word.ToForwardingAddress();
- DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller());
- *out = dest;
- return true;
- } else if (Heap::InToPage(obj)) {
- // The object may be on a large page or on a page that was moved in
- // new space.
- DCHECK(Heap::IsLargeObject(obj) ||
- Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
-#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsWhite(obj)) {
- return false;
- }
-#endif // ENABLE_MINOR_MC
- // Either a large object or an object marked by the minor
- // mark-compactor.
- *out = obj;
- return true;
- } else {
- // The object may be on a page that was moved from new to old space.
- // Only applicable during minor MC garbage collections.
- if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
-#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsWhite(obj)) {
- return false;
- }
-#endif // ENABLE_MINOR_MC
- *out = obj;
- return true;
- }
- DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller(cage_base));
- // Skip one word filler objects that appear on the
- // stack when we perform in place array shift.
- if (obj.map(cage_base) != filler_map) {
- *out = obj;
- return true;
- }
+ minor_marking_state, cage_base,
+ filler_map](
+ HeapObject obj,
+ HeapObject* out) -> bool {
+ DCHECK(obj.IsHeapObject());
+ // Only pointers to from space have to be updated.
+ if (Heap::InFromPage(obj)) {
+ DCHECK_IMPLIES(FLAG_minor_mc_sweeping, minor_marking_state->IsWhite(obj));
+ MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
+ DCHECK_IMPLIES(FLAG_minor_mc_sweeping, !map_word.IsForwardingAddress());
+ if (!map_word.IsForwardingAddress()) {
+ // There may be objects on the marking deque that do not exist
+ // anymore, e.g. left trimmed objects or objects from the root set
+ // (frames). If these object are dead at scavenging time, their
+ // marking deque entries will not point to forwarding addresses.
+ // Hence, we can discard them.
+ return false;
+ }
+ HeapObject dest = map_word.ToForwardingAddress();
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
+ if (dest.InSharedHeap()) {
+ // Object got promoted into the shared heap. Drop it from the client
+ // heap marking worklist.
+ return false;
+ }
+ *out = dest;
+ return true;
+ } else if (Heap::InToPage(obj)) {
+ // The object may be on a large page or on a page that was moved in
+ // new space.
+ DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION));
+ if (minor_marking_state->IsWhite(obj)) {
+ return false;
+ }
+ // Either a large object or an object marked by the minor
+ // mark-compactor.
+ *out = obj;
+ return true;
+ } else {
+ // The object may be on a page that was moved from new to old space.
+ // Only applicable during minor MC garbage collections.
+ if (!Heap::IsLargeObject(obj) &&
+ Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ if (minor_marking_state->IsWhite(obj)) {
return false;
}
- });
+ *out = obj;
+ return true;
+ }
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj),
+ obj.IsFreeSpaceOrFiller(cage_base));
+ // Skip one word filler objects that appear on the
+ // stack when we perform in place array shift.
+ if (obj.map(cage_base) != filler_map) {
+ *out = obj;
+ return true;
+ }
+ return false;
+ }
+ });
collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge();
@@ -533,13 +545,15 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
const double start = heap_->MonotonicallyIncreasingTimeInMs();
const double deadline = start + expected_duration_ms;
- bool empty_worklist;
- {
+ bool empty_worklist = true;
+ if (local_marking_worklists()->PublishWrapper()) {
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
+ } else {
+ // Cannot directly publish wrapper objects.
LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
HeapObject object;
size_t cnt = 0;
- empty_worklist = true;
- while (local_marking_worklists()->PopEmbedder(&object)) {
+ while (local_marking_worklists()->PopWrapper(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
@@ -586,7 +600,6 @@ void IncrementalMarking::Hurry() {
}
}
-
void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
@@ -601,8 +614,7 @@ void IncrementalMarking::Stop() {
std::max(0, old_generation_size_mb - old_generation_limit_mb));
}
- SpaceIterator it(heap_);
- while (it.HasNext()) {
+ for (SpaceIterator it(heap_); it.HasNext();) {
Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
@@ -629,13 +641,11 @@ void IncrementalMarking::Stop() {
background_live_bytes_.clear();
}
-
void IncrementalMarking::Finalize() {
Hurry();
Stop();
}
-
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
@@ -784,7 +794,8 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
StepOrigin step_origin) {
NestedTimedHistogramScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch", heap_->epoch_full());
+ TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
+ heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
ThreadKind::kMain);
DCHECK(!IsStopped());
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 5ea92e6bad..4a0c196358 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -114,8 +114,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
inline bool IsMarking() const { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() const { return state() == MARKING; }
-
inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
@@ -140,7 +138,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void FinalizeIncrementally();
- void UpdateMarkingWorklistAfterScavenge();
+ void UpdateMarkingWorklistAfterYoungGenGC();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 7d28b750e2..d5dc4e41b5 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
-#include <map>
-
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
@@ -33,14 +31,19 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
if (invalidated_size_ == 0) {
- DCHECK(invalidated_object.map().IsMap());
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(invalidated_object.map()));
invalidated_size_ = invalidated_object.Size();
}
int offset = static_cast<int>(slot - invalidated_start_);
- DCHECK_GT(offset, 0);
+
+ // OLD_TO_OLD can have slots in map word unlike other remembered sets.
+ DCHECK_GE(offset, 0);
+ DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
+
if (offset < invalidated_size_)
- return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+ return offset == 0 ||
+ invalidated_object.IsValidSlot(invalidated_object.map(), offset);
NextInvalidatedObject();
return true;
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index b3655aaad8..d9ad9547ad 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -13,15 +13,19 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
+ OLD_TO_OLD);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
+ OLD_TO_NEW);
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
- MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
+ RememberedSetType remembered_set_type) {
+ USE(remembered_set_type);
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
@@ -36,6 +40,7 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
#ifdef DEBUG
last_slot_ = chunk->area_start();
+ remembered_set_type_ = remembered_set_type;
#endif
}
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 15be3ce44c..032d259e27 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -9,6 +9,7 @@
#include <stack>
#include "src/base/atomic-utils.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
@@ -33,11 +34,13 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
- explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
- InvalidatedSlots* invalidated_slots);
inline bool IsValid(Address slot);
private:
+ explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
+ InvalidatedSlots* invalidated_slots,
+ RememberedSetType remembered_set_type);
+
InvalidatedSlots::const_iterator iterator_;
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
@@ -47,6 +50,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
+ RememberedSetType remembered_set_type_;
#endif
private:
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 7d79c5cdd4..19844ff4c8 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -107,7 +107,7 @@ void LargeObjectSpace::TearDown() {
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
}
}
@@ -135,11 +135,11 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
UpdatePendingObject(object);
@@ -156,7 +156,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),
static_cast<size_t>(object_size));
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
@@ -171,11 +171,11 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
@@ -189,7 +189,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
if (identity() == CODE_LO_SPACE) {
heap()->isolate()->AddCodeMemoryChunk(page);
}
- return object;
+ return AllocationResult::FromObject(object);
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
@@ -324,8 +324,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
} else {
RemovePage(current, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
- current);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
}
current = next_current;
}
@@ -378,22 +377,35 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->space_for_maps()->Contains(map));
// We have only the following types in the large object space:
- if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
- object.IsExternalString(cage_base) ||
- object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
- object.IsFixedDoubleArray(cage_base) ||
- object.IsWeakFixedArray(cage_base) ||
- object.IsWeakArrayList(cage_base) ||
- object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
- object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
- object.IsFreeSpace(cage_base) ||
- object.IsFeedbackMetadata(cage_base) || object.IsContext(cage_base) ||
- object.IsUncompiledDataWithoutPreparseData(cage_base) ||
- object.IsPreparseData(cage_base)) &&
- !FLAG_young_generation_large_objects) {
+ const bool is_valid_lo_space_object = //
+ object.IsAbstractCode(cage_base) || //
+ object.IsBigInt(cage_base) || //
+ object.IsByteArray(cage_base) || //
+ object.IsContext(cage_base) || //
+ object.IsExternalString(cage_base) || //
+ object.IsFeedbackMetadata(cage_base) || //
+ object.IsFeedbackVector(cage_base) || //
+ object.IsFixedArray(cage_base) || //
+ object.IsFixedDoubleArray(cage_base) || //
+ object.IsFreeSpace(cage_base) || //
+ object.IsPreparseData(cage_base) || //
+ object.IsPropertyArray(cage_base) || //
+ object.IsScopeInfo() || //
+ object.IsSeqString(cage_base) || //
+ object.IsSwissNameDictionary() || //
+ object.IsThinString(cage_base) || //
+ object.IsUncompiledDataWithoutPreparseData(cage_base) || //
+#if V8_ENABLE_WEBASSEMBLY //
+ object.IsWasmArray() || //
+#endif //
+ object.IsWeakArrayList(cage_base) || //
+ object.IsWeakFixedArray(cage_base);
+ if (!is_valid_lo_space_object) {
+ object.Print();
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object.map(cage_base).instance_type());
}
@@ -434,6 +446,9 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
}
+
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
@@ -472,16 +487,16 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
// Allocation for the first object must succeed independent from the capacity.
if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
// The size of the first object may exceed the capacity.
capacity_ = std::max(capacity_, SizeOfObjects());
@@ -490,7 +505,6 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
UpdatePendingObject(result);
-#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -498,13 +512,12 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
-#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AdvanceAndInvokeAllocationObservers(result.address(),
static_cast<size_t>(object_size));
- return result;
+ return AllocationResult::FromObject(result);
}
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
@@ -531,7 +544,7 @@ void NewLargeObjectSpace::FreeDeadObjects(
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
index 257e3943a3..65734d5b34 100644
--- a/deps/v8/src/heap/large-spaces.h
+++ b/deps/v8/src/heap/large-spaces.h
@@ -94,8 +94,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
- LargePage* first_page() {
- return reinterpret_cast<LargePage*>(Space::first_page());
+ LargePage* first_page() override {
+ return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
diff --git a/deps/v8/src/heap/local-factory.cc b/deps/v8/src/heap/local-factory.cc
index d8c2ce898a..b767145e09 100644
--- a/deps/v8/src/heap/local-factory.cc
+++ b/deps/v8/src/heap/local-factory.cc
@@ -19,7 +19,12 @@
namespace v8 {
namespace internal {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+LocalFactory::LocalFactory(Isolate* isolate)
+ : roots_(isolate), isolate_for_sandbox_(isolate) {}
+#else
LocalFactory::LocalFactory(Isolate* isolate) : roots_(isolate) {}
+#endif
void LocalFactory::AddToScriptList(Handle<Script> shared) {
// TODO(leszeks): Actually add the script to the main Isolate's script list,
diff --git a/deps/v8/src/heap/local-factory.h b/deps/v8/src/heap/local-factory.h
index 8737e3bfa1..9ad22f7c35 100644
--- a/deps/v8/src/heap/local-factory.h
+++ b/deps/v8/src/heap/local-factory.h
@@ -5,9 +5,6 @@
#ifndef V8_HEAP_LOCAL_FACTORY_H_
#define V8_HEAP_LOCAL_FACTORY_H_
-#include <map>
-#include <vector>
-
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
@@ -68,13 +65,13 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
}
// This is the real Isolate that will be used for allocating and accessing
- // external pointer entries when V8_HEAP_SANDBOX is enabled.
- Isolate* isolate_for_heap_sandbox() {
-#ifdef V8_HEAP_SANDBOX
- return isolate_for_heap_sandbox_;
+ // external pointer entries when V8_SANDBOXED_EXTERNAL_POINTERS is enabled.
+ Isolate* isolate_for_sandbox() {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ return isolate_for_sandbox_;
#else
return nullptr;
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
inline bool CanAllocateInReadOnlySpace() { return false; }
@@ -86,8 +83,8 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
ReadOnlyRoots roots_;
-#ifdef V8_HEAP_SANDBOX
- Isolate* isolate_for_heap_sandbox_;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ Isolate* isolate_for_sandbox_;
#endif
#ifdef DEBUG
bool a_script_was_added_to_the_script_list_ = false;
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 030e5b1932..abff7072df 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -72,7 +72,8 @@ Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationAlignment alignment) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
- if (!result.IsRetry()) return result.ToObject().address();
+ HeapObject object;
+ if (result.To(&object)) return object.address();
return PerformCollectionAndAllocateAgain(object_size, type, origin,
alignment);
}
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 0485158799..700016cade 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -398,7 +398,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
allocation_failed_ = false;
main_thread_parked_ = false;
return result.ToObjectChecked().address();
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index e945c34cef..ca6a17bf7d 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -40,8 +40,6 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
}
}
-#ifdef ENABLE_MINOR_MC
-
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToGrey(obj)) {
@@ -49,8 +47,6 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
}
}
-#endif
-
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
local_marking_worklists()->Push(obj);
@@ -199,10 +195,9 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = black_object.map(cage_base, kAcquireLoad);
- CHECK(map_object.IsMap(cage_base));
- map = Map::cast(map_object);
- DCHECK(map.IsMap(cage_base));
+ map = black_object.map(cage_base, kAcquireLoad);
+ // Map might be forwarded during GC.
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 2977b4219d..206cf936df 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -5,6 +5,7 @@
#include "src/heap/mark-compact.h"
#include <unordered_map>
+#include <unordered_set>
#include "src/base/logging.h"
#include "src/base/optional.h"
@@ -19,18 +20,21 @@
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/concurrent-allocator.h"
+#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
-#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
@@ -48,9 +52,11 @@
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
@@ -211,7 +217,7 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
- VerifyMarking(heap_->map_space());
+ if (heap_->map_space()) VerifyMarking(heap_->map_space());
VerifyMarking(heap_->lo_space());
VerifyMarking(heap_->code_lo_space());
}
@@ -393,7 +399,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
}
protected:
@@ -507,6 +513,17 @@ void MarkCompactCollector::TearDown() {
sweeper()->TearDown();
}
+// static
+bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
+ MapWord map_word = map.map_word(kRelaxedLoad);
+
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress().IsMap();
+ } else {
+ return map_word.ToMap().IsMap();
+ }
+}
+
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
@@ -543,6 +560,10 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
CollectEvacuationCandidates(heap()->old_space());
+ if (heap()->map_space() && FLAG_compact_maps) {
+ CollectEvacuationCandidates(heap()->map_space());
+ }
+
if (FLAG_compact_code_space &&
(heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
@@ -550,7 +571,7 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
TraceFragmentation(heap()->code_space());
}
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation && heap()->map_space()) {
TraceFragmentation(heap()->map_space());
}
@@ -570,8 +591,11 @@ void MarkCompactCollector::StartMarking() {
}
code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
- local_marking_worklists_ =
- std::make_unique<MarkingWorklists::Local>(marking_worklists());
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
+ marking_worklists(),
+ cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
+ : MarkingWorklists::Local::kNoCppMarkingState);
local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), local_weak_objects_.get(),
@@ -591,10 +615,6 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
-#ifdef ENABLE_MINOR_MC
- heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
-#endif // ENABLE_MINOR_MC
-
MarkLiveObjects();
ClearNonLiveReferences();
VerifyMarking();
@@ -643,7 +663,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
- VerifyMarkbitsAreClean(heap_->map_space());
+ if (heap_->map_space()) {
+ VerifyMarkbitsAreClean(heap_->map_space());
+ }
VerifyMarkbitsAreClean(heap_->new_space());
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
@@ -655,26 +677,57 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
#endif // VERIFY_HEAP
-void MarkCompactCollector::EnsureSweepingCompleted() {
- if (!sweeper()->sweeping_in_progress()) return;
+void MarkCompactCollector::FinishSweepingIfOutOfWork() {
+ if (sweeper()->sweeping_in_progress() && FLAG_concurrent_sweeping &&
+ !sweeper()->AreSweeperTasksRunning()) {
+ // At this point we know that all concurrent sweeping tasks have run
+ // out of work and quit: all pages are swept. The main thread still needs
+ // to complete sweeping though.
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
+ }
+ if (heap()->cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists and it's out of work.
+ CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
+ }
+}
- TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
+void MarkCompactCollector::EnsureSweepingCompleted(
+ SweepingForcedFinalizationMode mode) {
+ if (sweeper()->sweeping_in_progress()) {
+ TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
- sweeper()->EnsureCompleted();
- heap()->old_space()->RefillFreeList();
- heap()->code_space()->RefillFreeList();
- heap()->map_space()->RefillFreeList();
- heap()->map_space()->SortFreeList();
+ sweeper()->EnsureCompleted();
+ heap()->old_space()->RefillFreeList();
+ heap()->code_space()->RefillFreeList();
+ if (heap()->map_space()) {
+ heap()->map_space()->RefillFreeList();
+ heap()->map_space()->SortFreeList();
+ }
- heap()->tracer()->NotifySweepingCompleted();
+ heap()->tracer()->NotifySweepingCompleted();
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !evacuation()) {
- FullEvacuationVerifier verifier(heap());
- verifier.Run();
- }
+ if (FLAG_verify_heap && !evacuation()) {
+ FullEvacuationVerifier verifier(heap());
+ verifier.Run();
+ }
#endif
+ }
+
+ if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
+ heap()->cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists.
+ CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
+ DCHECK(
+ !CppHeap::From(heap()->cpp_heap())->sweeper().IsSweepingInProgress());
+ }
+
+ DCHECK_IMPLIES(mode == SweepingForcedFinalizationMode::kUnifiedHeap ||
+ !heap()->cpp_heap(),
+ !heap()->tracer()->IsSweepingInProgress());
}
void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
@@ -736,7 +789,8 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
}
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
- DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
+ DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
+ space->identity() == MAP_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
@@ -924,8 +978,11 @@ void MarkCompactCollector::Prepare() {
if (!was_marked_incrementally_) {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue(
- heap_->flags_for_embedder_tracer());
+ auto embedder_flags = heap_->flags_for_embedder_tracer();
+ // PrepareForTrace should be called before visitor initialization in
+ // StartMarking.
+ heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
@@ -973,7 +1030,7 @@ void MarkCompactCollector::VerifyMarking() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap()->old_space()->VerifyLiveBytes();
- heap()->map_space()->VerifyLiveBytes();
+ if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
}
#endif
@@ -1151,7 +1208,14 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
private:
V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object.IsHeapObject()) return;
- collector_->MarkObject(host, HeapObject::cast(object));
+ HeapObject heap_object = HeapObject::cast(object);
+ // We use this visitor both in client and shared GCs. The client GC should
+ // not mark objects in the shared heap. In shared GCs we are marking each
+ // client's top stack frame, so it is actually legal to encounter references
+ // into the client heap here in a shared GC. We need to bail out in these
+ // cases as well.
+ if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
+ collector_->MarkObject(host, heap_object);
}
MarkCompactCollector* const collector_;
@@ -1165,17 +1229,18 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
- MarkObject(host, p.load(cage_base()));
+ MarkObject(host, p, p.load(cage_base()));
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
MaybeObject object = p.load(cage_base());
HeapObject heap_object;
- if (object.GetHeapObject(&heap_object)) MarkObject(host, heap_object);
+ if (object.GetHeapObject(&heap_object))
+ MarkObject(host, ObjectSlot(p), heap_object);
}
void VisitMapPointer(HeapObject host) final {
- MarkObject(host, host.map(cage_base()));
+ MarkObject(host, host.map_slot(), host.map(cage_base()));
}
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
@@ -1183,13 +1248,13 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
- MarkObject(host, p.load(cage_base()));
+ MarkObject(host, p, p.load(cage_base()));
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- MarkObject(host, slot.load(code_cage_base()));
+ MarkObject(host, ObjectSlot(slot.address()), slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -1203,19 +1268,37 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- MarkObject(host, target);
+ RecordRelocSlot(host, rinfo, target);
}
+
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- MarkObject(host, rinfo->target_object(cage_base()));
+ HeapObject target = rinfo->target_object(cage_base());
+ RecordRelocSlot(host, rinfo, target);
}
private:
- V8_INLINE void MarkObject(HeapObject host, Object object) {
- DCHECK(!BasicMemoryChunk::FromHeapObject(host)->InSharedHeap());
+ V8_INLINE void MarkObject(HeapObject host, ObjectSlot slot, Object object) {
+ DCHECK(!host.InSharedHeap());
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- if (!BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap()) return;
- collector_->MarkObject(host, heap_object);
+ if (!heap_object.InSharedHeap()) return;
+ RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot.address());
+ collector_->MarkRootObject(Root::kClientHeap, heap_object);
+ }
+
+ V8_INLINE void RecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ if (ShouldRecordRelocSlot(host, rinfo, target)) {
+ RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
+ RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk,
+ info.slot_type, info.offset);
+ }
+ }
+
+ V8_INLINE bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ return BasicMemoryChunk::FromHeapObject(target)->InSharedHeap();
}
MarkCompactCollector* const collector_;
@@ -1345,6 +1428,10 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
p.address());
}
+ inline void VisitMapPointer(HeapObject host) final {
+ VisitPointer(host, host.map_slot());
+ }
+
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
RecordMigratedSlot(host, p.load(cage_base()), p.address());
@@ -1518,10 +1605,19 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
+ // In case the object's map gets relocated during GC we load the old map
+ // here. This is fine since they store the same content.
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
+ } else if (dest == MAP_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(IsAligned(size, kTaggedSize));
+ base->heap_->CopyBlock(dst_addr, src_addr, size);
+ if (mode != MigrationMode::kFast)
+ base->ExecuteMigrationObservers(dest, src, dst, size);
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
@@ -1529,7 +1625,9 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
+ // In case the object's map gets relocated during GC we load the old map
+ // here. This is fine since they store the same content.
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1541,14 +1639,13 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(local_allocator),
- record_visitor_(record_visitor) {
- if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
- shared_string_table_ = true;
- shared_old_allocator_ = heap_->shared_old_allocator_.get();
- }
+ shared_old_allocator_(shared_old_allocator),
+ record_visitor_(record_visitor),
+ shared_string_table_(shared_old_allocator != nullptr) {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
@@ -1584,7 +1681,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
inline bool ShouldPromoteIntoSharedHeap(Map map) {
if (shared_string_table_) {
- return String::IsInPlaceInternalizable(map.instance_type());
+ return String::IsInPlaceInternalizableExcludingExternal(
+ map.instance_type());
}
return false;
}
@@ -1622,7 +1720,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
Heap* heap_;
EvacuationAllocator* local_allocator_;
- ConcurrentAllocator* shared_old_allocator_ = nullptr;
+ ConcurrentAllocator* shared_old_allocator_;
RecordMigratedSlotVisitor* record_visitor_;
std::vector<MigrationObserver*> observers_;
MigrateFunction migration_function_;
@@ -1633,10 +1731,12 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(
Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
AlwaysPromoteYoung always_promote_young)
- : EvacuateVisitorBase(heap, local_allocator, record_visitor),
+ : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+ record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
@@ -1706,7 +1806,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation = local_allocator_->Allocate(
NEW_SPACE, size, AllocationOrigin::kGC, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
allocation = AllocateInOldSpace(size, alignment);
space_allocated_in = OLD_SPACE;
}
@@ -1720,7 +1820,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationAlignment alignment) {
AllocationResult allocation = local_allocator_->Allocate(
OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen");
}
@@ -1769,7 +1869,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
} else if (mode == NEW_TO_OLD) {
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- object.IterateBodyFast(cage_base, record_visitor_);
+ object.IterateFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
@@ -1790,8 +1890,10 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
public:
EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
- : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
+ : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+ record_visitor) {}
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
@@ -1807,19 +1909,41 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
- explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
+ explicit EvacuateRecordOnlyVisitor(Heap* heap)
+ : heap_(heap)
+#ifdef V8_COMPRESS_POINTERS
+ ,
+ cage_base_(heap->isolate())
+#endif // V8_COMPRESS_POINTERS
+ {
+ }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ V8_INLINE PtrComprCageBase cage_base() const {
+#ifdef V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
- PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- object.IterateBodyFast(cage_base, &visitor);
+ Map map = object.map(cage_base());
+ // Instead of calling object.IterateBodyFast(cage_base(), &visitor) here
+ // we can shortcut and use the precomputed size value passed to the visitor.
+ DCHECK_EQ(object.SizeFromMap(map), size);
+ object.IterateBodyFast(map, size, &visitor);
return true;
}
private:
Heap* heap_;
+#ifdef V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
@@ -1920,7 +2044,7 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
} while (another_ephemeron_iteration_main_thread ||
heap()->concurrent_marking()->another_ephemeron_iteration() ||
!local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsEmbedderEmpty() ||
+ !local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
@@ -2042,7 +2166,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// is necessary.
work_to_do = !local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsEmbedderEmpty() ||
+ !local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
@@ -2063,11 +2187,14 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
- {
+ if (local_marking_worklists()->PublishWrapper()) {
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
+ } else {
+ // Cannot directly publish wrapper objects.
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
- while (local_marking_worklists()->PopEmbedder(&object)) {
+ while (local_marking_worklists()->PopWrapper(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
@@ -2200,28 +2327,29 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
}
void MarkCompactCollector::RecordObjectStats() {
- if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
- heap()->CreateObjectStats();
- ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
- heap()->dead_object_stats_.get());
- collector.Collect();
- if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- std::stringstream live, dead;
- heap()->live_object_stats_->Dump(live);
- heap()->dead_object_stats_->Dump(dead);
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
- "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
- "live", TRACE_STR_COPY(live.str().c_str()), "dead",
- TRACE_STR_COPY(dead.str().c_str()));
- }
- if (FLAG_trace_gc_object_stats) {
- heap()->live_object_stats_->PrintJSON("live");
- heap()->dead_object_stats_->PrintJSON("dead");
- }
- heap()->live_object_stats_->CheckpointObjectStats();
- heap()->dead_object_stats_->ClearObjectStats();
- }
+ if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
+ // Cannot run during bootstrapping due to incomplete objects.
+ if (isolate()->bootstrapper()->IsActive()) return;
+ heap()->CreateObjectStats();
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
+ heap()->dead_object_stats_.get());
+ collector.Collect();
+ if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ std::stringstream live, dead;
+ heap()->live_object_stats_->Dump(live);
+ heap()->dead_object_stats_->Dump(dead);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+ "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+ TRACE_STR_COPY(dead.str().c_str()));
+ }
+ if (FLAG_trace_gc_object_stats) {
+ heap()->live_object_stats_->PrintJSON("live");
+ heap()->dead_object_stats_->PrintJSON("dead");
+ }
+ heap()->live_object_stats_->CheckpointObjectStats();
+ heap()->dead_object_stats_->ClearObjectStats();
}
void MarkCompactCollector::MarkLiveObjects() {
@@ -2292,8 +2420,8 @@ void MarkCompactCollector::MarkLiveObjects() {
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
- !local_marking_worklists()->IsEmbedderEmpty());
- DCHECK(local_marking_worklists()->IsEmbedderEmpty());
+ !local_marking_worklists()->IsWrapperEmpty());
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
@@ -2336,7 +2464,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
- DCHECK(local_marking_worklists()->IsEmbedderEmpty());
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
@@ -2350,6 +2478,7 @@ void MarkCompactCollector::MarkLiveObjects() {
}
if (was_marked_incrementally_) {
MarkingBarrier::DeactivateAll(heap());
+ GlobalHandles::DisableMarkingBarrier(heap()->isolate());
}
epoch_++;
@@ -2369,12 +2498,12 @@ void MarkCompactCollector::ClearNonLiveReferences() {
string_table->DropOldData();
string_table->IterateElements(&internalized_visitor);
string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
-
- ExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.IterateAll(&external_visitor);
- heap()->external_string_table_.CleanUpAll();
}
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.IterateAll(&external_visitor);
+ heap()->external_string_table_.CleanUpAll();
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
// ProcessFlusheBaselineCandidates should be called after clearing bytecode
@@ -2414,6 +2543,14 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization();
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
+ isolate()->external_pointer_table().Sweep(isolate());
+ }
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
@@ -2449,7 +2586,7 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
Map parent = Map::cast(potential_parent);
DisallowGarbageCollection no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
- TransitionsAccessor(isolate(), parent, &no_gc_obviously)
+ TransitionsAccessor(isolate(), parent)
.HasSimpleTransitionTo(dead_target)) {
ClearPotentialSimpleMapTransition(parent, dead_target);
}
@@ -2809,12 +2946,16 @@ void MarkCompactCollector::ClearWeakCollections() {
if (FLAG_verify_heap) {
Object value = table.ValueAt(i);
if (value.IsHeapObject()) {
- CHECK_IMPLIES(non_atomic_marking_state()->IsBlackOrGrey(key),
- non_atomic_marking_state()->IsBlackOrGrey(
- HeapObject::cast(value)));
+ HeapObject heap_object = HeapObject::cast(value);
+ CHECK_IMPLIES(
+ (!is_shared_heap_ && key.InSharedHeap()) ||
+ non_atomic_marking_state()->IsBlackOrGrey(key),
+ (!is_shared_heap_ && heap_object.InSharedHeap()) ||
+ non_atomic_marking_state()->IsBlackOrGrey(heap_object));
}
}
#endif
+ if (!is_shared_heap_ && key.InSharedHeap()) continue;
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
table.RemoveEntry(i);
}
@@ -2927,53 +3068,76 @@ bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
+// static
+bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
+ BasicMemoryChunk* target_chunk = BasicMemoryChunk::FromHeapObject(target);
+ return target_chunk->IsEvacuationCandidate() &&
+ !source_chunk->ShouldSkipEvacuationSlotRecording();
+}
+
+// static
MarkCompactCollector::RecordRelocSlotInfo
-MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target) {
+MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ DCHECK_EQ(host, rinfo->host());
+
RecordRelocSlotInfo result;
- result.should_record = false;
- Page* target_page = Page::FromHeapObject(target);
- Page* source_page = Page::FromHeapObject(host);
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host().is_null() ||
- !source_page->ShouldSkipEvacuationSlotRecording())) {
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- slot_type = COMPRESSED_OBJECT_SLOT;
- } else {
- DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = FULL_OBJECT_SLOT;
- }
+ const RelocInfo::Mode rmode = rinfo->rmode();
+ Address addr;
+ SlotType slot_type;
+
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = SlotType::kConstPoolCodeEntry;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ slot_type = SlotType::kConstPoolEmbeddedObjectFull;
+ }
+ } else {
+ addr = rinfo->pc();
+
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = SlotType::kCodeEntry;
+ } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
+ slot_type = SlotType::kEmbeddedObjectFull;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = SlotType::kEmbeddedObjectCompressed;
+ } else {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ slot_type = SlotType::kEmbeddedObjectData;
}
- uintptr_t offset = addr - source_page->address();
- DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- result.should_record = true;
- result.memory_chunk = source_page;
- result.slot_type = slot_type;
- result.offset = static_cast<uint32_t>(offset);
}
+
+ MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
+ const uintptr_t offset = addr - source_chunk->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ result.memory_chunk = source_chunk;
+ result.slot_type = slot_type;
+ result.offset = static_cast<uint32_t>(offset);
+
return result;
}
+// static
void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
- RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- // Access to TypeSlots need to be protected, since LocalHeaps might
- // publish code in the background thread.
- base::Optional<base::MutexGuard> opt_guard;
- if (FLAG_concurrent_sparkplug) {
- opt_guard.emplace(info.memory_chunk->mutex());
- }
- RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
- info.offset);
+ if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
+ RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
+
+ // Access to TypeSlots need to be protected, since LocalHeaps might
+ // publish code in the background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(info.memory_chunk->mutex());
}
+ RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
namespace {
@@ -3049,14 +3213,17 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
map_word.ToForwardingAddress(host_cage_base));
if (access_mode == AccessMode::NON_ATOMIC) {
- slot.store(target);
+ // Needs to be atomic for map space compaction: This slot could be a map
+ // word which we update while loading the map word for updating the slot
+ // on another page.
+ slot.Relaxed_Store(target);
} else {
slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
- DCHECK(heap_obj.map(cage_base).IsMap(cage_base));
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
@@ -3106,7 +3273,7 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
CodeDataContainer::cast(HeapObject::FromAddress(
slot.address() - CodeDataContainer::kCodeOffset));
Code code = code_data_container.code(code_cage_base);
- Isolate* isolate_for_sandbox = GetIsolateForHeapSandbox(host);
+ Isolate* isolate_for_sandbox = GetIsolateForSandbox(host);
code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
return result;
}
@@ -3115,11 +3282,8 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
} // namespace
-static constexpr bool kClientHeap = true;
-
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-template <bool in_client_heap = false>
class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
public RootVisitor {
public:
@@ -3175,34 +3339,14 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
}
- void VisitMapPointer(HeapObject object) override {
- if (in_client_heap) {
- UpdateStrongSlotInternal(cage_base(), object.map_slot());
- } else {
- UNREACHABLE();
- }
- }
-
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- if (in_client_heap) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK_WITH_MSG(!target.InSharedHeap(),
- "refs into shared heap not yet supported here.");
- } else {
- // This visitor nevers visits code objects.
- UNREACHABLE();
- }
+ // This visitor nevers visits code objects.
+ UNREACHABLE();
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- if (in_client_heap) {
- HeapObject target = rinfo->target_object(cage_base());
- CHECK_WITH_MSG(!target.InSharedHeap(),
- "refs into shared heap not yet supported here.");
- } else {
- // This visitor nevers visits code objects.
- UNREACHABLE();
- }
+ // This visitor nevers visits code objects.
+ UNREACHABLE();
}
private:
@@ -3232,9 +3376,75 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
};
+#ifdef VERIFY_HEAP
+// Visitor for updating root pointers and to-space pointers.
+// It does not expect to encounter pointers to dead objects.
+class ClientHeapVerifier final : public ObjectVisitorWithCageBases {
+ public:
+ explicit ClientHeapVerifier(Heap* heap) : ObjectVisitorWithCageBases(heap) {}
+
+ void VisitPointer(HeapObject host, ObjectSlot p) override {
+ VerifySlot(cage_base(), p);
+ }
+
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
+ VerifySlot(cage_base(), p);
+ }
+
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
+ VerifySlot(cage_base(), p);
+ }
+ }
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ VerifySlot(cage_base(), p);
+ }
+ }
+
+ void VisitMapPointer(HeapObject host) override {
+ VerifySlot(cage_base(), host.map_slot());
+ }
+
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ VerifySlot(code_cage_base(), ObjectSlot(slot.address()));
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
+
+ private:
+ void VerifySlot(PtrComprCageBase cage_base, ObjectSlot slot) {
+ HeapObject heap_object;
+ if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ VerifyHeapObject(heap_object);
+ }
+ }
+
+ void VerifySlot(PtrComprCageBase cage_base, MaybeObjectSlot slot) {
+ HeapObject heap_object;
+ if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ VerifyHeapObject(heap_object);
+ }
+ }
+
+ void VerifyHeapObject(HeapObject heap_object) {
+ if (BasicMemoryChunk::FromHeapObject(heap_object)->InReadOnlySpace())
+ return;
+ if (!heap_object.InSharedHeap()) return;
+ CHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
+ }
+};
+#endif // VERIFY_HEAP
+
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
- MapWord map_word = HeapObject::cast(*p).map_word(kRelaxedLoad);
+ HeapObject old_string = HeapObject::cast(*p);
+ MapWord map_word = old_string.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
String new_string = String::cast(map_word.ToForwardingAddress());
@@ -3306,6 +3516,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL((p->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
@@ -3313,6 +3524,16 @@ void MarkCompactCollector::EvacuateEpilogue() {
#endif
}
+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+ if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ }
+
+ return nullptr;
+}
+} // namespace
+
class Evacuator : public Malloced {
public:
enum EvacuationMode {
@@ -3359,14 +3580,17 @@ class Evacuator : public Malloced {
AlwaysPromoteYoung always_promote_young)
: heap_(heap),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
- new_space_visitor_(heap_, local_allocator, record_visitor,
- &local_pretenuring_feedback_, always_promote_young),
+ shared_old_allocator_(CreateSharedOldAllocator(heap_)),
+ new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+ record_visitor, &local_pretenuring_feedback_,
+ always_promote_young),
new_to_new_page_visitor_(heap_, record_visitor,
&local_pretenuring_feedback_),
new_to_old_page_visitor_(heap_, record_visitor,
&local_pretenuring_feedback_),
- old_space_visitor_(heap_, local_allocator, record_visitor),
+ old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+ record_visitor),
local_allocator_(local_allocator),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3405,6 +3629,9 @@ class Evacuator : public Malloced {
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ // Allocator for the shared heap.
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
@@ -3450,6 +3677,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
void Evacuator::Finalize() {
local_allocator_->Finalize();
+ if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_to_old_page_visitor_.moved_bytes());
@@ -3733,6 +3961,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
@@ -3915,10 +4144,9 @@ void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) {
- if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper()->AddPageForIterability(p);
- } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ // Full GCs don't promote pages within new space.
+ DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
@@ -3926,6 +4154,12 @@ void MarkCompactCollector::Evacuate() {
}
new_space_evacuation_pages_.clear();
+ for (LargePage* p : promoted_large_pages_) {
+ DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ }
+ promoted_large_pages_.clear();
+
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
@@ -4037,7 +4271,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor<> visitor(heap_);
+ PointersUpdatingVisitor visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
Map map = object.map(visitor.cage_base());
@@ -4052,7 +4286,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor<> visitor(heap_);
+ PointersUpdatingVisitor visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
@@ -4380,7 +4614,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
// The external string table is updated at the end.
- PointersUpdatingVisitor<> updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor(heap());
heap_->IterateRootsIncludingClients(
&updating_visitor,
base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
@@ -4405,8 +4639,10 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSetUpdatingMode::ALL);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
RememberedSetUpdatingMode::ALL);
- CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::ALL);
+ if (heap()->map_space()) {
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::ALL);
+ }
// Iterating to space may require a valid body descriptor for e.g.
// WasmStruct which races with updating a slot in Map. Since to space is
@@ -4440,18 +4676,54 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
void MarkCompactCollector::UpdatePointersInClientHeaps() {
if (!isolate()->is_shared()) return;
- PointersUpdatingVisitor<kClientHeap> visitor(heap());
-
isolate()->global_safepoint()->IterateClientIsolates(
- [&visitor](Isolate* client) {
- Heap* heap = client->heap();
- HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
- PtrComprCageBase cage_base(client);
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- obj.IterateFast(cage_base, &visitor);
- }
- });
+ [this](Isolate* client) { UpdatePointersInClientHeap(client); });
+}
+
+void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
+ PtrComprCageBase cage_base(client);
+ MemoryChunkIterator chunk_iterator(client->heap());
+
+ while (chunk_iterator.HasNext()) {
+ MemoryChunk* chunk = chunk_iterator.Next();
+ CodePageMemoryModificationScope unprotect_code_page(chunk);
+
+ RememberedSet<OLD_TO_SHARED>::Iterate(
+ chunk,
+ [cage_base](MaybeObjectSlot slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ chunk->ReleaseSlotSet<OLD_TO_SHARED>();
+
+ RememberedSet<OLD_TO_SHARED>::IterateTyped(
+ chunk, [this](SlotType slot_type, Address slot) {
+ // Using UpdateStrongSlot is OK here, because there are no weak
+ // typed slots.
+ PtrComprCageBase cage_base = heap_->isolate();
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base,
+ slot);
+ });
+ });
+
+ chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ ClientHeapVerifier verifier_visitor(client->heap());
+
+ HeapObjectIterator iterator(client->heap(),
+ HeapObjectIterator::kNoFiltering);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &verifier_visitor);
+ }
+ }
+#endif // VERIFY_HEAP
}
void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
@@ -4605,7 +4877,7 @@ void MarkCompactCollector::StartSweepSpaces() {
heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
StartSweepSpace(heap()->code_space());
}
- {
+ if (heap()->map_space()) {
GCTracer::Scope sweep_scope(
heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
StartSweepSpace(heap()->map_space());
@@ -4614,8 +4886,6 @@ void MarkCompactCollector::StartSweepSpaces() {
}
}
-#ifdef ENABLE_MINOR_MC
-
namespace {
#ifdef VERIFY_HEAP
@@ -4668,8 +4938,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- PtrComprCageBase cage_base = host.main_cage_base();
- VerifyHeapObjectImpl(rinfo->target_object(cage_base));
+ VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -4707,7 +4976,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
}
protected:
@@ -4871,14 +5140,18 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete main_marking_visitor_;
}
-void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
- for (Page* p : sweep_to_iterate_pages_) {
- if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- non_atomic_marking_state()->ClearLiveness(p);
- }
+void MinorMarkCompactCollector::CleanupPromotedPages() {
+ for (Page* p : promoted_pages_) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ non_atomic_marking_state()->ClearLiveness(p);
+ }
+ promoted_pages_.clear();
+
+ for (LargePage* p : promoted_large_pages_) {
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
- sweep_to_iterate_pages_.clear();
+ promoted_large_pages_.clear();
}
void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
@@ -4962,7 +5235,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor<> updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor(heap());
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
@@ -4971,8 +5244,11 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ if (heap()->map_space()) {
+ CollectRememberedSetUpdatingItems(
+ &updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ }
CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
@@ -5044,7 +5320,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- CleanupSweepToIteratePages();
}
heap()->array_buffer_sweeper()->EnsureFinished();
@@ -5068,14 +5343,15 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
for (Page* p :
PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
- DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ DCHECK_EQ(promoted_pages_.end(),
+ std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
non_atomic_marking_state()->ClearLiveness(p);
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
@@ -5090,12 +5366,13 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
+ CleanupPromotedPages();
+
SweepArrayBufferExtensions();
}
void MinorMarkCompactCollector::MakeIterable(
- Page* p, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode) {
+ Page* p, FreeSpaceTreatmentMode free_space_mode) {
CHECK(!p->IsLargePage());
// We have to clear the full collectors markbits for the areas that we
// remove here.
@@ -5137,11 +5414,6 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
-
- if (marking_mode == MarkingTreatmentMode::CLEAR) {
- non_atomic_marking_state()->ClearLiveness(p);
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- }
}
namespace {
@@ -5637,7 +5909,7 @@ void MinorMarkCompactCollector::Evacuate() {
EvacuatePagesInParallel();
}
- UpdatePointersAfterEvacuation();
+ if (!FLAG_minor_mc_sweeping) UpdatePointersAfterEvacuation();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
@@ -5651,10 +5923,7 @@ void MinorMarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->SetFlag(Page::SWEEP_TO_ITERATE);
- sweep_to_iterate_pages_.push_back(p);
+ promoted_pages_.push_back(p);
}
}
new_space_evacuation_pages_.clear();
@@ -5703,6 +5972,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
*live_bytes = marking_state->live_bytes(chunk);
switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
+ DCHECK(!FLAG_minor_mc_sweeping);
LiveObjectVisitor::VisitGreyObjectsNoFail(
chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
@@ -5715,14 +5985,12 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
if (!chunk->IsLargePage()) {
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP,
IGNORE_FREE_SPACE);
}
}
@@ -5735,14 +6003,12 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
DCHECK(!chunk->IsLargePage());
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
}
break;
case kObjectsOldToOld:
@@ -5760,7 +6026,8 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
+ if (FLAG_minor_mc_sweeping ||
+ ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
@@ -5780,6 +6047,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
if (non_atomic_marking_state_.IsGrey(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
@@ -5797,7 +6065,5 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
}
}
-#endif // ENABLE_MINOR_MC
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index ecfb5adc64..ea9173f5be 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -26,6 +26,7 @@ namespace internal {
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
+class LargePage;
class MigrationObserver;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
@@ -187,7 +188,6 @@ class LiveObjectVisitor : AllStatic {
enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
-enum class MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
@@ -508,11 +508,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
struct RecordRelocSlotInfo {
MemoryChunk* memory_chunk;
SlotType slot_type;
- bool should_record;
uint32_t offset;
};
- static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target);
+
+ static V8_EXPORT_PRIVATE bool IsMapOrForwardedMap(Map map);
+
+ static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target);
+ static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
+ HeapObject target);
+
static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
HeapObject target);
@@ -525,10 +530,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool is_compacting() const { return compacting_; }
bool is_shared_heap() const { return is_shared_heap_; }
+ void FinishSweepingIfOutOfWork();
+
+ enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };
+
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
- V8_EXPORT_PRIVATE void EnsureSweepingCompleted();
+ V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
+ SweepingForcedFinalizationMode mode);
void EnsurePageIsSwept(Page* page);
@@ -622,6 +632,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Free unmarked ArrayBufferExtensions.
void SweepArrayBufferExtensions();
+ // Free unmarked entries in the ExternalPointerTable.
+ void SweepExternalPointerTable();
+
void MarkLiveObjects() override;
// Marks the object grey and adds it to the marking work list.
@@ -642,6 +655,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Updates pointers to shared objects from client heaps.
void UpdatePointersInClientHeaps();
+ void UpdatePointersInClientHeap(Isolate* client);
// Marks object reachable from harmony weak maps and wrapper tracing.
void ProcessEphemeronMarking();
@@ -799,6 +813,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
aborted_evacuation_candidates_due_to_oom_;
std::vector<std::pair<Address, Page*>>
aborted_evacuation_candidates_due_to_flags_;
+ std::vector<LargePage*> promoted_large_pages_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
@@ -835,8 +850,6 @@ class V8_NODISCARD EvacuationScope {
MarkCompactCollector* collector_;
};
-#ifdef ENABLE_MINOR_MC
-
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
@@ -858,9 +871,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void TearDown() override;
void CollectGarbage() override;
- void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode);
- void CleanupSweepToIteratePages();
+ void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
+ void CleanupPromotedPages();
private:
using MarkingWorklist =
@@ -909,15 +921,14 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<Page*> sweep_to_iterate_pages_;
+ std::vector<Page*> promoted_pages_;
+ std::vector<LargePage*> promoted_large_pages_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
};
-#endif // ENABLE_MINOR_MC
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 6a7571af79..fc82ff50f2 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -121,15 +121,16 @@ void MarkingBarrier::Write(DescriptorArray descriptor_array,
void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
DCHECK(IsCurrentMarkingBarrier());
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) return;
+
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- auto& typed_slots = typed_slots_map_[info.memory_chunk];
- if (!typed_slots) {
- typed_slots.reset(new TypedSlots());
- }
- typed_slots->Insert(info.slot_type, info.offset);
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+
+ auto& typed_slots = typed_slots_map_[info.memory_chunk];
+ if (!typed_slots) {
+ typed_slots.reset(new TypedSlots());
}
+ typed_slots->Insert(info.slot_type, info.offset);
}
// static
@@ -193,7 +194,7 @@ void MarkingBarrier::Deactivate() {
is_compacting_ = false;
if (is_main_thread_barrier_) {
DeactivateSpace(heap_->old_space());
- DeactivateSpace(heap_->map_space());
+ if (heap_->map_space()) DeactivateSpace(heap_->map_space());
DeactivateSpace(heap_->code_space());
DeactivateSpace(heap_->new_space());
for (LargePage* p : *heap_->new_lo_space()) {
@@ -232,7 +233,7 @@ void MarkingBarrier::Activate(bool is_compacting) {
is_activated_ = true;
if (is_main_thread_barrier_) {
ActivateSpace(heap_->old_space());
- ActivateSpace(heap_->map_space());
+ if (heap_->map_space()) ActivateSpace(heap_->map_space());
ActivateSpace(heap_->code_space());
ActivateSpace(heap_->new_space());
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 8f65a61dab..c59ae55d2d 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_MARKING_VISITOR_INL_H_
#include "src/heap/marking-visitor.h"
+#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/progress-bar.h"
@@ -25,6 +26,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
concrete_visitor()->SynchronizePageAccess(object);
+ AddStrongReferenceForReferenceSummarizer(host, object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
local_marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
@@ -41,8 +43,7 @@ template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
concrete_visitor()->SynchronizePageAccess(heap_object);
- BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(heap_object);
- if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+ if (!is_shared_heap_ && heap_object.InSharedHeap()) return;
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
@@ -64,6 +65,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
// the reference when we know the liveness of the whole transitive
// closure.
local_weak_objects_->weak_references_local.Push(std::make_pair(host, slot));
+ AddWeakReferenceForReferenceSummarizer(host, heap_object);
}
}
@@ -112,10 +114,13 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object =
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
+ if (!is_shared_heap_ && object.InSharedHeap()) return;
+
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
local_weak_objects_->weak_objects_in_code_local.Push(
std::make_pair(object, host));
+ AddWeakReferenceForReferenceSummarizer(host, object);
} else {
MarkObject(host, object);
}
@@ -128,6 +133,8 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodeTarget(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+
+ if (!is_shared_heap_ && target.InSharedHeap()) return;
MarkObject(host, target);
concrete_visitor()->RecordRelocSlot(host, rinfo, target);
}
@@ -243,7 +250,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
// in the large object space.
ProgressBar& progress_bar =
MemoryChunk::FromHeapObject(object)->ProgressBar();
- return progress_bar.IsEnabled()
+ return CanUpdateValuesInHeap() && progress_bar.IsEnabled()
? VisitFixedArrayWithProgressBar(map, object, progress_bar)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
@@ -260,17 +267,45 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedDoubleArray(
template <typename ConcreteVisitor, typename MarkingState>
template <typename T>
+inline int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitEmbedderTracingSubClassNoEmbedderTracing(Map map, T object) {
+ return concrete_visitor()->VisitJSObjectSubclass(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T>
+inline int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object) {
+ const bool requires_snapshot =
+ local_marking_worklists_->SupportsExtractWrapper();
+ MarkingWorklists::Local::WrapperSnapshot wrapper_snapshot;
+ const bool valid_snapshot =
+ requires_snapshot &&
+ local_marking_worklists_->ExtractWrapper(map, object, wrapper_snapshot);
+ const int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
+ if (size) {
+ if (valid_snapshot) {
+ // Success: The object needs to be processed for embedder references.
+ local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot);
+ } else if (!requires_snapshot) {
+ // Snapshot not supported. Just fall back to pushing the wrapper itself
+ // instead which will be processed on the main thread.
+ local_marking_worklists_->PushWrapper(object);
+ }
+ }
+ return size;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T>
int MarkingVisitorBase<ConcreteVisitor,
MarkingState>::VisitEmbedderTracingSubclass(Map map,
T object) {
- DCHECK(object.IsApiWrapper());
- int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
- if (size && is_embedder_tracing_enabled_) {
- // Success: The object needs to be processed for embedder references on
- // the main thread.
- local_marking_worklists_->PushEmbedder(object);
+ DCHECK(object.MayHaveEmbedderFields());
+ if (V8_LIKELY(is_embedder_tracing_enabled_)) {
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
}
- return size;
+ return VisitEmbedderTracingSubClassNoEmbedderTracing(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
@@ -315,11 +350,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
concrete_visitor()->SynchronizePageAccess(key);
concrete_visitor()->RecordSlot(table, key_slot, key);
+ AddWeakReferenceForReferenceSummarizer(table, key);
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
- if (concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
+ if ((!is_shared_heap_ && key.InSharedHeap()) ||
+ concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
Object value_obj = table.ValueAt(i);
@@ -328,6 +365,9 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
HeapObject value = HeapObject::cast(value_obj);
concrete_visitor()->SynchronizePageAccess(value);
concrete_visitor()->RecordSlot(table, value_slot, value);
+ AddWeakReferenceForReferenceSummarizer(table, value);
+
+ if (!is_shared_heap_ && value.InSharedHeap()) continue;
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
@@ -358,6 +398,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
local_weak_objects_->js_weak_refs_local.Push(weak_ref);
+ AddWeakReferenceForReferenceSummarizer(weak_ref, target);
}
}
return size;
@@ -388,6 +429,8 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// token. We have to process them when we know the liveness of the whole
// transitive closure.
local_weak_objects_->weak_cells_local.Push(weak_cell);
+ AddWeakReferenceForReferenceSummarizer(weak_cell, target);
+ AddWeakReferenceForReferenceSummarizer(weak_cell, unregister_token);
}
return size;
}
@@ -414,8 +457,11 @@ template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptors(
DescriptorArray descriptor_array, int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
- int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
- mark_compact_epoch_, new_marked);
+ int16_t old_marked = 0;
+ if (CanUpdateValuesInHeap()) {
+ old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+ mark_compact_epoch_, new_marked);
+ }
if (old_marked < new_marked) {
VisitPointers(
descriptor_array,
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 26ebf5713f..94670b4d73 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -25,6 +25,11 @@ struct EphemeronMarking {
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
+ // Declares that this marking state is not collecting retainers, so the
+ // marking visitor may update the heap state to store information about
+ // progress, and may avoid fully visiting an object if it is safe to do so.
+ static constexpr bool kCollectRetainers = false;
+
explicit MarkingStateBase(PtrComprCageBase cage_base)
#if V8_COMPRESS_POINTERS
: cage_base_(cage_base)
@@ -102,6 +107,15 @@ class MarkingStateBase {
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
+ void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
+ void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
private:
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
@@ -127,9 +141,8 @@ template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(MarkingWorklists::Local* local_marking_worklists,
- WeakObjects::Local* local_weak_objects,
- // WeakObjects* weak_objects,
- Heap* heap, unsigned mark_compact_epoch,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
+ unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled,
bool should_keep_ages_unchanged)
@@ -141,7 +154,13 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
should_keep_ages_unchanged_(should_keep_ages_unchanged),
- is_shared_heap_(heap->IsShared()) {}
+ is_shared_heap_(heap->IsShared())
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ,
+ external_pointer_table_(&heap->isolate()->external_pointer_table())
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+ {
+ }
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -161,10 +180,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// ObjectVisitor overrides.
void VisitMapPointer(HeapObject host) final {
- // Note that we are skipping the recording the slot because map objects
- // can't move, so this is safe (see ProcessStrongHeapObject for comparison)
- MarkObject(host, HeapObject::cast(
- host.map(ObjectVisitorWithCageBases::cage_base())));
+ Map map = host.map(ObjectVisitorWithCageBases::cage_base());
+ MarkObject(host, map);
+ concrete_visitor()->RecordSlot(host, host.map_slot(), map);
}
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
@@ -191,6 +209,14 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// reconstructed after GC.
}
+ V8_INLINE void VisitExternalPointer(HeapObject host,
+ ExternalPointer_t ptr) final {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ uint32_t index = ptr >> kExternalPointerIndexShift;
+ external_pointer_table_->Mark(index);
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+ }
+
protected:
ConcreteVisitor* concrete_visitor() {
return static_cast<ConcreteVisitor*>(this);
@@ -219,6 +245,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
+ template <typename T>
+ int VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object);
+ template <typename T>
+ int VisitEmbedderTracingSubClassNoEmbedderTracing(Map map, T object);
+
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
ProgressBar& progress_bar);
// Marks the descriptor array black without pushing it on the marking work
@@ -228,6 +259,23 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
+ V8_INLINE void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ concrete_visitor()
+ ->marking_state()
+ ->AddStrongReferenceForReferenceSummarizer(host, obj);
+ }
+
+ V8_INLINE void AddWeakReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ concrete_visitor()->marking_state()->AddWeakReferenceForReferenceSummarizer(
+ host, obj);
+ }
+
+ constexpr bool CanUpdateValuesInHeap() {
+ return !MarkingState::kCollectRetainers;
+ }
+
MarkingWorklists::Local* const local_marking_worklists_;
WeakObjects::Local* const local_weak_objects_;
Heap* const heap_;
@@ -236,6 +284,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const bool is_embedder_tracing_enabled_;
const bool should_keep_ages_unchanged_;
const bool is_shared_heap_;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ExternalPointerTable* const external_pointer_table_;
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
};
} // namespace internal
diff --git a/deps/v8/src/heap/marking-worklist-inl.h b/deps/v8/src/heap/marking-worklist-inl.h
index 7e4c49667f..8a1551f1a2 100644
--- a/deps/v8/src/heap/marking-worklist-inl.h
+++ b/deps/v8/src/heap/marking-worklist-inl.h
@@ -5,9 +5,11 @@
#define V8_HEAP_MARKING_WORKLIST_INL_H_
#include <unordered_map>
-#include <vector>
+#include "src/heap/cppgc-js/cpp-marking-state-inl.h"
#include "src/heap/marking-worklist.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ template <typename Callback>
void MarkingWorklists::Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
- embedder_.Update(callback);
+ wrapper_.Update(callback);
other_.Update(callback);
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
@@ -45,12 +47,30 @@ bool MarkingWorklists::Local::PopOnHold(HeapObject* object) {
return on_hold_.Pop(object);
}
-void MarkingWorklists::Local::PushEmbedder(HeapObject object) {
- embedder_.Push(object);
+bool MarkingWorklists::Local::SupportsExtractWrapper() {
+ return cpp_marking_state_.get();
}
-bool MarkingWorklists::Local::PopEmbedder(HeapObject* object) {
- return embedder_.Pop(object);
+bool MarkingWorklists::Local::ExtractWrapper(Map map, JSObject object,
+ WrapperSnapshot& snapshot) {
+ DCHECK_NOT_NULL(cpp_marking_state_);
+ return cpp_marking_state_->ExtractEmbedderDataSnapshot(map, object, snapshot);
+}
+
+void MarkingWorklists::Local::PushExtractedWrapper(
+ const WrapperSnapshot& snapshot) {
+ DCHECK_NOT_NULL(cpp_marking_state_);
+ cpp_marking_state_->MarkAndPush(snapshot);
+}
+
+void MarkingWorklists::Local::PushWrapper(HeapObject object) {
+ DCHECK_NULL(cpp_marking_state_);
+ wrapper_.Push(object);
+}
+
+bool MarkingWorklists::Local::PopWrapper(HeapObject* object) {
+ DCHECK_NULL(cpp_marking_state_);
+ return wrapper_.Pop(object);
}
Address MarkingWorklists::Local::SwitchToContext(Address context) {
@@ -72,6 +92,12 @@ void MarkingWorklists::Local::SwitchToContext(
active_context_ = context;
}
+bool MarkingWorklists::Local::PublishWrapper() {
+ if (!cpp_marking_state_) return false;
+ cpp_marking_state_->Publish();
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking-worklist.cc b/deps/v8/src/heap/marking-worklist.cc
index e5d3fbdf35..5dbbef5dcd 100644
--- a/deps/v8/src/heap/marking-worklist.cc
+++ b/deps/v8/src/heap/marking-worklist.cc
@@ -5,8 +5,11 @@
#include "src/heap/marking-worklist.h"
#include <algorithm>
+#include <cstddef>
#include <map>
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/heap-object.h"
@@ -29,7 +32,7 @@ MarkingWorklists::~MarkingWorklists() {
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
- embedder_.Clear();
+ wrapper_.Clear();
other_.Clear();
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
@@ -94,13 +97,17 @@ void MarkingWorklists::PrintWorklist(const char* worklist_name,
#endif
}
-const Address MarkingWorklists::Local::kSharedContext;
-const Address MarkingWorklists::Local::kOtherContext;
+constexpr Address MarkingWorklists::Local::kSharedContext;
+constexpr Address MarkingWorklists::Local::kOtherContext;
+constexpr std::nullptr_t MarkingWorklists::Local::kNoCppMarkingState;
-MarkingWorklists::Local::Local(MarkingWorklists* global)
+MarkingWorklists::Local::Local(
+ MarkingWorklists* global,
+ std::unique_ptr<CppMarkingState> cpp_marking_state)
: on_hold_(global->on_hold()),
- embedder_(global->embedder()),
- is_per_context_mode_(false) {
+ wrapper_(global->wrapper()),
+ is_per_context_mode_(false),
+ cpp_marking_state_(std::move(cpp_marking_state)) {
if (global->context_worklists().empty()) {
MarkingWorklist::Local shared(global->shared());
active_ = std::move(shared);
@@ -133,7 +140,7 @@ MarkingWorklists::Local::~Local() {
void MarkingWorklists::Local::Publish() {
active_.Publish();
on_hold_.Publish();
- embedder_.Publish();
+ wrapper_.Publish();
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
@@ -141,6 +148,7 @@ void MarkingWorklists::Local::Publish() {
}
}
}
+ PublishWrapper();
}
bool MarkingWorklists::Local::IsEmpty() {
@@ -163,8 +171,12 @@ bool MarkingWorklists::Local::IsEmpty() {
return true;
}
-bool MarkingWorklists::Local::IsEmbedderEmpty() const {
- return embedder_.IsLocalEmpty() && embedder_.IsGlobalEmpty();
+bool MarkingWorklists::Local::IsWrapperEmpty() const {
+ if (cpp_marking_state_) {
+ DCHECK(wrapper_.IsLocalAndGlobalEmpty());
+ return cpp_marking_state_->IsLocalEmpty();
+ }
+ return wrapper_.IsLocalAndGlobalEmpty();
}
void MarkingWorklists::Local::ShareWork() {
diff --git a/deps/v8/src/heap/marking-worklist.h b/deps/v8/src/heap/marking-worklist.h
index 2be050c7e5..b202c09a70 100644
--- a/deps/v8/src/heap/marking-worklist.h
+++ b/deps/v8/src/heap/marking-worklist.h
@@ -5,21 +5,27 @@
#ifndef V8_HEAP_MARKING_WORKLIST_H_
#define V8_HEAP_MARKING_WORKLIST_H_
+#include <cstddef>
+#include <memory>
#include <unordered_map>
#include <vector>
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/marking.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
+class CppMarkingState;
+class JSObject;
+
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
using MarkingWorklist = ::heap::base::Worklist<HeapObject, 64>;
-using EmbedderTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
+using WrapperTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
// We piggyback on marking to compute object sizes per native context that is
// needed for the new memory measurement API. The algorithm works as follows:
@@ -82,7 +88,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
MarkingWorklist* shared() { return &shared_; }
MarkingWorklist* on_hold() { return &on_hold_; }
- EmbedderTracingWorklist* embedder() { return &embedder_; }
+ WrapperTracingWorklist* wrapper() { return &wrapper_; }
// A list of (context, worklist) pairs that was set up at the start of
// marking by CreateContextWorklists.
@@ -115,7 +121,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
- EmbedderTracingWorklist embedder_;
+ WrapperTracingWorklist wrapper_;
// Per-context worklists.
std::vector<ContextWorklistPair> context_worklists_;
@@ -137,10 +143,13 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
// been moved to active_.
class V8_EXPORT_PRIVATE MarkingWorklists::Local {
public:
- static const Address kSharedContext = MarkingWorklists::kSharedContext;
- static const Address kOtherContext = MarkingWorklists::kOtherContext;
+ static constexpr Address kSharedContext = MarkingWorklists::kSharedContext;
+ static constexpr Address kOtherContext = MarkingWorklists::kOtherContext;
+ static constexpr std::nullptr_t kNoCppMarkingState = nullptr;
- explicit Local(MarkingWorklists* global);
+ Local(
+ MarkingWorklists* global,
+ std::unique_ptr<CppMarkingState> cpp_marking_state = kNoCppMarkingState);
~Local();
inline void Push(HeapObject object);
@@ -149,12 +158,17 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local {
inline void PushOnHold(HeapObject object);
inline bool PopOnHold(HeapObject* object);
- inline void PushEmbedder(HeapObject object);
- inline bool PopEmbedder(HeapObject* object);
+ using WrapperSnapshot = CppMarkingState::EmbedderDataSnapshot;
+ inline bool ExtractWrapper(Map map, JSObject object,
+ WrapperSnapshot& snapshot);
+ inline void PushExtractedWrapper(const WrapperSnapshot& snapshot);
+ inline bool SupportsExtractWrapper();
+ inline void PushWrapper(HeapObject object);
+ inline bool PopWrapper(HeapObject* object);
void Publish();
bool IsEmpty();
- bool IsEmbedderEmpty() const;
+ bool IsWrapperEmpty() const;
// Publishes the local active marking worklist if its global worklist is
// empty. In the per-context marking mode it also publishes the shared
// worklist.
@@ -162,25 +176,35 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local {
// Merges the on-hold worklist to the shared worklist.
void MergeOnHold();
+ // Returns true if wrapper objects could be directly pushed. Otherwise,
+ // objects need to be processed one by one.
+ inline bool PublishWrapper();
+
// Returns the context of the active worklist.
Address Context() const { return active_context_; }
inline Address SwitchToContext(Address context);
inline Address SwitchToShared();
bool IsPerContextMode() const { return is_per_context_mode_; }
+ CppMarkingState* cpp_marking_state() const {
+ return cpp_marking_state_.get();
+ }
+
private:
bool PopContext(HeapObject* object);
Address SwitchToContextSlow(Address context);
inline void SwitchToContext(Address context,
MarkingWorklist::Local* worklist);
MarkingWorklist::Local on_hold_;
- EmbedderTracingWorklist::Local embedder_;
+ WrapperTracingWorklist::Local wrapper_;
MarkingWorklist::Local active_;
Address active_context_;
MarkingWorklist::Local* active_owner_;
bool is_per_context_mode_;
std::unordered_map<Address, std::unique_ptr<MarkingWorklist::Local>>
worklist_by_context_;
+
+ std::unique_ptr<CppMarkingState> cpp_marking_state_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index d9552149c2..9f467305bf 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -24,6 +24,9 @@ namespace internal {
// MemoryAllocator
//
+size_t MemoryAllocator::commit_page_size_ = 0;
+size_t MemoryAllocator::commit_page_size_bits_ = 0;
+
MemoryAllocator::MemoryAllocator(Isolate* isolate,
v8::PageAllocator* code_page_allocator,
size_t capacity)
@@ -87,8 +90,8 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
private:
void RunImpl(JobDelegate* delegate) {
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
- delegate);
+ unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled,
+ delegate);
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
}
@@ -110,7 +113,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
}
} else {
- PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled);
}
}
@@ -131,21 +134,20 @@ void MemoryAllocator::Unmapper::PrepareForGC() {
void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
CancelAndWaitForPendingTasks();
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
}
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
allocator_->PerformFreeMemory(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
-template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
- JobDelegate* delegate) {
+ MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -154,18 +156,18 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
NumberOfChunks());
}
// Regular chunks.
- while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ if (pooled) AddMemoryChunkSafe(kPooled, chunk);
if (delegate && delegate->ShouldYield()) return;
}
- if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
- // to the pooled list. In case of kReleasePooled we need to free them
- // though.
- while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
- allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ // to the pooled list. In case of kFreePooled we need to free them though as
+ // well.
+ while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
+ allocator_->FreePooledChunk(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
@@ -174,7 +176,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
void MemoryAllocator::Unmapper::TearDown() {
CHECK(!job_handle_ || !job_handle_->IsValid());
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
@@ -228,9 +230,9 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
return true;
}
-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
- Address base, size_t size) {
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
+void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
+ Address base, size_t size) {
+ FreePages(page_allocator, reinterpret_cast<void*>(base), size);
}
Address MemoryAllocator::AllocateAlignedMemory(
@@ -400,14 +402,15 @@ V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
+ PageSize page_size,
BaseSpace* owner) {
BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
reserve_area_size, commit_area_size, executable, owner);
if (basic_chunk == nullptr) return nullptr;
- MemoryChunk* chunk =
- MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+ MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(),
+ executable, page_size);
#ifdef DEBUG
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
@@ -440,7 +443,8 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
size_ -= released_bytes;
}
-void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
+void MemoryAllocator::UnregisterSharedBasicMemoryChunk(
+ BasicMemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -448,8 +452,8 @@ void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
size_ -= size;
}
-void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
- Executability executable) {
+void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+ Executability executable) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
@@ -469,15 +473,20 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
- UnregisterMemory(chunk, chunk->executable());
+void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) {
+ UnregisterBasicMemoryChunk(chunk, chunk->executable());
+}
+
+void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
+ DCHECK(!page->executable());
+ UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
}
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterSharedMemory(chunk);
+ UnregisterSharedBasicMemoryChunk(chunk);
v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
VirtualMemory* reservation = chunk->reserved_memory();
@@ -487,15 +496,15 @@ void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
// Only read-only pages can have a non-initialized reservation object. This
// happens when the pages are remapped to multiple locations and where the
// reservation would therefore be invalid.
- FreeMemory(allocator, chunk->address(),
- RoundUp(chunk->size(), allocator->AllocatePageSize()));
+ FreeMemoryRegion(allocator, chunk->address(),
+ RoundUp(chunk->size(), allocator->AllocatePageSize()));
}
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterMemory(chunk);
+ UnregisterMemoryChunk(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -516,25 +525,18 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
}
}
-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
switch (mode) {
- case kFull:
+ case kImmediately:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
- case kAlreadyPooled:
- // Pooled pages cannot be touched anymore as their memory is uncommitted.
- // Pooled pages are not-executable.
- FreeMemory(data_page_allocator(), chunk->address(),
- static_cast<size_t>(MemoryChunk::kPageSize));
- break;
- case kPooledAndQueue:
+ case kConcurrentlyAndPool:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
- case kPreFreeAndQueue:
+ case kConcurrently:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
@@ -542,23 +544,18 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kFull>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ // Pooled pages are not-executable.
+ FreeMemoryRegion(data_page_allocator(), chunk->address(),
+ static_cast<size_t>(MemoryChunk::kPageSize));
+}
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
+ size_t size, Space* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
- if (alloc_mode == kPooled) {
+ if (alloc_mode == kUsePool) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
owner->identity())));
@@ -566,22 +563,12 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocatePagePooled(owner);
}
if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, owner);
+ chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner);
}
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
ReadOnlySpace* owner) {
BasicMemoryChunk* chunk =
@@ -599,13 +586,13 @@ MemoryAllocator::RemapSharedPage(
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ MemoryChunk* chunk =
+ AllocateChunk(size, size, executable, PageSize::kLarge, owner);
if (chunk == nullptr) return nullptr;
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
@@ -624,7 +611,8 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
BasicMemoryChunk* basic_chunk =
BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
area_end, owner, std::move(reservation));
- MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE,
+ PageSize::kRegular);
size_ += size;
return chunk;
}
@@ -637,18 +625,16 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
size >> kTaggedSizeLog2);
}
-intptr_t MemoryAllocator::GetCommitPageSize() {
- if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
- return FLAG_v8_os_page_size * KB;
- } else {
- return CommitPageSize();
- }
+void MemoryAllocator::InitializeOncePerProcess() {
+ commit_page_size_ =
+ FLAG_v8_os_page_size > 0 ? FLAG_v8_os_page_size * KB : CommitPageSize();
+ CHECK(base::bits::IsPowerOfTwo(commit_page_size_));
+ commit_page_size_bits_ = base::bits::WhichPowerOfTwo(commit_page_size_);
}
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
size_t size) {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
+ size_t page_size = GetCommitPageSize();
if (size < page_size + FreeSpace::kSize) {
return base::AddressRegion(0, 0);
}
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index 49b5a769cf..f7a5da5c26 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -50,9 +50,9 @@ class MemoryAllocator {
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
- AddMemoryChunkSafe<kRegular>(chunk);
+ AddMemoryChunkSafe(kRegular, chunk);
} else {
- AddMemoryChunkSafe<kNonRegular>(chunk);
+ AddMemoryChunkSafe(kNonRegular, chunk);
}
}
@@ -61,10 +61,10 @@ class MemoryAllocator {
// (1) Try to get a chunk that was declared as pooled and already has
// been uncommitted.
// (2) Try to steal any memory chunk of kPageSize that would've been
- // unmapped.
- MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+ // uncommitted.
+ MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
if (chunk == nullptr) {
- chunk = GetMemoryChunkSafe<kRegular>();
+ chunk = GetMemoryChunkSafe(kRegular);
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllAllocatedMemory();
@@ -90,23 +90,24 @@ class MemoryAllocator {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
// can thus be used for stealing.
kNonRegular, // Large chunks and executable chunks.
- kPooled, // Pooled chunks, already uncommited and ready for reuse.
+ kPooled, // Pooled chunks, already freed and ready for reuse.
kNumberOfChunkQueues,
};
enum class FreeMode {
+ // Disables any access on pooled pages before adding them to the pool.
kUncommitPooled,
- kReleasePooled,
+
+ // Free pooled pages. Only used on tear down and last-resort GCs.
+ kFreePooled,
};
- template <ChunkQueueType type>
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ void AddMemoryChunkSafe(ChunkQueueType type, MemoryChunk* chunk) {
base::MutexGuard guard(&mutex_);
chunks_[type].push_back(chunk);
}
- template <ChunkQueueType type>
- MemoryChunk* GetMemoryChunkSafe() {
+ MemoryChunk* GetMemoryChunkSafe(ChunkQueueType type) {
base::MutexGuard guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
MemoryChunk* chunk = chunks_[type].back();
@@ -116,8 +117,8 @@ class MemoryAllocator {
bool MakeRoomForNewTasks();
- template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
+ void PerformFreeMemoryOnQueuedChunks(FreeMode mode,
+ JobDelegate* delegate = nullptr);
void PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate = nullptr);
@@ -132,18 +133,38 @@ class MemoryAllocator {
};
enum AllocationMode {
+ // Regular allocation path. Does not use pool.
kRegular,
- kPooled,
+
+ // Uses the pool for allocation first.
+ kUsePool,
};
enum FreeMode {
- kFull,
- kAlreadyPooled,
- kPreFreeAndQueue,
- kPooledAndQueue,
+ // Frees page immediately on the main thread.
+ kImmediately,
+
+ // Frees page on background thread.
+ kConcurrently,
+
+ // Uncommits but does not free page on background thread. Page is added to
+ // pool. Used to avoid the munmap/mmap-cycle when we quickly reallocate
+ // pages.
+ kConcurrentlyAndPool,
};
- V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
+ // Initialize page sizes field in V8::Initialize.
+ static void InitializeOncePerProcess();
+
+ V8_INLINE static intptr_t GetCommitPageSize() {
+ DCHECK_LT(0, commit_page_size_);
+ return commit_page_size_;
+ }
+
+ V8_INLINE static intptr_t GetCommitPageSizeBits() {
+ DCHECK_LT(0, commit_page_size_bits_);
+ return commit_page_size_bits_;
+ }
// Computes the memory area of discardable memory within a given memory area
// [addr, addr+size) and returns the result as base::AddressRegion. If the
@@ -160,10 +181,9 @@ class MemoryAllocator {
// Allocates a Page from the allocator. AllocationMode is used to indicate
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
// should be tried first.
- template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
- typename SpaceType>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
+ V8_EXPORT_PRIVATE Page* AllocatePage(
+ MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
+ Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
@@ -173,9 +193,8 @@ class MemoryAllocator {
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
- template <MemoryAllocator::FreeMode mode = kFull>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- void Free(MemoryChunk* chunk);
+ V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode,
+ MemoryChunk* chunk);
void FreeReadOnlyPage(ReadOnlyPage* chunk);
// Returns allocated spaces in bytes.
@@ -197,27 +216,15 @@ class MemoryAllocator {
address >= highest_ever_allocated_;
}
- // Returns a BasicMemoryChunk in which the memory region from commit_area_size
- // to reserve_area_size of the chunk area is reserved but not committed, it
- // could be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
- size_t reserve_area_size, size_t commit_area_size,
- Executability executable, BaseSpace* space);
-
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
+ PageSize page_size,
BaseSpace* space);
- Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
- size_t alignment, Executability executable,
- void* hint, VirtualMemory* controller);
-
- void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
-
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
@@ -234,23 +241,10 @@ class MemoryAllocator {
}
#endif // DEBUG
- // Commit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool CommitMemory(VirtualMemory* reservation);
-
- // Uncommit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool UncommitMemory(VirtualMemory* reservation);
-
// Zaps a contiguous block of memory [start..(start+size)[ with
// a given zap value.
void ZapBlock(Address start, size_t size, uintptr_t zap_value);
- V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
-
// Page allocator instance for allocating non-executable pages.
// Guaranteed to be a valid pointer.
v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
@@ -268,16 +262,37 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
- // Performs all necessary bookkeeping to free the memory, but does not free
- // it.
- void UnregisterMemory(MemoryChunk* chunk);
- void UnregisterMemory(BasicMemoryChunk* chunk,
- Executability executable = NOT_EXECUTABLE);
- void UnregisterSharedMemory(BasicMemoryChunk* chunk);
-
- void RegisterReadOnlyMemory(ReadOnlyPage* page);
+ void UnregisterReadOnlyPage(ReadOnlyPage* page);
private:
+ // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+ // to reserve_area_size of the chunk area is reserved but not committed, it
+ // could be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size,
+ Executability executable, BaseSpace* space);
+
+ Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ size_t alignment, Executability executable,
+ void* hint, VirtualMemory* controller);
+
+ // Commit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool CommitMemory(VirtualMemory* reservation);
+
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
+
+ // Disallows any access on memory region owned by given reservation object.
+ // Returns true if it succeeded and false otherwise.
+ bool UncommitMemory(VirtualMemory* reservation);
+
+ // Frees the given memory region.
+ void FreeMemoryRegion(v8::PageAllocator* page_allocator, Address addr,
+ size_t size);
+
// PreFreeMemory logically frees the object, i.e., it unregisters the
// memory, logs a delete event and adds the chunk to remembered unmapped
// pages.
@@ -289,8 +304,10 @@ class MemoryAllocator {
// See AllocatePage for public interface. Note that currently we only
// support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
- template <typename SpaceType>
- MemoryChunk* AllocatePagePooled(SpaceType* owner);
+ MemoryChunk* AllocatePagePooled(Space* owner);
+
+ // Frees a pooled page. Only used on tear-down and last-resort GCs.
+ void FreePooledChunk(MemoryChunk* chunk);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
@@ -313,6 +330,15 @@ class MemoryAllocator {
}
}
+ // Performs all necessary bookkeeping to free the memory, but does not free
+ // it.
+ void UnregisterMemoryChunk(MemoryChunk* chunk);
+ void UnregisterSharedBasicMemoryChunk(BasicMemoryChunk* chunk);
+ void UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+ Executability executable = NOT_EXECUTABLE);
+
+ void RegisterReadOnlyMemory(ReadOnlyPage* page);
+
#ifdef DEBUG
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
base::MutexGuard guard(&executable_memory_mutex_);
@@ -369,35 +395,15 @@ class MemoryAllocator {
base::Mutex executable_memory_mutex_;
#endif // DEBUG
+ V8_EXPORT_PRIVATE static size_t commit_page_size_;
+ V8_EXPORT_PRIVATE static size_t commit_page_size_bits_;
+
friend class heap::TestCodePageAllocatorScope;
friend class heap::TestMemoryAllocatorScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 1b958f0cbf..9a76730e82 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/progress-bar.h"
@@ -27,10 +28,13 @@ class SlotSet;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
- OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_OLD + 1 : OLD_TO_OLD,
+ OLD_TO_SHARED,
+ OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_SHARED + 1 : OLD_TO_SHARED,
NUMBER_OF_REMEMBERED_SET_TYPES
};
+using ActiveSystemPages = ::heap::base::ActiveSystemPages;
+
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static const int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES;
@@ -67,6 +71,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(Bitmap*, YoungGenerationBitmap),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
+ FIELD(ActiveSystemPages, ActiveSystemPages),
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
FIELD(ObjectStartBitmap, ObjectStartBitmap),
#endif
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index d4d1116683..08baeee8b2 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -6,9 +6,11 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
+#include "src/common/globals.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
@@ -117,11 +119,14 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
} // namespace
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable) {
+ Executability executable,
+ PageSize page_size) {
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
+ nullptr);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
nullptr);
@@ -131,6 +136,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
+ nullptr);
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
@@ -176,6 +183,15 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
+ if (page_size == PageSize::kRegular) {
+ chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(),
+ chunk->size());
+ } else {
+ // We do not track active system pages for large pages.
+ chunk->active_system_pages_.Clear();
+ }
+
// All pages of a shared heap need to be marked with this flag.
if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
@@ -191,9 +207,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
- return size();
- return high_water_mark_;
+ if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
+ return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
@@ -259,6 +274,8 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+template V8_EXPORT_PRIVATE SlotSet*
+MemoryChunk::AllocateSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
@@ -286,6 +303,7 @@ SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
@@ -308,6 +326,7 @@ void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_SHARED>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
@@ -324,6 +343,7 @@ TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_SHARED>();
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index de6f09234b..8a8f556426 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
@@ -219,7 +220,7 @@ class MemoryChunk : public BasicMemoryChunk {
protected:
static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable);
+ Executability executable, PageSize page_size);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
@@ -291,6 +292,8 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
+ ActiveSystemPages active_system_pages_;
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
ObjectStartBitmap object_start_bitmap_;
#endif
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index 72112d2426..0f1a3a361a 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -102,17 +102,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
result = AllocateFastUnaligned(size_in_bytes, origin);
}
- if (!result.IsRetry()) {
- return result;
- } else {
- return AllocateRawSlow(size_in_bytes, alignment, origin);
- }
+ return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+ : result;
}
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
@@ -124,7 +121,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
- return obj;
+ return AllocationResult::FromObject(obj);
}
AllocationResult NewSpace::AllocateFastAligned(
@@ -135,7 +132,7 @@ AllocationResult NewSpace::AllocateFastAligned(
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject obj = HeapObject::FromAddress(
allocation_info_->IncrementTop(aligned_size_in_bytes));
@@ -153,7 +150,7 @@ AllocationResult NewSpace::AllocateFastAligned(
UpdateAllocationOrigins(origin);
}
- return obj;
+ return AllocationResult::FromObject(obj);
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 6155a06f77..685e631f23 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -24,7 +24,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->list_node().Initialize();
-#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -32,7 +31,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
-#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
@@ -54,12 +52,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
// Free all overallocated pages which are behind current_page.
while (current_page) {
MemoryChunk* next_current = current_page->list_node().next();
+ AccountUncommitted(Page::kPageSize);
+ DecrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- current_page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ current_page);
current_page = next_current;
}
@@ -68,12 +68,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
- current_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ current_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
+ AccountCommitted(Page::kPageSize);
+ IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
@@ -105,22 +107,23 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!IsCommitted());
+ DCHECK_EQ(CommittedMemory(), size_t(0));
const int num_pages = static_cast<int>(target_capacity_ / Page::kPageSize);
DCHECK(num_pages);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
// Pages in the new spaces can be moved to the old space by the full
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ Page* new_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
DCHECK(!IsCommitted());
return false;
}
memory_chunk_list_.PushBack(new_page);
+ IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
}
Reset();
AccountCommitted(target_capacity_);
@@ -133,14 +136,22 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(IsCommitted());
+ int actual_pages = 0;
while (!memory_chunk_list_.Empty()) {
+ actual_pages++;
MemoryChunk* chunk = memory_chunk_list_.front();
+ DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ chunk);
}
current_page_ = nullptr;
current_capacity_ = 0;
- AccountUncommitted(target_capacity_);
+ size_t removed_page_size =
+ static_cast<size_t>(actual_pages * Page::kPageSize);
+ DCHECK_EQ(CommittedMemory(), removed_page_size);
+ DCHECK_EQ(CommittedPhysicalMemory(), 0);
+ AccountUncommitted(removed_page_size);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
DCHECK(!IsCommitted());
return true;
@@ -148,11 +159,8 @@ bool SemiSpace::Uncommit() {
size_t SemiSpace::CommittedPhysicalMemory() {
if (!IsCommitted()) return 0;
- size_t size = 0;
- for (Page* p : *this) {
- size += p->CommittedPhysicalMemory();
- }
- return size;
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ return committed_physical_memory_;
}
bool SemiSpace::GrowTo(size_t new_capacity) {
@@ -169,16 +177,16 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ Page* new_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
+ IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
// Duplicate the flags that was set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
@@ -193,7 +201,9 @@ void SemiSpace::RewindPages(int num_pages) {
while (num_pages > 0) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
+ DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ last);
num_pages--;
}
}
@@ -246,6 +256,8 @@ void SemiSpace::RemovePage(Page* page) {
}
}
memory_chunk_list_.Remove(page);
+ AccountUncommitted(Page::kPageSize);
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
@@ -257,6 +269,8 @@ void SemiSpace::PrependPage(Page* page) {
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
+ AccountCommitted(Page::kPageSize);
+ IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
@@ -286,11 +300,39 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->current_page_, to->current_page_);
std::swap(from->external_backing_store_bytes_,
to->external_backing_store_bytes_);
+ std::swap(from->committed_physical_memory_, to->committed_physical_memory_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
}
+void SemiSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
+ if (!base::OS::HasLazyCommits()) return;
+ DCHECK_LE(committed_physical_memory_,
+ committed_physical_memory_ + increment_value);
+ committed_physical_memory_ += increment_value;
+}
+
+void SemiSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
+ if (!base::OS::HasLazyCommits()) return;
+ DCHECK_LE(decrement_value, committed_physical_memory_);
+ committed_physical_memory_ -= decrement_value;
+}
+
+void SemiSpace::AddRangeToActiveSystemPages(Address start, Address end) {
+ Page* page = current_page();
+
+ DCHECK_LE(page->address(), start);
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, page->address() + Page::kPageSize);
+
+ const size_t added_pages = page->active_system_pages()->Add(
+ start - page->address(), end - page->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+ IncrementCommittedPhysicalMemory(added_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
@@ -318,6 +360,9 @@ void SemiSpace::Verify() {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ int actual_pages = 0;
+ size_t computed_committed_physical_memory = 0;
+
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
@@ -341,9 +386,15 @@ void SemiSpace::Verify() {
external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
}
+ computed_committed_physical_memory += page->CommittedPhysicalMemory();
+
CHECK_IMPLIES(page->list_node().prev(),
page->list_node().prev()->list_node().next() == page);
+ actual_pages++;
}
+ CHECK_EQ(actual_pages * size_t(Page::kPageSize), CommittedMemory());
+ CHECK_EQ(computed_committed_physical_memory, CommittedPhysicalMemory());
+
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
@@ -478,6 +529,8 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
}
+
+ to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
UpdateInlineAllocationLimit(0);
@@ -621,13 +674,13 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
@@ -640,7 +693,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -649,7 +702,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, aligned_size_in_bytes);
@@ -704,6 +757,11 @@ void NewSpace::Verify(Isolate* isolate) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet(
+ Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION));
+
PtrComprCageBase cage_base(isolate);
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
@@ -717,7 +775,8 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->space_for_maps()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap(cage_base));
@@ -742,6 +801,8 @@ void NewSpace::Verify(Isolate* isolate) {
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
current = page->area_start();
}
}
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index b1bec1b032..b31dfa28e4 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -6,7 +6,6 @@
#define V8_HEAP_NEW_SPACES_H_
#include <atomic>
-#include <map>
#include <memory>
#include "src/base/macros.h"
@@ -107,7 +106,7 @@ class SemiSpace : public Space {
void PrependPage(Page* page);
void MovePageToTheEnd(Page* page);
- Page* InitializePage(MemoryChunk* chunk);
+ Page* InitializePage(MemoryChunk* chunk) override;
// Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -139,11 +138,18 @@ class SemiSpace : public Space {
size_t Available() override { UNREACHABLE(); }
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
+ Page* first_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.front());
+ }
+ Page* last_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.back());
+ }
- const Page* first_page() const {
- return reinterpret_cast<const Page*>(Space::first_page());
+ const Page* first_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.front());
+ }
+ const Page* last_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.back());
}
iterator begin() { return iterator(first_page()); }
@@ -169,12 +175,17 @@ class SemiSpace : public Space {
virtual void Verify();
#endif
+ void AddRangeToActiveSystemPages(Address start, Address end);
+
private:
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
+ void IncrementCommittedPhysicalMemory(size_t increment_value);
+ void DecrementCommittedPhysicalMemory(size_t decrement_value);
+
// The currently committed space capacity.
size_t current_capacity_;
@@ -191,6 +202,8 @@ class SemiSpace : public Space {
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
+ size_t committed_physical_memory_{0};
+
SemiSpaceId id_;
Page* current_page_;
@@ -447,8 +460,11 @@ class V8_EXPORT_PRIVATE NewSpace
SemiSpace* active_space() { return &to_space_; }
- Page* first_page() { return to_space_.first_page(); }
- Page* last_page() { return to_space_.last_page(); }
+ Page* first_page() override { return to_space_.first_page(); }
+ Page* last_page() override { return to_space_.last_page(); }
+
+ const Page* first_page() const override { return to_space_.first_page(); }
+ const Page* last_page() const override { return to_space_.last_page(); }
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -478,7 +494,7 @@ class V8_EXPORT_PRIVATE NewSpace
void MakeLinearAllocationAreaIterable();
// Creates a filler object in the linear allocation area and closes it.
- void FreeLinearAllocationArea();
+ void FreeLinearAllocationArea() override;
private:
static const int kAllocationBufferParkingThreshold = 4 * KB;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 379356a797..a495d259ad 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -443,6 +443,11 @@ class ObjectStatsCollectorImpl {
void RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription description);
+
+ PtrComprCageBase cage_base() const {
+ return field_stats_collector_.cage_base();
+ }
+
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
@@ -488,7 +493,7 @@ void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type) {
- return RecordVirtualObjectStats(parent, obj, type, obj.Size(),
+ return RecordVirtualObjectStats(parent, obj, type, obj.Size(cage_base()),
ObjectStats::kNoOverAllocation, kCheckCow);
}
@@ -657,13 +662,13 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kDefineNamedOwn:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
}
@@ -711,7 +716,8 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
MaybeObject raw_object = vector.Get(slot.WithOffset(i));
HeapObject object;
if (raw_object->GetHeapObject(&object)) {
- if (object.IsCell() || object.IsWeakFixedArray()) {
+ if (object.IsCell(cage_base()) ||
+ object.IsWeakFixedArray(cage_base())) {
RecordSimpleVirtualObjectStats(
vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
@@ -733,51 +739,55 @@ void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
void ObjectStatsCollectorImpl::CollectStatistics(
HeapObject obj, Phase phase, CollectFieldStats collect_field_stats) {
- Map map = obj.map();
+ DisallowGarbageCollection no_gc;
+ Map map = obj.map(cage_base());
+ InstanceType instance_type = map.instance_type();
switch (phase) {
case kPhase1:
- if (obj.IsFeedbackVector()) {
+ if (InstanceTypeChecker::IsFeedbackVector(instance_type)) {
RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
- } else if (obj.IsMap()) {
+ } else if (InstanceTypeChecker::IsMap(instance_type)) {
RecordVirtualMapDetails(Map::cast(obj));
- } else if (obj.IsBytecodeArray()) {
+ } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
- } else if (obj.IsCode()) {
+ } else if (InstanceTypeChecker::IsCode(instance_type)) {
RecordVirtualCodeDetails(Code::cast(obj));
- } else if (obj.IsFunctionTemplateInfo()) {
+ } else if (InstanceTypeChecker::IsFunctionTemplateInfo(instance_type)) {
RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo::cast(obj));
- } else if (obj.IsJSGlobalObject()) {
+ } else if (InstanceTypeChecker::IsJSGlobalObject(instance_type)) {
RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
- } else if (obj.IsJSObject()) {
+ } else if (InstanceTypeChecker::IsJSObject(instance_type)) {
// This phase needs to come after RecordVirtualAllocationSiteDetails
// to properly split among boilerplates.
RecordVirtualJSObjectDetails(JSObject::cast(obj));
- } else if (obj.IsSharedFunctionInfo()) {
+ } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
- } else if (obj.IsContext()) {
+ } else if (InstanceTypeChecker::IsContext(instance_type)) {
RecordVirtualContext(Context::cast(obj));
- } else if (obj.IsScript()) {
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj.IsArrayBoilerplateDescription()) {
+ } else if (InstanceTypeChecker::IsArrayBoilerplateDescription(
+ instance_type)) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
- } else if (obj.IsFixedArrayExact()) {
+ } else if (InstanceTypeChecker::IsFixedArrayExact(instance_type)) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
}
break;
case kPhase2:
- if (obj.IsExternalString()) {
+ if (InstanceTypeChecker::IsExternalString(instance_type)) {
// This has to be in Phase2 to avoid conflicting with recording Script
// sources. We still want to run RecordObjectStats after though.
RecordVirtualExternalStringDetails(ExternalString::cast(obj));
}
size_t over_allocated = ObjectStats::kNoOverAllocation;
- if (obj.IsJSObject()) {
+ if (InstanceTypeChecker::IsJSObject(instance_type)) {
over_allocated = map.instance_size() - map.UsedInstanceSize();
}
- RecordObjectStats(obj, map.instance_type(), obj.Size(), over_allocated);
+ RecordObjectStats(obj, instance_type, obj.Size(cage_base()),
+ over_allocated);
if (collect_field_stats == CollectFieldStats::kYes) {
field_stats_collector_.RecordStats(obj);
}
@@ -788,7 +798,7 @@ void ObjectStatsCollectorImpl::CollectStatistics(
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Iterate boilerplates first to disambiguate them from regular JS objects.
Object list = heap_->allocation_sites_list();
- while (list.IsAllocationSite()) {
+ while (list.IsAllocationSite(cage_base())) {
AllocationSite site = AllocationSite::cast(list);
RecordVirtualAllocationSiteDetails(site);
list = site.weak_next();
@@ -829,7 +839,7 @@ bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
}
bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
- return array.map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
+ return array.map(cage_base()) == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
@@ -868,7 +878,7 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map.instance_descriptors(isolate());
+ DescriptorArray array = map.instance_descriptors(cage_base());
if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
@@ -891,10 +901,10 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
}
if (map.is_prototype_map()) {
- if (map.prototype_info().IsPrototypeInfo()) {
+ if (map.prototype_info().IsPrototypeInfo(cage_base())) {
PrototypeInfo info = PrototypeInfo::cast(map.prototype_info());
Object users = info.prototype_users();
- if (users.IsWeakFixedArray()) {
+ if (users.IsWeakFixedArray(cage_base())) {
RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
@@ -909,7 +919,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
// Log the size of external source code.
Object raw_source = script.source();
- if (raw_source.IsExternalString()) {
+ if (raw_source.IsExternalString(cage_base())) {
// The contents of external strings aren't on the heap, so we have to record
// them manually. The on-heap String object is recorded indepentendely in
// the normal pass.
@@ -922,7 +932,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
? ObjectStats::SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE
: ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
off_heap_size);
- } else if (raw_source.IsString()) {
+ } else if (raw_source.IsString(cage_base())) {
String source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
script, source,
@@ -940,7 +950,7 @@ void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
size_t off_heap_size = string.ExternalPayloadSize();
RecordExternalResourceStats(
resource,
- string.IsOneByteRepresentation()
+ string.IsOneByteRepresentation(cage_base())
? ObjectStats::STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE
: ObjectStats::STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE,
off_heap_size);
@@ -967,7 +977,7 @@ void ObjectStatsCollectorImpl::
HeapObject parent, HeapObject object,
ObjectStats::VirtualInstanceType type) {
if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
- if (object.IsFixedArrayExact()) {
+ if (object.IsFixedArrayExact(cage_base())) {
FixedArray array = FixedArray::cast(object);
for (int i = 0; i < array.length(); i++) {
Object entry = array.get(i);
@@ -988,7 +998,7 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
FixedArray constant_pool = FixedArray::cast(bytecode.constant_pool());
for (int i = 0; i < constant_pool.length(); i++) {
Object entry = constant_pool.get(i);
- if (entry.IsFixedArrayExact()) {
+ if (entry.IsFixedArrayExact(cage_base())) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -1041,11 +1051,10 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
}
}
int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
- PtrComprCageBase cage_base(heap_->isolate());
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
- Object target = it.rinfo()->target_object(cage_base);
- if (target.IsFixedArrayExact()) {
+ Object target = it.rinfo()->target_object(cage_base());
+ if (target.IsFixedArrayExact(cage_base())) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
@@ -1055,7 +1064,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context.IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
- if (context.retained_maps().IsWeakArrayList()) {
+ if (context.retained_maps().IsWeakArrayList(cage_base())) {
RecordSimpleVirtualObjectStats(
context, WeakArrayList::cast(context.retained_maps()),
ObjectStats::RETAINED_MAPS_TYPE);
@@ -1101,6 +1110,9 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
+ // We don't perform a GC while collecting object stats but need this scope for
+ // the nested SafepointScope inside CombinedHeapObjectIterator.
+ AllowGarbageCollection allow_gc;
CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 715b83b9ac..b3770fc6c8 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -133,6 +133,19 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object);
}
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ // The following types have external pointers, which must be visited.
+ // TODO(v8:10391) Consider adding custom visitor IDs for these.
+ if (object.IsExternalOneByteString()) {
+ ExternalOneByteString::BodyDescriptor::IterateBody(map, object, size,
+ visitor);
+ } else if (object.IsExternalTwoByteString()) {
+ ExternalTwoByteString::BodyDescriptor::IterateBody(map, object, size,
+ visitor);
+ } else if (object.IsForeign()) {
+ Foreign::BodyDescriptor::IterateBody(map, object, size, visitor);
+ }
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
return static_cast<ResultType>(size);
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 858e279ec4..32df0a46d9 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -30,6 +30,7 @@ namespace internal {
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
@@ -62,6 +63,7 @@ namespace internal {
IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmSuspenderObject) \
+ IF_WASM(V, WasmOnFulfilledData) \
IF_WASM(V, WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index 22b07c7442..fbade0ea3d 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -95,9 +95,9 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
- return AllocationResult(
+ return AllocationResult::FromObject(
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
}
@@ -108,7 +108,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
if (!allocation_info_->CanIncrementTop(aligned_size)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
@@ -116,18 +116,18 @@ AllocationResult PagedSpace::AllocateFastAligned(
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
- return AllocationResult(obj);
+ return AllocationResult::FromObject(obj);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@@ -152,12 +152,12 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLabMain(allocation_size, origin)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
int aligned_size_in_bytes;
AllocationResult result =
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@@ -183,11 +183,8 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
result = AllocateFastUnaligned(size_in_bytes);
}
- if (!result.IsRetry()) {
- return result;
- } else {
- return AllocateRawSlow(size_in_bytes, alignment, origin);
- }
+ return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+ : result;
}
} // namespace internal
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 0db2d5f989..c5604254be 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -4,6 +4,8 @@
#include "src/heap/paged-spaces.h"
+#include <atomic>
+
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
@@ -13,8 +15,10 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
@@ -103,7 +107,7 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
}
accounting_stats_.Clear();
}
@@ -211,15 +215,42 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
}
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) {
+ DCHECK_EQ(0, committed_physical_memory());
+ return CommittedMemory();
+ }
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
- base::MutexGuard guard(mutex());
+ return committed_physical_memory();
+}
+
+void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
+ if (!base::OS::HasLazyCommits() || increment_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_add(
+ increment_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_LT(old_value, old_value + increment_value);
+}
+
+void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
+ if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_sub(
+ decrement_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_GT(old_value, old_value - decrement_value);
+}
+
+#if DEBUG
+void PagedSpace::VerifyCommittedPhysicalMemory() {
+ heap()->safepoint()->AssertActive();
size_t size = 0;
for (Page* page : *this) {
+ DCHECK(page->SweepingDone());
size += page->CommittedPhysicalMemory();
}
- return size;
+ // Ensure that the space's counter matches the sum of all page counters.
+ DCHECK_EQ(size, CommittedPhysicalMemory());
}
+#endif // DEBUG
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
@@ -264,6 +295,7 @@ size_t PagedSpace::AddPage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
return RelinkFreeListCategories(page);
}
@@ -278,6 +310,7 @@ void PagedSpace::RemovePage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
@@ -319,8 +352,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
Page* PagedSpace::AllocatePage() {
- return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
- executable());
+ return heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kRegular, AreaSize(), this, executable());
}
Page* PagedSpace::Expand() {
@@ -334,7 +367,7 @@ Page* PagedSpace::Expand() {
}
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
- LocalHeap* local_heap, size_t size_in_bytes) {
+ size_t size_in_bytes) {
Page* page = AllocatePage();
if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
@@ -346,6 +379,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
CHECK_LE(size_in_bytes, page->area_size());
Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
SpaceAccountingMode::kSpaceAccounted);
+ AddRangeToActiveSystemPages(page, object_start, object_start + size_in_bytes);
return std::make_pair(object_start, size_in_bytes);
}
@@ -492,8 +526,9 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(page->size());
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
}
void PagedSpace::SetReadable() {
@@ -573,6 +608,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
+ AddRangeToActiveSystemPages(page, start, limit);
return true;
}
@@ -585,10 +621,11 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
identity() == MAP_SPACE);
DCHECK(origin == AllocationOrigin::kRuntime ||
origin == AllocationOrigin::kGC);
+ DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);
base::Optional<std::pair<Address, size_t>> result =
- TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
- max_size_in_bytes, alignment, origin);
+ TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
+ alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -600,7 +637,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Retry the free list allocation.
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
if (IsSweepingAllowedOnThread(local_heap)) {
@@ -619,8 +656,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
- origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
}
}
@@ -628,7 +664,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- result = ExpandBackground(local_heap, max_size_in_bytes);
+ result = ExpandBackground(max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
@@ -645,15 +681,14 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Last try to acquire memory from free list.
return TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
return {};
}
base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
@@ -694,13 +729,15 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
+ AddRangeToActiveSystemPages(page, start, limit);
return std::make_pair(start, used_size_in_bytes);
}
bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
// Code space sweeping is only allowed on main thread.
- return local_heap->is_main_thread() || identity() != CODE_SPACE;
+ return (local_heap && local_heap->is_main_thread()) ||
+ identity() != CODE_SPACE;
}
#ifdef DEBUG
@@ -742,7 +779,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
+ isolate->heap()->space_for_maps()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -772,6 +809,9 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
external_space_bytes[t] += external_page_bytes[t];
}
+
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
for (int i = 0; i < kNumTypes; i++) {
if (i == ExternalBackingStoreType::kArrayBuffer) continue;
@@ -1003,6 +1043,28 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
return result;
}
+void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
+ Address end) {
+ DCHECK_LE(page->address(), start);
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, page->address() + Page::kPageSize);
+
+ const size_t added_pages = page->active_system_pages()->Add(
+ start - page->address(), end - page->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+
+ IncrementCommittedPhysicalMemory(added_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
+void PagedSpace::ReduceActiveSystemPages(
+ Page* page, ActiveSystemPages active_system_pages) {
+ const size_t reduced_pages =
+ page->active_system_pages()->Reduce(active_system_pages);
+ DecrementCommittedPhysicalMemory(reduced_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
// -----------------------------------------------------------------------------
// MapSpace implementation
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index bdc4dee23f..2df7083a84 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_
+#include <atomic>
#include <memory>
#include <utility>
@@ -15,6 +16,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -108,6 +110,13 @@ class V8_EXPORT_PRIVATE PagedSpace
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
+#if DEBUG
+ void VerifyCommittedPhysicalMemory();
+#endif // DEBUG
+
+ void IncrementCommittedPhysicalMemory(size_t increment_value);
+ void DecrementCommittedPhysicalMemory(size_t decrement_value);
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -194,7 +203,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
- void FreeLinearAllocationArea();
+ void FreeLinearAllocationArea() override;
void MakeLinearAllocationAreaIterable();
@@ -216,7 +225,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- Page* InitializePage(MemoryChunk* chunk);
+ Page* InitializePage(MemoryChunk* chunk) override;
void ReleasePage(Page* page);
@@ -289,9 +298,11 @@ class V8_EXPORT_PRIVATE PagedSpace
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- const Page* first_page() const {
- return reinterpret_cast<const Page*>(Space::first_page());
+ Page* first_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.front());
+ }
+ const Page* first_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
@@ -325,6 +336,10 @@ class V8_EXPORT_PRIVATE PagedSpace
return &pending_allocation_mutex_;
}
+ void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
+ void ReduceActiveSystemPages(Page* page,
+ ActiveSystemPages active_system_pages);
+
private:
class ConcurrentAllocationMutex {
public:
@@ -374,7 +389,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// a memory area of the given size in it. If successful the method returns
// the address and size of the area.
base::Optional<std::pair<Address, size_t>> ExpandBackground(
- LocalHeap* local_heap, size_t size_in_bytes);
+ size_t size_in_bytes);
Page* AllocatePage();
@@ -413,8 +428,7 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- TryAllocationFromFreeListBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
+ TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
@@ -422,6 +436,10 @@ class V8_EXPORT_PRIVATE PagedSpace
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
+ size_t committed_physical_memory() const {
+ return committed_physical_memory_.load(std::memory_order_relaxed);
+ }
+
Executability executable_;
CompactionSpaceKind compaction_space_kind_;
@@ -442,6 +460,8 @@ class V8_EXPORT_PRIVATE PagedSpace
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
+ std::atomic<size_t> committed_physical_memory_{0};
+
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -485,6 +505,8 @@ class CompactionSpaceCollection : public Malloced {
CompactionSpaceKind compaction_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
+ map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
+ compaction_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
compaction_space_kind) {}
@@ -492,6 +514,8 @@ class CompactionSpaceCollection : public Malloced {
switch (space) {
case OLD_SPACE:
return &old_space_;
+ case MAP_SPACE:
+ return &map_space_;
case CODE_SPACE:
return &code_space_;
default:
@@ -502,6 +526,7 @@ class CompactionSpaceCollection : public Malloced {
private:
CompactionSpace old_space_;
+ CompactionSpace map_space_;
CompactionSpace code_space_;
};
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 3fa267d26c..9265ca5963 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -397,7 +397,7 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
DetachFromHeap();
for (ReadOnlyPage* p : pages_) {
if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
- memory_allocator->UnregisterMemory(p);
+ memory_allocator->UnregisterReadOnlyPage(p);
}
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
p->MakeHeaderRelocatable();
@@ -533,6 +533,9 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
CHECK(!object.IsExternalString());
CHECK(!object.IsJSArrayBuffer());
}
+
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
CHECK(allocation_pointer_found_in_space);
@@ -667,7 +670,7 @@ AllocationResult ReadOnlySpace::AllocateRawAligned(
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
@@ -687,7 +690,7 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
chunk->IncreaseAllocatedBytes(size_in_bytes);
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
@@ -697,7 +700,7 @@ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
- if (!result.IsRetry() && result.To(&heap_obj)) {
+ if (result.To(&heap_obj)) {
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
diff --git a/deps/v8/src/heap/reference-summarizer.cc b/deps/v8/src/heap/reference-summarizer.cc
new file mode 100644
index 0000000000..fd2668e140
--- /dev/null
+++ b/deps/v8/src/heap/reference-summarizer.cc
@@ -0,0 +1,116 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/reference-summarizer.h"
+
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/marking-visitor-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// A class which acts as a MarkingState but does not actually update any marking
+// bits. It reports all objects as white and all transitions as successful. It
+// also tracks which objects are retained by the primary object according to the
+// marking visitor.
+class ReferenceSummarizerMarkingState final {
+ public:
+ // Declares that this marking state is collecting retainers, so the marking
+ // visitor must fully visit each object and can't update on-heap state.
+ static constexpr bool kCollectRetainers = true;
+
+ explicit ReferenceSummarizerMarkingState(HeapObject object)
+ : primary_object_(object),
+ local_marking_worklists_(&marking_worklists_),
+ local_weak_objects_(&weak_objects_) {}
+
+ ~ReferenceSummarizerMarkingState() {
+ // Clean up temporary state.
+ local_weak_objects_.Publish();
+ weak_objects_.Clear();
+ local_marking_worklists_.Publish();
+ marking_worklists_.Clear();
+ }
+
+ // Retrieves the references that were collected by this marker. This operation
+ // transfers ownership of the set, so calling it again would yield an empty
+ // result.
+ ReferenceSummary DestructivelyRetrieveReferences() {
+ ReferenceSummary tmp = std::move(references_);
+ references_.Clear();
+ return tmp;
+ }
+
+ // Standard marking visitor functions:
+
+ bool IsWhite(HeapObject obj) const { return true; }
+
+ bool IsBlackOrGrey(HeapObject obj) const { return false; }
+
+ bool WhiteToGrey(HeapObject obj) { return true; }
+
+ bool GreyToBlack(HeapObject obj) { return true; }
+
+ // Adds a retaining relationship found by the marking visitor.
+ void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ AddReference(host, obj, references_.strong_references());
+ }
+
+ // Adds a non-retaining weak reference found by the marking visitor. The value
+ // in an ephemeron hash table entry is also included here, since it is not
+ // known to be strong without further information about the key.
+ void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
+ AddReference(host, obj, references_.weak_references());
+ }
+
+ // Other member functions, not part of the marking visitor contract:
+
+ MarkingWorklists::Local* local_marking_worklists() {
+ return &local_marking_worklists_;
+ }
+ WeakObjects::Local* local_weak_objects() { return &local_weak_objects_; }
+
+ private:
+ void AddReference(
+ HeapObject host, HeapObject obj,
+ std::unordered_set<HeapObject, Object::Hasher>& references) {
+ // It's possible that the marking visitor handles multiple objects at once,
+ // such as a Map and its DescriptorArray, but we're only interested in
+ // references from the primary object.
+ if (host == primary_object_) {
+ references.insert(obj);
+ }
+ }
+
+ ReferenceSummary references_;
+ HeapObject primary_object_;
+ MarkingWorklists marking_worklists_;
+ MarkingWorklists::Local local_marking_worklists_;
+ WeakObjects weak_objects_;
+ WeakObjects::Local local_weak_objects_;
+};
+
+} // namespace
+
+ReferenceSummary ReferenceSummary::SummarizeReferencesFrom(Heap* heap,
+ HeapObject obj) {
+ ReferenceSummarizerMarkingState marking_state(obj);
+
+ MainMarkingVisitor<ReferenceSummarizerMarkingState> visitor(
+ &marking_state, marking_state.local_marking_worklists(),
+ marking_state.local_weak_objects(), heap, 0 /*mark_compact_epoch*/,
+ {} /*code_flush_mode*/, false /*embedder_tracing_enabled*/,
+ true /*should_keep_ages_unchanged*/);
+ visitor.Visit(obj.map(heap->isolate()), obj);
+
+ return marking_state.DestructivelyRetrieveReferences();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/reference-summarizer.h b/deps/v8/src/heap/reference-summarizer.h
new file mode 100644
index 0000000000..a49ac597a1
--- /dev/null
+++ b/deps/v8/src/heap/reference-summarizer.h
@@ -0,0 +1,55 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_REFERENCE_SUMMARIZER_H_
+#define V8_HEAP_REFERENCE_SUMMARIZER_H_
+
+#include <unordered_set>
+
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class ReferenceSummary {
+ public:
+ ReferenceSummary() = default;
+ ReferenceSummary(ReferenceSummary&& other) V8_NOEXCEPT
+ : strong_references_(std::move(other.strong_references_)),
+ weak_references_(std::move(other.weak_references_)) {}
+
+ // Produces a set of objects referred to by the object. This function uses a
+ // realistic marking visitor, so its results are likely to match real GC
+ // behavior. Intended only for verification.
+ static ReferenceSummary SummarizeReferencesFrom(Heap* heap, HeapObject obj);
+
+ // All objects which the chosen object has strong pointers to.
+ std::unordered_set<HeapObject, Object::Hasher>& strong_references() {
+ return strong_references_;
+ }
+
+ // All objects which the chosen object has weak pointers to. The values in
+ // ephemeron hash tables are also included here, even though they aren't
+ // normal weak pointers.
+ std::unordered_set<HeapObject, Object::Hasher>& weak_references() {
+ return weak_references_;
+ }
+
+ void Clear() {
+ strong_references_.clear();
+ weak_references_.clear();
+ }
+
+ private:
+ std::unordered_set<HeapObject, Object::Hasher> strong_references_;
+ std::unordered_set<HeapObject, Object::Hasher> weak_references_;
+ DISALLOW_GARBAGE_COLLECTION(no_gc)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_REFERENCE_SUMMARIZER_H_
diff --git a/deps/v8/src/heap/remembered-set-inl.h b/deps/v8/src/heap/remembered-set-inl.h
index f7358630d0..b0908839ea 100644
--- a/deps/v8/src/heap/remembered-set-inl.h
+++ b/deps/v8/src/heap/remembered-set-inl.h
@@ -17,26 +17,26 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
Address addr,
Callback callback) {
switch (slot_type) {
- case CODE_TARGET_SLOT: {
+ case SlotType::kCodeEntry: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
return UpdateCodeTarget(&rinfo, callback);
}
- case CODE_ENTRY_SLOT: {
+ case SlotType::kConstPoolCodeEntry: {
return UpdateCodeEntry(addr, callback);
}
- case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectCompressed: {
RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case FULL_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectFull: {
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case DATA_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectData: {
RelocInfo rinfo(addr, RelocInfo::DATA_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case COMPRESSED_OBJECT_SLOT: {
+ case SlotType::kConstPoolEmbeddedObjectCompressed: {
HeapObject old_target = HeapObject::cast(Object(
DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr))));
HeapObject new_target = old_target;
@@ -47,10 +47,10 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
}
return result;
}
- case FULL_OBJECT_SLOT: {
+ case SlotType::kConstPoolEmbeddedObjectFull: {
return callback(FullMaybeObjectSlot(addr));
}
- case CLEARED_SLOT:
+ case SlotType::kCleared:
break;
}
UNREACHABLE();
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 13a6fedf47..b4badca6d6 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -391,19 +391,6 @@ class RememberedSetSweeping {
}
};
-inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
- return FULL_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- return COMPRESSED_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDataEmbeddedObject(rmode)) {
- return DATA_EMBEDDED_OBJECT_SLOT;
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index bd4c610004..12f6706f76 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -33,14 +33,13 @@ void IsolateSafepoint::EnterLocalSafepointScope() {
DCHECK_NULL(LocalHeap::Current());
DCHECK(AllowGarbageCollection::IsAllowed());
- LockMutex(heap_->isolate()->main_thread_local_heap());
+ LockMutex(isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return;
// Local safepoint can only be initiated on the isolate's main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_EQ(ThreadId::Current(), isolate()->thread_id());
- TimedHistogramScope timer(
- heap_->isolate()->counters()->gc_time_to_safepoint());
+ TimedHistogramScope timer(isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
barrier_.Arm();
@@ -72,6 +71,7 @@ class PerClientSafepointData final {
void IsolateSafepoint::InitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
+ shared_isolate()->global_safepoint()->AssertActive();
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
InitiateGlobalSafepointScopeRaw(initiator, client_data);
@@ -79,6 +79,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScope(
void IsolateSafepoint::TryInitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
+ shared_isolate()->global_safepoint()->AssertActive();
if (!local_heaps_mutex_.TryLock()) return;
InitiateGlobalSafepointScopeRaw(initiator, client_data);
}
@@ -95,7 +96,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
IsolateSafepoint::IncludeMainThread
IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
- const bool is_initiator = heap_->isolate() == initiator;
+ const bool is_initiator = isolate() == initiator;
return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
}
@@ -233,23 +234,6 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
}
}
-bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
- base::RecursiveMutexGuard guard(&local_heaps_mutex_);
- LocalHeap* current = local_heaps_head_;
-
- while (current) {
- if (current == local_heap) return true;
- current = current->next_;
- }
-
- return false;
-}
-
-bool IsolateSafepoint::ContainsAnyLocalHeap() {
- base::RecursiveMutexGuard guard(&local_heaps_mutex_);
- return local_heaps_head_ != nullptr;
-}
-
void IsolateSafepoint::Iterate(RootVisitor* visitor) {
AssertActive();
for (LocalHeap* current = local_heaps_head_; current;
@@ -263,6 +247,12 @@ void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
DCHECK_NULL(heap_->main_thread_local_heap()->next_);
}
+Isolate* IsolateSafepoint::isolate() const { return heap_->isolate(); }
+
+Isolate* IsolateSafepoint::shared_isolate() const {
+ return isolate()->shared_isolate();
+}
+
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterLocalSafepointScope();
}
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 8a6823c603..b64df46f3a 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -26,9 +26,6 @@ class IsolateSafepoint final {
public:
explicit IsolateSafepoint(Heap* heap);
- V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
- V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
-
// Iterate handles in local heaps
void Iterate(RootVisitor* visitor);
@@ -44,7 +41,7 @@ class IsolateSafepoint final {
void AssertActive() { local_heaps_mutex_.AssertHeld(); }
- void AssertMainThreadIsOnlyThread();
+ V8_EXPORT_PRIVATE void AssertMainThreadIsOnlyThread();
private:
class Barrier {
@@ -135,6 +132,9 @@ class IsolateSafepoint final {
local_heaps_head_ = local_heap->next_;
}
+ Isolate* isolate() const;
+ Isolate* shared_isolate() const;
+
Barrier barrier_;
Heap* heap_;
@@ -145,11 +145,9 @@ class IsolateSafepoint final {
int active_safepoint_scopes_;
- friend class Heap;
friend class GlobalSafepoint;
friend class GlobalSafepointScope;
friend class LocalHeap;
- friend class PersistentHandles;
friend class SafepointScope;
};
@@ -181,6 +179,8 @@ class GlobalSafepoint final {
void AssertNoClients();
+ void AssertActive() { clients_mutex_.AssertHeld(); }
+
private:
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveGlobalSafepointScope(Isolate* initiator);
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 8a0a1da96b..14306c0910 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_
+#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
-#include "src/heap/local-allocator-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
@@ -83,7 +83,8 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
}
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
- int size) {
+ int size,
+ PromotionHeapChoice promotion_heap_choice) {
// Copy the content of source to target.
target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
heap()->CopyBlock(target.address() + kTaggedSize,
@@ -100,7 +101,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->OnMoveEvent(target, source, size);
}
- if (is_incremental_marking_) {
+ if (is_incremental_marking_ &&
+ promotion_heap_choice != kPromoteIntoSharedHeap) {
heap()->incremental_marking()->TransferColor(source, target);
}
heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
@@ -123,7 +125,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
- const bool self_success = MigrateObject(map, object, target, object_size);
+ const bool self_success =
+ MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object.map_word(kAcquireLoad);
@@ -171,7 +174,8 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
- const bool self_success = MigrateObject(map, object, target, object_size);
+ const bool self_success =
+ MigrateObject(map, object, target, object_size, promotion_heap_choice);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object.map_word(kAcquireLoad);
@@ -182,7 +186,11 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
: CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
- if (object_fields == ObjectFields::kMaybePointers) {
+
+ // During incremental marking we want to push every object in order to
+ // record slots for map words. Necessary for map space compaction.
+ if (object_fields == ObjectFields::kMaybePointers ||
+ is_compacting_including_map_space_) {
promotion_list_local_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
@@ -203,7 +211,6 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
// TODO(hpayer): Make this check size based, i.e.
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
- FLAG_young_generation_large_objects &&
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
@@ -377,7 +384,8 @@ SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
map, slot, String::unchecked_cast(source), size,
ObjectFields::kMaybePointers);
case kVisitDataObject: // External strings have kVisitDataObject.
- if (String::IsInPlaceInternalizable(map.instance_type())) {
+ if (String::IsInPlaceInternalizableExcludingExternal(
+ map.instance_type())) {
return EvacuateInPlaceInternalizableString(
map, slot, String::unchecked_cast(source), size,
ObjectFields::kDataOnly);
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 3e3a67a5e6..56e002a98c 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,14 +4,18 @@
#include "src/heap/scavenger.h"
+#include "src/common/globals.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
+#include "src/heap/concurrent-allocator.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
@@ -32,6 +36,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
bool record_slots)
: scavenger_(scavenger), record_slots_(record_slots) {}
+ V8_INLINE void VisitMapPointer(HeapObject host) final {
+ if (!record_slots_) return;
+ MapWord map_word = host.map_word(kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+ // Surviving new large objects have forwarding pointers in the map word.
+ DCHECK(MemoryChunk::FromHeapObject(host)->InNewLargeObjectSpace());
+ return;
+ }
+ HandleSlot(host, HeapObjectSlot(host.map_slot()), map_word.ToMap());
+ }
+
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
VisitPointersImpl(host, start, end);
@@ -118,10 +133,9 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
slot.address());
}
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(target)) {
// We should never try to record off-heap slots.
DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
// Code slots never appear in new space because CodeDataContainers, the
@@ -281,18 +295,8 @@ void ScavengerCollector::CollectGarbage() {
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
- // Try to finish sweeping here, such that the following code doesn't need to
- // pause & resume sweeping.
- if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
- !sweeper->AreSweeperTasksRunning()) {
- // At this point we know that all concurrent sweeping tasks have run
- // out-of-work and quit: all pages are swept. The main thread still needs
- // to complete sweeping though.
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
- }
-
// Pause the concurrent sweeper.
- Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ Sweeper::PauseScope pause_scope(sweeper);
// Filter out pages from the sweeper that need to be processed for old to
// new slots by the Scavenger. After processing, the Scavenger adds back
// pages that are still unsweeped. This way the Scavenger has exclusive
@@ -400,7 +404,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->UpdateYoungReferencesInExternalStringTable(
&Heap::UpdateYoungReferenceInExternalStringTableEntry);
- heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ heap_->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->UpdateRetainersAfterScavenge();
@@ -497,6 +501,10 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
+ const bool is_compacting = heap_->incremental_marking()->IsCompacting();
+ MajorAtomicMarkingState* marking_state =
+ heap_->incremental_marking()->atomic_marking_state();
+
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
HeapObject object = update_info.first;
@@ -504,6 +512,12 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
+
+ if (is_compacting && marking_state->IsBlack(object) &&
+ MarkCompactCollector::IsOnEvacuationCandidate(map)) {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
+ MemoryChunk::FromHeapObject(object), object.map_slot().address());
+ }
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
@@ -540,6 +554,15 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
large_object_promotion_list_local_(
&promotion_list->large_object_promotion_list_) {}
+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+ if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ }
+ return nullptr;
+}
+} // namespace
+
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
@@ -554,12 +577,12 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
copied_size_(0),
promoted_size_(0),
allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
- shared_old_allocator_(heap_->shared_old_allocator_.get()),
+ shared_old_allocator_(CreateSharedOldAllocator(heap_)),
is_logging_(is_logging),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()),
- shared_string_table_(FLAG_shared_string_table &&
- (heap->isolate()->shared_isolate() != nullptr)) {}
+ is_compacting_including_map_space_(is_compacting_ && FLAG_compact_maps),
+ shared_string_table_(shared_old_allocator_.get() != nullptr) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
int size) {
@@ -574,7 +597,13 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
- target.IterateBodyFast(map, size, &visitor);
+
+ if (is_compacting_including_map_space_) {
+ // When we compact map space, we also want to visit the map word.
+ target.IterateFast(map, size, &visitor);
+ } else {
+ target.IterateBodyFast(map, size, &visitor);
+ }
if (map.IsJSArrayBufferMap()) {
DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
@@ -741,6 +770,7 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
+ if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
empty_chunks_local_.Publish();
ephemeron_table_list_local_.Publish();
for (auto it = ephemeron_remembered_set_.begin();
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 0eb12a5f3d..0dff0ec133 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -7,8 +7,8 @@
#include "src/base/platform/condition-variable.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/evacuation-allocator.h"
#include "src/heap/index-generator.h"
-#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
@@ -111,6 +111,8 @@ class Scavenger {
size_t bytes_promoted() const { return promoted_size_; }
private:
+ enum PromotionHeapChoice { kPromoteIntoLocalHeap, kPromoteIntoSharedHeap };
+
// Number of objects to process before interrupting for potentially waking
// up other tasks.
static const int kInterruptThreshold = 128;
@@ -135,7 +137,8 @@ class Scavenger {
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map map, HeapObject source, HeapObject target,
- int size);
+ int size,
+ PromotionHeapChoice promotion_heap_choice);
V8_INLINE SlotCallbackResult
RememberedSetEntryNeeded(CopyAndForwardResult result);
@@ -145,8 +148,6 @@ class Scavenger {
SemiSpaceCopyObject(Map map, THeapObjectSlot slot, HeapObject object,
int object_size, ObjectFields object_fields);
- enum PromotionHeapChoice { kPromoteIntoLocalHeap, kPromoteIntoSharedHeap };
-
template <typename THeapObjectSlot,
PromotionHeapChoice promotion_heap_choice = kPromoteIntoLocalHeap>
V8_INLINE CopyAndForwardResult PromoteObject(Map map, THeapObjectSlot slot,
@@ -197,13 +198,14 @@ class Scavenger {
size_t copied_size_;
size_t promoted_size_;
EvacuationAllocator allocator_;
- ConcurrentAllocator* shared_old_allocator_ = nullptr;
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
EphemeronRememberedSet ephemeron_remembered_set_;
const bool is_logging_;
const bool is_incremental_marking_;
const bool is_compacting_;
+ const bool is_compacting_including_map_space_;
const bool shared_string_table_;
friend class IterateAndScavengePromotedObjectsVisitor;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 4e7b2afbdc..806da907c0 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -13,6 +13,7 @@
#include "src/init/setup-isolate.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/arguments.h"
+#include "src/objects/call-site-info.h"
#include "src/objects/cell-inl.h"
#include "src/objects/contexts.h"
#include "src/objects/data-handler.h"
@@ -37,7 +38,6 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
#include "src/objects/source-text-module.h"
-#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
@@ -151,9 +151,9 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
SKIP_WRITE_BARRIER);
Map map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
- inobject_properties);
+ inobject_properties, this);
- return map;
+ return AllocationResult::FromObject(map);
}
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- return map;
+ return AllocationResult::FromObject(map);
}
void Heap::FinalizePartialMap(Map map) {
@@ -208,7 +208,7 @@ AllocationResult Heap::Allocate(Handle<Map> map,
allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
result.set_map_after_allocation(*map, write_barrier_mode);
- return result;
+ return AllocationResult::FromObject(result);
}
bool Heap::CreateInitialMaps() {
@@ -250,7 +250,6 @@ bool Heap::CreateInitialMaps() {
#undef ALLOCATE_PARTIAL_MAP
}
- // Allocate the empty array.
{
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
@@ -476,6 +475,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
ALLOCATE_VARSIZE_MAP(SIMPLE_NUMBER_DICTIONARY_TYPE,
simple_number_dictionary)
+ ALLOCATE_VARSIZE_MAP(NAME_TO_INDEX_HASH_TABLE_TYPE,
+ name_to_index_hash_table)
+ ALLOCATE_VARSIZE_MAP(REGISTERED_SYMBOL_TABLE_TYPE, registered_symbol_table)
ALLOCATE_VARSIZE_MAP(EMBEDDER_DATA_ARRAY_TYPE, embedder_data_array)
ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
@@ -516,6 +518,8 @@ bool Heap::CreateInitialMaps() {
WasmInternalFunction::kSize, wasm_internal_function)
IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
wasm_js_function_data)
+ IF_WASM(ALLOCATE_MAP, WASM_ON_FULFILLED_DATA_TYPE,
+ WasmOnFulfilledData::kSize, wasm_onfulfilled_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
wasm_type_info)
@@ -523,13 +527,22 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kHeaderSize,
message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
+ ALLOCATE_MAP(JS_EXTERNAL_OBJECT_TYPE, JSExternalObject::kHeaderSize,
external)
external_map().set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
+ {
+ AllocationResult alloc = AllocateRaw(
+ ArrayList::SizeFor(ArrayList::kFirstIndex), AllocationType::kReadOnly);
+ if (!alloc.To(&obj)) return false;
+ obj.set_map_after_allocation(roots.array_list_map(), SKIP_WRITE_BARRIER);
+ ArrayList::cast(obj).set_length(ArrayList::kFirstIndex);
+ ArrayList::cast(obj).SetLength(0);
+ }
+ set_empty_array_list(ArrayList::cast(obj));
{
AllocationResult alloc =
@@ -781,16 +794,20 @@ void Heap::CreateInitialObjects() {
Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+
set_empty_property_dictionary(*empty_property_dictionary);
- set_public_symbol_table(*empty_property_dictionary);
- set_api_symbol_table(*empty_property_dictionary);
- set_api_private_symbol_table(*empty_property_dictionary);
+ Handle<RegisteredSymbolTable> empty_symbol_table = RegisteredSymbolTable::New(
+ isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_symbol_table->HasSufficientCapacityToAdd(1));
+ set_public_symbol_table(*empty_symbol_table);
+ set_api_symbol_table(*empty_symbol_table);
+ set_api_private_symbol_table(*empty_symbol_table);
set_number_string_cache(*factory->NewFixedArray(
kInitialNumberStringCacheSize * 2, AllocationType::kOld));
- set_basic_block_profiling_data(ArrayList::cast(roots.empty_fixed_array()));
+ set_basic_block_profiling_data(roots.empty_array_list());
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
@@ -811,6 +828,7 @@ void Heap::CreateInitialObjects() {
set_shared_wasm_memories(roots.empty_weak_array_list());
#ifdef V8_ENABLE_WEBASSEMBLY
set_active_continuation(roots.undefined_value());
+ set_active_suspender(roots.undefined_value());
#endif // V8_ENABLE_WEBASSEMBLY
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
index 92540574a0..ae4dd7a79f 100644
--- a/deps/v8/src/heap/slot-set.cc
+++ b/deps/v8/src/heap/slot-set.cc
@@ -64,7 +64,7 @@ void TypedSlotSet::ClearInvalidSlots(
while (chunk != nullptr) {
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
- if (type == CLEARED_SLOT) continue;
+ if (type == SlotType::kCleared) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
std::map<uint32_t, uint32_t>::const_iterator upper_bound =
invalid_ranges.upper_bound(offset);
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 5e70cbc33d..7f6f8c3c41 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -602,15 +602,43 @@ class SlotSet {
STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
-enum SlotType {
- FULL_EMBEDDED_OBJECT_SLOT,
- COMPRESSED_EMBEDDED_OBJECT_SLOT,
- DATA_EMBEDDED_OBJECT_SLOT,
- FULL_OBJECT_SLOT,
- COMPRESSED_OBJECT_SLOT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- CLEARED_SLOT
+enum class SlotType : uint8_t {
+ // Full pointer sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectFull,
+
+ // Tagged sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectCompressed,
+
+ // Full pointer sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectData,
+
+ // Full pointer sized slot storing instruction start of Code object.
+ // RelocInfo::target_address/RelocInfo::set_target_address methods are used
+ // for accessing. Used when pointer is stored in the instruction stream.
+ kCodeEntry,
+
+ // Raw full pointer sized slot. Slot is accessed directly. Used when pointer
+ // is stored in constant pool.
+ kConstPoolEmbeddedObjectFull,
+
+ // Raw tagged sized slot. Slot is accessed directly. Used when pointer is
+ // stored in constant pool.
+ kConstPoolEmbeddedObjectCompressed,
+
+ // Raw full pointer sized slot storing instruction start of Code object. Slot
+ // is accessed directly. Used when pointer is stored in constant pool.
+ kConstPoolCodeEntry,
+
+ // Slot got cleared but has not been removed from the slot set.
+ kCleared,
+
+ kLast = kCleared
};
// Data structure for maintaining a list of typed slots in a page.
@@ -669,7 +697,7 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
// This can run concurrently to ClearInvalidSlots().
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
- STATIC_ASSERT(CLEARED_SLOT < 8);
+ STATIC_ASSERT(static_cast<uint8_t>(SlotType::kLast) < 8);
Chunk* chunk = head_;
Chunk* previous = nullptr;
int new_count = 0;
@@ -677,7 +705,7 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
bool empty = true;
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
- if (type != CLEARED_SLOT) {
+ if (type != SlotType::kCleared) {
uint32_t offset = OffsetField::decode(slot.type_and_offset);
Address addr = page_start_ + offset;
if (callback(type, addr) == KEEP_SLOT) {
@@ -727,7 +755,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
static TypedSlot ClearedTypedSlot() {
- return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
+ return TypedSlot{TypeField::encode(SlotType::kCleared) |
+ OffsetField::encode(0)};
}
Address page_start_;
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 796d118988..fb290feee5 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -93,7 +93,8 @@ OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
state_(kOldSpaceState),
old_iterator_(heap->old_space()->begin()),
code_iterator_(heap->code_space()->begin()),
- map_iterator_(heap->map_space()->begin()),
+ map_iterator_(heap->map_space() ? heap->map_space()->begin()
+ : PageRange::iterator(nullptr)),
lo_iterator_(heap->lo_space()->begin()),
code_lo_iterator_(heap->code_lo_space()->begin()) {}
@@ -140,21 +141,19 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
if (!allocation_info_.CanIncrementTop(aligned_size)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject object =
HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
- if (filler_size > 0) {
- return heap_->PrecedeWithFiller(object, filler_size);
- }
-
- return AllocationResult(object);
+ return filler_size > 0 ? AllocationResult::FromObject(
+ heap_->PrecedeWithFiller(object, filler_size))
+ : AllocationResult::FromObject(object);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
- if (result.IsRetry()) return InvalidBuffer();
+ if (result.IsFailure()) return InvalidBuffer();
HeapObject obj;
bool ok = result.To(&obj);
USE(ok);
@@ -175,6 +174,24 @@ bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
return false;
}
+bool MemoryChunkIterator::HasNext() {
+ if (current_chunk_) return true;
+
+ while (space_iterator_.HasNext()) {
+ Space* space = space_iterator_.Next();
+ current_chunk_ = space->first_page();
+ if (current_chunk_) return true;
+ }
+
+ return false;
+}
+
+MemoryChunk* MemoryChunkIterator::Next() {
+ MemoryChunk* chunk = current_chunk_;
+ current_chunk_ = chunk->list_node().next();
+ return chunk;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 43d01f3989..77be40f779 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -13,6 +13,7 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
@@ -22,6 +23,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -251,24 +253,20 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_counter_.RemoveAllocationObserver(observer);
}
-void Space::PauseAllocationObservers() {
- allocation_observers_paused_depth_++;
- if (allocation_observers_paused_depth_ == 1) allocation_counter_.Pause();
-}
+void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
-void Space::ResumeAllocationObservers() {
- allocation_observers_paused_depth_--;
- if (allocation_observers_paused_depth_ == 0) allocation_counter_.Resume();
-}
+void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
- if (heap()->inline_allocation_disabled()) {
- // Fit the requested area exactly.
+ if (!use_lab_) {
+ // LABs are disabled, so we fit the requested area exactly.
return start + min_size;
- } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
+ }
+
+ if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -283,10 +281,27 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
return static_cast<Address>(new_end);
- } else {
- // The entire node can be used as the linear allocation area.
- return end;
}
+
+ // LABs are enabled and no observers attached. Return the whole node for the
+ // LAB.
+ return end;
+}
+
+void SpaceWithLinearArea::DisableInlineAllocation() {
+ if (!use_lab_) return;
+
+ use_lab_ = false;
+ FreeLinearAllocationArea();
+ UpdateInlineAllocationLimit(0);
+}
+
+void SpaceWithLinearArea::EnableInlineAllocation() {
+ if (use_lab_) return;
+
+ use_lab_ = true;
+ AdvanceAllocationObservers();
+ UpdateInlineAllocationLimit(0);
}
void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 3ac1e00208..18b760e1a5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -7,18 +7,19 @@
#include <atomic>
#include <memory>
-#include <vector>
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
@@ -170,14 +171,23 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
return external_backing_store_bytes_[type];
}
- MemoryChunk* first_page() { return memory_chunk_list_.front(); }
- MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+ virtual MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ virtual MemoryChunk* last_page() { return memory_chunk_list_.back(); }
- const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
- const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }
+ virtual const MemoryChunk* first_page() const {
+ return memory_chunk_list_.front();
+ }
+ virtual const MemoryChunk* last_page() const {
+ return memory_chunk_list_.back();
+ }
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+ virtual Page* InitializePage(MemoryChunk* chunk) {
+ UNREACHABLE();
+ return nullptr;
+ }
+
FreeList* free_list() { return free_list_.get(); }
Address FirstPageAddress() const { return first_page()->address(); }
@@ -187,8 +197,6 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
#endif
protected:
- int allocation_observers_paused_depth_ = 0;
-
AllocationCounter allocation_counter_;
// The List manages the pages that belong to the given space.
@@ -301,6 +309,8 @@ class Page : public MemoryChunk {
void MoveOldToNewRememberedSetForSweeping();
void MergeOldToNewRememberedSets();
+ ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
+
private:
friend class MemoryAllocator;
};
@@ -468,6 +478,7 @@ class SpaceWithLinearArea : public Space {
size_t allocation_size);
void MarkLabStartInitialized();
+ virtual void FreeLinearAllocationArea() = 0;
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
@@ -478,18 +489,35 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0;
- V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+ void DisableInlineAllocation();
+ void EnableInlineAllocation();
+ bool IsInlineAllocationEnabled() const { return use_lab_; }
void PrintAllocationsOrigins();
protected:
- // TODO(ofrobots): make these private after refactoring is complete.
+ V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
LinearAllocationArea* const allocation_info_;
+ bool use_lab_ = true;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
+// Iterates over all memory chunks in the heap (across all spaces).
+class MemoryChunkIterator {
+ public:
+ explicit MemoryChunkIterator(Heap* heap) : space_iterator_(heap) {}
+
+ V8_INLINE bool HasNext();
+ V8_INLINE MemoryChunk* Next();
+
+ private:
+ SpaceIterator space_iterator_;
+ MemoryChunk* current_chunk_ = nullptr;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 1b9a9b4eb7..5745c4bf1f 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -6,6 +6,7 @@
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
@@ -27,36 +28,27 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
iterability_task_started_(false),
should_reduce_memory_(false) {}
-Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
- : sweeper_(sweeper) {
+Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
if (!sweeper_->sweeping_in_progress()) return;
if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
sweeper_->job_handle_->Cancel();
-
- // Complete sweeping if there's nothing more to do.
- if (sweeper_->IsDoneSweeping()) {
- sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
- DCHECK(!sweeper_->sweeping_in_progress());
- } else {
- // Unless sweeping is complete the flag still indicates that the sweeper
- // is enabled. It just cannot use tasks anymore.
- DCHECK(sweeper_->sweeping_in_progress());
- }
}
-Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
+Sweeper::PauseScope::~PauseScope() {
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
}
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
- Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
+ Sweeper* sweeper, const PauseScope& pause_scope)
: sweeper_(sweeper),
- pause_or_complete_scope_(pause_or_complete_scope),
sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
- USE(pause_or_complete_scope_);
+ // The PauseScope here only serves as a witness that concurrent sweeping has
+ // been paused.
+ USE(pause_scope);
+
if (!sweeping_in_progress_) return;
int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
@@ -135,7 +127,6 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
void RunInternal() final {
VMState<GC> state(isolate_);
TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
-
sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) {
@@ -165,16 +156,14 @@ void Sweeper::StartSweeping() {
// evacuating a page, already swept pages will have enough free bytes to
// hold the objects to move (and therefore, we won't need to wait for more
// pages to be swept in order to move those objects).
- // Since maps don't move, there is no need to sort the pages from MAP_SPACE
- // before sweeping them.
- if (space != MAP_SPACE) {
- int space_index = GetSweepSpaceIndex(space);
- std::sort(
- sweeping_list_[space_index].begin(),
- sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
- return marking_state->live_bytes(a) > marking_state->live_bytes(b);
- });
- }
+ // We sort in descending order of live bytes, i.e., ascending order of free
+ // bytes, because GetSweepingPageSafe returns pages in reverse order.
+ int space_index = GetSweepSpaceIndex(space);
+ std::sort(
+ sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
+ [marking_state](Page* a, Page* b) {
+ return marking_state->live_bytes(a) > marking_state->live_bytes(b);
+ });
});
}
@@ -337,6 +326,15 @@ int Sweeper::RawSweep(
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
if (code_object_registry) code_object_registry->Clear();
+ base::Optional<ActiveSystemPages> active_system_pages_after_sweeping;
+ if (should_reduce_memory_) {
+ // Only decrement counter when we discard unused system pages.
+ active_system_pages_after_sweeping = ActiveSystemPages();
+ active_system_pages_after_sweeping->Init(
+ MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(), Page::kPageSize);
+ }
+
// Phase 2: Free the non-live memory and clean-up the regular remembered set
// entires.
@@ -385,11 +383,18 @@ int Sweeper::RawSweep(
&old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
- DCHECK(map.IsMap(cage_base));
+ // Map might be forwarded during GC.
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
+ if (active_system_pages_after_sweeping) {
+ active_system_pages_after_sweeping->Add(
+ free_end - p->address(), free_start - p->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+ }
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
p->object_start_bitmap()->SetBit(object.address());
#endif
@@ -412,6 +417,13 @@ int Sweeper::RawSweep(
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
+ if (active_system_pages_after_sweeping) {
+ // Decrement accounted memory for discarded memory.
+ PagedSpace* paged_space = static_cast<PagedSpace*>(p->owner());
+ paged_space->ReduceActiveSystemPages(p,
+ *active_system_pages_after_sweeping);
+ }
+
if (code_object_registry) code_object_registry->Finalize();
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
@@ -441,6 +453,8 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
}
bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
+ TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
+ ThreadKind::kMain);
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index f6a362d596..9ac9172b51 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_SWEEPER_H_
#define V8_HEAP_SWEEPER_H_
-#include <deque>
#include <map>
#include <vector>
@@ -32,11 +31,11 @@ class Sweeper {
using SweptList = std::vector<Page*>;
using FreeRangesMap = std::map<uint32_t, uint32_t>;
- // Pauses the sweeper tasks or completes sweeping.
- class V8_NODISCARD PauseOrCompleteScope final {
+ // Pauses the sweeper tasks.
+ class V8_NODISCARD PauseScope final {
public:
- explicit PauseOrCompleteScope(Sweeper* sweeper);
- ~PauseOrCompleteScope();
+ explicit PauseScope(Sweeper* sweeper);
+ ~PauseScope();
private:
Sweeper* const sweeper_;
@@ -48,8 +47,7 @@ class Sweeper {
// after exiting this scope.
class V8_NODISCARD FilterSweepingPagesScope final {
public:
- FilterSweepingPagesScope(
- Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
+ FilterSweepingPagesScope(Sweeper* sweeper, const PauseScope& pause_scope);
~FilterSweepingPagesScope();
template <typename Callback>
@@ -70,7 +68,6 @@ class Sweeper {
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
- const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
diff --git a/deps/v8/src/heap/third-party/heap-api-stub.cc b/deps/v8/src/heap/third-party/heap-api-stub.cc
index 3ca562fe58..9690a8ca27 100644
--- a/deps/v8/src/heap/third-party/heap-api-stub.cc
+++ b/deps/v8/src/heap/third-party/heap-api-stub.cc
@@ -51,13 +51,25 @@ bool Heap::InOldSpace(Address) { return false; }
bool Heap::InReadOnlySpace(Address) { return false; }
// static
+bool Heap::InLargeObjectSpace(Address address) { return false; }
+
+// static
bool Heap::IsValidHeapObject(HeapObject) { return false; }
// static
+bool Heap::IsImmovable(HeapObject) { return false; }
+
+// static
bool Heap::IsValidCodeObject(HeapObject) { return false; }
+void Heap::ResetIterator() {}
+
+HeapObject Heap::NextObject() { return HeapObject(); }
+
bool Heap::CollectGarbage() { return false; }
+size_t Heap::Capacity() { return 0; }
+
} // namespace third_party_heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 369dfdf31b..85599ae570 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,4 +1,3 @@
ishell@chromium.org
jkummerow@chromium.org
-mvstanton@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 7aff16b9da..4542f4af42 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -225,8 +225,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&call_handler);
{
- // TODO(v8:11880): call CodeT directly.
- TNode<Code> code_handler = FromCodeT(CAST(handler));
+ TNode<CodeT> code_handler = CAST(handler);
exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, code_handler,
p->context(), p->lookup_start_object(),
p->name(), p->slot(), p->vector());
@@ -1187,6 +1186,37 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
holder, accessor_info, p->name(), p->value());
}
+void AccessorAssembler::HandleStoreICSmiHandlerJSSharedStructFieldCase(
+ TNode<Context> context, TNode<Word32T> handler_word, TNode<JSObject> holder,
+ TNode<Object> value) {
+ CSA_DCHECK(this,
+ Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
+ STORE_KIND(kSharedStructField)));
+ CSA_DCHECK(
+ this,
+ Word32Equal(DecodeWord32<StoreHandler::RepresentationBits>(handler_word),
+ Int32Constant(Representation::kTagged)));
+
+ TVARIABLE(Object, shared_value, value);
+ SharedValueBarrier(context, &shared_value);
+
+ TNode<BoolT> is_inobject =
+ IsSetWord32<StoreHandler::IsInobjectBits>(handler_word);
+ TNode<HeapObject> property_storage = Select<HeapObject>(
+ is_inobject, [&]() { return holder; },
+ [&]() { return LoadFastProperties(holder); });
+
+ TNode<UintPtrT> index =
+ DecodeWordFromWord32<StoreHandler::FieldIndexBits>(handler_word);
+ TNode<IntPtrT> offset = Signed(TimesTaggedSize(index));
+
+ StoreJSSharedStructInObjectField(property_storage, offset,
+ shared_value.value());
+
+ // Return the original value.
+ Return(value);
+}
+
void AccessorAssembler::HandleStoreICHandlerCase(
const StoreICParameters* p, TNode<MaybeObject> handler, Label* miss,
ICMode ic_mode, ElementSupport support_elements) {
@@ -1233,7 +1263,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
BIND(&dictionary_found);
{
- if (p->IsDefineOwn()) {
+ if (p->IsDefineKeyedOwn()) {
// Take slow path to throw if a private name already exists.
GotoIf(IsPrivateSymbol(CAST(p->name())), &if_slow);
}
@@ -1270,10 +1300,13 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_fast_smi);
{
- Label data(this), accessor(this), native_data_property(this);
+ Label data(this), accessor(this), shared_struct_field(this),
+ native_data_property(this);
GotoIf(Word32Equal(handler_kind, STORE_KIND(kAccessor)), &accessor);
- Branch(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
- &native_data_property, &data);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
+ &native_data_property);
+ Branch(Word32Equal(handler_kind, STORE_KIND(kSharedStructField)),
+ &shared_struct_field, &data);
BIND(&accessor);
HandleStoreAccessor(p, CAST(holder), handler_word);
@@ -1281,6 +1314,10 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&native_data_property);
HandleStoreICNativeDataProperty(p, CAST(holder), handler_word);
+ BIND(&shared_struct_field);
+ HandleStoreICSmiHandlerJSSharedStructFieldCase(p->context(), handler_word,
+ CAST(holder), p->value());
+
BIND(&data);
// Handle non-transitioning field stores.
HandleStoreICSmiHandlerCase(handler_word, CAST(holder), p->value(), miss);
@@ -1307,10 +1344,10 @@ void AccessorAssembler::HandleStoreICHandlerCase(
p->slot(), p->vector(), p->receiver(), p->name());
} else {
Runtime::FunctionId id;
- if (p->IsStoreOwn()) {
- id = Runtime::kStoreOwnIC_Slow;
- } else if (p->IsDefineOwn()) {
- id = Runtime::kKeyedDefineOwnIC_Slow;
+ if (p->IsDefineNamedOwn()) {
+ id = Runtime::kDefineNamedOwnIC_Slow;
+ } else if (p->IsDefineKeyedOwn()) {
+ id = Runtime::kDefineKeyedOwnIC_Slow;
} else {
id = Runtime::kKeyedStoreIC_Slow;
}
@@ -1335,8 +1372,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
{
- // TODO(v8:11880): call CodeT directly.
- TNode<Code> code_handler = FromCodeT(CAST(strong_handler));
+ TNode<CodeT> code_handler = CAST(strong_handler);
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
p->vector());
@@ -1359,7 +1395,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
ExitPoint direct_exit(this);
// StoreGlobalIC_PropertyCellCase doesn't properly handle private names
// but they are not expected here anyway.
- CSA_DCHECK(this, BoolConstant(!p->IsDefineOwn()));
+ CSA_DCHECK(this, BoolConstant(!p->IsDefineKeyedOwn()));
StoreGlobalIC_PropertyCellCase(property_cell, p->value(), &direct_exit,
miss);
}
@@ -1367,7 +1403,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
{
TNode<Map> map = CAST(map_or_property_cell);
HandleStoreICTransitionMapHandlerCase(p, map, miss,
- p->IsAnyStoreOwn()
+ p->IsAnyDefineOwn()
? kDontCheckPrototypeValidity
: kCheckPrototypeValidity);
Return(p->value());
@@ -1665,6 +1701,55 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&done);
}
+void AccessorAssembler::StoreJSSharedStructField(
+ TNode<Context> context, TNode<HeapObject> shared_struct,
+ TNode<Map> shared_struct_map, TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor_name_index, TNode<Uint32T> details,
+ TNode<Object> maybe_local_value) {
+ CSA_DCHECK(this, IsJSSharedStruct(shared_struct));
+
+ Label done(this);
+
+ TNode<UintPtrT> field_index =
+ DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
+ field_index = Unsigned(IntPtrAdd(
+ field_index,
+ Unsigned(LoadMapInobjectPropertiesStartInWords(shared_struct_map))));
+
+ TNode<IntPtrT> instance_size_in_words =
+ LoadMapInstanceSizeInWords(shared_struct_map);
+
+ TVARIABLE(Object, shared_value, maybe_local_value);
+ SharedValueBarrier(context, &shared_value);
+
+ Label inobject(this), backing_store(this);
+ Branch(UintPtrLessThan(field_index, instance_size_in_words), &inobject,
+ &backing_store);
+
+ BIND(&inobject);
+ {
+ TNode<IntPtrT> field_offset = Signed(TimesTaggedSize(field_index));
+ StoreJSSharedStructInObjectField(shared_struct, field_offset,
+ shared_value.value());
+ Goto(&done);
+ }
+
+ BIND(&backing_store);
+ {
+ TNode<IntPtrT> backing_store_index =
+ Signed(IntPtrSub(field_index, instance_size_in_words));
+
+ Label tagged_rep(this), double_rep(this);
+ TNode<PropertyArray> properties =
+ CAST(LoadFastProperties(CAST(shared_struct)));
+ StoreJSSharedStructPropertyArrayElement(properties, backing_store_index,
+ shared_value.value());
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
void AccessorAssembler::CheckPrototypeValidityCell(
TNode<Object> maybe_validity_cell, Label* miss) {
Label done(this);
@@ -1712,10 +1797,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
&if_transitioning_element_store);
BIND(&if_element_store);
{
- // TODO(v8:11880): call CodeT directly.
- TailCallStub(StoreWithVectorDescriptor{}, FromCodeT(code_handler),
- p->context(), p->receiver(), p->name(), p->value(),
- p->slot(), p->vector());
+ TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
}
BIND(&if_transitioning_element_store);
@@ -1727,10 +1811,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsDeprecatedMap(transition_map), miss);
- // TODO(v8:11880): call CodeT directly.
- TailCallStub(StoreTransitionDescriptor{}, FromCodeT(code_handler),
- p->context(), p->receiver(), p->name(), transition_map,
- p->value(), p->slot(), p->vector());
+ TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), transition_map, p->value(),
+ p->slot(), p->vector());
}
};
}
@@ -1795,10 +1878,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
if (ic_mode == ICMode::kGlobalIC) {
TailCallRuntime(Runtime::kStoreGlobalIC_Slow, p->context(), p->value(),
p->slot(), p->vector(), p->receiver(), p->name());
- } else if (p->IsAnyStoreOwn()) {
- // DefineOwnIC and StoreOwnIC shouldn't be using slow proto handlers,
- // otherwise proper slow function must be called.
- CSA_DCHECK(this, BoolConstant(!p->IsAnyStoreOwn()));
+ } else if (p->IsAnyDefineOwn()) {
+ // DefineKeyedOwnIC and DefineNamedOwnIC shouldn't be using slow proto
+ // handlers, otherwise proper slow function must be called.
+ CSA_DCHECK(this, BoolConstant(!p->IsAnyDefineOwn()));
Unreachable();
} else {
TailCallRuntime(Runtime::kKeyedStoreIC_Slow, p->context(), p->value(),
@@ -1881,7 +1964,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
ExitPoint direct_exit(this);
// StoreGlobalIC_PropertyCellCase doesn't properly handle private names
// but they are not expected here anyway.
- CSA_DCHECK(this, BoolConstant(!p->IsDefineOwn()));
+ CSA_DCHECK(this, BoolConstant(!p->IsDefineKeyedOwn()));
StoreGlobalIC_PropertyCellCase(CAST(holder), p->value(), &direct_exit,
miss);
}
@@ -2950,7 +3033,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
// Call into the stub that implements the non-inlined parts of LoadIC.
Callable ic = Builtins::CallableFor(isolate(), Builtin::kLoadIC_Noninlined);
- TNode<Code> code_target = HeapConstant(ic.code());
+ TNode<CodeT> code_target = HeapConstant(ic.code());
exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context(),
p->receiver_and_lookup_start_object(), p->name(),
p->slot(), p->vector());
@@ -3648,16 +3731,18 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
- auto builtin = p->IsStoreOwn() ? Builtin::kStoreOwnIC_NoFeedback
- : Builtin::kStoreIC_NoFeedback;
+ // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can
+ // be called here and below when !p->IsDefineNamedOwn().
+ auto builtin = p->IsDefineNamedOwn() ? Builtin::kDefineNamedOwnIC_NoFeedback
+ : Builtin::kStoreIC_NoFeedback;
TailCallBuiltin(builtin, p->context(), p->receiver(), p->name(), p->value(),
p->slot());
}
BIND(&miss);
{
- auto runtime =
- p->IsStoreOwn() ? Runtime::kStoreOwnIC_Miss : Runtime::kStoreIC_Miss;
+ auto runtime = p->IsDefineNamedOwn() ? Runtime::kDefineNamedOwnIC_Miss
+ : Runtime::kStoreIC_Miss;
TailCallRuntime(runtime, p->context(), p->value(), p->slot(), p->vector(),
p->receiver(), p->name());
}
@@ -3870,7 +3955,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
}
}
-void AccessorAssembler::KeyedDefineOwnIC(const StoreICParameters* p) {
+void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
Label miss(this, Label::kDeferred);
{
TVARIABLE(MaybeObject, var_handler);
@@ -3892,7 +3977,7 @@ void AccessorAssembler::KeyedDefineOwnIC(const StoreICParameters* p) {
&if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
- Comment("KeyedDefineOwnIC_if_handler");
+ Comment("DefineKeyedOwnIC_if_handler");
HandleStoreICHandlerCase(p, var_handler.value(), &miss,
ICMode::kNonGlobalIC, kSupportElements);
}
@@ -3901,7 +3986,7 @@ void AccessorAssembler::KeyedDefineOwnIC(const StoreICParameters* p) {
TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
// CheckPolymorphic case.
- Comment("KeyedDefineOwnIC_try_polymorphic");
+ Comment("DefineKeyedOwnIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
@@ -3911,21 +3996,21 @@ void AccessorAssembler::KeyedDefineOwnIC(const StoreICParameters* p) {
BIND(&try_megamorphic);
{
// Check megamorphic case.
- Comment("KeyedDefineOwnIC_try_megamorphic");
+ Comment("DefineKeyedOwnIC_try_megamorphic");
Branch(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()),
&no_feedback, &try_polymorphic_name);
}
BIND(&no_feedback);
{
- TailCallBuiltin(Builtin::kKeyedDefineOwnIC_Megamorphic, p->context(),
+ TailCallBuiltin(Builtin::kDefineKeyedOwnIC_Megamorphic, p->context(),
p->receiver(), p->name(), p->value(), p->slot());
}
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a fixed array in the next slot.
- Comment("KeyedDefineOwnIC_try_polymorphic_name");
+ Comment("DefineKeyedOwnIC_try_polymorphic_name");
GotoIfNot(TaggedEqual(strong_feedback, p->name()), &miss);
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
@@ -3938,8 +4023,8 @@ void AccessorAssembler::KeyedDefineOwnIC(const StoreICParameters* p) {
}
BIND(&miss);
{
- Comment("KeyedDefineOwnIC_miss");
- TailCallRuntime(Runtime::kKeyedDefineOwnIC_Miss, p->context(), p->value(),
+ Comment("DefineKeyedOwnIC_miss");
+ TailCallRuntime(Runtime::kDefineKeyedOwnIC_Miss, p->context(), p->value(),
p->slot(), p->vector(), p->receiver(), p->name());
}
}
@@ -3977,8 +4062,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
{
// Call the handler.
- // TODO(v8:11880): call CodeT directly.
- TNode<Code> code_handler = FromCodeT(CAST(handler));
+ TNode<CodeT> code_handler = CAST(handler);
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
p->vector());
@@ -3991,9 +4075,8 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<Map> transition_map =
CAST(GetHeapObjectAssumeWeak(maybe_transition_map, &miss));
GotoIf(IsDeprecatedMap(transition_map), &miss);
- // TODO(v8:11880): call CodeT directly.
- TNode<Code> code = FromCodeT(
- CAST(LoadObjectField(handler, StoreHandler::kSmiHandlerOffset)));
+ TNode<CodeT> code =
+ CAST(LoadObjectField(handler, StoreHandler::kSmiHandlerOffset));
TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
p->receiver(), p->name(), transition_map, p->value(),
p->slot(), p->vector());
@@ -4489,7 +4572,7 @@ void AccessorAssembler::GenerateStoreICBaseline() {
vector);
}
-void AccessorAssembler::GenerateStoreOwnIC() {
+void AccessorAssembler::GenerateDefineNamedOwnIC() {
using Descriptor = StoreWithVectorDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4500,11 +4583,13 @@ void AccessorAssembler::GenerateStoreOwnIC() {
auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector,
- StoreICMode::kStoreOwn);
+ StoreICMode::kDefineNamedOwn);
+ // StoreIC is a generic helper than handle both set and define own
+ // named stores.
StoreIC(&p);
}
-void AccessorAssembler::GenerateStoreOwnICTrampoline() {
+void AccessorAssembler::GenerateDefineNamedOwnICTrampoline() {
using Descriptor = StoreDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4514,11 +4599,11 @@ void AccessorAssembler::GenerateStoreOwnICTrampoline() {
auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
- TailCallBuiltin(Builtin::kStoreOwnIC, context, receiver, name, value, slot,
- vector);
+ TailCallBuiltin(Builtin::kDefineNamedOwnIC, context, receiver, name, value,
+ slot, vector);
}
-void AccessorAssembler::GenerateStoreOwnICBaseline() {
+void AccessorAssembler::GenerateDefineNamedOwnICBaseline() {
using Descriptor = StoreWithVectorDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4528,8 +4613,8 @@ void AccessorAssembler::GenerateStoreOwnICBaseline() {
TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
TNode<Context> context = LoadContextFromBaseline();
- TailCallBuiltin(Builtin::kStoreOwnIC, context, receiver, name, value, slot,
- vector);
+ TailCallBuiltin(Builtin::kDefineNamedOwnIC, context, receiver, name, value,
+ slot, vector);
}
void AccessorAssembler::GenerateKeyedStoreIC() {
@@ -4575,7 +4660,7 @@ void AccessorAssembler::GenerateKeyedStoreICBaseline() {
vector);
}
-void AccessorAssembler::GenerateKeyedDefineOwnIC() {
+void AccessorAssembler::GenerateDefineKeyedOwnIC() {
using Descriptor = StoreWithVectorDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4586,11 +4671,11 @@ void AccessorAssembler::GenerateKeyedDefineOwnIC() {
auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector,
- StoreICMode::kDefineOwn);
- KeyedDefineOwnIC(&p);
+ StoreICMode::kDefineKeyedOwn);
+ DefineKeyedOwnIC(&p);
}
-void AccessorAssembler::GenerateKeyedDefineOwnICTrampoline() {
+void AccessorAssembler::GenerateDefineKeyedOwnICTrampoline() {
using Descriptor = StoreDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4600,11 +4685,11 @@ void AccessorAssembler::GenerateKeyedDefineOwnICTrampoline() {
auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
- TailCallBuiltin(Builtin::kKeyedDefineOwnIC, context, receiver, name, value,
+ TailCallBuiltin(Builtin::kDefineKeyedOwnIC, context, receiver, name, value,
slot, vector);
}
-void AccessorAssembler::GenerateKeyedDefineOwnICBaseline() {
+void AccessorAssembler::GenerateDefineKeyedOwnICBaseline() {
using Descriptor = StoreBaselineDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
@@ -4614,7 +4699,7 @@ void AccessorAssembler::GenerateKeyedDefineOwnICBaseline() {
TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
TNode<Context> context = LoadContextFromBaseline();
- TailCallBuiltin(Builtin::kKeyedDefineOwnIC, context, receiver, name, value,
+ TailCallBuiltin(Builtin::kDefineKeyedOwnIC, context, receiver, name, value,
slot, vector);
}
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index b9952a9863..c38c9cccec 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -42,9 +42,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void GenerateStoreIC();
void GenerateStoreICTrampoline();
void GenerateStoreICBaseline();
- void GenerateStoreOwnIC();
- void GenerateStoreOwnICTrampoline();
- void GenerateStoreOwnICBaseline();
+ void GenerateDefineNamedOwnIC();
+ void GenerateDefineNamedOwnICTrampoline();
+ void GenerateDefineNamedOwnICBaseline();
void GenerateStoreGlobalIC();
void GenerateStoreGlobalICTrampoline();
void GenerateStoreGlobalICBaseline();
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedStoreICTrampoline();
void GenerateKeyedStoreICBaseline();
- void GenerateKeyedDefineOwnIC();
- void GenerateKeyedDefineOwnICTrampoline();
- void GenerateKeyedDefineOwnICBaseline();
+ void GenerateDefineKeyedOwnIC();
+ void GenerateDefineKeyedOwnICTrampoline();
+ void GenerateDefineKeyedOwnICBaseline();
void GenerateStoreInArrayLiteralIC();
void GenerateStoreInArrayLiteralICBaseline();
@@ -198,9 +198,10 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
protected:
enum class StoreICMode {
+ // TODO(v8:12548): rename to kDefineKeyedOwnInLiteral
kDefault,
- kStoreOwn,
- kDefineOwn,
+ kDefineNamedOwn,
+ kDefineKeyedOwn,
};
struct StoreICParameters {
StoreICParameters(TNode<Context> context,
@@ -227,9 +228,15 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
bool receiver_is_null() const { return !receiver_.has_value(); }
- bool IsStoreOwn() const { return mode_ == StoreICMode::kStoreOwn; }
- bool IsDefineOwn() const { return mode_ == StoreICMode::kDefineOwn; }
- bool IsAnyStoreOwn() const { return IsStoreOwn() || IsDefineOwn(); }
+ bool IsDefineNamedOwn() const {
+ return mode_ == StoreICMode::kDefineNamedOwn;
+ }
+ bool IsDefineKeyedOwn() const {
+ return mode_ == StoreICMode::kDefineKeyedOwn;
+ }
+ bool IsAnyDefineOwn() const {
+ return IsDefineNamedOwn() || IsDefineKeyedOwn();
+ }
private:
TNode<Context> context_;
@@ -273,6 +280,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Object> value, Label* slow,
bool do_transitioning_store);
+ void StoreJSSharedStructField(TNode<Context> context,
+ TNode<HeapObject> shared_struct,
+ TNode<Map> shared_struct_map,
+ TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor_name_index,
+ TNode<Uint32T> details, TNode<Object> value);
+
TNode<BoolT> IsPropertyDetailsConst(TNode<Uint32T> details);
void CheckFieldType(TNode<DescriptorArray> descriptors,
@@ -316,7 +330,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Object> value,
ExitPoint* exit_point, Label* miss);
void KeyedStoreIC(const StoreICParameters* p);
- void KeyedDefineOwnIC(const StoreICParameters* p);
+ void DefineKeyedOwnIC(const StoreICParameters* p);
void StoreInArrayLiteralIC(const StoreICParameters* p);
// IC dispatcher behavior.
@@ -435,6 +449,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void HandleStoreICSmiHandlerCase(TNode<Word32T> handler_word,
TNode<JSObject> holder, TNode<Object> value,
Label* miss);
+ void HandleStoreICSmiHandlerJSSharedStructFieldCase(
+ TNode<Context> context, TNode<Word32T> handler_word,
+ TNode<JSObject> holder, TNode<Object> value);
void HandleStoreFieldAndReturn(TNode<Word32T> handler_word,
TNode<JSObject> holder, TNode<Object> value,
base::Optional<TNode<Float64T>> double_value,
@@ -575,7 +592,7 @@ class ExitPoint {
template <class... TArgs>
void ReturnCallStub(const CallInterfaceDescriptor& descriptor,
- TNode<Code> target, TNode<Context> context,
+ TNode<CodeT> target, TNode<Context> context,
TArgs... args) {
if (IsDirect()) {
asm_->TailCallStub(descriptor, target, context, args...);
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 4deed77e75..51c403ceb4 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -596,7 +596,9 @@ TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback) {
+ const LazyNode<Context>& context, TNode<UintPtrT>* slot,
+ const LazyNode<HeapObject>* maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
TVARIABLE(Object, result);
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
@@ -615,14 +617,14 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
TaggedToWord32OrBigIntWithFeedback(
context(), left, &if_left_number, &var_left_word32, &if_left_bigint,
- &var_left_bigint, feedback ? &var_left_feedback : nullptr);
+ &var_left_bigint, slot ? &var_left_feedback : nullptr);
Label right_is_bigint(this);
BIND(&if_left_number);
{
TaggedToWord32OrBigIntWithFeedback(
context(), right, &do_number_op, &var_right_word32, &right_is_bigint,
- &var_right_bigint, feedback ? &var_right_feedback : nullptr);
+ &var_right_bigint, slot ? &var_right_feedback : nullptr);
}
BIND(&right_is_bigint);
@@ -639,13 +641,15 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
result = BitwiseOp(var_left_word32.value(), var_right_word32.value(),
bitwise_op);
- if (feedback) {
+ if (slot) {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result.value()), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
TNode<Smi> input_feedback =
SmiOr(var_left_feedback.value(), var_right_feedback.value());
- *feedback = SmiOr(result_type, input_feedback);
+ TNode<Smi> feedback = SmiOr(result_type, input_feedback);
+ UpdateFeedback(feedback, (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
}
Goto(&done);
}
@@ -661,9 +665,15 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
BIND(&do_bigint_op);
{
- if (feedback) {
- *feedback = SmiOr(var_left_feedback.value(), var_right_feedback.value());
+ if (slot) {
+ // Ensure that the feedback is updated even if the runtime call below
+ // would throw.
+ TNode<Smi> feedback =
+ SmiOr(var_left_feedback.value(), var_right_feedback.value());
+ UpdateFeedback(feedback, (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
}
+
result = CallRuntime(
Runtime::kBigIntBinaryOp, context(), var_left_maybe_bigint.value(),
var_right_maybe_bigint.value(), SmiConstant(bitwise_op));
@@ -677,35 +687,71 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
TNode<Object>
BinaryOpAssembler::Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback) {
+ const LazyNode<Context>& context, TNode<UintPtrT>* slot,
+ const LazyNode<HeapObject>* maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
TNode<Smi> right_smi = CAST(right);
TVARIABLE(Object, result);
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Word32T, var_left_word32);
TVARIABLE(BigInt, var_left_bigint);
- Label do_smi_op(this), if_bigint_mix(this, Label::kDeferred), done(this);
-
- TaggedToWord32OrBigIntWithFeedback(context(), left, &do_smi_op,
- &var_left_word32, &if_bigint_mix,
- &var_left_bigint, &var_left_feedback);
- BIND(&do_smi_op);
- result =
- BitwiseOp(var_left_word32.value(), SmiToInt32(right_smi), bitwise_op);
- if (feedback) {
- TNode<Smi> result_type = SelectSmiConstant(
- TaggedIsSmi(result.value()), BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- *feedback = SmiOr(result_type, var_left_feedback.value());
+ TVARIABLE(Smi, feedback);
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this, Label::kDeferred);
+ Label do_number_op(this), if_bigint_mix(this), done(this);
+
+ Branch(TaggedIsSmi(left), &if_lhsissmi, &if_lhsisnotsmi);
+
+ BIND(&if_lhsissmi);
+ {
+ TNode<Smi> left_smi = CAST(left);
+ result = BitwiseSmiOp(left_smi, right_smi, bitwise_op);
+ if (slot) {
+ if (IsBitwiseOutputKnownSmi(bitwise_op)) {
+ feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
+ } else {
+ feedback = SelectSmiConstant(TaggedIsSmi(result.value()),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ }
+ }
+ Goto(&done);
}
- Goto(&done);
- BIND(&if_bigint_mix);
- if (feedback) {
- *feedback = var_left_feedback.value();
+ BIND(&if_lhsisnotsmi);
+ {
+ TNode<HeapObject> left_pointer = CAST(left);
+ TaggedPointerToWord32OrBigIntWithFeedback(
+ context(), left_pointer, &do_number_op, &var_left_word32,
+ &if_bigint_mix, &var_left_bigint, &var_left_feedback);
+ BIND(&do_number_op);
+ {
+ result =
+ BitwiseOp(var_left_word32.value(), SmiToInt32(right_smi), bitwise_op);
+ if (slot) {
+ TNode<Smi> result_type = SelectSmiConstant(
+ TaggedIsSmi(result.value()), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ feedback = SmiOr(result_type, var_left_feedback.value());
+ }
+ Goto(&done);
+ }
+
+ BIND(&if_bigint_mix);
+ {
+ if (slot) {
+ // Ensure that the feedback is updated before we throw.
+ feedback = var_left_feedback.value();
+ UpdateFeedback(feedback.value(), (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
+ }
}
- ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
BIND(&done);
+ UpdateFeedback(feedback.value(), (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
return result.value();
}
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 1f6f353ae3..014673634c 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -6,6 +6,7 @@
#define V8_IC_BINARY_OP_ASSEMBLER_H_
#include <functional>
+
#include "src/codegen/code-stub-assembler.h"
namespace v8 {
@@ -57,11 +58,9 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseOr, left, right, context, &feedback, rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kBitwiseOr, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
return result;
}
@@ -69,11 +68,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseXor, left, right, context, &feedback, rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kBitwiseXor, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
+
return result;
}
@@ -81,11 +79,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseAnd, left, right, context, &feedback, rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kBitwiseAnd, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
+
return result;
}
@@ -93,11 +90,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftLeft, left, right, context, &feedback, rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kShiftLeft, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
+
return result;
}
@@ -105,11 +101,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftRight, left, right, context, &feedback, rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kShiftRight, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
+
return result;
}
@@ -117,24 +112,25 @@ class BinaryOpAssembler : public CodeStubAssembler {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
- TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftRightLogical, left, right, context, &feedback,
- rhs_known_smi);
- UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
- update_feedback_mode);
+ Operation::kShiftRightLogical, left, right, context, slot,
+ maybe_feedback_vector, update_feedback_mode, rhs_known_smi);
+
return result;
}
TNode<Object> Generate_BitwiseBinaryOpWithFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<UintPtrT> slot,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
return rhs_known_smi
? Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
- bitwise_op, left, right, context, feedback)
+ bitwise_op, left, right, context, &slot,
+ &maybe_feedback_vector, update_feedback_mode)
: Generate_BitwiseBinaryOpWithOptionalFeedback(
- bitwise_op, left, right, context, feedback);
+ bitwise_op, left, right, context, &slot,
+ &maybe_feedback_vector, update_feedback_mode);
}
TNode<Object> Generate_BitwiseBinaryOp(Operation bitwise_op,
@@ -142,7 +138,8 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> right,
TNode<Context> context) {
return Generate_BitwiseBinaryOpWithOptionalFeedback(
- bitwise_op, left, right, [&] { return context; }, nullptr);
+ bitwise_op, left, right, [&] { return context; }, nullptr, nullptr,
+ UpdateFeedbackMode::kOptionalFeedback);
}
private:
@@ -160,11 +157,29 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback);
+ const LazyNode<Context>& context, TNode<UintPtrT>* slot,
+ const LazyNode<HeapObject>* maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
TNode<Object> Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback);
+ const LazyNode<Context>& context, TNode<UintPtrT>* slot,
+ const LazyNode<HeapObject>* maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
+
+ // Check if output is known to be Smi when both operands of bitwise operation
+ // are Smi.
+ bool IsBitwiseOutputKnownSmi(Operation bitwise_op) {
+ switch (bitwise_op) {
+ case Operation::kBitwiseAnd:
+ case Operation::kBitwiseOr:
+ case Operation::kBitwiseXor:
+ case Operation::kShiftRight:
+ return true;
+ default:
+ return false;
+ }
+ }
};
} // namespace internal
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 081229c443..d9ce607965 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -21,12 +21,7 @@ namespace v8 {
namespace internal {
inline Handle<Object> MakeCodeHandler(Isolate* isolate, Builtin builtin) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Code code = isolate->builtins()->code(builtin);
- return handle(code.code_data_container(kAcquireLoad), isolate);
- } else {
- return isolate->builtins()->code_handle(builtin);
- }
+ return isolate->builtins()->code_handle(builtin);
}
OBJECT_CONSTRUCTORS_IMPL(LoadHandler, DataHandler)
@@ -230,7 +225,8 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation) {
DCHECK(!representation.IsNone());
- DCHECK(kind == Kind::kField || kind == Kind::kConstField);
+ DCHECK(kind == Kind::kField || kind == Kind::kConstField ||
+ kind == Kind::kSharedStructField);
int config = KindBits::encode(kind) |
IsInobjectBits::encode(field_index.is_inobject()) |
@@ -249,6 +245,14 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
return StoreField(isolate, kind, descriptor, field_index, representation);
}
+Handle<Smi> StoreHandler::StoreSharedStructField(
+ Isolate* isolate, int descriptor, FieldIndex field_index,
+ Representation representation) {
+ DCHECK(representation.Equals(Representation::Tagged()));
+ return StoreField(isolate, Kind::kSharedStructField, descriptor, field_index,
+ representation);
+}
+
Handle<Smi> StoreHandler::StoreNativeDataProperty(Isolate* isolate,
int descriptor) {
int config = KindBits::encode(Kind::kNativeDataProperty) |
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 798ae6a09e..d7a16a9f88 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -250,6 +250,7 @@ class StoreHandler final : public DataHandler {
kConstField,
kAccessor,
kNativeDataProperty,
+ kSharedStructField,
kApiSetter,
kApiSetterHolderIsPrototype,
kGlobalProxy,
@@ -301,6 +302,11 @@ class StoreHandler final : public DataHandler {
PropertyConstness constness,
Representation representation);
+ // Creates a Smi-handler for storing a field to a JSSharedStruct.
+ static inline Handle<Smi> StoreSharedStructField(
+ Isolate* isolate, int descriptor, FieldIndex field_index,
+ Representation representation);
+
// Create a store transition handler which doesn't check prototype chain.
static MaybeObjectHandle StoreOwnTransition(Isolate* isolate,
Handle<Map> transition_map);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 1eeba58612..148fd24810 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -17,7 +17,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
-#include "src/execution/runtime-profiler.h"
+#include "src/execution/tiering-manager.h"
#include "src/handles/handles-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-configuration-inl.h"
@@ -127,7 +127,8 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
} else if (IsKeyedLoadIC()) {
KeyedAccessLoadMode mode = nexus()->GetKeyedAccessLoadMode();
modifier = GetModifier(mode);
- } else if (IsKeyedStoreIC() || IsStoreInArrayLiteralIC() || IsDefineOwnIC()) {
+ } else if (IsKeyedStoreIC() || IsStoreInArrayLiteralIC() ||
+ IsDefineKeyedOwnIC()) {
KeyedAccessStoreMode mode = nexus()->GetKeyedAccessStoreMode();
modifier = GetModifier(mode);
}
@@ -345,7 +346,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
}
#endif
- isolate->runtime_profiler()->NotifyICChanged();
+ isolate->tiering_manager()->NotifyICChanged();
}
namespace {
@@ -531,8 +532,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name,
global->native_context().script_context_table(), isolate());
VariableLookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
- &lookup_result)) {
+ if (script_contexts->Lookup(str_name, &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate(), script_contexts, lookup_result.context_index);
@@ -847,7 +847,7 @@ StubCache* IC::stub_cache() {
// HasICs and each of the store own ICs require its own stub cache.
// Until we create them, don't allow accessing the load/store stub caches.
DCHECK(!IsAnyHas());
- DCHECK(!is_any_store_own());
+ DCHECK(!IsAnyDefineOwn());
if (IsAnyLoad()) {
return isolate()->load_stub_cache();
} else {
@@ -858,7 +858,7 @@ StubCache* IC::stub_cache() {
void IC::UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
const MaybeObjectHandle& handler) {
- if (!IsAnyHas() && !is_any_store_own()) {
+ if (!IsAnyHas() && !IsAnyDefineOwn()) {
stub_cache()->Set(*name, *map, *handler);
}
}
@@ -884,7 +884,6 @@ inline WasmValueType GetWasmValueType(wasm::ValueType type) {
TYPE_CASE(OptRef)
case wasm::kRtt:
- case wasm::kRttWithDepth:
// Rtt values are not supposed to be made available to JavaScript side.
UNREACHABLE();
@@ -1696,8 +1695,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
global->native_context().script_context_table(), isolate());
VariableLookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
- &lookup_result)) {
+ if (script_contexts->Lookup(str_name, &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate(), script_contexts, lookup_result.context_index);
if (lookup_result.mode == VariableMode::kConst) {
@@ -1811,12 +1809,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
PropertyKey key(isolate(), name);
LookupIterator it(
isolate(), object, key,
- IsAnyStoreOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT);
- DCHECK_IMPLIES(IsStoreOwnIC(), it.IsFound() && it.HolderIsReceiver());
- // TODO(joyee): IsStoreOwnIC() is used in [[DefineOwnProperty]]
- // operations during initialization of object literals and class
- // fields. Rename them or separate them out.
- if (IsStoreOwnIC()) {
+ IsAnyDefineOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT);
+ DCHECK_IMPLIES(IsDefineNamedOwnIC(), it.IsFound() && it.HolderIsReceiver());
+ // TODO(v8:12548): refactor DefinedNamedOwnIC and SetNamedIC as subclasses
+ // of StoreIC so their logic doesn't get mixed here.
+ if (IsDefineNamedOwnIC()) {
MAYBE_RETURN_NULL(
JSReceiver::CreateDataProperty(&it, value, Nothing<ShouldThrow>()));
} else {
@@ -1844,11 +1841,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
PropertyKey key(isolate(), name);
LookupIterator it(
isolate(), object, key,
- IsAnyStoreOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT);
+ IsAnyDefineOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT);
if (name->IsPrivate()) {
bool exists = it.IsFound();
- if (name->IsPrivateName() && exists == IsDefineOwnIC()) {
+ if (name->IsPrivateName() && exists == IsDefineKeyedOwnIC()) {
Handle<String> name_string(
String::cast(Symbol::cast(*name).description()), isolate());
if (exists) {
@@ -1870,14 +1867,14 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
}
}
- // For IsStoreOwnIC(), we can't simply do CreateDataProperty below
+ // For IsDefineNamedOwnIC(), we can't simply do CreateDataProperty below
// because we need to check the attributes before UpdateCaches updates
// the state of the LookupIterator.
LookupIterator::State original_state = it.state();
// We'll defer the check for JSProxy and objects with named interceptors,
// because the defineProperty traps need to be called first if they are
// present.
- if (IsStoreOwnIC() && !object->IsJSProxy() &&
+ if (IsDefineNamedOwnIC() && !object->IsJSProxy() &&
!Handle<JSObject>::cast(object)->HasNamedInterceptor()) {
Maybe<bool> can_define = JSReceiver::CheckIfCanDefine(
isolate(), &it, value, Nothing<ShouldThrow>());
@@ -1894,15 +1891,13 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
: TraceIC("StoreIC", name);
}
- // TODO(joyee): IsStoreOwnIC() is true in [[DefineOwnProperty]]
- // operations during initialization of object literals and class
- // fields. In both paths, Rename the operations properly to avoid
- // confusion.
+ // TODO(v8:12548): refactor DefinedNamedOwnIC and SetNamedIC as subclasses
+ // of StoreIC so their logic doesn't get mixed here.
// ES #sec-definefield
// ES #sec-runtime-semantics-propertydefinitionevaluation
- if (IsStoreOwnIC()) {
- // Private property should be defined via DefineOwnIC (as private names) or
- // stored via other store ICs through private symbols.
+ if (IsDefineNamedOwnIC()) {
+ // Private property should be defined via DefineKeyedOwnIC or
+ // KeyedStoreIC with private symbols.
DCHECK(!name->IsPrivate());
MAYBE_RETURN_NULL(DefineOwnDataProperty(
&it, original_state, value, Nothing<ShouldThrow>(), store_origin));
@@ -1954,7 +1949,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
#endif
return StoreHandler::StoreGlobal(lookup->transition_cell());
}
- if (IsDefineOwnIC()) {
+ if (IsDefineKeyedOwnIC()) {
// Private field can't be deleted from this global object and can't
// be overwritten, so install slow handler in order to make store IC
// throw if a private name already exists.
@@ -1973,7 +1968,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
!lookup_start_object_map()->is_dictionary_map());
DCHECK(lookup->IsCacheableTransition());
- if (IsAnyStoreOwn()) {
+ if (IsAnyDefineOwn()) {
return StoreHandler::StoreOwnTransition(isolate(),
lookup->transition_map());
}
@@ -1987,13 +1982,13 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
// If the interceptor is on the receiver...
if (lookup->HolderIsReceiverOrHiddenPrototype() && !info.non_masking()) {
// ...return a store interceptor Smi handler if there is a setter
- // interceptor and it's not StoreOwnIC (which should call the
+ // interceptor and it's not DefineNamedOwnIC (which should call the
// definer)...
- if (!info.setter().IsUndefined(isolate()) && !IsStoreOwnIC()) {
+ if (!info.setter().IsUndefined(isolate()) && !IsDefineNamedOwnIC()) {
return MaybeObjectHandle(StoreHandler::StoreInterceptor(isolate()));
}
// ...otherwise return a slow-case Smi handler, which invokes the
- // definer for StoreOwnIC.
+ // definer for DefineNamedOwnIC.
return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
@@ -2150,11 +2145,15 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
int descriptor = lookup->GetFieldDescriptorIndex();
FieldIndex index = lookup->GetFieldIndex();
+ if (V8_UNLIKELY(holder->IsJSSharedStruct())) {
+ return MaybeObjectHandle(StoreHandler::StoreSharedStructField(
+ isolate(), descriptor, index, lookup->representation()));
+ }
PropertyConstness constness = lookup->constness();
if (constness == PropertyConstness::kConst &&
- IsStoreOwnICKind(nexus()->kind())) {
- // StoreOwnICs are used for initializing object literals therefore
- // we must store the value unconditionally even to
+ IsDefineNamedOwnICKind(nexus()->kind())) {
+ // DefineNamedOwnICs are used for initializing object literals
+ // therefore we must store the value unconditionally even to
// VariableMode::kConst fields.
constness = PropertyConstness::kMutable;
}
@@ -2174,9 +2173,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSReceiver>::cast(lookup->GetReceiver());
Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
- // IsStoreOwnIC() is true when we are defining public fields on a Proxy.
- // In that case use the slow stub to invoke the define trap.
- if (IsStoreOwnIC()) {
+ // IsDefineNamedOwnIC() is true when we are defining public fields on a
+ // Proxy. In that case use the slow stub to invoke the define trap.
+ if (IsDefineNamedOwnIC()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
@@ -2372,7 +2371,7 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
code = StoreHandler::StoreSlow(isolate(), store_mode);
}
- if (is_any_store_own() || IsStoreInArrayLiteralIC()) return code;
+ if (IsAnyDefineOwn() || IsStoreInArrayLiteralIC()) return code;
Handle<Object> validity_cell;
if (!prev_validity_cell.ToHandle(&validity_cell)) {
validity_cell =
@@ -2490,9 +2489,11 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// might deprecate the current map again, if value does not fit.
if (MigrateDeprecated(isolate(), object)) {
Handle<Object> result;
+ // TODO(v8:12548): refactor DefineKeyedOwnIC as a subclass of StoreIC
+ // so the logic doesn't get mixed here.
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- IsDefineOwnIC()
+ IsDefineKeyedOwnIC()
? Runtime::DefineObjectOwnProperty(isolate(), object, key, value,
StoreOrigin::kMaybeKeyed)
: Runtime::SetObjectProperty(isolate(), object, key, value,
@@ -2560,7 +2561,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
DCHECK(store_handle.is_null());
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
- IsDefineOwnIC()
+ // TODO(v8:12548): refactor DefineKeyedOwnIC as a subclass of StoreIC
+ // so the logic doesn't get mixed here.
+ IsDefineKeyedOwnIC()
? Runtime::DefineObjectOwnProperty(isolate(), object, key, value,
StoreOrigin::kMaybeKeyed)
: Runtime::SetObjectProperty(isolate(), object, key, value,
@@ -2710,7 +2713,7 @@ RUNTIME_FUNCTION(Runtime_LoadNoFeedbackIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Name> key = args.at<Name>(1);
- CONVERT_INT32_ARG_CHECKED(slot_kind, 2);
+ int slot_kind = args.smi_value_at(2);
FeedbackSlotKind kind = static_cast<FeedbackSlotKind>(slot_kind);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
@@ -2745,7 +2748,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
Handle<String> name = args.at<String>(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
- CONVERT_INT32_ARG_CHECKED(typeof_value, 3);
+ int typeof_value = args.smi_value_at(3);
TypeofMode typeof_mode = static_cast<TypeofMode>(typeof_value);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
@@ -2769,7 +2772,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
@@ -2831,11 +2834,11 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- // When there is no feedback vector it is OK to use the StoreNamedStrict as
- // the feedback slot kind. We only need if it is StoreOwnICKind when
+ // When there is no feedback vector it is OK to use the SetNamedStrict as
+ // the feedback slot kind. We only reuse this for DefineNamedOwnIC when
// installing the handler for storing const properties. This will happen only
// when feedback vector is available.
- FeedbackSlotKind kind = FeedbackSlotKind::kStoreNamedStrict;
+ FeedbackSlotKind kind = FeedbackSlotKind::kSetNamedStrict;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
@@ -2843,13 +2846,13 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
kind = vector->GetKind(vector_slot);
}
- DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind));
+ DCHECK(IsStoreICKind(kind) || IsDefineNamedOwnICKind(kind));
StoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
-RUNTIME_FUNCTION(Runtime_StoreOwnIC_Miss) {
+RUNTIME_FUNCTION(Runtime_DefineNamedOwnIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2861,9 +2864,9 @@ RUNTIME_FUNCTION(Runtime_StoreOwnIC_Miss) {
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- // When there is no feedback vector it is OK to use the StoreOwnNamed
+ // When there is no feedback vector it is OK to use the DefineNamedOwn
// feedback kind. There _should_ be a vector, though.
- FeedbackSlotKind kind = FeedbackSlotKind::kStoreOwnNamed;
+ FeedbackSlotKind kind = FeedbackSlotKind::kDefineNamedOwn;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
@@ -2871,13 +2874,16 @@ RUNTIME_FUNCTION(Runtime_StoreOwnIC_Miss) {
kind = vector->GetKind(vector_slot);
}
- DCHECK(IsStoreOwnICKind(kind));
+ DCHECK(IsDefineNamedOwnICKind(kind));
+
+ // TODO(v8:12548): refactor DefineNamedOwnIC as a subclass of StoreIC, which
+ // can be called here.
StoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
-RUNTIME_FUNCTION(Runtime_StoreOwnIC_Slow) {
+RUNTIME_FUNCTION(Runtime_DefineNamedOwnIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -2885,9 +2891,9 @@ RUNTIME_FUNCTION(Runtime_StoreOwnIC_Slow) {
Handle<Object> object = args.at(1);
Handle<Object> key = args.at(2);
- // Unlike DefineOwn, StoreOwn doesn't handle private fields and is used for
- // defining data properties in object literals and defining public class
- // fields.
+ // Unlike DefineKeyedOwnIC, DefineNamedOwnIC doesn't handle private
+ // fields and is used for defining data properties in object literals
+ // and defining named public class fields.
DCHECK(!key->IsSymbol() || !Symbol::cast(*key).is_private_name());
PropertyKey lookup_key(isolate, key);
@@ -2923,7 +2929,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalICNoFeedback_Miss) {
Handle<Object> value = args.at(0);
Handle<Name> key = args.at<Name>(1);
- // TODO(mythria): Replace StoreGlobalStrict/Sloppy with StoreNamed.
+ // TODO(mythria): Replace StoreGlobalStrict/Sloppy with SetNamedProperty.
StoreGlobalIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(),
FeedbackSlotKind::kStoreGlobalStrict);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
@@ -2936,7 +2942,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
+ Handle<String> name = args.at<String>(4);
#ifdef DEBUG
{
@@ -2956,8 +2962,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
native_context->script_context_table(), isolate);
VariableLookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate, *script_contexts, *name,
- &lookup_result)) {
+ if (script_contexts->Lookup(name, &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
if (lookup_result.mode == VariableMode::kConst) {
@@ -3000,7 +3005,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
// installed in feedback vectors. In such cases, we need to get the kind from
// feedback vector slot since the handlers are used for both for StoreKeyed
// and StoreInArrayLiteral kinds.
- FeedbackSlotKind kind = FeedbackSlotKind::kStoreKeyedStrict;
+ FeedbackSlotKind kind = FeedbackSlotKind::kSetKeyedStrict;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
@@ -3010,7 +3015,9 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
// The elements store stubs miss into this function, but they are shared by
// different ICs.
- if (IsKeyedStoreICKind(kind) || IsKeyedDefineOwnICKind(kind)) {
+ // TODO(v8:12548): refactor DefineKeyedOwnIC as a subclass of KeyedStoreIC,
+ // which can be called here.
+ if (IsKeyedStoreICKind(kind) || IsDefineKeyedOwnICKind(kind)) {
KeyedStoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
@@ -3025,7 +3032,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
}
}
-RUNTIME_FUNCTION(Runtime_KeyedDefineOwnIC_Miss) {
+RUNTIME_FUNCTION(Runtime_DefineKeyedOwnIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -3036,17 +3043,17 @@ RUNTIME_FUNCTION(Runtime_KeyedDefineOwnIC_Miss) {
Handle<Object> key = args.at(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- FeedbackSlotKind kind = FeedbackSlotKind::kDefineOwnKeyed;
+ FeedbackSlotKind kind = FeedbackSlotKind::kDefineKeyedOwn;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
kind = vector->GetKind(vector_slot);
- DCHECK(IsDefineOwnICKind(kind));
+ DCHECK(IsDefineKeyedOwnICKind(kind));
}
- // The elements store stubs miss into this function, but they are shared by
- // different ICs.
+ // TODO(v8:12548): refactor DefineKeyedOwnIC as a subclass of KeyedStoreIC,
+ // which can be called here.
KeyedStoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
@@ -3086,7 +3093,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
StoreOrigin::kMaybeKeyed));
}
-RUNTIME_FUNCTION(Runtime_KeyedDefineOwnIC_Slow) {
+RUNTIME_FUNCTION(Runtime_DefineKeyedOwnIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -3131,7 +3138,8 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
StoreOwnElement(isolate, Handle<JSArray>::cast(object), key, value);
return *value;
} else {
- DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind));
+ DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind) ||
+ IsDefineKeyedOwnICKind(kind));
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, object, key, value,
StoreOrigin::kMaybeKeyed));
@@ -3239,11 +3247,11 @@ static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<Object> source = args.at<Object>(0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
+ Handle<Object> source = args.at(0);
+ int flags = args.smi_value_at(1);
if (!MigrateDeprecated(isolate, source)) {
- CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 2);
+ int index = args.tagged_index_value_at(2);
FeedbackSlot slot = FeedbackVector::ToSlot(index);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
if (maybe_vector->IsFeedbackVector()) {
@@ -3393,8 +3401,8 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
// TODO(verwaest): This should probably get the holder and receiver as input.
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
- DCHECK_GE(args.smi_at(1), 0);
- uint32_t index = args.smi_at(1);
+ DCHECK_GE(args.smi_value_at(1), 0);
+ uint32_t index = args.smi_value_at(1);
Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
isolate);
@@ -3438,8 +3446,8 @@ RUNTIME_FUNCTION(Runtime_KeyedHasIC_Miss) {
RUNTIME_FUNCTION(Runtime_HasElementWithInterceptor) {
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
- DCHECK_GE(args.smi_at(1), 0);
- uint32_t index = args.smi_at(1);
+ DCHECK_GE(args.smi_value_at(1), 0);
+ uint32_t index = args.smi_value_at(1);
Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
isolate);
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index e14dd7c17a..f1c489efbe 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -53,11 +53,13 @@ class IC {
return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
}
bool IsAnyStore() const {
- return IsStoreIC() || IsStoreOwnIC() || IsStoreGlobalIC() ||
+ return IsStoreIC() || IsDefineNamedOwnIC() || IsStoreGlobalIC() ||
IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind()) ||
- IsDefineOwnIC();
+ IsDefineKeyedOwnIC();
+ }
+ bool IsAnyDefineOwn() const {
+ return IsDefineNamedOwnIC() || IsDefineKeyedOwnIC();
}
- bool IsAnyStoreOwn() const { return IsStoreOwnIC() || IsDefineOwnIC(); }
static inline bool IsHandler(MaybeObject object);
@@ -120,19 +122,17 @@ class IC {
bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
bool IsStoreGlobalIC() const { return IsStoreGlobalICKind(kind_); }
bool IsStoreIC() const { return IsStoreICKind(kind_); }
- bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
- bool IsDefineOwnIC() const { return IsDefineOwnICKind(kind_); }
+ bool IsDefineNamedOwnIC() const { return IsDefineNamedOwnICKind(kind_); }
bool IsStoreInArrayLiteralIC() const {
return IsStoreInArrayLiteralICKind(kind_);
}
bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
bool IsKeyedHasIC() const { return IsKeyedHasICKind(kind_); }
- bool IsKeyedDefineOwnIC() const { return IsKeyedDefineOwnICKind(kind_); }
+ bool IsDefineKeyedOwnIC() const { return IsDefineKeyedOwnICKind(kind_); }
bool is_keyed() const {
return IsKeyedLoadIC() || IsKeyedStoreIC() || IsStoreInArrayLiteralIC() ||
- IsKeyedHasIC() || IsKeyedDefineOwnIC();
+ IsKeyedHasIC() || IsDefineKeyedOwnIC();
}
- bool is_any_store_own() const { return IsStoreOwnIC() || IsDefineOwnIC(); }
bool ShouldRecomputeHandler(Handle<String> name);
Handle<Map> lookup_start_object_map() { return lookup_start_object_map_; }
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 3df5943ddd..cef7a2fa28 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -17,16 +17,17 @@ namespace v8 {
namespace internal {
enum class StoreMode {
+ // TODO(v8:12548): rename to kSet and kDefineKeyedOwnInLiteral
kOrdinary,
kInLiteral,
- // kStoreOwn performs an ordinary property store without traversing the
+ // kDefineNamedOwn performs an ordinary property store without traversing the
// prototype chain. In the case of private fields, it will throw if the
// field does not already exist.
- // kDefineOwn is similar to kStoreOwn, but for private class fields, it
- // will throw if the field does already exist.
- kStoreOwn,
- kDefineOwn
+ // kDefineKeyedOwn is similar to kDefineNamedOwn, but for private class
+ // fields, it will throw if the field does already exist.
+ kDefineNamedOwn,
+ kDefineKeyedOwn
};
// With private symbols, 'define' semantics will throw if the field already
@@ -83,16 +84,18 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
// kind.
void EmitGenericPropertyStore(TNode<JSReceiver> receiver,
TNode<Map> receiver_map,
+ TNode<Uint16T> instance_type,
const StoreICParameters* p,
ExitPoint* exit_point, Label* slow,
Maybe<LanguageMode> maybe_language_mode);
void EmitGenericPropertyStore(TNode<JSReceiver> receiver,
TNode<Map> receiver_map,
+ TNode<Uint16T> instance_type,
const StoreICParameters* p, Label* slow) {
ExitPoint direct_exit(this);
- EmitGenericPropertyStore(receiver, receiver_map, p, &direct_exit, slow,
- Nothing<LanguageMode>());
+ EmitGenericPropertyStore(receiver, receiver_map, instance_type, p,
+ &direct_exit, slow, Nothing<LanguageMode>());
}
void BranchIfPrototypesMayHaveReadOnlyElements(
@@ -146,8 +149,8 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
bool IsKeyedStore() const { return mode_ == StoreMode::kOrdinary; }
bool IsStoreInLiteral() const { return mode_ == StoreMode::kInLiteral; }
- bool IsKeyedStoreOwn() const { return mode_ == StoreMode::kStoreOwn; }
- bool IsKeyedDefineOwn() const { return mode_ == StoreMode::kDefineOwn; }
+ bool IsDefineNamedOwn() const { return mode_ == StoreMode::kDefineNamedOwn; }
+ bool IsDefineKeyedOwn() const { return mode_ == StoreMode::kDefineKeyedOwn; }
bool ShouldCheckPrototype() const { return IsKeyedStore(); }
bool ShouldReconfigureExisting() const { return IsStoreInLiteral(); }
@@ -158,8 +161,8 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
// We don't need the prototype validity check for "own" stores, because
// we don't care about the prototype chain.
// Thus, we need the prototype check only for ordinary stores.
- DCHECK_IMPLIES(!IsKeyedStore(), IsStoreInLiteral() || IsKeyedStoreOwn() ||
- IsKeyedDefineOwn());
+ DCHECK_IMPLIES(!IsKeyedStore(), IsStoreInLiteral() || IsDefineNamedOwn() ||
+ IsDefineKeyedOwn());
return IsKeyedStore();
}
};
@@ -169,9 +172,9 @@ void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
assembler.KeyedStoreGeneric();
}
-void KeyedDefineOwnGenericGenerator::Generate(
+void DefineKeyedOwnGenericGenerator::Generate(
compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kDefineOwn);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kDefineKeyedOwn);
assembler.KeyedStoreGeneric();
}
@@ -180,9 +183,11 @@ void StoreICNoFeedbackGenerator::Generate(compiler::CodeAssemblerState* state) {
assembler.StoreIC_NoFeedback();
}
-void StoreOwnICNoFeedbackGenerator::Generate(
+void DefineNamedOwnICNoFeedbackGenerator::Generate(
compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kStoreOwn);
+ // TODO(v8:12548): it's a hack to reuse KeyedStoreGenericAssembler for
+ // DefineNamedOwnIC, we should separate it out.
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kDefineNamedOwn);
assembler.StoreIC_NoFeedback();
}
@@ -792,7 +797,8 @@ TNode<Map> KeyedStoreGenericAssembler::FindCandidateStoreICTransitionMapHandler(
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<JSReceiver> receiver, TNode<Map> receiver_map,
- const StoreICParameters* p, ExitPoint* exit_point, Label* slow,
+ TNode<Uint16T> instance_type, const StoreICParameters* p,
+ ExitPoint* exit_point, Label* slow,
Maybe<LanguageMode> maybe_language_mode) {
CSA_DCHECK(this, IsSimpleObjectMap(receiver_map));
// TODO(rmcilroy) Type as Struct once we use a trimmed down
@@ -817,7 +823,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&descriptor_found);
{
- if (IsKeyedDefineOwn()) {
+ if (IsDefineKeyedOwn()) {
// Take slow path to throw if a private name already exists.
GotoIf(IsPrivateSymbol(name), slow);
}
@@ -841,11 +847,22 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&data_property);
{
+ Label shared(this);
+ GotoIf(IsJSSharedStructInstanceType(instance_type), &shared);
+
CheckForAssociatedProtector(name, slow);
OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
name_index, details, p->value(), slow,
false);
exit_point->Return(p->value());
+
+ BIND(&shared);
+ {
+ StoreJSSharedStructField(p->context(), receiver, receiver_map,
+ descriptors, name_index, details,
+ p->value());
+ exit_point->Return(p->value());
+ }
}
}
BIND(&lookup_transition);
@@ -879,7 +896,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&dictionary_found);
{
Label check_const(this), overwrite(this), done(this);
- if (IsKeyedDefineOwn()) {
+ if (IsDefineKeyedOwn()) {
// Take slow path to throw if a private name already exists.
GotoIf(IsPrivateSymbol(name), slow);
}
@@ -1008,10 +1025,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (!ShouldReconfigureExisting()) {
BIND(&readonly);
{
- // FIXME(joyee): IsKeyedStoreOwn is actually true from
- // StaNamedOwnProperty, which implements [[DefineOwnProperty]]
- // semantics. Rename them.
- if (IsKeyedDefineOwn() || IsKeyedStoreOwn()) {
+ if (IsDefineKeyedOwn() || IsDefineNamedOwn()) {
Goto(slow);
} else {
LanguageMode language_mode;
@@ -1068,8 +1082,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
StoreICParameters p(context, receiver, var_unique.value(), value, {},
UndefinedConstant(), StoreICMode::kDefault);
ExitPoint direct_exit(this);
- EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &direct_exit,
- &slow, language_mode);
+ EmitGenericPropertyStore(CAST(receiver), receiver_map, instance_type, &p,
+ &direct_exit, &slow, language_mode);
}
BIND(&not_internalized);
@@ -1084,18 +1098,19 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&slow);
{
- if (IsKeyedStore() || IsKeyedStoreOwn()) {
- CSA_DCHECK(this, BoolConstant(!IsKeyedStoreOwn()));
+ if (IsKeyedStore() || IsDefineNamedOwn()) {
+ // The DefineNamedOwnIC hacky reuse should never reach here.
+ CSA_DCHECK(this, BoolConstant(!IsDefineNamedOwn()));
Comment("KeyedStoreGeneric_slow");
TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
value);
- } else if (IsKeyedDefineOwn()) {
+ } else if (IsDefineKeyedOwn()) {
TailCallRuntime(Runtime::kDefineObjectOwnProperty, context, receiver, key,
value);
} else {
DCHECK(IsStoreInLiteral());
- TailCallRuntime(Runtime::kStoreDataPropertyInLiteral, context, receiver,
- key, value);
+ TailCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
+ receiver, key, value);
}
}
}
@@ -1140,17 +1155,19 @@ void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
// checks, strings and string wrappers, proxies) are handled in the runtime.
GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
{
- StoreICParameters p(
- context, receiver, name, value, slot, UndefinedConstant(),
- IsKeyedStoreOwn() ? StoreICMode::kStoreOwn : StoreICMode::kDefault);
- EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &miss);
+ StoreICParameters p(context, receiver, name, value, slot,
+ UndefinedConstant(),
+ IsDefineNamedOwn() ? StoreICMode::kDefineNamedOwn
+ : StoreICMode::kDefault);
+ EmitGenericPropertyStore(CAST(receiver), receiver_map, instance_type, &p,
+ &miss);
}
}
BIND(&miss);
{
- auto runtime =
- IsKeyedStoreOwn() ? Runtime::kStoreOwnIC_Miss : Runtime::kStoreIC_Miss;
+ auto runtime = IsDefineNamedOwn() ? Runtime::kDefineNamedOwnIC_Miss
+ : Runtime::kStoreIC_Miss;
TailCallRuntime(runtime, context, value, slot, UndefinedConstant(),
receiver_maybe_smi, name);
}
@@ -1172,14 +1189,16 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
IsSimpleObjectMap(LoadMap(receiver))));
GotoIfNot(is_simple_receiver, &slow);
- EmitGenericPropertyStore(receiver, LoadMap(receiver), &p, &exit_point, &slow,
+ TNode<Map> map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ EmitGenericPropertyStore(receiver, map, instance_type, &p, &exit_point, &slow,
Just(language_mode));
BIND(&slow);
{
if (IsStoreInLiteral()) {
- CallRuntime(Runtime::kStoreDataPropertyInLiteral, context, receiver,
- unique_name, value);
+ CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
+ receiver, unique_name, value);
} else {
CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
value);
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 04d90865bd..6618ed3b26 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -34,7 +34,7 @@ class KeyedStoreGenericGenerator {
TNode<Object> value);
};
-class KeyedDefineOwnGenericGenerator {
+class DefineKeyedOwnGenericGenerator {
public:
static void Generate(compiler::CodeAssemblerState* state);
};
@@ -44,7 +44,7 @@ class StoreICNoFeedbackGenerator {
static void Generate(compiler::CodeAssemblerState* state);
};
-class StoreOwnICNoFeedbackGenerator {
+class DefineNamedOwnICNoFeedbackGenerator {
public:
static void Generate(compiler::CodeAssemblerState* state);
};
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index a9e10f1311..41f2f6ca68 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -78,12 +78,12 @@ class V8_EXPORT_PRIVATE StubCache {
Isolate* isolate() { return isolate_; }
- // Setting kCacheIndexShift to Name::kHashShift is convenient because it
+ // Setting kCacheIndexShift to Name::HashBits::kShift is convenient because it
// causes the bit field inside the hash field to get shifted out implicitly.
// Note that kCacheIndexShift must not get too large, because
// sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift (see
// the STATIC_ASSERT below, in {entry(...)}).
- static const int kCacheIndexShift = Name::kHashShift;
+ static const int kCacheIndexShift = Name::HashBits::kShift;
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
@@ -123,7 +123,7 @@ class V8_EXPORT_PRIVATE StubCache {
// Compute the entry for a given offset in exactly the same way as
// we do in generated code. We generate an hash code that already
- // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
+ // ends in Name::HashBits::kShift 0s. Then we multiply it so it is a multiple
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index 0fe9f8d9b4..ad5f8842a5 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -1,8 +1,9 @@
ftang@chromium.org
-gsathya@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
marja@chromium.org
verwaest@chromium.org
syg@chromium.org
+
+per-file heap-symbols.h=file:../../COMMON_OWNERS
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 947d8381d8..fdf186be36 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -52,6 +52,7 @@
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/js-shadow-realms.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format.h"
#include "src/objects/js-segment-iterator.h"
@@ -59,6 +60,7 @@
#include "src/objects/js-segments.h"
#endif // V8_INTL_SUPPORT
#include "src/codegen/script-details.h"
+#include "src/objects/js-struct.h"
#include "src/objects/js-temporal-objects-inl.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/ordered-hash-table.h"
@@ -227,6 +229,7 @@ class Genesis {
void InitializeExperimentalGlobal();
void InitializeIteratorFunctions();
void InitializeCallSiteBuiltins();
+ void InitializeConsole(Handle<JSObject> extras_binding);
#define DECLARE_FEATURE_INITIALIZATION(id, descr) void InitializeGlobal_##id();
@@ -429,6 +432,12 @@ V8_NOINLINE Handle<JSFunction> CreateFunctionForBuiltinWithPrototype(
}
Handle<Map> initial_map =
factory->NewMap(type, instance_size, elements_kind, inobject_properties);
+ if (type == JS_FUNCTION_TYPE) {
+ DCHECK_EQ(instance_size, JSFunction::kSizeWithPrototype);
+ // Since we are creating an initial map for JSFunction objects with
+ // prototype slot, set the respective bit.
+ initial_map->set_has_prototype_slot(true);
+ }
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
// shared prototype?
@@ -1242,7 +1251,7 @@ void Genesis::InstallGlobalThisBinding() {
context->set(slot, native_context()->global_proxy());
Handle<ScriptContextTable> new_script_contexts =
- ScriptContextTable::Extend(script_contexts, context);
+ ScriptContextTable::Extend(isolate(), script_contexts, context);
native_context()->set_script_context_table(*new_script_contexts);
}
@@ -1437,10 +1446,10 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
isolate->native_context()->set_initial_error_prototype(*prototype);
} else {
Handle<JSFunction> global_error = isolate->error_function();
- CHECK(JSReceiver::SetPrototype(error_fun, global_error, false,
+ CHECK(JSReceiver::SetPrototype(isolate, error_fun, global_error, false,
kThrowOnError)
.FromMaybe(false));
- CHECK(JSReceiver::SetPrototype(prototype,
+ CHECK(JSReceiver::SetPrototype(isolate, prototype,
handle(global_error->prototype(), isolate),
false, kThrowOnError)
.FromMaybe(false));
@@ -1628,6 +1637,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
function_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, function_fun,
Context::FUNCTION_FUNCTION_INDEX);
+ native_context()->set_function_prototype(*prototype);
// Setup the methods on the %FunctionPrototype%.
JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
@@ -1901,12 +1911,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kNumberParseFloat, 1, true);
JSObject::AddProperty(isolate_, global_object, "parseFloat",
parse_float_fun, DONT_ENUM);
+ native_context()->set_global_parse_float_fun(*parse_float_fun);
// Install Number.parseInt and Global.parseInt.
Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
isolate_, number_fun, "parseInt", Builtin::kNumberParseInt, 2, true);
JSObject::AddProperty(isolate_, global_object, "parseInt", parse_int_fun,
DONT_ENUM);
+ native_context()->set_global_parse_int_fun(*parse_int_fun);
// Install Number constants
const double kMaxValue = 1.7976931348623157e+308;
@@ -2791,71 +2803,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallToStringTag(isolate_, math, "Math");
}
- { // -- C o n s o l e
- Handle<String> name = factory->InternalizeUtf8String("console");
-
- Handle<NativeContext> context(isolate()->native_context());
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfoForBuiltin(name, Builtin::kIllegal);
- info->set_language_mode(LanguageMode::kStrict);
-
- Handle<JSFunction> cons =
- Factory::JSFunctionBuilder{isolate(), info, context}.Build();
- Handle<JSObject> empty = factory->NewJSObject(isolate_->object_function());
- JSFunction::SetPrototype(cons, empty);
-
- Handle<JSObject> console = factory->NewJSObject(cons, AllocationType::kOld);
- DCHECK(console->IsJSObject());
- JSObject::AddProperty(isolate_, global, name, console, DONT_ENUM);
- SimpleInstallFunction(isolate_, console, "debug", Builtin::kConsoleDebug, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "error", Builtin::kConsoleError, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "info", Builtin::kConsoleInfo, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "log", Builtin::kConsoleLog, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "warn", Builtin::kConsoleWarn, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "dir", Builtin::kConsoleDir, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "dirxml", Builtin::kConsoleDirXml,
- 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "table", Builtin::kConsoleTable, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "trace", Builtin::kConsoleTrace, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "group", Builtin::kConsoleGroup, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "groupCollapsed",
- Builtin::kConsoleGroupCollapsed, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "groupEnd",
- Builtin::kConsoleGroupEnd, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "clear", Builtin::kConsoleClear, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "count", Builtin::kConsoleCount, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "countReset",
- Builtin::kConsoleCountReset, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "assert",
- Builtin::kFastConsoleAssert, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "profile",
- Builtin::kConsoleProfile, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "profileEnd",
- Builtin::kConsoleProfileEnd, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "time", Builtin::kConsoleTime, 0,
- false, NONE);
- SimpleInstallFunction(isolate_, console, "timeLog",
- Builtin::kConsoleTimeLog, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "timeEnd",
- Builtin::kConsoleTimeEnd, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "timeStamp",
- Builtin::kConsoleTimeStamp, 0, false, NONE);
- SimpleInstallFunction(isolate_, console, "context",
- Builtin::kConsoleContext, 1, true, NONE);
- InstallToStringTag(isolate_, console, "Object");
- }
-
#ifdef V8_INTL_SUPPORT
{ // -- I n t l
Handle<JSObject> intl =
@@ -2870,6 +2817,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate(), intl, "getCanonicalLocales",
Builtin::kIntlGetCanonicalLocales, 1, false);
+ SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
+ Builtin::kIntlSupportedValuesOf, 1, false);
+
{ // -- D a t e T i m e F o r m a t
Handle<JSFunction> date_time_format_constructor = InstallFunction(
isolate_, intl, "DateTimeFormat", JS_DATE_TIME_FORMAT_TYPE,
@@ -3134,6 +3084,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(isolate(), prototype,
factory->numberingSystem_string(),
Builtin::kLocalePrototypeNumberingSystem, true);
+
+ // Intl Locale Info functions
+ SimpleInstallGetter(isolate(), prototype, factory->calendars_string(),
+ Builtin::kLocalePrototypeCalendars, true);
+ SimpleInstallGetter(isolate(), prototype, factory->collations_string(),
+ Builtin::kLocalePrototypeCollations, true);
+ SimpleInstallGetter(isolate(), prototype, factory->hourCycles_string(),
+ Builtin::kLocalePrototypeHourCycles, true);
+ SimpleInstallGetter(isolate(), prototype,
+ factory->numberingSystems_string(),
+ Builtin::kLocalePrototypeNumberingSystems, true);
+ SimpleInstallGetter(isolate(), prototype, factory->textInfo_string(),
+ Builtin::kLocalePrototypeTextInfo, true);
+ SimpleInstallGetter(isolate(), prototype, factory->timeZones_string(),
+ Builtin::kLocalePrototypeTimeZones, true);
+ SimpleInstallGetter(isolate(), prototype, factory->weekInfo_string(),
+ Builtin::kLocalePrototypeWeekInfo, true);
}
{ // -- D i s p l a y N a m e s
@@ -3840,7 +3807,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
- STATIC_ASSERT(JSFunctionOrBoundFunction::kLengthDescriptorIndex == 0);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex ==
+ 0);
Descriptor d = Descriptor::AccessorConstant(
factory->length_string(), factory->bound_function_length_accessor(),
roc_attribs);
@@ -3848,7 +3817,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // name
- STATIC_ASSERT(JSFunctionOrBoundFunction::kNameDescriptorIndex == 1);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex ==
+ 1);
Descriptor d = Descriptor::AccessorConstant(
factory->name_string(), factory->bound_function_name_accessor(),
roc_attribs);
@@ -4054,7 +4025,8 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
result->shared().DontAdaptArguments();
result->shared().set_length(3);
- CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
+ CHECK(JSObject::SetPrototype(isolate(), result, typed_array_function, false,
+ kDontThrow)
.FromJust());
Handle<Smi> bytes_per_element(
@@ -4071,8 +4043,8 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
DCHECK(result->prototype().IsJSObject());
Handle<JSObject> prototype(JSObject::cast(result->prototype()), isolate());
- CHECK(JSObject::SetPrototype(prototype, typed_array_prototype, false,
- kDontThrow)
+ CHECK(JSObject::SetPrototype(isolate(), prototype, typed_array_prototype,
+ false, kDontThrow)
.FromJust());
CHECK_NE(prototype->map().ptr(),
@@ -4391,6 +4363,80 @@ void Genesis::InitializeCallSiteBuiltins() {
}
}
+void Genesis::InitializeConsole(Handle<JSObject> extras_binding) {
+ HandleScope scope(isolate());
+ Factory* factory = isolate_->factory();
+
+ // -- C o n s o l e
+ Handle<String> name = factory->console_string();
+
+ Handle<NativeContext> context(isolate_->native_context());
+ Handle<JSGlobalObject> global(context->global_object(), isolate());
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfoForBuiltin(name, Builtin::kIllegal);
+ info->set_language_mode(LanguageMode::kStrict);
+
+ Handle<JSFunction> cons =
+ Factory::JSFunctionBuilder{isolate(), info, context}.Build();
+ Handle<JSObject> empty = factory->NewJSObject(isolate_->object_function());
+ JSFunction::SetPrototype(cons, empty);
+
+ Handle<JSObject> console = factory->NewJSObject(cons, AllocationType::kOld);
+ DCHECK(console->IsJSObject());
+
+ JSObject::AddProperty(isolate_, extras_binding, name, console, DONT_ENUM);
+ // TODO(v8:11989): remove this in the next release
+ JSObject::AddProperty(isolate_, global, name, console, DONT_ENUM);
+
+ SimpleInstallFunction(isolate_, console, "debug", Builtin::kConsoleDebug, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "error", Builtin::kConsoleError, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "info", Builtin::kConsoleInfo, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "log", Builtin::kConsoleLog, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "warn", Builtin::kConsoleWarn, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "dir", Builtin::kConsoleDir, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "dirxml", Builtin::kConsoleDirXml, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "table", Builtin::kConsoleTable, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "trace", Builtin::kConsoleTrace, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "group", Builtin::kConsoleGroup, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "groupCollapsed",
+ Builtin::kConsoleGroupCollapsed, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "groupEnd",
+ Builtin::kConsoleGroupEnd, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "clear", Builtin::kConsoleClear, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "count", Builtin::kConsoleCount, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "countReset",
+ Builtin::kConsoleCountReset, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "assert",
+ Builtin::kFastConsoleAssert, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "profile", Builtin::kConsoleProfile,
+ 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "profileEnd",
+ Builtin::kConsoleProfileEnd, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "time", Builtin::kConsoleTime, 0,
+ false, NONE);
+ SimpleInstallFunction(isolate_, console, "timeLog", Builtin::kConsoleTimeLog,
+ 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "timeEnd", Builtin::kConsoleTimeEnd,
+ 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "timeStamp",
+ Builtin::kConsoleTimeStamp, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "context", Builtin::kConsoleContext,
+ 1, true, NONE);
+ InstallToStringTag(isolate_, console, "Object");
+}
+
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
@@ -4405,6 +4451,61 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
+void Genesis::InitializeGlobal_harmony_shadow_realm() {
+ if (!FLAG_harmony_shadow_realm) return;
+ // -- S h a d o w R e a l m
+ // #sec-shadowrealm-objects
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Handle<JSFunction> shadow_realm_fun = InstallFunction(
+ isolate_, global, "ShadowRealm", JS_SHADOW_REALM_TYPE,
+ JSShadowRealm::kHeaderSize, 0, factory()->the_hole_value(),
+ Builtin::kShadowRealmConstructor);
+ shadow_realm_fun->shared().set_length(0);
+ shadow_realm_fun->shared().DontAdaptArguments();
+
+ // Setup %ShadowRealmPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(shadow_realm_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate_, prototype, factory()->ShadowRealm_string());
+
+ SimpleInstallFunction(isolate_, prototype, "evaluate",
+ Builtin::kShadowRealmPrototypeEvaluate, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "importValue",
+ Builtin::kShadowRealmPrototypeImportValue, 2, true);
+
+ { // --- W r a p p e d F u n c t i o n
+ Handle<Map> map = factory()->NewMap(JS_WRAPPED_FUNCTION_TYPE,
+ JSWrappedFunction::kHeaderSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 0);
+ map->SetConstructor(native_context()->object_function());
+ map->set_is_callable(true);
+ Handle<JSObject> empty_function(native_context()->function_prototype(),
+ isolate());
+ Map::SetPrototype(isolate(), map, empty_function);
+
+ native_context()->set_wrapped_function_map(*map);
+ }
+}
+
+void Genesis::InitializeGlobal_harmony_struct() {
+ if (!FLAG_harmony_struct) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Handle<String> name =
+ isolate()->factory()->InternalizeUtf8String("SharedStructType");
+ Handle<JSFunction> shared_struct_type_fun = CreateFunctionForBuiltin(
+ isolate(), name, isolate()->strict_function_with_readonly_prototype_map(),
+ Builtin::kSharedStructTypeConstructor);
+ JSObject::MakePrototypesFast(shared_struct_type_fun, kStartAtReceiver,
+ isolate());
+ shared_struct_type_fun->shared().set_native(true);
+ shared_struct_type_fun->shared().DontAdaptArguments();
+ shared_struct_type_fun->shared().set_length(1);
+ JSObject::AddProperty(isolate(), global, "SharedStructType",
+ shared_struct_type_fun, DONT_ENUM);
+}
+
void Genesis::InitializeGlobal_harmony_array_find_last() {
if (!FLAG_harmony_array_find_last) return;
@@ -4438,6 +4539,28 @@ void Genesis::InitializeGlobal_harmony_array_find_last() {
}
}
+void Genesis::InitializeGlobal_harmony_array_grouping() {
+ if (!FLAG_harmony_array_grouping) return;
+
+ Handle<JSFunction> array_function(native_context()->array_function(),
+ isolate());
+ Handle<JSObject> array_prototype(
+ JSObject::cast(array_function->instance_prototype()), isolate());
+
+ SimpleInstallFunction(isolate_, array_prototype, "groupBy",
+ Builtin::kArrayPrototypeGroupBy, 1, false);
+ SimpleInstallFunction(isolate_, array_prototype, "groupByToMap",
+ Builtin::kArrayPrototypeGroupByToMap, 1, false);
+
+ Handle<JSObject> unscopables = Handle<JSObject>::cast(
+ JSObject::GetProperty(isolate(), array_prototype,
+ isolate()->factory()->unscopables_symbol())
+ .ToHandleChecked());
+
+ InstallTrueValuedProperty(isolate_, unscopables, "groupBy");
+ InstallTrueValuedProperty(isolate_, unscopables, "groupByToMap");
+}
+
void Genesis::InitializeGlobal_harmony_object_has_own() {
if (!FLAG_harmony_object_has_own) return;
@@ -4585,6 +4708,7 @@ void Genesis::InitializeGlobal_harmony_temporal() {
Handle<JSObject> now = factory()->NewJSObject(isolate_->object_function(),
AllocationType::kOld);
JSObject::AddProperty(isolate_, temporal, "Now", now, DONT_ENUM);
+ InstallToStringTag(isolate_, now, "Temporal.Now");
// Note: There are NO Temporal.Now.plainTime
// See https://github.com/tc39/proposal-temporal/issues/1540
@@ -5193,34 +5317,32 @@ void Genesis::InitializeGlobal_harmony_temporal() {
}
#undef INSTALL_TEMPORAL_CTOR_AND_PROTOTYPE
#undef INSTALL_TEMPORAL_FUNC
+
+ // The StringListFromIterable functions is created but not
+ // exposed, as it is used internally by CalendarFields.
+ {
+ Handle<JSFunction> func = SimpleCreateFunction(
+ isolate_,
+ factory()->InternalizeUtf8String("StringFixedArrayFromIterable"),
+ Builtin::kStringFixedArrayFromIterable, 1, false);
+ native_context()->set_string_fixed_array_from_iterable(*func);
+ }
+ // The TemporalInsantFixedArrayFromIterable functions is created but not
+ // exposed, as it is used internally by GetPossibleInstantsFor.
+ {
+ Handle<JSFunction> func = SimpleCreateFunction(
+ isolate_,
+ factory()->InternalizeUtf8String(
+ "TemporalInstantFixedArrayFromIterable"),
+ Builtin::kTemporalInstantFixedArrayFromIterable, 1, false);
+ native_context()->set_temporal_instant_fixed_array_from_iterable(*func);
+ }
}
#ifdef V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_harmony_intl_locale_info() {
- if (!FLAG_harmony_intl_locale_info) return;
- Handle<JSObject> prototype(
- JSObject::cast(native_context()->intl_locale_function().prototype()),
- isolate_);
- SimpleInstallGetter(isolate(), prototype, factory()->calendars_string(),
- Builtin::kLocalePrototypeCalendars, true);
- SimpleInstallGetter(isolate(), prototype, factory()->collations_string(),
- Builtin::kLocalePrototypeCollations, true);
- SimpleInstallGetter(isolate(), prototype, factory()->hourCycles_string(),
- Builtin::kLocalePrototypeHourCycles, true);
- SimpleInstallGetter(isolate(), prototype,
- factory()->numberingSystems_string(),
- Builtin::kLocalePrototypeNumberingSystems, true);
- SimpleInstallGetter(isolate(), prototype, factory()->textInfo_string(),
- Builtin::kLocalePrototypeTextInfo, true);
- SimpleInstallGetter(isolate(), prototype, factory()->timeZones_string(),
- Builtin::kLocalePrototypeTimeZones, true);
- SimpleInstallGetter(isolate(), prototype, factory()->weekInfo_string(),
- Builtin::kLocalePrototypeWeekInfo, true);
-}
-
-void Genesis::InitializeGlobal_harmony_intl_enumeration() {
- if (!FLAG_harmony_intl_enumeration) return;
+void Genesis::InitializeGlobal_harmony_intl_number_format_v3() {
+ if (!FLAG_harmony_intl_number_format_v3) return;
Handle<JSObject> intl = Handle<JSObject>::cast(
JSReceiver::GetProperty(
@@ -5229,8 +5351,35 @@ void Genesis::InitializeGlobal_harmony_intl_enumeration() {
factory()->InternalizeUtf8String("Intl"))
.ToHandleChecked());
- SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
- Builtin::kIntlSupportedValuesOf, 1, false);
+ {
+ Handle<JSFunction> number_format_constructor = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ isolate(), Handle<JSReceiver>(JSReceiver::cast(*intl), isolate()),
+ factory()->InternalizeUtf8String("NumberFormat"))
+ .ToHandleChecked());
+
+ Handle<JSObject> prototype(
+ JSObject::cast(number_format_constructor->prototype()), isolate());
+
+ SimpleInstallFunction(isolate(), prototype, "formatRange",
+ Builtin::kNumberFormatPrototypeFormatRange, 2, false);
+ SimpleInstallFunction(isolate(), prototype, "formatRangeToParts",
+ Builtin::kNumberFormatPrototypeFormatRangeToParts, 2,
+ false);
+ }
+ {
+ Handle<JSFunction> plural_rules_constructor = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ isolate(), Handle<JSReceiver>(JSReceiver::cast(*intl), isolate()),
+ factory()->InternalizeUtf8String("PluralRules"))
+ .ToHandleChecked());
+
+ Handle<JSObject> prototype(
+ JSObject::cast(plural_rules_constructor->prototype()), isolate());
+
+ SimpleInstallFunction(isolate(), prototype, "selectRange",
+ Builtin::kPluralRulesPrototypeSelectRange, 2, false);
+ }
}
#endif // V8_INTL_SUPPORT
@@ -5308,6 +5457,8 @@ bool Genesis::InstallABunchOfRandomThings() {
isolate());
DCHECK(JSObject::cast(object_function->initial_map().prototype())
.HasFastProperties());
+ native_context()->set_object_function_prototype(
+ JSObject::cast(object_function->initial_map().prototype()));
native_context()->set_object_function_prototype_map(
HeapObject::cast(object_function->initial_map().prototype()).map());
}
@@ -5639,6 +5790,8 @@ bool Genesis::InstallExtrasBindings() {
SimpleInstallFunction(isolate(), extras_binding, "trace", Builtin::kTrace, 5,
true);
+ InitializeConsole(extras_binding);
+
native_context()->set_extras_binding_object(*extras_binding);
return true;
@@ -5657,9 +5810,9 @@ void Genesis::InitializeMapCaches() {
DisallowGarbageCollection no_gc;
native_context()->set_map_cache(*cache);
Map initial = native_context()->object_function().initial_map();
- cache->Set(0, HeapObjectReference::Weak(initial), SKIP_WRITE_BARRIER);
+ cache->Set(0, HeapObjectReference::Weak(initial));
cache->Set(initial.GetInObjectProperties(),
- HeapObjectReference::Weak(initial), SKIP_WRITE_BARRIER);
+ HeapObjectReference::Weak(initial));
}
}
@@ -5728,7 +5881,7 @@ bool Genesis::InstallExtensions(Isolate* isolate,
InstallExtension(isolate, "v8/gc", &extension_states)) &&
(!FLAG_expose_externalize_string ||
InstallExtension(isolate, "v8/externalize", &extension_states)) &&
- (!TracingFlags::is_gc_stats_enabled() ||
+ (!(FLAG_expose_statistics || TracingFlags::is_gc_stats_enabled()) ||
InstallExtension(isolate, "v8/statistics", &extension_states)) &&
(!FLAG_expose_trigger_failure ||
InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
@@ -5896,8 +6049,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// If the property is already there we skip it.
if (PropertyAlreadyExists(isolate(), to, key)) continue;
FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
- Handle<Object> value =
- JSObject::FastPropertyAt(from, details.representation(), index);
+ Handle<Object> value = JSObject::FastPropertyAt(
+ isolate(), from, details.representation(), index);
JSObject::AddProperty(isolate(), to, key, value,
details.attributes());
} else {
@@ -5936,8 +6089,15 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Object> value(cell->value(), isolate());
if (value->IsTheHole(isolate())) continue;
PropertyDetails details = cell->property_details();
- if (details.kind() != PropertyKind::kData) continue;
- JSObject::AddProperty(isolate(), to, key, value, details.attributes());
+ if (details.kind() == PropertyKind::kData) {
+ JSObject::AddProperty(isolate(), to, key, value, details.attributes());
+ } else {
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
+ DCHECK(!to->HasFastProperties());
+ PropertyDetails d(PropertyKind::kAccessor, details.attributes(),
+ PropertyCellType::kMutable);
+ JSObject::SetNormalizedProperty(to, key, value, d);
+ }
}
} else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
@@ -6111,8 +6271,8 @@ Genesis::Genesis(
// If no global proxy template was passed in, simply use the global in the
// snapshot. If a global proxy template was passed in it's used to recreate
- // the global object and its protype chain, and the data properties from the
- // deserialized global are copied onto it.
+ // the global object and its prototype chain, and the data and the accessor
+ // properties from the deserialized global are copied onto it.
if (context_snapshot_index == 0 && !global_proxy_template.IsEmpty()) {
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
@@ -6159,14 +6319,6 @@ Genesis::Genesis(
}
}
- // TODO(v8:10391): The reason is that the NativeContext::microtask_queue
- // serialization is not actually supported, and therefore the field is
- // serialized as raw data instead of being serialized as ExternalReference.
- // As a result, when V8 heap sandbox is enabled, the external pointer entry
- // is not allocated for microtask queue field during deserialization, so we
- // allocate it manually here.
- native_context()->AllocateExternalPointerEntries(isolate);
-
native_context()->set_microtask_queue(
isolate, microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
: isolate->default_microtask_queue());
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 2476fc5c6a..c19f1a1136 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -14,6 +14,7 @@
V(_, calendars_string, "calendars") \
V(_, cardinal_string, "cardinal") \
V(_, caseFirst_string, "caseFirst") \
+ V(_, ceil_string, "ceil") \
V(_, compare_string, "compare") \
V(_, collation_string, "collation") \
V(_, collations_string, "collations") \
@@ -30,15 +31,15 @@
V(_, direction_string, "direction") \
V(_, endRange_string, "endRange") \
V(_, engineering_string, "engineering") \
- V(_, era_string, "era") \
- V(_, eraYear_string, "eraYear") \
V(_, exceptZero_string, "exceptZero") \
+ V(_, expand_string, "expand") \
V(_, exponentInteger_string, "exponentInteger") \
V(_, exponentMinusSign_string, "exponentMinusSign") \
V(_, exponentSeparator_string, "exponentSeparator") \
V(_, fallback_string, "fallback") \
V(_, first_string, "first") \
V(_, firstDay_string, "firstDay") \
+ V(_, floor_string, "floor") \
V(_, format_string, "format") \
V(_, fraction_string, "fraction") \
V(_, fractionalSecond_string, "fractionalSecond") \
@@ -51,6 +52,11 @@
V(_, h12_string, "h12") \
V(_, h23_string, "h23") \
V(_, h24_string, "h24") \
+ V(_, halfCeil_string, "halfCeil") \
+ V(_, halfEven_string, "halfEven") \
+ V(_, halfExpand_string, "halfExpand") \
+ V(_, halfFloor_string, "halfFloor") \
+ V(_, halfTrunc_string, "halfTrunc") \
V(_, hour12_string, "hour12") \
V(_, hourCycle_string, "hourCycle") \
V(_, hourCycles_string, "hourCycles") \
@@ -62,6 +68,7 @@
V(_, kana_string, "kana") \
V(_, language_string, "language") \
V(_, languageDisplay_string, "languageDisplay") \
+ V(_, lessPrecision_string, "lessPrecision") \
V(_, letter_string, "letter") \
V(_, list_string, "list") \
V(_, literal_string, "literal") \
@@ -71,13 +78,17 @@
V(_, ltr_string, "ltr") \
V(_, maximumFractionDigits_string, "maximumFractionDigits") \
V(_, maximumSignificantDigits_string, "maximumSignificantDigits") \
+ V(_, min2_string, "min2") \
V(_, minimalDays_string, "minimalDays") \
V(_, minimumFractionDigits_string, "minimumFractionDigits") \
V(_, minimumIntegerDigits_string, "minimumIntegerDigits") \
V(_, minimumSignificantDigits_string, "minimumSignificantDigits") \
+ V(_, minus_0, "-0") \
V(_, minusSign_string, "minusSign") \
+ V(_, morePrecision_string, "morePrecision") \
V(_, nan_string, "nan") \
V(_, narrowSymbol_string, "narrowSymbol") \
+ V(_, negative_string, "negative") \
V(_, never_string, "never") \
V(_, none_string, "none") \
V(_, notation_string, "notation") \
@@ -91,6 +102,8 @@
V(_, quarter_string, "quarter") \
V(_, region_string, "region") \
V(_, relatedYear_string, "relatedYear") \
+ V(_, roundingMode_string, "roundingMode") \
+ V(_, roundingPriority_string, "roundingPriority") \
V(_, rtl_string, "rtl") \
V(_, scientific_string, "scientific") \
V(_, segment_string, "segment") \
@@ -103,12 +116,15 @@
V(_, standard_string, "standard") \
V(_, startRange_string, "startRange") \
V(_, strict_string, "strict") \
+ V(_, stripIfInteger_string, "stripIfInteger") \
V(_, style_string, "style") \
V(_, term_string, "term") \
V(_, textInfo_string, "textInfo") \
V(_, timeStyle_string, "timeStyle") \
V(_, timeZones_string, "timeZones") \
V(_, timeZoneName_string, "timeZoneName") \
+ V(_, trailingZeroDisplay_string, "trailingZeroDisplay") \
+ V(_, trunc_string, "trunc") \
V(_, type_string, "type") \
V(_, unknown_string, "unknown") \
V(_, upper_string, "upper") \
@@ -168,6 +184,8 @@
V(_, computed_string, "<computed>") \
V(_, configurable_string, "configurable") \
V(_, conjunction_string, "conjunction") \
+ V(_, console_string, "console") \
+ V(_, constrain_string, "constrain") \
V(_, construct_string, "construct") \
V(_, constructor_string, "constructor") \
V(_, current_string, "current") \
@@ -208,6 +226,8 @@
V(_, epochMilliseconds_string, "epochMilliseconds") \
V(_, epochNanoseconds_string, "epochNanoseconds") \
V(_, epochSeconds_string, "epochSeconds") \
+ V(_, era_string, "era") \
+ V(_, eraYear_string, "eraYear") \
V(_, errors_string, "errors") \
V(_, error_to_string, "[object Error]") \
V(_, eval_string, "eval") \
@@ -229,6 +249,7 @@
V(_, get_string, "get") \
V(_, getOffsetNanosecondsFor_string, "getOffsetNanosecondsFor") \
V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(_, getPossibleInstantsFor_string, "getPossibleInstantsFor") \
V(_, getPrototypeOf_string, "getPrototypeOf") \
V(_, global_string, "global") \
V(_, globalThis_string, "globalThis") \
@@ -326,6 +347,8 @@
V(_, overflow_string, "overflow") \
V(_, ownKeys_string, "ownKeys") \
V(_, percent_string, "percent") \
+ V(_, plainDate_string, "plainDate") \
+ V(_, plainTime_string, "plainTime") \
V(_, position_string, "position") \
V(_, preventExtensions_string, "preventExtensions") \
V(_, private_constructor_string, "#constructor") \
@@ -343,11 +366,13 @@
V(_, RegExp_string, "RegExp") \
V(_, regexp_to_string, "[object RegExp]") \
V(_, reject_string, "reject") \
+ V(_, relativeTo_string, "relativeTo") \
V(_, resizable_string, "resizable") \
V(_, ResizableArrayBuffer_string, "ResizableArrayBuffer") \
V(_, resolve_string, "resolve") \
V(_, return_string, "return") \
V(_, revoke_string, "revoke") \
+ V(_, roundingIncrement_string, "roundingIncrement") \
V(_, RuntimeError_string, "RuntimeError") \
V(_, WebAssemblyException_string, "WebAssembly.Exception") \
V(_, Script_string, "Script") \
@@ -361,6 +386,7 @@
V(_, set_string, "set") \
V(_, SetIterator_string, "Set Iterator") \
V(_, setPrototypeOf_string, "setPrototypeOf") \
+ V(_, ShadowRealm_string, "ShadowRealm") \
V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
V(_, sign_string, "sign") \
V(_, smallestUnit_string, "smallestUnit") \
@@ -415,15 +441,15 @@
#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
V(_, array_buffer_wasm_memory_symbol) \
- V(_, call_site_frame_info_symbol) \
+ V(_, call_site_info_symbol) \
V(_, console_context_id_symbol) \
V(_, console_context_name_symbol) \
V(_, class_fields_symbol) \
V(_, class_positions_symbol) \
- V(_, detailed_stack_trace_symbol) \
V(_, elements_transition_symbol) \
V(_, error_end_pos_symbol) \
V(_, error_script_symbol) \
+ V(_, error_stack_symbol) \
V(_, error_start_pos_symbol) \
V(_, frozen_symbol) \
V(_, interpreter_trampoline_symbol) \
@@ -436,11 +462,11 @@
V(_, promise_debug_message_symbol) \
V(_, promise_forwarding_handler_symbol) \
V(_, promise_handled_by_symbol) \
+ V(_, promise_awaited_by_symbol) \
V(_, regexp_result_names_symbol) \
V(_, regexp_result_regexp_input_symbol) \
V(_, regexp_result_regexp_last_index_symbol) \
V(_, sealed_symbol) \
- V(_, stack_trace_symbol) \
V(_, strict_function_transition_symbol) \
V(_, wasm_exception_tag_symbol) \
V(_, wasm_exception_values_symbol) \
@@ -516,6 +542,7 @@
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_CLEAR_WEAK_REFERENCES) \
+ F(MC_SWEEP_EXTERNAL_POINTER_TABLE) \
F(MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(MC_COMPLETE_SWEEPING) \
F(MC_EVACUATE_CANDIDATES) \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 853d7f9358..e1bc777257 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -5,10 +5,9 @@
#include "src/init/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
-#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/sandbox.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
@@ -76,40 +75,29 @@ void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
base::AddressRegion existing_reservation;
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- // TODO(chromium:1218005) avoid the name collision with
- // v8::internal::VirtualMemoryCage and ideally figure out a clear naming
- // scheme for the different types of virtual memory cages.
-
- // For now, we allow the virtual memory cage to be disabled even when
- // compiling with v8_enable_virtual_memory_cage. This fallback will be
- // disallowed in the future, at the latest once ArrayBuffers are referenced
- // through an offset rather than a raw pointer.
- if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
- CHECK(kAllowBackingStoresOutsideCage);
+#ifdef V8_SANDBOX
+ // For now, we allow the sandbox to be disabled even when compiling with
+ // v8_enable_sandbox. This fallback will be disallowed in the future, at the
+ // latest once sandboxed pointers are enabled.
+ if (GetProcessWideSandbox()->is_disabled()) {
+ CHECK(kAllowBackingStoresOutsideSandbox);
} else {
- auto cage = GetProcessWideVirtualMemoryCage();
- CHECK(cage->is_initialized());
- // The pointer compression cage must be placed at the start of the virtual
- // memory cage.
+ auto sandbox = GetProcessWideSandbox();
+ CHECK(sandbox->is_initialized());
+ // The pointer compression cage must be placed at the start of the sandbox.
+
// TODO(chromium:12180) this currently assumes that no other pages were
// allocated through the cage's page allocator in the meantime. In the
// future, the cage initialization will happen just before this function
// runs, and so this will be guaranteed. Currently however, it is possible
// that the embedder accidentally uses the cage's page allocator prior to
// initializing V8, in which case this CHECK will likely fail.
- // TODO(chromium:12180) here we rely on our BoundedPageAllocators to
- // respect the hint parameter. Instead, it would probably be better to add
- // a new API that guarantees this, either directly to the PageAllocator
- // interface or to a derived one.
- void* hint = reinterpret_cast<void*>(cage->base());
- void* base = cage->page_allocator()->AllocatePages(
- hint, params.reservation_size, params.base_alignment,
- PageAllocator::kNoAccess);
- CHECK_EQ(base, hint);
- existing_reservation =
- base::AddressRegion(cage->base(), params.reservation_size);
- params.page_allocator = cage->page_allocator();
+ Address base = sandbox->address_space()->AllocatePages(
+ sandbox->base(), params.reservation_size, params.base_alignment,
+ PagePermissions::kNoAccess);
+ CHECK_EQ(sandbox->base(), base);
+ existing_reservation = base::AddressRegion(base, params.reservation_size);
+ params.page_allocator = sandbox->page_allocator();
}
#endif
if (!GetProcessWidePtrComprCage()->InitReservation(params,
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 5172d5da9a..edac725e8e 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -17,14 +17,13 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/init/bootstrapper.h"
#include "src/libsampler/sampler.h"
#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/sandbox.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
@@ -39,30 +38,80 @@
namespace v8 {
namespace internal {
-V8_DECLARE_ONCE(init_once);
+v8::Platform* V8::platform_ = nullptr;
+
+namespace {
+enum class V8StartupState {
+ kIdle,
+ kPlatformInitializing,
+ kPlatformInitialized,
+ kV8Initializing,
+ kV8Initialized,
+ kV8Disposing,
+ kV8Disposed,
+ kPlatformDisposing,
+ kPlatformDisposed
+};
+
+std::atomic<V8StartupState> v8_startup_state_(V8StartupState::kIdle);
+
+void AdvanceStartupState(V8StartupState expected_next_state) {
+ V8StartupState current_state = v8_startup_state_;
+ CHECK_NE(current_state, V8StartupState::kPlatformDisposed);
+ V8StartupState next_state =
+ static_cast<V8StartupState>(static_cast<int>(current_state) + 1);
+ if (next_state != expected_next_state) {
+ // Ensure the following order:
+ // v8::V8::InitializePlatform(platform);
+ // v8::V8::Initialize();
+ // v8::Isolate* isolate = v8::Isolate::New(...);
+ // ...
+ // isolate->Dispose();
+ // v8::V8::Dispose();
+ // v8::V8::DisposePlatform();
+ FATAL("Wrong intialization order: got %d expected %d!",
+ static_cast<int>(current_state), static_cast<int>(next_state));
+ }
+ if (!v8_startup_state_.compare_exchange_strong(current_state, next_state)) {
+ FATAL(
+ "Multiple threads are initializating V8 in the wrong order: expected "
+ "%d got %d!",
+ static_cast<int>(current_state),
+ static_cast<int>(v8_startup_state_.load()));
+ }
+}
+
+} // namespace
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-V8_DECLARE_ONCE(init_natives_once);
V8_DECLARE_ONCE(init_snapshot_once);
#endif
-v8::Platform* V8::platform_ = nullptr;
-
-void V8::Initialize() { base::CallOnce(&init_once, &InitializeOncePerProcess); }
-
-void V8::Dispose() {
-#if V8_ENABLE_WEBASSEMBLY
- wasm::WasmEngine::GlobalTearDown();
-#endif // V8_ENABLE_WEBASSEMBLY
-#if defined(USE_SIMULATOR)
- Simulator::GlobalTearDown();
+void V8::InitializePlatform(v8::Platform* platform) {
+ AdvanceStartupState(V8StartupState::kPlatformInitializing);
+ CHECK(!platform_);
+ CHECK_NOT_NULL(platform);
+ platform_ = platform;
+ v8::base::SetPrintStackTrace(platform_->GetStackTracePrinter());
+ v8::tracing::TracingCategoryObserver::SetUp();
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ if (FLAG_enable_system_instrumentation) {
+ // TODO(sartang@microsoft.com): Move to platform specific diagnostics object
+ v8::internal::ETWJITInterface::Register();
+ }
#endif
- CallDescriptors::TearDown();
- ElementsAccessor::TearDown();
- RegisteredExtension::UnregisterAll();
- FlagList::ResetAllFlags(); // Frees memory held by string arguments.
+ AdvanceStartupState(V8StartupState::kPlatformInitialized);
}
+#ifdef V8_SANDBOX
+bool V8::InitializeSandbox() {
+ // Platform must have been initialized already.
+ CHECK(platform_);
+ v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
+ return GetProcessWideSandbox()->Initialize(vas);
+}
+#endif // V8_SANDBOX
+
#define DISABLE_FLAG(flag) \
if (FLAG_##flag) { \
PrintF(stderr, \
@@ -70,15 +119,16 @@ void V8::Dispose() {
FLAG_##flag = false; \
}
-void V8::InitializeOncePerProcess() {
+void V8::Initialize() {
+ AdvanceStartupState(V8StartupState::kV8Initializing);
CHECK(platform_);
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
+#ifdef V8_SANDBOX
+ if (!GetProcessWideSandbox()->is_initialized()) {
// For now, we still allow the cage to be disabled even if V8 was compiled
- // with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
- CHECK(kAllowBackingStoresOutsideCage);
- GetProcessWideVirtualMemoryCage()->Disable();
+ // with V8_SANDBOX. This will eventually be forbidden.
+ CHECK(kAllowBackingStoresOutsideSandbox);
+ GetProcessWideSandbox()->Disable();
}
#endif
@@ -201,32 +251,31 @@ void V8::InitializeOncePerProcess() {
#endif // V8_ENABLE_WEBASSEMBLY
ExternalReferenceTable::InitializeOncePerProcess();
-}
-void V8::InitializePlatform(v8::Platform* platform) {
- CHECK(!platform_);
- CHECK(platform);
- platform_ = platform;
- v8::base::SetPrintStackTrace(platform_->GetStackTracePrinter());
- v8::tracing::TracingCategoryObserver::SetUp();
-#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
- if (FLAG_enable_system_instrumentation) {
- // TODO(sartang@microsoft.com): Move to platform specific diagnostics object
- v8::internal::ETWJITInterface::Register();
- }
-#endif
+ AdvanceStartupState(V8StartupState::kV8Initialized);
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-bool V8::InitializeVirtualMemoryCage() {
- // Platform must have been initialized already.
+#undef DISABLE_FLAG
+
+void V8::Dispose() {
+ AdvanceStartupState(V8StartupState::kV8Disposing);
CHECK(platform_);
- v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
- return GetProcessWideVirtualMemoryCage()->Initialize(vas);
-}
+#if V8_ENABLE_WEBASSEMBLY
+ wasm::WasmEngine::GlobalTearDown();
+#endif // V8_ENABLE_WEBASSEMBLY
+#if defined(USE_SIMULATOR)
+ Simulator::GlobalTearDown();
#endif
+ CallDescriptors::TearDown();
+ ElementsAccessor::TearDown();
+ RegisteredExtension::UnregisterAll();
+ Isolate::DisposeOncePerProcess();
+ FlagList::ResetAllFlags(); // Frees memory held by string arguments.
+ AdvanceStartupState(V8StartupState::kV8Disposed);
+}
void V8::DisposePlatform() {
+ AdvanceStartupState(V8StartupState::kPlatformDisposing);
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
if (FLAG_enable_system_instrumentation) {
@@ -236,13 +285,14 @@ void V8::DisposePlatform() {
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_SANDBOX
// TODO(chromium:1218005) alternatively, this could move to its own
- // public TearDownVirtualMemoryCage function.
- GetProcessWideVirtualMemoryCage()->TearDown();
+ // public TearDownSandbox function.
+ GetProcessWideSandbox()->TearDown();
#endif
platform_ = nullptr;
+ AdvanceStartupState(V8StartupState::kPlatformDisposed);
}
v8::Platform* V8::GetCurrentPlatform() {
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index edd5be247d..3f8fe14b45 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -19,7 +19,6 @@ class Isolate;
class V8 : public AllStatic {
public:
// Global actions.
-
static void Initialize();
static void Dispose();
@@ -29,8 +28,8 @@ class V8 : public AllStatic {
const char* location,
bool is_heap_oom = false);
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- static bool InitializeVirtualMemoryCage();
+#ifdef V8_SANDBOX
+ static bool InitializeSandbox();
#endif
static void InitializePlatform(v8::Platform* platform);
@@ -43,10 +42,6 @@ class V8 : public AllStatic {
static void SetSnapshotBlob(StartupData* snapshot_blob);
private:
- static void InitializeOncePerProcessImpl();
- static void InitializeOncePerProcess();
-
- // v8::Platform to use.
static v8::Platform* platform_;
};
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 495354da45..733fab3dff 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -47,6 +47,8 @@ inspector_protocol_generate("protocol_generated_sources") {
inspector_protocol_dir = _inspector_protocol
out_dir = target_gen_dir
+ _protocol_path = rebase_path(v8_inspector_js_protocol, root_build_dir)
+ config_values = [ "protocol.path=$_protocol_path" ]
config_file = v8_path_prefix + "/src/inspector/inspector_protocol_config.json"
inputs = [
v8_inspector_js_protocol,
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 9cd481e96b..8349a4995a 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -271,7 +271,7 @@ class InjectedScript::ProtocolPromiseHandler {
result->ToDetailString(isolate->GetCurrentContext())
.ToLocalChecked());
v8::Local<v8::StackTrace> stackTrace =
- v8::debug::GetDetailedStackTrace(isolate, result.As<v8::Object>());
+ v8::Exception::GetStackTrace(result);
if (!stackTrace.IsEmpty()) {
stack = m_inspector->debugger()->createStackTrace(stackTrace);
}
@@ -440,6 +440,7 @@ Response InjectedScript::getProperties(
Response InjectedScript::getInternalAndPrivateProperties(
v8::Local<v8::Value> value, const String16& groupName,
+ bool accessorPropertiesOnly,
std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>*
internalProperties,
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>*
@@ -453,27 +454,31 @@ Response InjectedScript::getInternalAndPrivateProperties(
v8::Local<v8::Context> context = m_context->context();
int sessionId = m_sessionId;
- std::vector<InternalPropertyMirror> internalPropertiesWrappers;
- ValueMirror::getInternalProperties(m_context->context(), value_obj,
- &internalPropertiesWrappers);
- for (const auto& internalProperty : internalPropertiesWrappers) {
- std::unique_ptr<RemoteObject> remoteObject;
- Response response = internalProperty.value->buildRemoteObject(
- m_context->context(), WrapMode::kNoPreview, &remoteObject);
- if (!response.IsSuccess()) return response;
- response = bindRemoteObjectIfNeeded(sessionId, context,
- internalProperty.value->v8Value(),
- groupName, remoteObject.get());
- if (!response.IsSuccess()) return response;
- (*internalProperties)
- ->emplace_back(InternalPropertyDescriptor::create()
- .setName(internalProperty.name)
- .setValue(std::move(remoteObject))
- .build());
+
+ if (!accessorPropertiesOnly) {
+ std::vector<InternalPropertyMirror> internalPropertiesWrappers;
+ ValueMirror::getInternalProperties(m_context->context(), value_obj,
+ &internalPropertiesWrappers);
+ for (const auto& internalProperty : internalPropertiesWrappers) {
+ std::unique_ptr<RemoteObject> remoteObject;
+ Response response = internalProperty.value->buildRemoteObject(
+ m_context->context(), WrapMode::kNoPreview, &remoteObject);
+ if (!response.IsSuccess()) return response;
+ response = bindRemoteObjectIfNeeded(sessionId, context,
+ internalProperty.value->v8Value(),
+ groupName, remoteObject.get());
+ if (!response.IsSuccess()) return response;
+ (*internalProperties)
+ ->emplace_back(InternalPropertyDescriptor::create()
+ .setName(internalProperty.name)
+ .setValue(std::move(remoteObject))
+ .build());
+ }
}
std::vector<PrivatePropertyMirror> privatePropertyWrappers =
- ValueMirror::getPrivateProperties(context, value_obj);
+ ValueMirror::getPrivateProperties(context, value_obj,
+ accessorPropertiesOnly);
for (const auto& privateProperty : privatePropertyWrappers) {
std::unique_ptr<PrivatePropertyDescriptor> descriptor =
PrivatePropertyDescriptor::create()
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 86bcf60b17..502576326f 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -85,6 +85,7 @@ class InjectedScript final {
Response getInternalAndPrivateProperties(
v8::Local<v8::Value>, const String16& groupName,
+ bool accessorPropertiesOnly,
std::unique_ptr<
protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
internalProperties,
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 8db0a4f5ba..5163950a1f 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -56,7 +56,7 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
m_origin(toString16(info.origin)),
m_humanReadableName(toString16(info.humanReadableName)),
m_auxData(toString16(info.auxData)),
- m_uniqueId(V8DebuggerId::generate(inspector)) {
+ m_uniqueId(internal::V8DebuggerId::generate(inspector)) {
v8::debug::SetContextId(info.context, contextId);
m_weakCallbackData =
new WeakCallbackData(this, m_inspector, m_contextGroupId, m_contextId);
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index 50e5a87bb3..987d3a7642 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -43,7 +43,7 @@ class InspectedContext {
int contextGroupId() const { return m_contextGroupId; }
String16 origin() const { return m_origin; }
String16 humanReadableName() const { return m_humanReadableName; }
- V8DebuggerId uniqueId() const { return m_uniqueId; }
+ internal::V8DebuggerId uniqueId() const { return m_uniqueId; }
String16 auxData() const { return m_auxData; }
bool isReported(int sessionId) const;
@@ -73,7 +73,7 @@ class InspectedContext {
const String16 m_origin;
const String16 m_humanReadableName;
const String16 m_auxData;
- const V8DebuggerId m_uniqueId;
+ const internal::V8DebuggerId m_uniqueId;
std::unordered_set<int> m_reportedSessionIds;
std::unordered_map<int, std::unique_ptr<InjectedScript>> m_injectedScripts;
WeakCallbackData* m_weakCallbackData;
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index bd4f9c534c..ba96b34574 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -1,6 +1,5 @@
{
"protocol": {
- "path": "../../include/js_protocol.pdl",
"package": "src/inspector/protocol",
"output": "protocol",
"namespace": ["v8_inspector", "protocol"],
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 00d9987206..ee6ad9edfd 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -238,13 +238,3 @@ std::string String16::utf8() const {
}
} // namespace v8_inspector
-
-namespace v8_crdtp {
-void SerializerTraits<v8_inspector::String16>::Serialize(
- const v8_inspector::String16& str, std::vector<uint8_t>* out) {
- cbor::EncodeFromUTF16(
- span<uint16_t>(reinterpret_cast<const uint16_t*>(str.characters16()),
- str.length()),
- out);
-}
-} // namespace v8_crdtp
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index b38917185e..7dfc5e34a8 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -6,13 +6,13 @@
#define V8_INSPECTOR_STRING_16_H_
#include <stdint.h>
+
#include <cctype>
#include <climits>
#include <cstring>
#include <string>
#include <vector>
-#include "../../third_party/inspector_protocol/crdtp/serializer_traits.h"
#include "src/base/compiler-specific.h"
namespace v8_inspector {
@@ -168,13 +168,4 @@ struct hash<v8_inspector::String16> {
#endif // !defined(__APPLE__) || defined(_LIBCPP_VERSION)
-// See third_party/inspector_protocol/crdtp/serializer_traits.h.
-namespace v8_crdtp {
-template <>
-struct SerializerTraits<v8_inspector::String16> {
- static void Serialize(const v8_inspector::String16& str,
- std::vector<uint8_t>* out);
-};
-} // namespace v8_crdtp
-
#endif // V8_INSPECTOR_STRING_16_H_
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 38ced64521..0ddadab4db 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -305,8 +305,4 @@ void ProtocolTypeTraits<Binary>::Serialize(const Binary& value,
cbor::EncodeBinary(span<uint8_t>(value.data(), value.size()), bytes);
}
-void SerializerTraits<Binary>::Serialize(
- const v8_inspector::protocol::Binary& binary, std::vector<uint8_t>* out) {
- cbor::EncodeBinary(span<uint8_t>(binary.data(), binary.size()), out);
-}
} // namespace v8_crdtp
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 7791d9e481..7f427d9b7f 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -108,12 +108,6 @@ struct ProtocolTypeTraits<v8_inspector::protocol::Binary> {
std::vector<uint8_t>* bytes);
};
-template <>
-struct SerializerTraits<v8_inspector::protocol::Binary> {
- static void Serialize(const v8_inspector::protocol::Binary& binary,
- std::vector<uint8_t>* out);
-};
-
namespace detail {
template <>
struct MaybeTypedef<v8_inspector::String16> {
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
index 3c353a73a2..80a2e1acdb 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -30,14 +30,12 @@ Response V8ConsoleAgentImpl::enable() {
if (m_enabled) return Response::Success();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
m_enabled = true;
- m_session->inspector()->enableStackCapturingIfNeeded();
reportAllMessages();
return Response::Success();
}
Response V8ConsoleAgentImpl::disable() {
if (!m_enabled) return Response::Success();
- m_session->inspector()->disableStackCapturingIfNeeded();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
m_enabled = false;
return Response::Success();
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 2734c67876..719585a9c2 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -244,9 +244,9 @@ void V8ConsoleMessage::reportToFrontend(
.setLevel(level)
.setText(m_message)
.build();
- result->setLine(static_cast<int>(m_lineNumber));
- result->setColumn(static_cast<int>(m_columnNumber));
- result->setUrl(m_url);
+ if (m_lineNumber) result->setLine(m_lineNumber);
+ if (m_columnNumber) result->setColumn(m_columnNumber);
+ if (!m_url.isEmpty()) result->setUrl(m_url);
frontend->messageAdded(std::move(result));
}
@@ -396,23 +396,11 @@ V8ConsoleMessage::getAssociatedExceptionData(
v8::Isolate* isolate = inspector->isolate();
v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context;
- if (!inspector->exceptionMetaDataContext().ToLocal(&context)) return nullptr;
v8::MaybeLocal<v8::Value> maybe_exception = m_arguments[0]->Get(isolate);
v8::Local<v8::Value> exception;
if (!maybe_exception.ToLocal(&exception)) return nullptr;
- v8::MaybeLocal<v8::Object> maybe_data =
- inspector->getAssociatedExceptionData(exception);
- v8::Local<v8::Object> data;
- if (!maybe_data.ToLocal(&data)) return nullptr;
- v8::TryCatch tryCatch(isolate);
- v8::MicrotasksScope microtasksScope(isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
- v8::Context::Scope contextScope(context);
- std::unique_ptr<protocol::DictionaryValue> jsonObject;
- objectToProtocolValue(context, data, 2, &jsonObject);
- return jsonObject;
+ return inspector->getAssociatedExceptionDataForProtocol(exception);
}
std::unique_ptr<protocol::Runtime::RemoteObject>
@@ -479,8 +467,10 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
clientLevel = v8::Isolate::kMessageError;
} else if (type == ConsoleAPIType::kWarning) {
clientLevel = v8::Isolate::kMessageWarning;
- } else if (type == ConsoleAPIType::kInfo || type == ConsoleAPIType::kLog) {
+ } else if (type == ConsoleAPIType::kInfo) {
clientLevel = v8::Isolate::kMessageInfo;
+ } else if (type == ConsoleAPIType::kLog) {
+ clientLevel = v8::Isolate::kMessageLog;
}
if (type != ConsoleAPIType::kClear) {
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 3340f46399..a2a13fea14 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -21,6 +21,7 @@
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
+#include "src/tracing/trace-event.h"
namespace v8_inspector {
@@ -188,54 +189,63 @@ V8Console::V8Console(V8InspectorImpl* inspector) : m_inspector(inspector) {}
void V8Console::Debug(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Debug");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kDebug);
}
void V8Console::Error(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Error");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kError);
}
void V8Console::Info(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Info");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kInfo);
}
void V8Console::Log(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Log");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kLog);
}
void V8Console::Warn(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Warn");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kWarning);
}
void V8Console::Dir(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Dir");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kDir);
}
void V8Console::DirXml(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::DirXml");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kDirXML);
}
void V8Console::Table(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Table");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCall(ConsoleAPIType::kTable);
}
void V8Console::Trace(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Trace");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kTrace,
String16("console.trace"));
@@ -243,6 +253,7 @@ void V8Console::Trace(const v8::debug::ConsoleCallArguments& info,
void V8Console::Group(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Group");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroup,
String16("console.group"));
@@ -251,6 +262,8 @@ void V8Console::Group(const v8::debug::ConsoleCallArguments& info,
void V8Console::GroupCollapsed(
const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::GroupCollapsed");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroupCollapsed,
String16("console.groupCollapsed"));
@@ -258,6 +271,8 @@ void V8Console::GroupCollapsed(
void V8Console::GroupEnd(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::GroupEnd");
ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kEndGroup,
String16("console.groupEnd"));
@@ -265,6 +280,7 @@ void V8Console::GroupEnd(const v8::debug::ConsoleCallArguments& info,
void V8Console::Clear(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Clear");
ConsoleHelper helper(info, consoleContext, m_inspector);
if (!helper.groupId()) return;
m_inspector->client()->consoleClear(helper.groupId());
@@ -295,6 +311,8 @@ static String16 identifierFromTitleOrStackTrace(
void V8Console::Count(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::Count");
ConsoleHelper helper(info, consoleContext, m_inspector);
String16 title = helper.firstArgToString(String16("default"), false);
String16 identifier = identifierFromTitleOrStackTrace(
@@ -306,10 +324,15 @@ void V8Console::Count(const v8::debug::ConsoleCallArguments& info,
helper.reportCallWithArgument(
ConsoleAPIType::kCount,
title.isEmpty() ? countString : (title + ": " + countString));
+ TRACE_EVENT_END2(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::Count", "title",
+ TRACE_STR_COPY(title.utf8().c_str()), "count", count);
}
void V8Console::CountReset(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::CountReset");
ConsoleHelper helper(info, consoleContext, m_inspector);
String16 title = helper.firstArgToString(String16("default"), false);
String16 identifier = identifierFromTitleOrStackTrace(
@@ -320,10 +343,14 @@ void V8Console::CountReset(const v8::debug::ConsoleCallArguments& info,
helper.reportCallWithArgument(ConsoleAPIType::kWarning,
"Count for '" + title + "' does not exist");
}
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::CountReset", "title",
+ TRACE_STR_COPY(title.utf8().c_str()));
}
void V8Console::Assert(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Assert");
ConsoleHelper helper(info, consoleContext, m_inspector);
DCHECK(!helper.firstArgToBoolean(false));
@@ -338,20 +365,30 @@ void V8Console::Assert(const v8::debug::ConsoleCallArguments& info,
void V8Console::Profile(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::Profile");
ConsoleHelper helper(info, consoleContext, m_inspector);
- helper.forEachSession([&helper](V8InspectorSessionImpl* session) {
- session->profilerAgent()->consoleProfile(
- helper.firstArgToString(String16()));
+ String16 title = helper.firstArgToString(String16());
+ helper.forEachSession([&title](V8InspectorSessionImpl* session) {
+ session->profilerAgent()->consoleProfile(title);
});
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::Profile", "title",
+ TRACE_STR_COPY(title.utf8().c_str()));
}
void V8Console::ProfileEnd(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::ProfileEnd");
ConsoleHelper helper(info, consoleContext, m_inspector);
- helper.forEachSession([&helper](V8InspectorSessionImpl* session) {
- session->profilerAgent()->consoleProfileEnd(
- helper.firstArgToString(String16()));
+ String16 title = helper.firstArgToString(String16());
+ helper.forEachSession([&title](V8InspectorSessionImpl* session) {
+ session->profilerAgent()->consoleProfileEnd(title);
});
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::ProfileEnd", "title",
+ TRACE_STR_COPY(title.utf8().c_str()));
}
static void timeFunction(const v8::debug::ConsoleCallArguments& info,
@@ -408,21 +445,26 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
void V8Console::Time(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::Time");
timeFunction(info, consoleContext, false, m_inspector);
}
void V8Console::TimeLog(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::TimeLog");
timeEndFunction(info, consoleContext, true, m_inspector);
}
void V8Console::TimeEnd(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"), "V8Console::TimeEnd");
timeEndFunction(info, consoleContext, false, m_inspector);
}
void V8Console::TimeStamp(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8Console::TimeStamp");
ConsoleHelper helper(info, consoleContext, m_inspector);
String16 title = helper.firstArgToString(String16());
m_inspector->client()->consoleTimeStamp(toStringView(title));
@@ -471,7 +513,11 @@ v8::Maybe<int64_t> V8Console::ValidateAndGetTaskId(
void V8Console::scheduleAsyncTask(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() != 1) {
+ if (info.Length() != 1 && info.Length() != 2) {
+ info.GetIsolate()->ThrowError("Unexpected arguments");
+ return;
+ }
+ if (info.Length() == 2 && !info[1]->IsBoolean()) {
info.GetIsolate()->ThrowError("Unexpected arguments");
return;
}
@@ -479,6 +525,8 @@ void V8Console::scheduleAsyncTask(
v8::debug::ConsoleCallArguments args(info);
ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
String16 argName = helper.firstArgToString(String16());
+ bool recurring =
+ info.Length() == 2 ? info[1].As<v8::Boolean>()->Value() : false;
int64_t id = m_taskIdCounter++;
auto it = m_asyncTaskIds.find(id);
@@ -487,11 +535,13 @@ void V8Console::scheduleAsyncTask(
return;
}
- int* taskPtr = new int();
- m_asyncTaskIds.emplace(id, taskPtr);
+ AsyncTaskInfo taskInfo;
+ taskInfo.ptr = new int();
+ taskInfo.recurring = recurring;
+ m_asyncTaskIds.emplace(id, taskInfo);
StringView taskName = StringView(argName.characters16(), argName.length());
- m_inspector->asyncTaskScheduled(taskName, taskPtr, false);
+ m_inspector->asyncTaskScheduled(taskName, taskInfo.ptr, recurring);
info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), id));
}
@@ -502,8 +552,8 @@ void V8Console::startAsyncTask(
if (maybeArgId.IsNothing()) return;
int64_t taskId = maybeArgId.FromJust();
- int* taskPtr = m_asyncTaskIds[taskId];
- m_inspector->asyncTaskStarted(taskPtr);
+ AsyncTaskInfo taskInfo = m_asyncTaskIds[taskId];
+ m_inspector->asyncTaskStarted(taskInfo.ptr);
}
void V8Console::finishAsyncTask(
@@ -512,10 +562,14 @@ void V8Console::finishAsyncTask(
if (maybeArgId.IsNothing()) return;
int64_t taskId = maybeArgId.FromJust();
- int* taskPtr = m_asyncTaskIds[taskId];
- m_inspector->asyncTaskFinished(taskPtr);
+ AsyncTaskInfo taskInfo = m_asyncTaskIds[taskId];
+ m_inspector->asyncTaskFinished(taskInfo.ptr);
+
+ if (taskInfo.recurring) {
+ return;
+ }
- delete taskPtr;
+ delete taskInfo.ptr;
m_asyncTaskIds.erase(taskId);
}
@@ -525,10 +579,10 @@ void V8Console::cancelAsyncTask(
if (maybeArgId.IsNothing()) return;
int64_t taskId = maybeArgId.FromJust();
- int* taskPtr = m_asyncTaskIds[taskId];
- m_inspector->asyncTaskCanceled(taskPtr);
+ AsyncTaskInfo taskInfo = m_asyncTaskIds[taskId];
+ m_inspector->asyncTaskCanceled(taskInfo.ptr);
- delete taskPtr;
+ delete taskInfo.ptr;
m_asyncTaskIds.erase(taskId);
}
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index adb746f294..c1ed2455e3 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -56,6 +56,11 @@ class V8Console : public v8::debug::ConsoleDelegate {
v8::Local<v8::ArrayBuffer> m_thisReference;
};
+ struct AsyncTaskInfo {
+ int* ptr;
+ bool recurring;
+ };
+
explicit V8Console(V8InspectorImpl* inspector);
private:
@@ -185,11 +190,11 @@ class V8Console : public v8::debug::ConsoleDelegate {
// A map of unique pointers used for the scheduling and joining async stacks.
// The async stack traces instrumentation is exposed on the console object,
- // behind a --experimental-async-stack-tagging-api flag. For now, it serves as
- // a prototype that aims to validate whether the debugging experience can be
- // improved for userland code that uses custom schedulers.
+ // behind a --experimental-async-stack-tagging-api flag. For now, it serves
+ // as a prototype that aims to validate whether the debugging experience can
+ // be improved for userland code that uses custom schedulers.
int64_t m_taskIdCounter = 0;
- std::map<int64_t, int*> m_asyncTaskIds;
+ std::map<int64_t, AsyncTaskInfo> m_asyncTaskIds;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 4f209360f9..08135e6810 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -435,12 +435,11 @@ Response V8DebuggerAgentImpl::disable() {
resetBlackboxedStateCache();
m_skipList.clear();
m_scripts.clear();
- m_cachedScriptIds.clear();
+ m_cachedScripts.clear();
m_cachedScriptSize = 0;
for (const auto& it : m_debuggerBreakpointIdToBreakpointId) {
v8::debug::RemoveBreakpoint(m_isolate, it.first);
}
- m_breakpointsOnScriptRun.clear();
m_breakpointIdToDebuggerBreakpointIds.clear();
m_debuggerBreakpointIdToBreakpointId.clear();
m_debugger->setAsyncCallStackDepth(this, 0);
@@ -718,9 +717,15 @@ Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
// not Wasm breakpoint.
std::vector<V8DebuggerScript*> scripts;
for (const auto& scriptIter : m_scripts) {
- if (!matches(m_inspector, *scriptIter.second, type, selector)) continue;
+ const bool scriptSelectorMatch =
+ matches(m_inspector, *scriptIter.second, type, selector);
+ const bool isInstrumentation =
+ type == BreakpointType::kInstrumentationBreakpoint;
+ if (!scriptSelectorMatch && !isInstrumentation) continue;
V8DebuggerScript* script = scriptIter.second.get();
- scripts.push_back(script);
+ if (script->getLanguage() == V8DebuggerScript::Language::WebAssembly) {
+ scripts.push_back(script);
+ }
}
removeBreakpointImpl(breakpointId, scripts);
@@ -746,7 +751,6 @@ void V8DebuggerAgentImpl::removeBreakpointImpl(
#endif // V8_ENABLE_WEBASSEMBLY
v8::debug::RemoveBreakpoint(m_isolate, id);
m_debuggerBreakpointIdToBreakpointId.erase(id);
- m_breakpointsOnScriptRun.erase(id);
}
m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
}
@@ -845,9 +849,10 @@ Response V8DebuggerAgentImpl::getStackTrace(
int64_t id = inStackTraceId->getId().toInteger64(&isOk);
if (!isOk) return Response::ServerError("Invalid stack trace id");
- V8DebuggerId debuggerId;
+ internal::V8DebuggerId debuggerId;
if (inStackTraceId->hasDebuggerId()) {
- debuggerId = V8DebuggerId(inStackTraceId->getDebuggerId(String16()));
+ debuggerId =
+ internal::V8DebuggerId(inStackTraceId->getDebuggerId(String16()));
} else {
debuggerId = m_debugger->debuggerIdFor(m_session->contextGroupId());
}
@@ -1067,8 +1072,20 @@ Response V8DebuggerAgentImpl::getScriptSource(
Maybe<protocol::Binary>* bytecode) {
if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
- if (it == m_scripts.end())
+ if (it == m_scripts.end()) {
+ auto cachedScriptIt =
+ std::find_if(m_cachedScripts.begin(), m_cachedScripts.end(),
+ [&scriptId](const CachedScript& cachedScript) {
+ return cachedScript.scriptId == scriptId;
+ });
+ if (cachedScriptIt != m_cachedScripts.end()) {
+ *scriptSource = cachedScriptIt->source;
+ *bytecode = protocol::Binary::fromSpan(cachedScriptIt->bytecode.data(),
+ cachedScriptIt->bytecode.size());
+ return Response::Success();
+ }
return Response::ServerError("No script for id: " + scriptId.utf8());
+ }
*scriptSource = it->second->source(0);
#if V8_ENABLE_WEBASSEMBLY
v8::MemorySpan<const uint8_t> span;
@@ -1465,20 +1482,13 @@ Response V8DebuggerAgentImpl::currentCallFrames(
.setLineNumber(loc.GetLineNumber())
.setColumnNumber(loc.GetColumnNumber())
.build();
- String16 scriptId = String16::fromInteger(script->Id());
- ScriptsMap::iterator scriptIterator =
- m_scripts.find(location->getScriptId());
- String16 url;
- if (scriptIterator != m_scripts.end()) {
- url = scriptIterator->second->sourceURL();
- }
auto frame = CallFrame::create()
.setCallFrameId(callFrameId)
.setFunctionName(toProtocolString(
m_isolate, iterator->GetFunctionDebugName()))
.setLocation(std::move(location))
- .setUrl(url)
+ .setUrl(String16())
.setScopeChain(std::move(scopes))
.setThis(std::move(protocolReceiver))
.build();
@@ -1521,7 +1531,8 @@ V8DebuggerAgentImpl::currentExternalStackTrace() {
if (externalParent.IsInvalid()) return nullptr;
return protocol::Runtime::StackTraceId::create()
.setId(stackTraceIdToString(externalParent.id))
- .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString())
+ .setDebuggerId(
+ internal::V8DebuggerId(externalParent.debugger_id).toString())
.build();
}
@@ -1577,7 +1588,6 @@ void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
if (!success) {
- DCHECK(!script->isSourceLoadedLazily());
String16 scriptSource = script->source(0);
script->setSourceURL(findSourceURL(scriptSource, false));
script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
@@ -1652,23 +1662,14 @@ void V8DebuggerAgentImpl::didParseSource(
return;
}
- if (scriptRef->isSourceLoadedLazily()) {
- m_frontend.scriptParsed(
- scriptId, scriptURL, 0, 0, 0, 0, contextId, scriptRef->hash(),
- std::move(executionContextAuxDataParam), isLiveEditParam,
- std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam, 0,
- std::move(stackTrace), std::move(codeOffset), std::move(scriptLanguage),
- std::move(debugSymbols), embedderName);
- } else {
- m_frontend.scriptParsed(
- scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
- scriptRef->endLine(), scriptRef->endColumn(), contextId,
- scriptRef->hash(), std::move(executionContextAuxDataParam),
- isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
- isModuleParam, scriptRef->length(), std::move(stackTrace),
- std::move(codeOffset), std::move(scriptLanguage),
- std::move(debugSymbols), embedderName);
- }
+ m_frontend.scriptParsed(
+ scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+ scriptRef->endLine(), scriptRef->endColumn(), contextId,
+ scriptRef->hash(), std::move(executionContextAuxDataParam),
+ isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
+ isModuleParam, scriptRef->length(), std::move(stackTrace),
+ std::move(codeOffset), std::move(scriptLanguage), std::move(debugSymbols),
+ embedderName);
std::vector<protocol::DictionaryValue*> potentialBreakpoints;
if (!scriptURL.isEmpty()) {
@@ -1740,19 +1741,48 @@ void V8DebuggerAgentImpl::setScriptInstrumentationBreakpointIfNeeded(
if (!breakpoints->get(breakpointId)) return;
}
v8::debug::BreakpointId debuggerBreakpointId;
- if (!scriptRef->setBreakpointOnRun(&debuggerBreakpointId)) return;
- std::unique_ptr<protocol::DictionaryValue> data =
- protocol::DictionaryValue::create();
- data->setString("url", scriptRef->sourceURL());
- data->setString("scriptId", scriptRef->scriptId());
- if (!sourceMapURL.isEmpty()) data->setString("sourceMapURL", sourceMapURL);
+ if (!scriptRef->setInstrumentationBreakpoint(&debuggerBreakpointId)) return;
- m_breakpointsOnScriptRun[debuggerBreakpointId] = std::move(data);
m_debuggerBreakpointIdToBreakpointId[debuggerBreakpointId] = breakpointId;
m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
debuggerBreakpointId);
}
+void V8DebuggerAgentImpl::didPauseOnInstrumentation(
+ v8::debug::BreakpointId instrumentationId) {
+ String16 breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
+ std::unique_ptr<protocol::DictionaryValue> breakAuxData;
+
+ std::unique_ptr<Array<CallFrame>> protocolCallFrames;
+ Response response = currentCallFrames(&protocolCallFrames);
+ if (!response.IsSuccess())
+ protocolCallFrames = std::make_unique<Array<CallFrame>>();
+
+ if (m_debuggerBreakpointIdToBreakpointId.find(instrumentationId) !=
+ m_debuggerBreakpointIdToBreakpointId.end()) {
+ DCHECK_GT(protocolCallFrames->size(), 0);
+ if (protocolCallFrames->size() > 0) {
+ breakReason = protocol::Debugger::Paused::ReasonEnum::Instrumentation;
+ const String16 scriptId =
+ protocolCallFrames->at(0)->getLocation()->getScriptId();
+ DCHECK_NE(m_scripts.find(scriptId), m_scripts.end());
+ const auto& script = m_scripts[scriptId];
+
+ breakAuxData = protocol::DictionaryValue::create();
+ breakAuxData->setString("scriptId", script->scriptId());
+ breakAuxData->setString("url", script->sourceURL());
+ if (!script->sourceMappingURL().isEmpty()) {
+ breakAuxData->setString("sourceMapURL", (script->sourceMappingURL()));
+ }
+ }
+ }
+
+ m_frontend.paused(std::move(protocolCallFrames), breakReason,
+ std::move(breakAuxData),
+ std::make_unique<Array<String16>>(),
+ currentAsyncStackTrace(), currentExternalStackTrace());
+}
+
void V8DebuggerAgentImpl::didPause(
int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
@@ -1793,25 +1823,8 @@ void V8DebuggerAgentImpl::didPause(
}
auto hitBreakpointIds = std::make_unique<Array<String16>>();
- bool hitInstrumentationBreakpoint = false;
bool hitRegularBreakpoint = false;
for (const auto& id : hitBreakpoints) {
- auto it = m_breakpointsOnScriptRun.find(id);
- if (it != m_breakpointsOnScriptRun.end()) {
- if (!hitInstrumentationBreakpoint) {
- // We may hit several instrumentation breakpoints: 1. they are
- // kept around, and 2. each session may set their own.
- // Only report one.
- // TODO(kimanh): This will not be needed anymore if we
- // make sure that we can only hit an instrumentation
- // breakpoint once. This workaround is currently for wasm.
- hitInstrumentationBreakpoint = true;
- hitReasons.push_back(std::make_pair(
- protocol::Debugger::Paused::ReasonEnum::Instrumentation,
- std::move(it->second)));
- }
- continue;
- }
auto breakpointIterator = m_debuggerBreakpointIdToBreakpointId.find(id);
if (breakpointIterator == m_debuggerBreakpointIdToBreakpointId.end()) {
continue;
@@ -1881,7 +1894,6 @@ void V8DebuggerAgentImpl::didPause(
}
void V8DebuggerAgentImpl::didContinue() {
- clearBreakDetails();
m_frontend.resumed();
m_frontend.flush();
}
@@ -1939,35 +1951,32 @@ void V8DebuggerAgentImpl::reset() {
resetBlackboxedStateCache();
m_skipList.clear();
m_scripts.clear();
- m_cachedScriptIds.clear();
+ m_cachedScripts.clear();
m_cachedScriptSize = 0;
}
void V8DebuggerAgentImpl::ScriptCollected(const V8DebuggerScript* script) {
DCHECK_NE(m_scripts.find(script->scriptId()), m_scripts.end());
- m_cachedScriptIds.push_back(script->scriptId());
- // TODO(alph): Properly calculate size when sources are one-byte strings.
- m_cachedScriptSize += script->length() * sizeof(uint16_t);
-
- while (m_cachedScriptSize > m_maxScriptCacheSize) {
- const String16& scriptId = m_cachedScriptIds.front();
- size_t scriptSize = m_scripts[scriptId]->length() * sizeof(uint16_t);
- DCHECK_GE(m_cachedScriptSize, scriptSize);
- m_cachedScriptSize -= scriptSize;
- m_scripts.erase(scriptId);
- m_cachedScriptIds.pop_front();
+ std::vector<uint8_t> bytecode;
+#if V8_ENABLE_WEBASSEMBLY
+ v8::MemorySpan<const uint8_t> span;
+ if (script->wasmBytecode().To(&span)) {
+ bytecode.reserve(span.size());
+ bytecode.insert(bytecode.begin(), span.data(), span.data() + span.size());
}
-}
+#endif
+ CachedScript cachedScript{script->scriptId(), script->source(0),
+ std::move(bytecode)};
+ m_cachedScriptSize += cachedScript.size();
+ m_cachedScripts.push_back(std::move(cachedScript));
+ m_scripts.erase(script->scriptId());
-std::vector<v8::debug::BreakpointId>
-V8DebuggerAgentImpl::instrumentationBreakpointIdsMatching(
- const std::vector<v8::debug::BreakpointId>& ids) {
- std::vector<v8::debug::BreakpointId> instrumentationBreakpointIds;
- for (const v8::debug::BreakpointId& id : ids) {
- if (m_breakpointsOnScriptRun.count(id) > 0)
- instrumentationBreakpointIds.push_back(id);
+ while (m_cachedScriptSize > m_maxScriptCacheSize) {
+ const CachedScript& cachedScript = m_cachedScripts.front();
+ DCHECK_GE(m_cachedScriptSize, cachedScript.size());
+ m_cachedScriptSize -= cachedScript.size();
+ m_cachedScripts.pop_front();
}
- return instrumentationBreakpointIds;
}
Response V8DebuggerAgentImpl::processSkipList(
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 693d2bed91..93f43e0386 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -149,6 +149,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void reset();
// Interface for V8InspectorImpl
+ void didPauseOnInstrumentation(v8::debug::BreakpointId instrumentationId);
+
void didPause(int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
v8::debug::ExceptionType exceptionType, bool isUncaught,
@@ -167,10 +169,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
v8::Isolate* isolate() { return m_isolate; }
- // Returns the intersection of `ids` and the current instrumentation
- // breakpoint ids.
- std::vector<v8::debug::BreakpointId> instrumentationBreakpointIdsMatching(
- const std::vector<v8::debug::BreakpointId>& ids);
+ void clearBreakDetails();
private:
void enableImpl();
@@ -190,7 +189,6 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
v8::Local<v8::String> condition);
void removeBreakpointImpl(const String16& breakpointId,
const std::vector<V8DebuggerScript*>& scripts);
- void clearBreakDetails();
void internalSetAsyncCallStackDepth(int);
void increaseCachedSkipStackGeneration();
@@ -228,7 +226,16 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
size_t m_maxScriptCacheSize = 0;
size_t m_cachedScriptSize = 0;
- std::deque<String16> m_cachedScriptIds;
+ struct CachedScript {
+ String16 scriptId;
+ String16 source;
+ std::vector<uint8_t> bytecode;
+
+ size_t size() const {
+ return source.length() * sizeof(UChar) + bytecode.size();
+ }
+ };
+ std::deque<CachedScript> m_cachedScripts;
using BreakReason =
std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
diff --git a/deps/v8/src/inspector/v8-debugger-id.cc b/deps/v8/src/inspector/v8-debugger-id.cc
index 995df6689f..6989a96e67 100644
--- a/deps/v8/src/inspector/v8-debugger-id.cc
+++ b/deps/v8/src/inspector/v8-debugger-id.cc
@@ -5,6 +5,7 @@
#include "src/inspector/v8-debugger-id.h"
#include "src/debug/debug-interface.h"
+#include "src/inspector/string-util.h"
#include "src/inspector/v8-inspector-impl.h"
namespace v8_inspector {
@@ -12,6 +13,22 @@ namespace v8_inspector {
V8DebuggerId::V8DebuggerId(std::pair<int64_t, int64_t> pair)
: m_first(pair.first), m_second(pair.second) {}
+std::unique_ptr<StringBuffer> V8DebuggerId::toString() const {
+ return StringBufferFrom(String16::fromInteger64(m_first) + "." +
+ String16::fromInteger64(m_second));
+}
+
+bool V8DebuggerId::isValid() const { return m_first || m_second; }
+
+std::pair<int64_t, int64_t> V8DebuggerId::pair() const {
+ return std::make_pair(m_first, m_second);
+}
+
+namespace internal {
+
+V8DebuggerId::V8DebuggerId(std::pair<int64_t, int64_t> pair)
+ : m_debugger_id(pair) {}
+
// static
V8DebuggerId V8DebuggerId::generate(V8InspectorImpl* inspector) {
return V8DebuggerId(std::make_pair(inspector->generateUniqueId(),
@@ -27,19 +44,18 @@ V8DebuggerId::V8DebuggerId(const String16& debuggerId) {
if (!ok) return;
int64_t second = debuggerId.substring(pos + 1).toInteger64(&ok);
if (!ok) return;
- m_first = first;
- m_second = second;
+ m_debugger_id = v8_inspector::V8DebuggerId(std::make_pair(first, second));
}
String16 V8DebuggerId::toString() const {
- return String16::fromInteger64(m_first) + "." +
- String16::fromInteger64(m_second);
+ return toString16(m_debugger_id.toString()->string());
}
-bool V8DebuggerId::isValid() const { return m_first || m_second; }
+bool V8DebuggerId::isValid() const { return m_debugger_id.isValid(); }
std::pair<int64_t, int64_t> V8DebuggerId::pair() const {
- return std::make_pair(m_first, m_second);
+ return m_debugger_id.pair();
}
+} // namespace internal
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-id.h b/deps/v8/src/inspector/v8-debugger-id.h
index 5f53c02189..757976d5bd 100644
--- a/deps/v8/src/inspector/v8-debugger-id.h
+++ b/deps/v8/src/inspector/v8-debugger-id.h
@@ -7,38 +7,35 @@
#include <utility>
+#include "include/v8-inspector.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
namespace v8_inspector {
-
class V8InspectorImpl;
-// This debugger id tries to be unique by generating two random
-// numbers, which should most likely avoid collisions.
-// Debugger id has a 1:1 mapping to context group. It is used to
-// attribute stack traces to a particular debugging, when doing any
-// cross-debugger operations (e.g. async step in).
-// See also Runtime.UniqueDebuggerId in the protocol.
+namespace internal {
+
class V8DebuggerId {
public:
V8DebuggerId() = default;
explicit V8DebuggerId(std::pair<int64_t, int64_t>);
explicit V8DebuggerId(const String16&);
V8DebuggerId(const V8DebuggerId&) V8_NOEXCEPT = default;
- ~V8DebuggerId() = default;
+ V8DebuggerId& operator=(const V8DebuggerId&) V8_NOEXCEPT = default;
static V8DebuggerId generate(V8InspectorImpl*);
+ v8_inspector::V8DebuggerId toV8DebuggerId() const { return m_debugger_id; }
String16 toString() const;
bool isValid() const;
std::pair<int64_t, int64_t> pair() const;
private:
- int64_t m_first = 0;
- int64_t m_second = 0;
+ v8_inspector::V8DebuggerId m_debugger_id;
};
+} // namespace internal
} // namespace v8_inspector
#endif // V8_INSPECTOR_V8_DEBUGGER_ID_H_
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 85c51faa02..d115912f82 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -107,7 +107,9 @@ class ActualScript : public V8DebuggerScript {
String16 source(size_t pos, size_t len) const override {
v8::HandleScope scope(m_isolate);
v8::Local<v8::String> v8Source;
- if (!script()->Source().ToLocal(&v8Source)) return String16();
+ if (!m_scriptSource.Get(m_isolate)->JavaScriptCode().ToLocal(&v8Source)) {
+ return String16();
+ }
if (pos >= static_cast<size_t>(v8Source->Length())) return String16();
size_t substringLength =
std::min(len, static_cast<size_t>(v8Source->Length()) - pos);
@@ -121,9 +123,11 @@ class ActualScript : public V8DebuggerScript {
#if V8_ENABLE_WEBASSEMBLY
v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override {
v8::HandleScope scope(m_isolate);
- auto script = this->script();
- if (!script->IsWasm()) return v8::Nothing<v8::MemorySpan<const uint8_t>>();
- return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode());
+ v8::MemorySpan<const uint8_t> bytecode;
+ if (m_scriptSource.Get(m_isolate)->WasmBytecode().To(&bytecode)) {
+ return v8::Just(bytecode);
+ }
+ return v8::Nothing<v8::MemorySpan<const uint8_t>>();
}
v8::Maybe<v8::debug::WasmScript::DebugSymbolsType> getDebugSymbolsType()
@@ -156,18 +160,8 @@ class ActualScript : public V8DebuggerScript {
#endif // V8_ENABLE_WEBASSEMBLY
return 0;
}
- bool isSourceLoadedLazily() const override { return false; }
int length() const override {
- auto script = this->script();
-#if V8_ENABLE_WEBASSEMBLY
- if (script->IsWasm()) {
- return static_cast<int>(
- v8::debug::WasmScript::Cast(*script)->Bytecode().size());
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- v8::HandleScope scope(m_isolate);
- v8::Local<v8::String> v8Source;
- return script->Source().ToLocal(&v8Source) ? v8Source->Length() : 0;
+ return static_cast<int>(m_scriptSource.Get(m_isolate)->Length());
}
const String16& sourceMappingURL() const override {
@@ -255,16 +249,16 @@ class ActualScript : public V8DebuggerScript {
id);
}
- bool setBreakpointOnRun(int* id) const override {
+ bool setInstrumentationBreakpoint(int* id) const override {
v8::HandleScope scope(m_isolate);
- return script()->SetBreakpointOnScriptEntry(id);
+ return script()->SetInstrumentationBreakpoint(id);
}
const String16& hash() const override {
if (!m_hash.isEmpty()) return m_hash;
v8::HandleScope scope(m_isolate);
v8::Local<v8::String> v8Source;
- if (!script()->Source().ToLocal(&v8Source)) {
+ if (!m_scriptSource.Get(m_isolate)->JavaScriptCode().ToLocal(&v8Source)) {
v8Source = v8::String::Empty(m_isolate);
}
m_hash = calculateHash(m_isolate, v8Source);
@@ -305,29 +299,10 @@ class ActualScript : public V8DebuggerScript {
script->SourceURL().ToLocal(&tmp) && tmp->Length() > 0;
if (script->SourceMappingURL().ToLocal(&tmp))
m_sourceMappingURL = toProtocolString(m_isolate, tmp);
- m_startLine = script->LineOffset();
- m_startColumn = script->ColumnOffset();
- std::vector<int> lineEnds = script->LineEnds();
- if (lineEnds.size()) {
- int source_length = lineEnds[lineEnds.size() - 1];
- m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
- if (lineEnds.size() > 1) {
- m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
- } else {
- m_endColumn = source_length + m_startColumn;
- }
-#if V8_ENABLE_WEBASSEMBLY
- } else if (script->IsWasm()) {
- DCHECK_EQ(0, m_startLine);
- DCHECK_EQ(0, m_startColumn);
- m_endLine = 0;
- m_endColumn = static_cast<int>(
- v8::debug::WasmScript::Cast(*script)->Bytecode().size());
-#endif // V8_ENABLE_WEBASSEMBLY
- } else {
- m_endLine = m_startLine;
- m_endColumn = m_startColumn;
- }
+ m_startLine = script->StartLine();
+ m_startColumn = script->StartColumn();
+ m_endLine = script->EndLine();
+ m_endColumn = script->EndColumn();
USE(script->ContextId().To(&m_executionContextId));
m_language = V8DebuggerScript::Language::JavaScript;
@@ -341,6 +316,8 @@ class ActualScript : public V8DebuggerScript {
m_script.Reset(m_isolate, script);
m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
+ m_scriptSource.Reset(m_isolate, script->Source());
+ m_scriptSource.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
}
void MakeWeak() override {
@@ -349,11 +326,11 @@ class ActualScript : public V8DebuggerScript {
[](const v8::WeakCallbackInfo<ActualScript>& data) {
data.GetParameter()->WeakCallback();
},
- v8::WeakCallbackType::kFinalizer);
+ v8::WeakCallbackType::kParameter);
}
void WeakCallback() {
- m_script.ClearWeak();
+ m_script.Reset();
m_agent->ScriptCollected(this);
}
@@ -368,6 +345,7 @@ class ActualScript : public V8DebuggerScript {
int m_endLine = 0;
int m_endColumn = 0;
v8::Global<v8::debug::Script> m_script;
+ v8::Global<v8::debug::ScriptSource> m_scriptSource;
};
} // namespace
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index d4486eb85e..eb80a14c04 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -59,6 +59,7 @@ class V8DebuggerScript {
V8DebuggerScript(const V8DebuggerScript&) = delete;
V8DebuggerScript& operator=(const V8DebuggerScript&) = delete;
+ v8::Local<v8::debug::ScriptSource> scriptSource();
const String16& scriptId() const { return m_id; }
bool hasSourceURLComment() const { return m_hasSourceURLComment; }
const String16& sourceURL() const { return m_url; }
@@ -76,7 +77,6 @@ class V8DebuggerScript {
int executionContextId() const { return m_executionContextId; }
virtual bool isLiveEdit() const = 0;
virtual bool isModule() const = 0;
- virtual bool isSourceLoadedLazily() const = 0;
virtual int length() const = 0;
void setSourceURL(const String16&);
@@ -97,7 +97,7 @@ class V8DebuggerScript {
virtual bool setBreakpoint(const String16& condition,
v8::debug::Location* location, int* id) const = 0;
virtual void MakeWeak() = 0;
- virtual bool setBreakpointOnRun(int* id) const = 0;
+ virtual bool setInstrumentationBreakpoint(int* id) const = 0;
#if V8_ENABLE_WEBASSEMBLY
virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index fc92a9c539..20be277cf0 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -77,6 +77,8 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
m_continueToLocationBreakpointId(kNoBreakpointId),
m_maxAsyncCallStacks(kMaxAsyncTaskStacks),
m_maxAsyncCallStackDepth(0),
+ m_maxCallStackSizeToCapture(
+ V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture),
m_pauseOnExceptionsState(v8::debug::NoBreakOnException) {}
V8Debugger::~V8Debugger() {
@@ -197,7 +199,7 @@ void V8Debugger::setPauseOnNextCall(bool pause, int targetContextGroupId) {
}
bool V8Debugger::canBreakProgram() {
- return !v8::debug::CanBreakProgram(m_isolate);
+ return v8::debug::CanBreakProgram(m_isolate);
}
void V8Debugger::breakProgram(int targetContextGroupId) {
@@ -250,7 +252,6 @@ void V8Debugger::stepIntoStatement(int targetContextGroupId,
bool breakOnAsyncCall) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
- if (asyncStepOutOfFunction(targetContextGroupId, true)) return;
m_targetContextGroupId = targetContextGroupId;
m_pauseOnAsyncCall = breakOnAsyncCall;
v8::debug::PrepareStep(m_isolate, v8::debug::StepInto);
@@ -260,7 +261,6 @@ void V8Debugger::stepIntoStatement(int targetContextGroupId,
void V8Debugger::stepOverStatement(int targetContextGroupId) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
- if (asyncStepOutOfFunction(targetContextGroupId, true)) return;
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepOver);
continueProgram(targetContextGroupId);
@@ -269,48 +269,11 @@ void V8Debugger::stepOverStatement(int targetContextGroupId) {
void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
- if (asyncStepOutOfFunction(targetContextGroupId, false)) return;
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
continueProgram(targetContextGroupId);
}
-bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
- bool onlyAtReturn) {
- v8::HandleScope handleScope(m_isolate);
- auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
- // When stepping through extensions code, it is possible that the
- // iterator doesn't have any frames, since we exclude all frames
- // that correspond to extension scripts.
- if (iterator->Done()) return false;
- bool atReturn = !iterator->GetReturnValue().IsEmpty();
- iterator->Advance();
- // Synchronous stack has more then one frame.
- if (!iterator->Done()) return false;
- // There is only one synchronous frame but we are not at return position and
- // user requests stepOver or stepInto.
- if (onlyAtReturn && !atReturn) return false;
- // If we are inside async function, current async parent was captured when
- // async function was suspended first time and we install that stack as
- // current before resume async function. So it represents current async
- // function.
- auto current = currentAsyncParent();
- if (!current) return false;
- // Lookup for parent async function.
- auto parent = current->parent();
- if (parent.expired()) return false;
- // Parent async stack will have suspended task id iff callee async function
- // is awaiting current async function. We can make stepOut there only in this
- // case.
- void* parentTask =
- std::shared_ptr<AsyncStackTrace>(parent)->suspendedTaskId();
- if (!parentTask) return false;
- m_targetContextGroupId = targetContextGroupId;
- m_taskWithScheduledBreak = parentTask;
- continueProgram(targetContextGroupId);
- return true;
-}
-
void V8Debugger::terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) {
if (m_terminateExecutionCallback) {
@@ -365,7 +328,8 @@ Response V8Debugger::continueToLocation(
m_continueToLocationTargetCallFrames = targetCallFrames;
if (m_continueToLocationTargetCallFrames !=
protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any) {
- m_continueToLocationStack = captureStackTrace(true);
+ m_continueToLocationStack = V8StackTraceImpl::capture(
+ this, V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture);
DCHECK(m_continueToLocationStack);
}
continueProgram(targetContextGroupId);
@@ -381,7 +345,8 @@ bool V8Debugger::shouldContinueToCurrentLocation() {
protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any) {
return true;
}
- std::unique_ptr<V8StackTraceImpl> currentStack = captureStackTrace(true);
+ std::unique_ptr<V8StackTraceImpl> currentStack = V8StackTraceImpl::capture(
+ this, V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture);
if (m_continueToLocationTargetCallFrames ==
protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Current) {
return m_continueToLocationStack->isEqualIgnoringTopFrame(
@@ -468,8 +433,10 @@ void V8Debugger::handleProgramBreak(
}
m_inspector->forEachSession(contextGroupId,
[](V8InspectorSessionImpl* session) {
- if (session->debuggerAgent()->enabled())
+ if (session->debuggerAgent()->enabled()) {
+ session->debuggerAgent()->clearBreakDetails();
session->debuggerAgent()->didContinue();
+ }
});
if (m_scheduledOOMBreak) m_isolate->RestoreOriginalHeapLimit();
@@ -531,6 +498,42 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
});
}
+void V8Debugger::BreakOnInstrumentation(
+ v8::Local<v8::Context> pausedContext,
+ v8::debug::BreakpointId instrumentationId) {
+ // Don't allow nested breaks.
+ if (isPaused()) return;
+
+ int contextGroupId = m_inspector->contextGroupId(pausedContext);
+ bool hasAgents = false;
+ m_inspector->forEachSession(
+ contextGroupId, [&hasAgents](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->acceptsPause(false /* isOOMBreak */))
+ hasAgents = true;
+ });
+ if (!hasAgents) return;
+
+ m_pausedContextGroupId = contextGroupId;
+ m_inspector->forEachSession(
+ contextGroupId, [instrumentationId](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->acceptsPause(false /* isOOMBreak */)) {
+ session->debuggerAgent()->didPauseOnInstrumentation(
+ instrumentationId);
+ }
+ });
+ {
+ v8::Context::Scope scope(pausedContext);
+ m_inspector->client()->runMessageLoopOnPause(contextGroupId);
+ m_pausedContextGroupId = 0;
+ }
+
+ m_inspector->forEachSession(contextGroupId,
+ [](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->enabled())
+ session->debuggerAgent()->didContinue();
+ });
+}
+
void V8Debugger::BreakProgramRequested(
v8::Local<v8::Context> pausedContext,
const std::vector<v8::debug::BreakpointId>& break_points_hit,
@@ -617,20 +620,10 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
asyncTaskFinishedForStack(task);
asyncTaskFinishedForStepping(task);
break;
- case v8::debug::kAsyncFunctionSuspended: {
- if (m_asyncTaskStacks.find(task) == m_asyncTaskStacks.end()) {
- asyncTaskScheduledForStack(toStringView("await"), task, true, true);
- }
- auto stackIt = m_asyncTaskStacks.find(task);
- if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
- std::shared_ptr<AsyncStackTrace> stack(stackIt->second);
- stack->setSuspendedTaskId(task);
- }
+ case v8::debug::kDebugAwait: {
+ asyncTaskScheduledForStack(toStringView("await"), task, false, true);
break;
}
- case v8::debug::kAsyncFunctionFinished:
- asyncTaskCanceledForStack(task);
- break;
}
}
@@ -821,8 +814,8 @@ v8::Local<v8::Array> V8Debugger::queryObjects(v8::Local<v8::Context> context,
std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
v8::Local<v8::StackTrace> v8StackTrace) {
- return V8StackTraceImpl::create(this, v8StackTrace,
- V8StackTraceImpl::maxCallStackSizeToCapture);
+ return V8StackTraceImpl::create(
+ this, v8StackTrace, V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture);
}
void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
@@ -847,6 +840,44 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
maxAsyncCallStackDepth ? this : nullptr);
}
+void V8Debugger::setMaxCallStackSizeToCapture(V8RuntimeAgentImpl* agent,
+ int size) {
+ if (size < 0) {
+ m_maxCallStackSizeToCaptureMap.erase(agent);
+ } else {
+ m_maxCallStackSizeToCaptureMap[agent] = size;
+ }
+
+ // The following logic is a bit complicated to decipher because we
+ // want to retain backwards compatible semantics:
+ //
+ // (a) When no `Runtime` domain is enabled, we stick to the default
+ // maximum call stack size, but don't let V8 collect stack traces
+ // for uncaught exceptions.
+ // (b) When `Runtime` is enabled for at least one front-end, we compute
+ // the maximum of the requested maximum call stack sizes of all the
+ // front-ends whose `Runtime` domains are enabled (which might be 0),
+ // and ask V8 to collect stack traces for uncaught exceptions.
+ //
+ // The latter allows performance test automation infrastructure to drive
+ // browser via `Runtime` domain while still minimizing the performance
+ // overhead of having the inspector attached - see the relevant design
+ // document https://bit.ly/v8-cheaper-inspector-stack-traces for more
+ if (m_maxCallStackSizeToCaptureMap.empty()) {
+ m_maxCallStackSizeToCapture =
+ V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture;
+ m_isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ } else {
+ m_maxCallStackSizeToCapture = 0;
+ for (auto const& pair : m_maxCallStackSizeToCaptureMap) {
+ if (m_maxCallStackSizeToCapture < pair.second)
+ m_maxCallStackSizeToCapture = pair.second;
+ }
+ m_isolate->SetCaptureStackTraceForUncaughtExceptions(
+ m_maxCallStackSizeToCapture > 0, m_maxCallStackSizeToCapture);
+ }
+}
+
std::shared_ptr<AsyncStackTrace> V8Debugger::stackTraceFor(
int contextGroupId, const V8StackTraceId& id) {
if (debuggerIdFor(contextGroupId).pair() != id.debugger_id) return nullptr;
@@ -864,8 +895,7 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace(
if (!contextGroupId) return V8StackTraceId();
std::shared_ptr<AsyncStackTrace> asyncStack =
- AsyncStackTrace::capture(this, toString16(description),
- V8StackTraceImpl::maxCallStackSizeToCapture);
+ AsyncStackTrace::capture(this, toString16(description));
if (!asyncStack) return V8StackTraceId();
uintptr_t id = AsyncStackTrace::store(this, asyncStack);
@@ -942,9 +972,8 @@ void V8Debugger::asyncTaskScheduledForStack(const StringView& taskName,
bool skipTopFrame) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
- std::shared_ptr<AsyncStackTrace> asyncStack = AsyncStackTrace::capture(
- this, toString16(taskName), V8StackTraceImpl::maxCallStackSizeToCapture,
- skipTopFrame);
+ std::shared_ptr<AsyncStackTrace> asyncStack =
+ AsyncStackTrace::capture(this, toString16(taskName), skipTopFrame);
if (asyncStack) {
m_asyncTaskStacks[task] = asyncStack;
if (recurring) m_recurringTasks.insert(task);
@@ -972,7 +1001,6 @@ void V8Debugger::asyncTaskStartedForStack(void* task) {
AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
std::shared_ptr<AsyncStackTrace> stack(stackIt->second);
- stack->setSuspendedTaskId(nullptr);
m_currentAsyncParent.push_back(stack);
} else {
m_currentAsyncParent.emplace_back();
@@ -1048,20 +1076,17 @@ void V8Debugger::unmuteScriptParsedEvents() {
std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
bool fullStack) {
- if (!m_isolate->InContext()) return nullptr;
-
- v8::HandleScope handles(m_isolate);
int contextGroupId = currentContextGroupId();
if (!contextGroupId) return nullptr;
int stackSize = 1;
if (fullStack) {
- stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+ stackSize = V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture;
} else {
m_inspector->forEachSession(
- contextGroupId, [&stackSize](V8InspectorSessionImpl* session) {
+ contextGroupId, [this, &stackSize](V8InspectorSessionImpl* session) {
if (session->runtimeAgent()->enabled())
- stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+ stackSize = maxCallStackSizeToCapture();
});
}
return V8StackTraceImpl::capture(this, stackSize);
@@ -1095,8 +1120,9 @@ void V8Debugger::collectOldAsyncStacksIfNeeded() {
std::shared_ptr<StackFrame> V8Debugger::symbolize(
v8::Local<v8::StackFrame> v8Frame) {
int scriptId = v8Frame->GetScriptId();
- int lineNumber = v8Frame->GetLineNumber() - 1;
- int columnNumber = v8Frame->GetColumn() - 1;
+ auto location = v8Frame->GetLocation();
+ int lineNumber = location.GetLineNumber();
+ int columnNumber = location.GetColumnNumber();
CachedStackFrameKey key{scriptId, lineNumber, columnNumber};
auto functionName = toProtocolString(isolate(), v8Frame->GetFunctionName());
auto it = m_cachedStackFrames.find(key);
@@ -1126,10 +1152,11 @@ void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) {
m_maxAsyncCallStacks = limit;
}
-V8DebuggerId V8Debugger::debuggerIdFor(int contextGroupId) {
+internal::V8DebuggerId V8Debugger::debuggerIdFor(int contextGroupId) {
auto it = m_contextGroupIdToDebuggerId.find(contextGroupId);
if (it != m_contextGroupIdToDebuggerId.end()) return it->second;
- V8DebuggerId debuggerId = V8DebuggerId::generate(m_inspector);
+ internal::V8DebuggerId debuggerId =
+ internal::V8DebuggerId::generate(m_inspector);
m_contextGroupIdToDebuggerId.insert(
it, std::make_pair(contextGroupId, debuggerId));
return debuggerId;
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 6394cfc63d..b87c099513 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -27,6 +27,7 @@ class StackFrame;
class V8Debugger;
class V8DebuggerAgentImpl;
class V8InspectorImpl;
+class V8RuntimeAgentImpl;
class V8StackTraceImpl;
struct V8StackTraceId;
@@ -85,6 +86,9 @@ class V8Debugger : public v8::debug::DebugDelegate,
int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
void setAsyncCallStackDepth(V8DebuggerAgentImpl*, int);
+ int maxCallStackSizeToCapture() const { return m_maxCallStackSizeToCapture; }
+ void setMaxCallStackSizeToCapture(V8RuntimeAgentImpl*, int);
+
std::shared_ptr<AsyncStackTrace> currentAsyncParent();
V8StackTraceId currentExternalParent();
@@ -120,7 +124,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
void setMaxAsyncTaskStacksForTest(int limit);
void dumpAsyncTaskStacksStateForTest();
- V8DebuggerId debuggerIdFor(int contextGroupId);
+ internal::V8DebuggerId debuggerIdFor(int contextGroupId);
std::shared_ptr<AsyncStackTrace> stackTraceFor(int contextGroupId,
const V8StackTraceId& id);
@@ -181,6 +185,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Context> paused_context,
const std::vector<v8::debug::BreakpointId>& break_points_hit,
v8::debug::BreakReasons break_reasons) override;
+ void BreakOnInstrumentation(v8::Local<v8::Context> paused_context,
+ v8::debug::BreakpointId) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught,
@@ -193,7 +199,6 @@ class V8Debugger : public v8::debug::DebugDelegate,
int column) override;
int currentContextGroupId();
- bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn);
bool hasScheduledBreakOnNextFunctionCall() const;
@@ -248,6 +253,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
size_t m_maxAsyncCallStacks;
int m_maxAsyncCallStackDepth;
+ int m_maxCallStackSizeToCapture;
std::vector<void*> m_currentTasks;
std::vector<std::shared_ptr<AsyncStackTrace>> m_currentAsyncParent;
@@ -259,6 +265,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::list<std::shared_ptr<AsyncStackTrace>> m_allAsyncStacks;
std::unordered_map<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+ std::unordered_map<V8RuntimeAgentImpl*, int> m_maxCallStackSizeToCaptureMap;
void* m_taskWithScheduledBreak = nullptr;
// If any of the following three is true, we schedule pause on next JS
@@ -277,7 +284,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
StackTraceIdToStackTrace m_storedStackTraces;
uintptr_t m_lastStackTraceId = 0;
- std::unordered_map<int, V8DebuggerId> m_contextGroupIdToDebuggerId;
+ std::unordered_map<int, internal::V8DebuggerId> m_contextGroupIdToDebuggerId;
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
};
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 3f48449a99..c0fd6a740f 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -44,11 +44,13 @@
#include "src/inspector/v8-console-message.h"
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger-id.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-profiler-agent-impl.h"
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/value-mirror.h"
namespace v8_inspector {
@@ -62,7 +64,6 @@ V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
: m_isolate(isolate),
m_client(client),
m_debugger(new V8Debugger(isolate, this)),
- m_capturingStackTracesCount(0),
m_lastExceptionId(0),
m_lastContextId(0),
m_isolateId(generateUniqueId()) {
@@ -84,7 +85,8 @@ int V8InspectorImpl::contextGroupId(int contextId) const {
return it != m_contextIdToGroupIdMap.end() ? it->second : 0;
}
-int V8InspectorImpl::resolveUniqueContextId(V8DebuggerId uniqueId) const {
+int V8InspectorImpl::resolveUniqueContextId(
+ internal::V8DebuggerId uniqueId) const {
auto it = m_uniqueIdToContextId.find(uniqueId.pair());
return it == m_uniqueIdToContextId.end() ? 0 : it->second;
}
@@ -112,19 +114,6 @@ v8::MaybeLocal<v8::Script> V8InspectorImpl::compileScript(
v8::ScriptCompiler::kNoCompileOptions);
}
-void V8InspectorImpl::enableStackCapturingIfNeeded() {
- if (!m_capturingStackTracesCount)
- V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
- true);
- ++m_capturingStackTracesCount;
-}
-
-void V8InspectorImpl::disableStackCapturingIfNeeded() {
- if (!(--m_capturingStackTracesCount))
- V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
- false);
-}
-
void V8InspectorImpl::muteExceptions(int contextGroupId) {
m_muteExceptionsMap[contextGroupId]++;
}
@@ -194,6 +183,13 @@ v8::MaybeLocal<v8::Context> V8InspectorImpl::contextById(int contextId) {
return context ? context->context() : v8::MaybeLocal<v8::Context>();
}
+V8DebuggerId V8InspectorImpl::uniqueDebuggerId(int contextId) {
+ InspectedContext* context = getContext(contextId);
+ internal::V8DebuggerId unique_id;
+ if (context) unique_id = context->uniqueId();
+ return unique_id.toV8DebuggerId();
+}
+
void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
int contextId = ++m_lastContextId;
auto* context = new InspectedContext(this, info, contextId);
@@ -523,4 +519,24 @@ v8::MaybeLocal<v8::Object> V8InspectorImpl::getAssociatedExceptionData(
return v8::MaybeLocal<v8::Object>();
return scope.Escape(object.As<v8::Object>());
}
+
+std::unique_ptr<protocol::DictionaryValue>
+V8InspectorImpl::getAssociatedExceptionDataForProtocol(
+ v8::Local<v8::Value> exception) {
+ v8::MaybeLocal<v8::Object> maybeData = getAssociatedExceptionData(exception);
+ v8::Local<v8::Object> data;
+ if (!maybeData.ToLocal(&data)) return nullptr;
+
+ v8::Local<v8::Context> context;
+ if (!exceptionMetaDataContext().ToLocal(&context)) return nullptr;
+
+ v8::TryCatch tryCatch(m_isolate);
+ v8::MicrotasksScope microtasksScope(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Context::Scope contextScope(context);
+ std::unique_ptr<protocol::DictionaryValue> jsonObject;
+ objectToProtocolValue(context, data, 2, &jsonObject);
+ return jsonObject;
+}
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index d628c57a20..4a3e9bf215 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -67,7 +67,7 @@ class V8InspectorImpl : public V8Inspector {
int contextGroupId(v8::Local<v8::Context>) const;
int contextGroupId(int contextId) const;
uint64_t isolateId() const { return m_isolateId; }
- int resolveUniqueContextId(V8DebuggerId uniqueId) const;
+ int resolveUniqueContextId(internal::V8DebuggerId uniqueId) const;
v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
v8::Local<v8::String>);
@@ -84,6 +84,7 @@ class V8InspectorImpl : public V8Inspector {
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
v8::MaybeLocal<v8::Context> contextById(int contextId) override;
+ V8DebuggerId uniqueDebuggerId(int contextId) override;
void contextCollected(int contextGroupId, int contextId);
void resetContextGroup(int contextGroupId) override;
void idleStarted() override;
@@ -115,8 +116,6 @@ class V8InspectorImpl : public V8Inspector {
v8::Local<v8::Name> key, v8::Local<v8::Value> value) override;
unsigned nextExceptionId() { return ++m_lastExceptionId; }
- void enableStackCapturingIfNeeded();
- void disableStackCapturingIfNeeded();
void muteExceptions(int contextGroupId);
void unmuteExceptions(int contextGroupId);
V8ConsoleMessageStorage* ensureConsoleMessageStorage(int contextGroupId);
@@ -135,6 +134,8 @@ class V8InspectorImpl : public V8Inspector {
int64_t generateUniqueId();
V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Object> getAssociatedExceptionData(
v8::Local<v8::Value> exception);
+ std::unique_ptr<protocol::DictionaryValue>
+ getAssociatedExceptionDataForProtocol(v8::Local<v8::Value> exception);
class EvaluateScope {
public:
@@ -160,7 +161,6 @@ class V8InspectorImpl : public V8Inspector {
v8::Global<v8::Context> m_regexContext;
v8::Global<v8::Context> m_exceptionMetaDataContext;
v8::Global<v8::debug::EphemeronTable> m_exceptionMetaData;
- int m_capturingStackTracesCount;
unsigned m_lastExceptionId;
int m_lastContextId;
int m_lastSessionId = 0;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 504728f240..e6e50d597b 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -56,7 +56,9 @@ std::unique_ptr<protocol::DictionaryValue> ParseState(StringView state) {
if (!cbor.empty()) {
std::unique_ptr<protocol::Value> value =
protocol::Value::parseBinary(cbor.data(), cbor.size());
- if (value) return protocol::DictionaryValue::cast(std::move(value));
+ std::unique_ptr<protocol::DictionaryValue> dictionaryValue =
+ protocol::DictionaryValue::cast(std::move(value));
+ if (dictionaryValue) return dictionaryValue;
}
return protocol::DictionaryValue::create();
}
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 6b44459082..be6e8ab85c 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -145,15 +145,14 @@ std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
std::unique_ptr<protocol::Debugger::Location> currentDebugLocation(
V8InspectorImpl* inspector) {
- std::unique_ptr<V8StackTraceImpl> callStack =
- inspector->debugger()->captureStackTrace(false /* fullStack */);
- auto location =
- protocol::Debugger::Location::create()
- .setScriptId(String16::fromInteger(callStack->topScriptId()))
- .setLineNumber(callStack->topLineNumber())
- .build();
- location->setColumnNumber(callStack->topColumnNumber());
- return location;
+ auto stackTrace = V8StackTraceImpl::capture(inspector->debugger(), 1);
+ CHECK(stackTrace);
+ CHECK(!stackTrace->isEmpty());
+ return protocol::Debugger::Location::create()
+ .setScriptId(String16::fromInteger(stackTrace->topScriptId()))
+ .setLineNumber(stackTrace->topLineNumber())
+ .setColumnNumber(stackTrace->topColumnNumber())
+ .build();
}
volatile int s_lastProfileId = 0;
@@ -213,10 +212,9 @@ void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
std::unique_ptr<protocol::Profiler::Profile> profile =
stopProfiling(id, true);
if (!profile) return;
- std::unique_ptr<protocol::Debugger::Location> location =
- currentDebugLocation(m_session->inspector());
- m_frontend.consoleProfileFinished(id, std::move(location), std::move(profile),
- resolvedTitle);
+ m_frontend.consoleProfileFinished(
+ id, currentDebugLocation(m_session->inspector()), std::move(profile),
+ resolvedTitle);
}
Response V8ProfilerAgentImpl::enable() {
@@ -307,8 +305,7 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(
Maybe<bool> callCount, Maybe<bool> detailed,
Maybe<bool> allowTriggeredUpdates, double* out_timestamp) {
if (!m_enabled) return Response::ServerError("Profiler is not enabled");
- *out_timestamp =
- v8::base::TimeTicks::HighResolutionNow().since_origin().InSecondsF();
+ *out_timestamp = v8::base::TimeTicks::Now().since_origin().InSecondsF();
bool callCountValue = callCount.fromMaybe(false);
bool detailedValue = detailed.fromMaybe(false);
bool allowTriggeredUpdatesValue = allowTriggeredUpdates.fromMaybe(false);
@@ -421,8 +418,7 @@ Response V8ProfilerAgentImpl::takePreciseCoverage(
}
v8::HandleScope handle_scope(m_isolate);
v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(m_isolate);
- *out_timestamp =
- v8::base::TimeTicks::HighResolutionNow().since_origin().InSecondsF();
+ *out_timestamp = v8::base::TimeTicks::Now().since_origin().InSecondsF();
return coverageToProtocol(m_session->inspector(), coverage, out_result);
}
@@ -441,8 +437,7 @@ void V8ProfilerAgentImpl::triggerPreciseCoverageDeltaUpdate(
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>
out_result;
coverageToProtocol(m_session->inspector(), coverage, &out_result);
- double now =
- v8::base::TimeTicks::HighResolutionNow().since_origin().InSecondsF();
+ double now = v8::base::TimeTicks::Now().since_origin().InSecondsF();
m_frontend.preciseCoverageDeltaUpdate(now, occasion, std::move(out_result));
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 3a8277639c..96cc6c5a30 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -57,6 +57,7 @@ namespace v8_inspector {
namespace V8RuntimeAgentImplState {
static const char customObjectFormatterEnabled[] =
"customObjectFormatterEnabled";
+static const char maxCallStackSizeToCapture[] = "maxCallStackSizeToCapture";
static const char runtimeEnabled[] = "runtimeEnabled";
static const char bindings[] = "bindings";
static const char globalBindingsKey[] = "";
@@ -216,7 +217,7 @@ Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
}
*contextId = executionContextId.fromJust();
} else if (uniqueContextId.isJust()) {
- V8DebuggerId uniqueId(uniqueContextId.fromJust());
+ internal::V8DebuggerId uniqueId(uniqueContextId.fromJust());
if (!uniqueId.isValid())
return Response::InvalidParams("invalid uniqueContextId");
int id = inspector->resolveUniqueContextId(uniqueId);
@@ -451,15 +452,14 @@ Response V8RuntimeAgentImpl::getProperties(
: WrapMode::kNoPreview,
result, exceptionDetails);
if (!response.IsSuccess()) return response;
- if (exceptionDetails->isJust() || accessorPropertiesOnly.fromMaybe(false))
- return Response::Success();
+ if (exceptionDetails->isJust()) return Response::Success();
std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
internalPropertiesProtocolArray;
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>
privatePropertiesProtocolArray;
response = scope.injectedScript()->getInternalAndPrivateProperties(
- object, scope.objectGroupName(), &internalPropertiesProtocolArray,
- &privatePropertiesProtocolArray);
+ object, scope.objectGroupName(), accessorPropertiesOnly.fromMaybe(false),
+ &internalPropertiesProtocolArray, &privatePropertiesProtocolArray);
if (!response.IsSuccess()) return response;
if (!internalPropertiesProtocolArray->empty())
*internalProperties = std::move(internalPropertiesProtocolArray);
@@ -499,7 +499,13 @@ Response V8RuntimeAgentImpl::setMaxCallStackSizeToCapture(int size) {
return Response::ServerError(
"maxCallStackSizeToCapture should be non-negative");
}
- V8StackTraceImpl::maxCallStackSizeToCapture = size;
+ TRACE_EVENT_WITH_FLOW1(
+ TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8RuntimeAgentImpl::setMaxCallStackSizeToCapture", this,
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "size", size);
+ if (!m_enabled) return Response::ServerError("Runtime agent is not enabled");
+ m_state->setInteger(V8RuntimeAgentImplState::maxCallStackSizeToCapture, size);
+ m_inspector->debugger()->setMaxCallStackSizeToCapture(this, size);
return Response::Success();
}
@@ -802,6 +808,41 @@ Response V8RuntimeAgentImpl::removeBinding(const String16& name) {
return Response::Success();
}
+Response V8RuntimeAgentImpl::getExceptionDetails(
+ const String16& errorObjectId,
+ Maybe<protocol::Runtime::ExceptionDetails>* out_exceptionDetails) {
+ InjectedScript::ObjectScope scope(m_session, errorObjectId);
+ Response response = scope.initialize();
+ if (!response.IsSuccess()) return response;
+
+ const v8::Local<v8::Value> error = scope.object();
+ if (!error->IsNativeError())
+ return Response::ServerError("errorObjectId is not a JS error object");
+
+ const v8::Local<v8::Message> message =
+ v8::debug::CreateMessageFromException(m_inspector->isolate(), error);
+
+ response = scope.injectedScript()->createExceptionDetails(
+ message, error, scope.objectGroupName(), out_exceptionDetails);
+ if (!response.IsSuccess()) return response;
+
+ CHECK(out_exceptionDetails->isJust());
+
+ // When an exception object is present, `createExceptionDetails` assumes
+ // the exception is uncaught and will overwrite the text field to "Uncaught".
+ // Lets use the normal message text instead.
+ out_exceptionDetails->fromJust()->setText(
+ toProtocolString(m_inspector->isolate(), message->Get()));
+
+ // Check if the exception has any metadata on the inspector and also attach
+ // it.
+ std::unique_ptr<protocol::DictionaryValue> data =
+ m_inspector->getAssociatedExceptionDataForProtocol(error);
+ if (data)
+ out_exceptionDetails->fromJust()->setExceptionMetaData(std::move(data));
+ return Response::Success();
+}
+
void V8RuntimeAgentImpl::bindingCalled(const String16& name,
const String16& payload,
int executionContextId) {
@@ -839,6 +880,11 @@ void V8RuntimeAgentImpl::restore() {
V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
m_session->setCustomObjectFormatterEnabled(true);
+ int size;
+ if (m_state->getInteger(V8RuntimeAgentImplState::maxCallStackSizeToCapture,
+ &size))
+ m_inspector->debugger()->setMaxCallStackSizeToCapture(this, size);
+
m_inspector->forEachContext(
m_session->contextGroupId(),
[this](InspectedContext* context) { addBindings(context); });
@@ -846,11 +892,15 @@ void V8RuntimeAgentImpl::restore() {
Response V8RuntimeAgentImpl::enable() {
if (m_enabled) return Response::Success();
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8RuntimeAgentImpl::enable", this,
+ TRACE_EVENT_FLAG_FLOW_OUT);
m_inspector->client()->beginEnsureAllContextsInGroup(
m_session->contextGroupId());
m_enabled = true;
m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, true);
- m_inspector->enableStackCapturingIfNeeded();
+ m_inspector->debugger()->setMaxCallStackSizeToCapture(
+ this, V8StackTraceImpl::kDefaultMaxCallStackSizeToCapture);
m_session->reportAllContexts(this);
V8ConsoleMessageStorage* storage =
m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
@@ -862,10 +912,13 @@ Response V8RuntimeAgentImpl::enable() {
Response V8RuntimeAgentImpl::disable() {
if (!m_enabled) return Response::Success();
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.inspector"),
+ "V8RuntimeAgentImpl::disable", this,
+ TRACE_EVENT_FLAG_FLOW_IN);
m_enabled = false;
m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
m_state->remove(V8RuntimeAgentImplState::bindings);
- m_inspector->disableStackCapturingIfNeeded();
+ m_inspector->debugger()->setMaxCallStackSizeToCapture(this, -1);
m_session->setCustomObjectFormatterEnabled(false);
reset();
m_inspector->client()->endEnsureAllContextsInGroup(
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 0ab39e8da2..ca46de5ec4 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -32,11 +32,9 @@
#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#include <memory>
-#include <set>
#include <unordered_map>
#include "include/v8-persistent-handle.h"
-// #include "include/v8-function-callback.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
@@ -130,6 +128,9 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<String16> executionContextName) override;
Response removeBinding(const String16& name) override;
void addBindings(InspectedContext* context);
+ Response getExceptionDetails(const String16& errorObjectId,
+ Maybe<protocol::Runtime::ExceptionDetails>*
+ out_exceptionDetails) override;
void reset();
void reportExecutionContextCreated(InspectedContext*);
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 56291b2775..c46de4465d 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -22,9 +22,6 @@ using v8_crdtp::json::ConvertCBORToJSON;
using v8_crdtp::json::ConvertJSONToCBOR;
namespace v8_inspector {
-
-int V8StackTraceImpl::maxCallStackSizeToCapture = 200;
-
namespace {
static const char kId[] = "id";
@@ -42,8 +39,10 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
DCHECK(debugger->isolate()->InContext());
int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "SymbolizeStackTrace", "frameCount", frameCount);
+ TRACE_EVENT1(
+ TRACE_DISABLED_BY_DEFAULT("v8.inspector") "," TRACE_DISABLED_BY_DEFAULT(
+ "v8.stack_trace"),
+ "toFramesVector", "frameCount", frameCount);
std::vector<std::shared_ptr<StackFrame>> frames(frameCount);
for (int i = 0; i < frameCount; ++i) {
@@ -108,7 +107,8 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
stackTrace->setParentId(
protocol::Runtime::StackTraceId::create()
.setId(stackTraceIdToString(externalParent.id))
- .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString())
+ .setDebuggerId(
+ internal::V8DebuggerId(externalParent.debugger_id).toString())
.build());
}
return stackTrace;
@@ -116,7 +116,8 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
} // namespace
-V8StackTraceId::V8StackTraceId() : id(0), debugger_id(V8DebuggerId().pair()) {}
+V8StackTraceId::V8StackTraceId()
+ : id(0), debugger_id(internal::V8DebuggerId().pair()) {}
V8StackTraceId::V8StackTraceId(uintptr_t id,
const std::pair<int64_t, int64_t> debugger_id)
@@ -128,7 +129,7 @@ V8StackTraceId::V8StackTraceId(uintptr_t id,
: id(id), debugger_id(debugger_id), should_pause(should_pause) {}
V8StackTraceId::V8StackTraceId(StringView json)
- : id(0), debugger_id(V8DebuggerId().pair()) {
+ : id(0), debugger_id(internal::V8DebuggerId().pair()) {
if (json.length() == 0) return;
std::vector<uint8_t> cbor;
if (json.is8Bit()) {
@@ -147,7 +148,7 @@ V8StackTraceId::V8StackTraceId(StringView json)
int64_t parsedId = s.toInteger64(&isOk);
if (!isOk || !parsedId) return;
if (!dict->getString(kDebuggerId, &s)) return;
- V8DebuggerId debuggerId(s);
+ internal::V8DebuggerId debuggerId(s);
if (!debuggerId.isValid()) return;
if (!dict->getBoolean(kShouldPause, &should_pause)) return;
id = parsedId;
@@ -160,7 +161,7 @@ std::unique_ptr<StringBuffer> V8StackTraceId::ToString() {
if (IsInvalid()) return nullptr;
auto dict = protocol::DictionaryValue::create();
dict->setString(kId, String16::fromInteger64(id));
- dict->setString(kDebuggerId, V8DebuggerId(debugger_id).toString());
+ dict->setString(kDebuggerId, internal::V8DebuggerId(debugger_id).toString());
dict->setBoolean(kShouldPause, should_pause);
std::vector<uint8_t> json;
v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(dict->Serialize()),
@@ -222,13 +223,6 @@ bool StackFrame::isEqual(StackFrame* frame) const {
}
// static
-void V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(
- v8::Isolate* isolate, bool capture) {
- isolate->SetCaptureStackTraceForUncaughtExceptions(
- capture, V8StackTraceImpl::maxCallStackSizeToCapture);
-}
-
-// static
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
V8Debugger* debugger, v8::Local<v8::StackTrace> v8StackTrace,
int maxStackSize) {
@@ -257,8 +251,10 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
V8Debugger* debugger, int maxStackSize) {
DCHECK(debugger);
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "V8StackTraceImpl::capture", "maxFrameCount", maxStackSize);
+ TRACE_EVENT1(
+ TRACE_DISABLED_BY_DEFAULT("v8.inspector") "," TRACE_DISABLED_BY_DEFAULT(
+ "v8.stack_trace"),
+ "V8StackTraceImpl::capture", "maxFrameCount", maxStackSize);
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
@@ -398,12 +394,14 @@ StackFrame* V8StackTraceImpl::StackFrameIterator::frame() {
// static
std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
- V8Debugger* debugger, const String16& description, int maxStackSize,
- bool skipTopFrame) {
+ V8Debugger* debugger, const String16& description, bool skipTopFrame) {
DCHECK(debugger);
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "AsyncStackTrace::capture", "maxFrameCount", maxStackSize);
+ int maxStackSize = debugger->maxCallStackSizeToCapture();
+ TRACE_EVENT1(
+ TRACE_DISABLED_BY_DEFAULT("v8.inspector") "," TRACE_DISABLED_BY_DEFAULT(
+ "v8.stack_trace"),
+ "AsyncStackTrace::capture", "maxFrameCount", maxStackSize);
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
@@ -443,7 +441,6 @@ AsyncStackTrace::AsyncStackTrace(
std::shared_ptr<AsyncStackTrace> asyncParent,
const V8StackTraceId& externalParent)
: m_id(0),
- m_suspendedTaskId(nullptr),
m_description(description),
m_frames(std::move(frames)),
m_asyncParent(std::move(asyncParent)),
@@ -457,12 +454,6 @@ AsyncStackTrace::buildInspectorObject(V8Debugger* debugger,
maxAsyncDepth);
}
-void AsyncStackTrace::setSuspendedTaskId(void* task) {
- m_suspendedTaskId = task;
-}
-
-void* AsyncStackTrace::suspendedTaskId() const { return m_suspendedTaskId; }
-
uintptr_t AsyncStackTrace::store(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace> stack) {
if (stack->m_id) return stack->m_id;
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index ec8ee90737..221700a195 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -51,9 +51,8 @@ class StackFrame {
class V8StackTraceImpl : public V8StackTrace {
public:
- static void setCaptureStackTraceForUncaughtExceptions(v8::Isolate*,
- bool capture);
- static int maxCallStackSizeToCapture;
+ static constexpr int kDefaultMaxCallStackSizeToCapture = 200;
+
static std::unique_ptr<V8StackTraceImpl> create(V8Debugger*,
v8::Local<v8::StackTrace>,
int maxStackSize);
@@ -117,7 +116,6 @@ class AsyncStackTrace {
AsyncStackTrace& operator=(const AsyncStackTrace&) = delete;
static std::shared_ptr<AsyncStackTrace> capture(V8Debugger*,
const String16& description,
- int maxStackSize,
bool skipTopFrame = false);
static uintptr_t store(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace> stack);
@@ -125,16 +123,6 @@ class AsyncStackTrace {
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObject(
V8Debugger* debugger, int maxAsyncDepth) const;
- // If async stack has suspended task id, it means that at moment when we
- // capture current stack trace we suspended corresponded asynchronous
- // execution flow and it is possible to request pause for a momemnt when
- // that flow is resumed.
- // E.g. every time when we suspend async function we mark corresponded async
- // stack as suspended and every time when this function is resumed we remove
- // suspendedTaskId.
- void setSuspendedTaskId(void* task);
- void* suspendedTaskId() const;
-
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
bool isEmpty() const;
@@ -151,7 +139,6 @@ class AsyncStackTrace {
const V8StackTraceId& externalParent);
uintptr_t m_id;
- void* m_suspendedTaskId;
String16 m_description;
std::vector<std::shared_ptr<StackFrame>> m_frames;
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 3001a56356..62514cdcc1 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -152,7 +152,7 @@ Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
std::unique_ptr<protocol::Value>* result) {
if (value->IsUndefined()) return Response::Success();
-#if defined(V8_USE_ADDRESS_SANITIZER) && V8_OS_MACOSX
+#if defined(V8_USE_ADDRESS_SANITIZER) && V8_OS_DARWIN
// For whatever reason, ASan on MacOS has bigger stack frames.
static const int kMaxDepth = 900;
#else
@@ -893,7 +893,8 @@ void getPrivatePropertiesForPreview(
int* nameLimit, bool* overflow,
protocol::Array<PropertyPreview>* privateProperties) {
std::vector<PrivatePropertyMirror> mirrors =
- ValueMirror::getPrivateProperties(context, object);
+ ValueMirror::getPrivateProperties(context, object,
+ /* accessPropertiesOnly */ false);
for (auto& mirror : mirrors) {
std::unique_ptr<PropertyPreview> propertyPreview;
if (mirror.value) {
@@ -1429,7 +1430,8 @@ void ValueMirror::getInternalProperties(
// static
std::vector<PrivatePropertyMirror> ValueMirror::getPrivateProperties(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ bool accessorPropertiesOnly) {
std::vector<PrivatePropertyMirror> mirrors;
v8::Isolate* isolate = context->GetIsolate();
v8::MicrotasksScope microtasksScope(isolate,
@@ -1462,6 +1464,8 @@ std::vector<PrivatePropertyMirror> ValueMirror::getPrivateProperties(
if (!setter->IsNull()) {
setterMirror = ValueMirror::create(context, setter);
}
+ } else if (accessorPropertiesOnly) {
+ continue;
} else {
valueMirror = ValueMirror::create(context, value);
}
diff --git a/deps/v8/src/inspector/value-mirror.h b/deps/v8/src/inspector/value-mirror.h
index 721695e74d..b487d51b7d 100644
--- a/deps/v8/src/inspector/value-mirror.h
+++ b/deps/v8/src/inspector/value-mirror.h
@@ -81,7 +81,8 @@ class ValueMirror {
v8::Local<v8::Context> context, v8::Local<v8::Object> object,
std::vector<InternalPropertyMirror>* mirrors);
static std::vector<PrivatePropertyMirror> getPrivateProperties(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object);
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ bool accessorPropertiesOnly);
};
protocol::Response toProtocolValue(v8::Local<v8::Context> context,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index b71c8db77d..2b12287539 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -69,11 +69,11 @@ Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
// The parameter indices are shifted by 1 (receiver is the
// first entry).
- return Register::FromParameterIndex(parameter_index + 1, parameter_count());
+ return Register::FromParameterIndex(parameter_index + 1);
}
Register BytecodeArrayBuilder::Receiver() const {
- return Register::FromParameterIndex(0, parameter_count());
+ return Register::FromParameterIndex(0);
}
Register BytecodeArrayBuilder::Local(int index) const {
@@ -830,27 +830,27 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- OutputLdaNamedProperty(object, name_index, feedback_slot);
+ OutputGetNamedProperty(object, name_index, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedPropertyFromSuper(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- OutputLdaNamedPropertyFromSuper(object, name_index, feedback_slot);
+ OutputGetNamedPropertyFromSuper(object, name_index, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- OutputLdaKeyedProperty(object, feedback_slot);
+ OutputGetKeyedProperty(object, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
Register object, int feedback_slot) {
size_t name_index = IteratorSymbolConstantPoolEntry();
- OutputLdaNamedProperty(object, name_index, feedback_slot);
+ OutputGetNamedProperty(object, name_index, feedback_slot);
return *this;
}
@@ -863,14 +863,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAsyncIteratorProperty(
Register object, int feedback_slot) {
size_t name_index = AsyncIteratorSymbolConstantPoolEntry();
- OutputLdaNamedProperty(object, name_index, feedback_slot);
+ OutputGetNamedProperty(object, name_index, feedback_slot);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
- Register object, Register name, DataPropertyInLiteralFlags flags,
+BytecodeArrayBuilder& BytecodeArrayBuilder::DefineKeyedOwnPropertyInLiteral(
+ Register object, Register name, DefineKeyedOwnPropertyInLiteralFlags flags,
int feedback_slot) {
- OutputStaDataPropertyInLiteral(object, name, flags, feedback_slot);
+ OutputDefineKeyedOwnPropertyInLiteral(object, name, flags, feedback_slot);
return *this;
}
@@ -879,54 +879,54 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CollectTypeProfile(int position) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetNamedProperty(
Register object, size_t name_index, int feedback_slot,
LanguageMode language_mode) {
// Ensure that language mode is in sync with the IC slot kind.
DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
FeedbackVector::ToSlot(feedback_slot))),
language_mode);
- OutputStaNamedProperty(object, name_index, feedback_slot);
+ OutputSetNamedProperty(object, name_index, feedback_slot);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetNamedProperty(
Register object, const AstRawString* name, int feedback_slot,
LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
- return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
+ return SetNamedProperty(object, name_index, feedback_slot, language_mode);
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
+BytecodeArrayBuilder& BytecodeArrayBuilder::DefineNamedOwnProperty(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
// Ensure that the store operation is in sync with the IC slot kind.
DCHECK_EQ(
- FeedbackSlotKind::kStoreOwnNamed,
+ FeedbackSlotKind::kDefineNamedOwn,
feedback_vector_spec()->GetKind(FeedbackVector::ToSlot(feedback_slot)));
- OutputStaNamedOwnProperty(object, name_index, feedback_slot);
+ OutputDefineNamedOwnProperty(object, name_index, feedback_slot);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
// Ensure that language mode is in sync with the IC slot kind.
DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
FeedbackVector::ToSlot(feedback_slot))),
language_mode);
- OutputStaKeyedProperty(object, key, feedback_slot);
+ OutputSetKeyedProperty(object, key, feedback_slot);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::DefineKeyedProperty(
+BytecodeArrayBuilder& BytecodeArrayBuilder::DefineKeyedOwnProperty(
Register object, Register key, int feedback_slot) {
// Ensure that the IC uses a strict language mode, as this is the only
// supported mode for this use case.
DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
FeedbackVector::ToSlot(feedback_slot))),
LanguageMode::kStrict);
- OutputStaKeyedPropertyAsDefine(object, key, feedback_slot);
+ OutputDefineKeyedOwnProperty(object, key, feedback_slot);
return *this;
}
@@ -939,14 +939,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreInArrayLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreClassFieldsInitializer(
Register constructor, int feedback_slot) {
size_t name_index = ClassFieldsSymbolConstantPoolEntry();
- return StoreNamedProperty(constructor, name_index, feedback_slot,
- LanguageMode::kStrict);
+ return SetNamedProperty(constructor, name_index, feedback_slot,
+ LanguageMode::kStrict);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadClassFieldsInitializer(
Register constructor, int feedback_slot) {
size_t name_index = ClassFieldsSymbolConstantPoolEntry();
- OutputLdaNamedProperty(constructor, name_index, feedback_slot);
+ OutputGetNamedProperty(constructor, name_index, feedback_slot);
return *this;
}
@@ -1576,7 +1576,7 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
if (reg.is_current_context() || reg.is_function_closure()) {
return true;
} else if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count());
+ int parameter_index = reg.ToParameterIndex();
return parameter_index >= 0 && parameter_index < parameter_count();
} else if (reg.index() < fixed_register_count()) {
return true;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 8e90cd7d0d..ada289edd9 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -156,39 +156,49 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Store properties. Flag for NeedsSetFunctionName() should
// be in the accumulator.
- BytecodeArrayBuilder& StoreDataPropertyInLiteral(
- Register object, Register name, DataPropertyInLiteralFlags flags,
- int feedback_slot);
+ BytecodeArrayBuilder& DefineKeyedOwnPropertyInLiteral(
+ Register object, Register name,
+ DefineKeyedOwnPropertyInLiteralFlags flags, int feedback_slot);
// Collect type information for developer tools. The value for which we
// record the type is stored in the accumulator.
BytecodeArrayBuilder& CollectTypeProfile(int position);
- // Store a property named by a property name. The value to be stored should be
+ // Set a property named by a property name, trigger the setters and
+ // set traps if necessary. The value to be set should be in the
+ // accumulator.
+ BytecodeArrayBuilder& SetNamedProperty(Register object,
+ const AstRawString* name,
+ int feedback_slot,
+ LanguageMode language_mode);
+
+ // Set a property named by a constant from the constant pool,
+ // trigger the setters and set traps if necessary. The value to be
+ // set should be in the accumulator.
+ BytecodeArrayBuilder& SetNamedProperty(Register object,
+ size_t constant_pool_entry,
+ int feedback_slot,
+ LanguageMode language_mode);
+
+ // Define an own property named by a constant from the constant pool,
+ // trigger the defineProperty traps if necessary. The value to be
+ // defined should be in the accumulator.
+ BytecodeArrayBuilder& DefineNamedOwnProperty(Register object,
+ const AstRawString* name,
+ int feedback_slot);
+
+ // Set a property keyed by a value in a register, trigger the setters and
+ // set traps if necessary. The value to be set should be in the
+ // accumulator.
+ BytecodeArrayBuilder& SetKeyedProperty(Register object, Register key,
+ int feedback_slot,
+ LanguageMode language_mode);
+
+ // Define an own property keyed by a value in a register, trigger the
+ // defineProperty traps if necessary. The value to be defined should be
// in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object,
- const AstRawString* name,
- int feedback_slot,
- LanguageMode language_mode);
-
- // Store a property named by a constant from the constant pool. The value to
- // be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object,
- size_t constant_pool_entry,
- int feedback_slot,
- LanguageMode language_mode);
- // Store an own property named by a constant from the constant pool. The
- // value to be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedOwnProperty(Register object,
- const AstRawString* name,
- int feedback_slot);
- // Store a property keyed by a value in a register. The value to be stored
- // should be in the accumulator.
- BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
- int feedback_slot,
- LanguageMode language_mode);
- BytecodeArrayBuilder& DefineKeyedProperty(Register object, Register key,
- int feedback_slot);
+ BytecodeArrayBuilder& DefineKeyedOwnProperty(Register object, Register key,
+ int feedback_slot);
// Store an own element in an array literal. The value to be stored should be
// in the accumulator.
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index ad8451d00d..9e99f9cc57 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -53,14 +53,6 @@ void BytecodeArrayIterator::ApplyDebugBreak() {
*cursor = interpreter::Bytecodes::ToByte(debugbreak);
}
-int BytecodeArrayIterator::current_bytecode_size() const {
- return prefix_size_ + current_bytecode_size_without_prefix();
-}
-
-int BytecodeArrayIterator::current_bytecode_size_without_prefix() const {
- return Bytecodes::Size(current_bytecode(), current_operand_scale());
-}
-
uint32_t BytecodeArrayIterator::GetUnsignedOperand(
int operand_index, OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
@@ -130,15 +122,14 @@ FeedbackSlot BytecodeArrayIterator::GetSlotOperand(int operand_index) const {
}
Register BytecodeArrayIterator::GetReceiver() const {
- return Register::FromParameterIndex(0, bytecode_array()->parameter_count());
+ return Register::FromParameterIndex(0);
}
Register BytecodeArrayIterator::GetParameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
// The parameter indices are shifted by 1 (receiver is the
// first entry).
- return Register::FromParameterIndex(parameter_index + 1,
- bytecode_array()->parameter_count());
+ return Register::FromParameterIndex(parameter_index + 1);
}
Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
@@ -275,8 +266,7 @@ int BytecodeArrayIterator::GetAbsoluteOffset(int relative_offset) const {
}
std::ostream& BytecodeArrayIterator::PrintTo(std::ostream& os) const {
- return BytecodeDecoder::Decode(os, cursor_ - prefix_size_,
- bytecode_array()->parameter_count());
+ return BytecodeDecoder::Decode(os, cursor_ - prefix_size_);
}
void BytecodeArrayIterator::UpdatePointers() {
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 5e93cbccb8..145f85b6ef 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
inline void Advance() {
- cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
+ cursor_ += current_bytecode_size_without_prefix();
UpdateOperandScale();
}
void SetOffset(int offset);
@@ -92,11 +92,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
return current_bytecode;
}
- int current_bytecode_size() const;
- int current_bytecode_size_without_prefix() const;
+ int current_bytecode_size() const {
+ return prefix_size_ + current_bytecode_size_without_prefix();
+ }
+ int current_bytecode_size_without_prefix() const {
+ return Bytecodes::Size(current_bytecode(), current_operand_scale());
+ }
int current_offset() const {
return static_cast<int>(cursor_ - start_ - prefix_size_);
}
+ int next_offset() const { return current_offset() + current_bytecode_size(); }
OperandScale current_operand_scale() const { return operand_scale_; }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index c73a0d2e9e..5343edcc73 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -20,7 +20,7 @@ BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
void BytecodeArrayRandomIterator::Initialize() {
// Run forwards through the bytecode array to determine the offset of each
// bytecode.
- while (current_offset() < bytecode_array()->length()) {
+ while (!done()) {
offsets_.push_back(current_offset());
Advance();
}
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 6f0ca2cfdd..ce3014e6b6 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
int current_index() const { return current_index_; }
- size_t size() const { return offsets_.size(); }
+ int size() const { return static_cast<int>(offsets_.size()); }
void GoToIndex(int index) {
current_index_ = index;
@@ -60,8 +60,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
UpdateOffsetFromIndex();
}
void GoToEnd() {
- DCHECK_LT(offsets_.size() - 1, static_cast<size_t>(INT_MAX));
- current_index_ = static_cast<int>(offsets_.size() - 1);
+ current_index_ = size() - 1;
UpdateOffsetFromIndex();
}
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index b7da127253..784514e3db 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -238,6 +238,7 @@ void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
case Bytecode::kReThrow:
case Bytecode::kAbort:
case Bytecode::kJump:
+ case Bytecode::kJumpLoop:
case Bytecode::kJumpConstant:
case Bytecode::kSuspendGenerator:
exit_seen_in_block_ = true;
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index 1811e7874d..f2959b8326 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -93,8 +93,7 @@ const char* NameForNativeContextIndex(uint32_t idx) {
// static
std::ostream& BytecodeDecoder::Decode(std::ostream& os,
- const uint8_t* bytecode_start,
- int parameter_count) {
+ const uint8_t* bytecode_start) {
Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
int prefix_offset = 0;
OperandScale operand_scale = OperandScale::kSingle;
@@ -169,22 +168,22 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
case interpreter::OperandType::kRegOut: {
Register reg =
DecodeRegisterOperand(operand_start, op_type, operand_scale);
- os << reg.ToString(parameter_count);
+ os << reg.ToString();
break;
}
case interpreter::OperandType::kRegOutTriple: {
RegisterList reg_list =
DecodeRegisterListOperand(operand_start, 3, op_type, operand_scale);
- os << reg_list.first_register().ToString(parameter_count) << "-"
- << reg_list.last_register().ToString(parameter_count);
+ os << reg_list.first_register().ToString() << "-"
+ << reg_list.last_register().ToString();
break;
}
case interpreter::OperandType::kRegOutPair:
case interpreter::OperandType::kRegPair: {
RegisterList reg_list =
DecodeRegisterListOperand(operand_start, 2, op_type, operand_scale);
- os << reg_list.first_register().ToString(parameter_count) << "-"
- << reg_list.last_register().ToString(parameter_count);
+ os << reg_list.first_register().ToString() << "-"
+ << reg_list.last_register().ToString();
break;
}
case interpreter::OperandType::kRegOutList:
@@ -200,8 +199,8 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
reg_count_operand, OperandType::kRegCount, operand_scale);
RegisterList reg_list = DecodeRegisterListOperand(
operand_start, count, op_type, operand_scale);
- os << reg_list.first_register().ToString(parameter_count) << "-"
- << reg_list.last_register().ToString(parameter_count);
+ os << reg_list.first_register().ToString() << "-"
+ << reg_list.last_register().ToString();
i++; // Skip kRegCount.
break;
}
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
index 5be682b1f5..1bf93e092c 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.h
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -39,8 +39,7 @@ class V8_EXPORT_PRIVATE BytecodeDecoder final {
OperandScale operand_scale);
// Decode a single bytecode and operands to |os|.
- static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
- int number_of_parameters);
+ static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 4acf248c4d..9c9c72d476 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -942,8 +942,8 @@ class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
enum class SlotKind {
kStoreGlobalSloppy,
kStoreGlobalStrict,
- kStoreNamedStrict,
- kStoreNamedSloppy,
+ kSetNamedStrict,
+ kSetNamedSloppy,
kLoadProperty,
kLoadSuperProperty,
kLoadGlobalNotInsideTypeof,
@@ -1312,13 +1312,14 @@ void BytecodeGenerator::AllocateDeferredConstants(IsolateT* isolate,
}
// Build object literal constant properties
- for (std::pair<ObjectLiteral*, size_t> literal : object_literals_) {
- ObjectLiteral* object_literal = literal.first;
- if (object_literal->properties_count() > 0) {
+ for (std::pair<ObjectLiteralBoilerplateBuilder*, size_t> literal :
+ object_literals_) {
+ ObjectLiteralBoilerplateBuilder* object_literal_builder = literal.first;
+ if (object_literal_builder->properties_count() > 0) {
// If constant properties is an empty fixed array, we've already added it
// to the constant pool when visiting the object literal.
Handle<ObjectBoilerplateDescription> constant_properties =
- object_literal->GetOrBuildBoilerplateDescription(isolate);
+ object_literal_builder->GetOrBuildBoilerplateDescription(isolate);
builder()->SetDeferredConstantPoolEntry(literal.second,
constant_properties);
@@ -1326,10 +1327,11 @@ void BytecodeGenerator::AllocateDeferredConstants(IsolateT* isolate,
}
// Build array literal constant elements
- for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
- ArrayLiteral* array_literal = literal.first;
+ for (std::pair<ArrayLiteralBoilerplateBuilder*, size_t> literal :
+ array_literals_) {
+ ArrayLiteralBoilerplateBuilder* array_literal_builder = literal.first;
Handle<ArrayBoilerplateDescription> constant_elements =
- array_literal->GetOrBuildBoilerplateDescription(isolate);
+ array_literal_builder->GetOrBuildBoilerplateDescription(isolate);
builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
}
@@ -1451,7 +1453,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// The derived constructor case is handled in VisitCallSuper.
if (IsBaseConstructor(function_kind())) {
if (literal->class_scope_has_private_brand()) {
- BuildPrivateBrandInitialization(builder()->Receiver());
+ ClassScope* scope = info()->scope()->outer_scope()->AsClassScope();
+ DCHECK_NOT_NULL(scope->brand());
+ BuildPrivateBrandInitialization(builder()->Receiver(), scope->brand());
}
if (literal->requires_instance_members_initializer()) {
@@ -2765,12 +2769,15 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
->LoadLiteral(ast_string_constants()->name_string())
.StoreAccumulatorInRegister(key);
- DataPropertyInLiteralFlags data_property_flags =
- DataPropertyInLiteralFlag::kNoFlags;
+ DefineKeyedOwnPropertyInLiteralFlags data_property_flags =
+ DefineKeyedOwnPropertyInLiteralFlag::kNoFlags;
FeedbackSlot slot =
- feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
- builder()->LoadAccumulatorWithRegister(name).StoreDataPropertyInLiteral(
- class_constructor, key, data_property_flags, feedback_index(slot));
+ feedback_spec()->AddDefineKeyedOwnPropertyInLiteralICSlot();
+ builder()
+ ->LoadAccumulatorWithRegister(name)
+ .DefineKeyedOwnPropertyInLiteral(class_constructor, key,
+ data_property_flags,
+ feedback_index(slot));
}
RegisterList args = register_allocator()->NewRegisterList(1);
@@ -2844,15 +2851,15 @@ void BytecodeGenerator::BuildClassProperty(ClassLiteral::Property* property) {
VisitForAccumulatorValue(property->value());
if (is_literal_store) {
- FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
- builder()->StoreNamedOwnProperty(
+ FeedbackSlot slot = feedback_spec()->AddDefineNamedOwnICSlot();
+ builder()->DefineNamedOwnProperty(
builder()->Receiver(),
property->key()->AsLiteral()->AsRawPropertyName(),
feedback_index(slot));
} else {
- FeedbackSlot slot = feedback_spec()->AddKeyedDefineOwnICSlot();
- builder()->DefineKeyedProperty(builder()->Receiver(), key,
- feedback_index(slot));
+ FeedbackSlot slot = feedback_spec()->AddDefineKeyedOwnICSlot();
+ builder()->DefineKeyedOwnProperty(builder()->Receiver(), key,
+ feedback_index(slot));
}
}
@@ -2892,18 +2899,33 @@ void BytecodeGenerator::BuildInvalidPropertyAccess(MessageTemplate tmpl,
.Throw();
}
-void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) {
- Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand();
+void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver,
+ Variable* brand) {
+ BuildVariableLoad(brand, HoleCheckMode::kElided);
int depth = execution_context()->ContextChainDepth(brand->scope());
ContextScope* class_context = execution_context()->Previous(depth);
-
- BuildVariableLoad(brand, HoleCheckMode::kElided);
- Register brand_reg = register_allocator()->NewRegister();
- FeedbackSlot slot = feedback_spec()->AddKeyedDefineOwnICSlot();
- builder()
- ->StoreAccumulatorInRegister(brand_reg)
- .LoadAccumulatorWithRegister(class_context->reg())
- .DefineKeyedProperty(receiver, brand_reg, feedback_index(slot));
+ if (class_context) {
+ Register brand_reg = register_allocator()->NewRegister();
+ FeedbackSlot slot = feedback_spec()->AddDefineKeyedOwnICSlot();
+ builder()
+ ->StoreAccumulatorInRegister(brand_reg)
+ .LoadAccumulatorWithRegister(class_context->reg())
+ .DefineKeyedOwnProperty(receiver, brand_reg, feedback_index(slot));
+ } else {
+ // We are in the slow case where super() is called from a nested
+ // arrow function or a eval(), so the class scope context isn't
+ // tracked in a context register in the stack, and we have to
+ // walk the context chain from the runtime to find it.
+ DCHECK_NE(info()->literal()->scope()->outer_scope(), brand->scope());
+ RegisterList brand_args = register_allocator()->NewRegisterList(4);
+ builder()
+ ->StoreAccumulatorInRegister(brand_args[1])
+ .MoveRegister(receiver, brand_args[0])
+ .MoveRegister(execution_context()->reg(), brand_args[2])
+ .LoadLiteral(Smi::FromInt(depth))
+ .StoreAccumulatorInRegister(brand_args[3])
+ .CallRuntime(Runtime::kAddPrivateBrand, brand_args);
+ }
}
void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
@@ -3012,12 +3034,12 @@ void BytecodeGenerator::BuildCreateObjectLiteral(Register literal,
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- expr->InitDepthAndFlags();
+ expr->builder()->InitDepthAndFlags();
// Fast path for the empty object literal which doesn't need an
// AllocationSite.
- if (expr->IsEmptyObjectLiteral()) {
- DCHECK(expr->IsFastCloningSupported());
+ if (expr->builder()->IsEmptyObjectLiteral()) {
+ DCHECK(expr->builder()->IsFastCloningSupported());
builder()->CreateEmptyObjectLiteral();
return;
}
@@ -3032,7 +3054,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
- expr->ComputeFlags(), expr->IsFastCloningSupported());
+ expr->builder()->ComputeFlags(),
+ expr->builder()->IsFastCloningSupported());
Register literal = register_allocator()->NewRegister();
@@ -3056,11 +3079,11 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
size_t entry;
// If constant properties is an empty fixed array, use a cached empty fixed
// array to ensure it's only added to the constant pool once.
- if (expr->properties_count() == 0) {
+ if (expr->builder()->properties_count() == 0) {
entry = builder()->EmptyObjectBoilerplateDescriptionConstantPoolEntry();
} else {
entry = builder()->AllocateDeferredConstantPoolEntry();
- object_literals_.push_back(std::make_pair(expr, entry));
+ object_literals_.push_back(std::make_pair(expr->builder(), entry));
}
BuildCreateObjectLiteral(literal, flags, entry);
}
@@ -3100,13 +3123,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(property->value());
if (key->IsStringLiteral()) {
- FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
- builder()->StoreNamedOwnProperty(literal, key->AsRawPropertyName(),
- feedback_index(slot));
+ FeedbackSlot slot = feedback_spec()->AddDefineNamedOwnICSlot();
+ builder()->DefineNamedOwnProperty(literal, key->AsRawPropertyName(),
+ feedback_index(slot));
} else {
- FeedbackSlot slot = feedback_spec()->AddKeyedDefineOwnICSlot();
- builder()->DefineKeyedProperty(literal, key_reg,
- feedback_index(slot));
+ FeedbackSlot slot = feedback_spec()->AddDefineKeyedOwnICSlot();
+ builder()->DefineKeyedOwnProperty(literal, key_reg,
+ feedback_index(slot));
}
} else {
VisitForEffect(property->value());
@@ -3207,7 +3230,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Static class fields require the name property to be set on
// the class, meaning we can't wait until the
- // StoreDataPropertyInLiteral call later to set the name.
+ // DefineKeyedOwnPropertyInLiteral call later to set the name.
if (property->value()->IsClassLiteral() &&
property->value()->AsClassLiteral()->static_initializer() !=
nullptr) {
@@ -3218,18 +3241,19 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
value = VisitForRegisterValue(property->value());
}
- DataPropertyInLiteralFlags data_property_flags =
- DataPropertyInLiteralFlag::kNoFlags;
+ DefineKeyedOwnPropertyInLiteralFlags data_property_flags =
+ DefineKeyedOwnPropertyInLiteralFlag::kNoFlags;
if (property->NeedsSetFunctionName()) {
- data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+ data_property_flags |=
+ DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName;
}
FeedbackSlot slot =
- feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
+ feedback_spec()->AddDefineKeyedOwnPropertyInLiteralICSlot();
builder()
->LoadAccumulatorWithRegister(value)
- .StoreDataPropertyInLiteral(literal, key, data_property_flags,
- feedback_index(slot));
+ .DefineKeyedOwnPropertyInLiteral(literal, key, data_property_flags,
+ feedback_index(slot));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -3352,7 +3376,7 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
->LoadNamedProperty(array, length, length_load_slot)
.StoreAccumulatorInRegister(index);
}
- } else if (expr != nullptr) {
+ } else {
// There are some elements before the first (if any) spread, and we can
// use a boilerplate when creating the initial array from those elements.
@@ -3360,29 +3384,54 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
// be created during finalization, and will contain all the constant
// elements before the first spread. This also handle the empty array case
// and one-shot optimization.
+
+ ArrayLiteralBoilerplateBuilder* array_literal_builder = nullptr;
+ if (expr != nullptr) {
+ array_literal_builder = expr->builder();
+ } else {
+ DCHECK(!elements->is_empty());
+
+ // get first_spread_index
+ int first_spread_index = -1;
+ for (auto iter = elements->begin(); iter != elements->end(); iter++) {
+ if ((*iter)->IsSpread()) {
+ first_spread_index = static_cast<int>(iter - elements->begin());
+ break;
+ }
+ }
+
+ array_literal_builder = zone()->New<ArrayLiteralBoilerplateBuilder>(
+ elements, first_spread_index);
+ array_literal_builder->InitDepthAndFlags();
+ }
+
+ DCHECK(array_literal_builder != nullptr);
uint8_t flags = CreateArrayLiteralFlags::Encode(
- expr->IsFastCloningSupported(), expr->ComputeFlags());
+ array_literal_builder->IsFastCloningSupported(),
+ array_literal_builder->ComputeFlags());
if (is_empty) {
// Empty array literal fast-path.
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- DCHECK(expr->IsFastCloningSupported());
+ DCHECK(array_literal_builder->IsFastCloningSupported());
builder()->CreateEmptyArrayLiteral(literal_index);
} else {
// Create array literal from boilerplate.
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- array_literals_.push_back(std::make_pair(expr, entry));
+ array_literals_.push_back(std::make_pair(array_literal_builder, entry));
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
builder()->CreateArrayLiteral(entry, literal_index, flags);
}
builder()->StoreAccumulatorInRegister(array);
+ ZonePtrList<Expression>::const_iterator first_spread_or_end =
+ array_literal_builder->first_spread_index() >= 0
+ ? current + array_literal_builder->first_spread_index()
+ : end;
+
// Insert the missing non-constant elements, up until the first spread
// index, into the initial array (the remaining elements will be inserted
// below).
DCHECK_EQ(current, elements->begin());
- ZonePtrList<Expression>::const_iterator first_spread_or_end =
- expr->first_spread_index() >= 0 ? current + expr->first_spread_index()
- : end;
int array_index = 0;
for (; current != first_spread_or_end; ++current, array_index++) {
Expression* subexpr = *current;
@@ -3405,17 +3454,6 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
->LoadLiteral(Smi::FromInt(array_index))
.StoreAccumulatorInRegister(index);
}
- } else {
- // TODO(v8:11582): Support allocating boilerplates here.
-
- // In other cases, we prepare an empty array to be filled in below.
- DCHECK(!elements->is_empty());
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- builder()
- ->CreateEmptyArrayLiteral(literal_index)
- .StoreAccumulatorInRegister(array);
- // Prepare the index for the first element.
- builder()->LoadLiteral(Smi::FromInt(0)).StoreAccumulatorInRegister(index);
}
// Now build insertions for the remaining elements from current to end.
@@ -3461,8 +3499,8 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
->LoadAccumulatorWithRegister(index)
.UnaryOperation(Token::INC, feedback_index(index_slot.Get()))
.StoreAccumulatorInRegister(index)
- .StoreNamedProperty(array, length, feedback_index(length_slot.Get()),
- LanguageMode::kStrict);
+ .SetNamedProperty(array, length, feedback_index(length_slot.Get()),
+ LanguageMode::kStrict);
}
}
@@ -3470,7 +3508,7 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- expr->InitDepthAndFlags();
+ expr->builder()->InitDepthAndFlags();
BuildCreateArrayLiteral(expr->values(), expr);
}
@@ -3627,12 +3665,10 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
} else {
DCHECK(IsAsyncFunction(info()->literal()->kind()) ||
IsAsyncModule(info()->literal()->kind()));
- RegisterList args = register_allocator()->NewRegisterList(3);
+ RegisterList args = register_allocator()->NewRegisterList(2);
builder()
->MoveRegister(generator_object(), args[0]) // generator
.StoreAccumulatorInRegister(args[1]) // value
- .LoadBoolean(info()->literal()->CanSuspend())
- .StoreAccumulatorInRegister(args[2]) // can_suspend
.CallRuntime(Runtime::kInlineAsyncFunctionResolve, args);
}
@@ -3810,9 +3846,9 @@ void BytecodeGenerator::BuildLoadNamedProperty(const Expression* object_expr,
builder()->LoadNamedProperty(object, name, feedback_index(slot));
}
-void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
- Register object,
- const AstRawString* name) {
+void BytecodeGenerator::BuildSetNamedProperty(const Expression* object_expr,
+ Register object,
+ const AstRawString* name) {
Register value;
if (!execution_result()->IsEffect()) {
value = register_allocator()->NewRegister();
@@ -3820,8 +3856,8 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
}
FeedbackSlot slot = GetCachedStoreICSlot(object_expr, name);
- builder()->StoreNamedProperty(object, name, feedback_index(slot),
- language_mode());
+ builder()->SetNamedProperty(object, name, feedback_index(slot),
+ language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
@@ -4171,17 +4207,19 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
->LoadNamedProperty(next_result,
ast_string_constants()->done_string(),
feedback_index(next_done_load_slot))
- .JumpIfTrue(ToBooleanMode::kConvertToBoolean, is_done.New())
- .LoadNamedProperty(next_result,
- ast_string_constants()->value_string(),
- feedback_index(next_value_load_slot))
- .StoreAccumulatorInRegister(next_result)
- .LoadFalse()
- .StoreAccumulatorInRegister(done)
- .LoadAccumulatorWithRegister(next_result);
+ .JumpIfTrue(ToBooleanMode::kConvertToBoolean, is_done.New());
// Only do the assignment if this is not a hole (i.e. 'elided').
if (!target->IsTheHoleLiteral()) {
+ builder()
+ ->LoadNamedProperty(next_result,
+ ast_string_constants()->value_string(),
+ feedback_index(next_value_load_slot))
+ .StoreAccumulatorInRegister(next_result)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(done)
+ .LoadAccumulatorWithRegister(next_result);
+
// [<pattern> = <init>] = <value>
// becomes (roughly)
// temp = <value>.next();
@@ -4202,6 +4240,7 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
BuildAssignment(lhs_data, op, lookup_hoisting_mode);
} else {
+ builder()->LoadFalse().StoreAccumulatorInRegister(done);
DCHECK_EQ(lhs_data.assign_type(), NON_PROPERTY);
is_done.Bind(builder());
}
@@ -4282,7 +4321,8 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
// rest_runtime_callargs[2] = temp1;
// a() = value[temp1];
//
-// b.c = %CopyDataPropertiesWithExcludedProperties.call(rest_runtime_callargs);
+// b.c =
+// %CopyDataPropertiesWithExcludedPropertiesOnStack.call(rest_runtime_callargs);
void BytecodeGenerator::BuildDestructuringObjectAssignment(
ObjectLiteral* pattern, Token::Value op,
LookupHoistingMode lookup_hoisting_mode) {
@@ -4291,7 +4331,7 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
// Store the assignment value in a register.
Register value;
RegisterList rest_runtime_callargs;
- if (pattern->has_rest_property()) {
+ if (pattern->builder()->has_rest_property()) {
rest_runtime_callargs =
register_allocator()->NewRegisterList(pattern->properties()->length());
value = rest_runtime_callargs[0];
@@ -4355,8 +4395,8 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
if (pattern_key->IsPropertyName()) {
value_name = pattern_key->AsLiteral()->AsRawPropertyName();
}
- if (pattern->has_rest_property() || !value_name) {
- if (pattern->has_rest_property()) {
+ if (pattern->builder()->has_rest_property() || !value_name) {
+ if (pattern->builder()->has_rest_property()) {
value_key = rest_runtime_callargs[i + 1];
} else {
value_key = register_allocator()->NewRegister();
@@ -4373,9 +4413,9 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
} else {
// We only need the key for non-computed properties when it is numeric
// or is being saved for the rest_runtime_callargs.
- DCHECK(
- pattern_key->IsNumberLiteral() ||
- (pattern->has_rest_property() && pattern_key->IsPropertyName()));
+ DCHECK(pattern_key->IsNumberLiteral() ||
+ (pattern->builder()->has_rest_property() &&
+ pattern_key->IsPropertyName()));
VisitForRegisterValue(pattern_key, value_key);
}
}
@@ -4388,8 +4428,9 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
DCHECK_EQ(i, pattern->properties()->length() - 1);
DCHECK(!value_key.is_valid());
DCHECK_NULL(value_name);
- builder()->CallRuntime(Runtime::kCopyDataPropertiesWithExcludedProperties,
- rest_runtime_callargs);
+ builder()->CallRuntime(
+ Runtime::kInlineCopyDataPropertiesWithExcludedPropertiesOnStack,
+ rest_runtime_callargs);
} else if (value_name) {
builder()->LoadNamedProperty(
value, value_name, feedback_index(feedback_spec()->AddLoadICSlot()));
@@ -4445,8 +4486,8 @@ void BytecodeGenerator::BuildAssignment(
break;
}
case NAMED_PROPERTY: {
- BuildStoreNamedProperty(lhs_data.object_expr(), lhs_data.object(),
- lhs_data.name());
+ BuildSetNamedProperty(lhs_data.object_expr(), lhs_data.object(),
+ lhs_data.name());
break;
}
case KEYED_PROPERTY: {
@@ -4456,8 +4497,8 @@ void BytecodeGenerator::BuildAssignment(
value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
}
- builder()->StoreKeyedProperty(lhs_data.object(), lhs_data.key(),
- feedback_index(slot), language_mode());
+ builder()->SetKeyedProperty(lhs_data.object(), lhs_data.key(),
+ feedback_index(slot), language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -5217,6 +5258,7 @@ void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
builder()->CompareReference(object).JumpIfTrue(
ToBooleanMode::kAlreadyBoolean, &return_check);
const AstRawString* name = scope->class_variable()->raw_name();
+ RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(2);
builder()
->LoadLiteral(
@@ -5634,8 +5676,25 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
Register instance = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(instance);
- if (info()->literal()->class_scope_has_private_brand()) {
- BuildPrivateBrandInitialization(instance);
+ // The constructor scope always needs ScopeInfo, so we are certain that
+ // the first constructor scope found in the outer scope chain is the
+ // scope that we are looking for for this super() call.
+ // Note that this doesn't necessarily mean that the constructor needs
+ // a context, if it doesn't this would get handled specially in
+ // BuildPrivateBrandInitialization().
+ DeclarationScope* constructor_scope = info()->scope()->GetConstructorScope();
+
+ // We can rely on the class_scope_has_private_brand bit to tell if the
+ // constructor needs private brand initialization, and if that's
+ // the case we are certain that its outer class scope requires a context to
+ // keep the brand variable, so we can just get the brand variable
+ // from the outer scope.
+ if (constructor_scope->class_scope_has_private_brand()) {
+ DCHECK(constructor_scope->outer_scope()->is_class_scope());
+ ClassScope* class_scope = constructor_scope->outer_scope()->AsClassScope();
+ DCHECK_NOT_NULL(class_scope->brand());
+ Variable* brand = class_scope->brand();
+ BuildPrivateBrandInitialization(instance, brand);
}
// The derived constructor has the correct bit set always, so we
@@ -5991,8 +6050,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
}
- builder()->StoreNamedProperty(object, name, feedback_index(slot),
- language_mode());
+ builder()->SetNamedProperty(object, name, feedback_index(slot),
+ language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -6005,8 +6064,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
}
- builder()->StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->SetKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -7237,9 +7296,8 @@ FeedbackSlot BytecodeGenerator::GetCachedStoreICSlot(const Expression* expr,
return feedback_spec()->AddStoreICSlot(language_mode());
}
FeedbackSlotCache::SlotKind slot_kind =
- is_strict(language_mode())
- ? FeedbackSlotCache::SlotKind::kStoreNamedStrict
- : FeedbackSlotCache::SlotKind::kStoreNamedSloppy;
+ is_strict(language_mode()) ? FeedbackSlotCache::SlotKind::kSetNamedStrict
+ : FeedbackSlotCache::SlotKind::kSetNamedSloppy;
if (!expr->IsVariableProxy()) {
return feedback_spec()->AddStoreICSlot(language_mode());
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 1c11cbbb50..8fae6c077a 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -248,8 +248,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildLoadNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
- void BuildStoreNamedProperty(const Expression* object_expr, Register object,
- const AstRawString* name);
+ void BuildSetNamedProperty(const Expression* object_expr, Register object,
+ const AstRawString* name);
void BuildStoreGlobal(Variable* variable);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
@@ -333,7 +333,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
void VisitThisFunctionVariable(Variable* variable);
- void BuildPrivateBrandInitialization(Register receiver);
+ void BuildPrivateBrandInitialization(Register receiver, Variable* brand);
void BuildInstanceMemberInitialization(Register constructor,
Register instance);
void BuildGeneratorObjectVariableInitialization();
@@ -519,8 +519,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
native_function_literals_;
- ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
- ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
+ ZoneVector<std::pair<ObjectLiteralBoilerplateBuilder*, size_t>>
+ object_literals_;
+ ZoneVector<std::pair<ArrayLiteralBoilerplateBuilder*, size_t>>
+ array_literals_;
ZoneVector<std::pair<ClassLiteral*, size_t>> class_literals_;
ZoneVector<std::pair<GetTemplateObject*, size_t>> template_objects_;
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 3d9c9e1dac..f8761081d5 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -235,7 +235,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK_NE(parameter_count, 0);
int first_slot_index = parameter_count - 1;
register_info_table_offset_ =
- -Register::FromParameterIndex(first_slot_index, parameter_count).index();
+ -Register::FromParameterIndex(first_slot_index).index();
// Initialize register map for parameters, locals, and the
// accumulator.
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 5266f693d2..cb8fc81b70 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -37,15 +37,14 @@ static const int kArgumentCountRegisterIndex =
InterpreterFrameConstants::kArgCOffset) /
kSystemPointerSize;
-Register Register::FromParameterIndex(int index, int parameter_count) {
+Register Register::FromParameterIndex(int index) {
DCHECK_GE(index, 0);
- DCHECK_LT(index, parameter_count);
int register_index = kFirstParamRegisterIndex - index;
DCHECK_LT(register_index, 0);
return Register(register_index);
}
-int Register::ToParameterIndex(int parameter_count) const {
+int Register::ToParameterIndex() const {
DCHECK(is_parameter());
return kFirstParamRegisterIndex - index();
}
@@ -120,13 +119,13 @@ bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
return true;
}
-std::string Register::ToString(int parameter_count) const {
+std::string Register::ToString() const {
if (is_current_context()) {
return std::string("<context>");
} else if (is_function_closure()) {
return std::string("<closure>");
} else if (is_parameter()) {
- int parameter_index = ToParameterIndex(parameter_count);
+ int parameter_index = ToParameterIndex();
if (parameter_index == 0) {
return std::string("<this>");
} else {
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 270b3a4a3d..7fd47b681c 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -26,8 +26,8 @@ class V8_EXPORT_PRIVATE Register final {
bool is_parameter() const { return index() < 0; }
bool is_valid() const { return index_ != kInvalidIndex; }
- static Register FromParameterIndex(int index, int parameter_count);
- int ToParameterIndex(int parameter_count) const;
+ static Register FromParameterIndex(int index);
+ int ToParameterIndex() const;
// Returns an invalid register.
static Register invalid_value() { return Register(); }
@@ -65,7 +65,7 @@ class V8_EXPORT_PRIVATE Register final {
return Register(kRegisterFileStartOffset - operand);
}
- static Register FromShortStar(Bytecode bytecode) {
+ static constexpr Register FromShortStar(Bytecode bytecode) {
DCHECK(Bytecodes::IsShortStar(bytecode));
return Register(static_cast<int>(Bytecode::kStar0) -
static_cast<int>(bytecode));
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Register final {
Register reg4 = invalid_value(),
Register reg5 = invalid_value());
- std::string ToString(int parameter_count) const;
+ std::string ToString() const;
bool operator==(const Register& other) const {
return index() == other.index();
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 9c8f4dde95..d8ee9aa276 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -283,8 +283,8 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kLdaConstant:
case Bytecode::kLdaUndefined:
case Bytecode::kLdaGlobal:
- case Bytecode::kLdaNamedProperty:
- case Bytecode::kLdaKeyedProperty:
+ case Bytecode::kGetNamedProperty:
+ case Bytecode::kGetKeyedProperty:
case Bytecode::kLdaContextSlot:
case Bytecode::kLdaImmutableContextSlot:
case Bytecode::kLdaCurrentContextSlot:
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index f01f4f412c..a110edae01 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -8,7 +8,6 @@
#include <cstdint>
#include <iosfwd>
#include <string>
-#include <vector>
#include "src/common/globals.h"
#include "src/interpreter/bytecode-operands.h"
@@ -131,11 +130,11 @@ namespace interpreter {
OperandType::kIdx, OperandType::kFlag8) \
\
/* Property loads (LoadIC) operations */ \
- V(LdaNamedProperty, ImplicitRegisterUse::kWriteAccumulator, \
+ V(GetNamedProperty, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(LdaNamedPropertyFromSuper, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(GetNamedPropertyFromSuper, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(LdaKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(GetKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx) \
\
/* Operations on module variables */ \
@@ -145,17 +144,17 @@ namespace interpreter {
OperandType::kImm, OperandType::kUImm) \
\
/* Propery stores (StoreIC) operations */ \
- V(StaNamedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(SetNamedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(StaNamedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(DefineNamedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(StaKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(SetKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(StaKeyedPropertyAsDefine, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(DefineKeyedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
V(StaInArrayLiteral, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(StaDataPropertyInLiteral, ImplicitRegisterUse::kReadAccumulator, \
+ V(DefineKeyedOwnPropertyInLiteral, ImplicitRegisterUse::kReadAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kFlag8, \
OperandType::kIdx) \
V(CollectTypeProfile, ImplicitRegisterUse::kReadAccumulator, \
@@ -538,6 +537,10 @@ namespace interpreter {
V(Return) \
V(SuspendGenerator)
+#define UNCONDITIONAL_THROW_BYTECODE_LIST(V) \
+ V(Throw) \
+ V(ReThrow)
+
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -802,6 +805,13 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
#undef OR_BYTECODE
}
+ // Returns true if the bytecode unconditionally throws.
+ static constexpr bool UnconditionallyThrows(Bytecode bytecode) {
+#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
+ return false UNCONDITIONAL_THROW_BYTECODE_LIST(OR_BYTECODE);
+#undef OR_BYTECODE
+ }
+
// Returns the number of operands expected by |bytecode|.
static int NumberOfOperands(Bytecode bytecode) {
DCHECK_LE(bytecode, Bytecode::kLast);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index fe635115f6..e06053b628 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -713,20 +713,14 @@ void InterpreterAssembler::CallJSAndDispatch(
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
TNode<Word32T> args_count = args.reg_count();
- const bool receiver_included =
- receiver_mode != ConvertReceiverMode::kNullOrUndefined;
- if (kJSArgcIncludesReceiver && !receiver_included) {
- // Add receiver if we want to include it in argc and it isn't already.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Add receiver. It is not included in args as it is implicit.
args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
- } else if (!kJSArgcIncludesReceiver && receiver_included) {
- // Subtract receiver if we don't want to include it, but it is included.
- TNode<Int32T> receiver_count = Int32Constant(1);
- args_count = Int32Sub(args_count, receiver_count);
}
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
- TNode<Code> code_target = HeapConstant(callable.code());
+ TNode<CodeT> code_target = HeapConstant(callable.code());
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
@@ -747,7 +741,7 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::Call(isolate());
- TNode<Code> code_target = HeapConstant(callable.code());
+ TNode<CodeT> code_target = HeapConstant(callable.code());
arg_count = JSParameterCount(arg_count);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@@ -792,13 +786,9 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
- TNode<Code> code_target = HeapConstant(callable.code());
+ TNode<CodeT> code_target = HeapConstant(callable.code());
TNode<Word32T> args_count = args.reg_count();
- if (!kJSArgcIncludesReceiver) {
- TNode<Int32T> receiver_count = Int32Constant(1);
- args_count = Int32Sub(args_count, receiver_count);
- }
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
function);
@@ -981,7 +971,7 @@ TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
- TNode<Code> code_target = HeapConstant(callable.code());
+ TNode<CodeT> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
@@ -1038,11 +1028,10 @@ void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
BIND(&interrupt_check);
// JumpLoop should do a stack check as part of the interrupt.
- CallRuntime(
- bytecode() == Bytecode::kJumpLoop
- ? Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode
- : Runtime::kBytecodeBudgetInterruptFromBytecode,
- GetContext(), function);
+ CallRuntime(bytecode() == Bytecode::kJumpLoop
+ ? Runtime::kBytecodeBudgetInterruptWithStackCheck
+ : Runtime::kBytecodeBudgetInterrupt,
+ GetContext(), function);
Goto(&done);
BIND(&ok);
@@ -1462,7 +1451,7 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
Label loop(this, &var_index), done_loop(this);
TNode<IntPtrT> reg_base =
- IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
+ IntPtrConstant(Register::FromParameterIndex(0).ToOperand() + 1);
Goto(&loop);
BIND(&loop);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index b32804a6fd..47d0060700 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -515,11 +515,11 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
}
}
-// LdaNamedProperty <object> <name_index> <slot>
+// GetNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
-IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
+IGNITION_HANDLER(GetNamedProperty, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
// Load receiver.
@@ -550,12 +550,12 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
}
}
-// LdaNamedPropertyFromSuper <receiver> <name_index> <slot>
+// GetNamedPropertyFromSuper <receiver> <name_index> <slot>
//
// Calls the LoadSuperIC at FeedBackVector slot <slot> for <receiver>, home
// object's prototype (home object in the accumulator) and the name at constant
// pool entry <name_index>.
-IGNITION_HANDLER(LdaNamedPropertyFromSuper, InterpreterAssembler) {
+IGNITION_HANDLER(GetNamedPropertyFromSuper, InterpreterAssembler) {
TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
TNode<HeapObject> home_object = CAST(GetAccumulator());
TNode<Object> home_object_prototype = LoadMapPrototype(LoadMap(home_object));
@@ -571,11 +571,11 @@ IGNITION_HANDLER(LdaNamedPropertyFromSuper, InterpreterAssembler) {
Dispatch();
}
-// LdaKeyedProperty <object> <slot>
+// GetKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
// in the accumulator.
-IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
+IGNITION_HANDLER(GetKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = GetAccumulator();
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
@@ -589,14 +589,14 @@ IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Dispatch();
}
-class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
+class InterpreterSetNamedPropertyAssembler : public InterpreterAssembler {
public:
- InterpreterStoreNamedPropertyAssembler(CodeAssemblerState* state,
- Bytecode bytecode,
- OperandScale operand_scale)
+ InterpreterSetNamedPropertyAssembler(CodeAssemblerState* state,
+ Bytecode bytecode,
+ OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
+ void SetNamedProperty(Callable ic, NamedPropertyType property_type) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
@@ -616,31 +616,37 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
}
};
-// StaNamedProperty <object> <name_index> <slot>
+// SetNamedProperty <object> <name_index> <slot>
//
// Calls the StoreIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
+IGNITION_HANDLER(SetNamedProperty, InterpreterSetNamedPropertyAssembler) {
+ // StoreIC is currently a base class for multiple property store operations
+ // and contains mixed logic for named and keyed, set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can be
+ // called here.
Callable ic = Builtins::CallableFor(isolate(), Builtin::kStoreIC);
- StaNamedProperty(ic, NamedPropertyType::kNotOwn);
+ SetNamedProperty(ic, NamedPropertyType::kNotOwn);
}
-// StaNamedOwnProperty <object> <name_index> <slot>
+// DefineNamedOwnProperty <object> <name_index> <slot>
//
-// Calls the StoreOwnIC at FeedBackVector slot <slot> for <object> and
+// Calls the DefineNamedOwnIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
- Callable ic = Builtins::CallableFor(isolate(), Builtin::kStoreOwnIC);
- StaNamedProperty(ic, NamedPropertyType::kOwn);
+IGNITION_HANDLER(DefineNamedOwnProperty, InterpreterSetNamedPropertyAssembler) {
+ Callable ic = Builtins::CallableFor(isolate(), Builtin::kDefineNamedOwnIC);
+ SetNamedProperty(ic, NamedPropertyType::kOwn);
}
-// StaKeyedProperty <object> <key> <slot>
+// SetKeyedProperty <object> <key> <slot>
//
// Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and
-// the key <key> with the value in the accumulator.
-IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
+// the key <key> with the value in the accumulator. This could trigger
+// the setter and the set traps if necessary.
+IGNITION_HANDLER(SetKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
@@ -648,6 +654,11 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
+ // KeyedStoreIC is currently a base class for multiple keyed property store
+ // operations and contains mixed logic for set and define operations,
+ // the paths are controlled by feedback.
+ // TODO(v8:12548): refactor SetKeyedIC as a subclass of KeyedStoreIC, which
+ // can be called here.
TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
@@ -659,14 +670,15 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Dispatch();
}
-// StaKeyedPropertyAsDefine <object> <key> <slot>
+// DefineKeyedOwnProperty <object> <key> <slot>
//
-// Calls the KeyedDefineOwnIC at FeedbackVector slot <slot> for <object> and
+// Calls the DefineKeyedOwnIC at FeedbackVector slot <slot> for <object> and
// the key <key> with the value in the accumulator.
//
-// This is similar to StaKeyedProperty, but avoids checking the prototype chain,
-// and in the case of private names, throws if the private name already exists.
-IGNITION_HANDLER(StaKeyedPropertyAsDefine, InterpreterAssembler) {
+// This is similar to SetKeyedProperty, but avoids checking the prototype
+// chain, and in the case of private names, throws if the private name already
+// exists.
+IGNITION_HANDLER(DefineKeyedOwnProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
@@ -675,7 +687,7 @@ IGNITION_HANDLER(StaKeyedPropertyAsDefine, InterpreterAssembler) {
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kKeyedDefineOwnIC, context, object, name,
+ var_result = CallBuiltin(Builtin::kDefineKeyedOwnIC, context, object, name,
value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
@@ -710,15 +722,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
Dispatch();
}
-// StaDataPropertyInLiteral <object> <name> <flags> <slot>
+// DefineKeyedOwnPropertyInLiteral <object> <name> <flags> <slot>
//
// Define a property <name> with value from the accumulator in <object>.
// Property attributes and whether set_function_name are stored in
-// DataPropertyInLiteralFlags <flags>.
+// DefineKeyedOwnPropertyInLiteralFlags <flags>.
//
// This definition is not observable and is used only for definitions
// in object or class literals.
-IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
+IGNITION_HANDLER(DefineKeyedOwnPropertyInLiteral, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
@@ -729,7 +741,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
+ CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral, context, object, name,
value, flags, feedback_vector, slot);
Dispatch();
}
@@ -995,13 +1007,12 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TVARIABLE(Smi, feedback);
-
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
- bitwise_op, left, right, [=] { return context; }, &feedback, false);
+ bitwise_op, left, right, [=] { return context; }, slot_index,
+ [=] { return maybe_feedback_vector; },
+ UpdateFeedbackMode::kOptionalFeedback, false);
- MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1013,13 +1024,12 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Smi, feedback);
-
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
- bitwise_op, left, right, [=] { return context; }, &feedback, true);
+ bitwise_op, left, right, [=] { return context; }, slot_index,
+ [=] { return maybe_feedback_vector; },
+ UpdateFeedbackMode::kOptionalFeedback, true);
- MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -3080,8 +3090,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{},
- CodeKind::BYTECODE_HANDLER, debug_name,
- builtin);
+ CodeKind::BYTECODE_HANDLER, debug_name, builtin);
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index 51d7acb785..9d6a861d8c 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -23,10 +23,6 @@ extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate,
Builtin builtin,
const AssemblerOptions& options);
-extern Handle<Code> GenerateDeserializeLazyHandler(
- Isolate* isolate, OperandScale operand_scale, Builtin builtin,
- const AssemblerOptions& options);
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index d11fd72c97..abb574f9e1 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -139,6 +139,21 @@ TNode<Object> IntrinsicsGenerator::CopyDataProperties(
arg_count);
}
+TNode<Object>
+IntrinsicsGenerator::CopyDataPropertiesWithExcludedPropertiesOnStack(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ TNode<IntPtrT> offset = __ TimesSystemPointerSize(__ IntPtrConstant(1));
+ auto base = __ Signed(__ IntPtrSub(args.base_reg_location(), offset));
+ Callable callable = Builtins::CallableFor(
+ isolate_, Builtin::kCopyDataPropertiesWithExcludedPropertiesOnStack);
+ TNode<IntPtrT> excluded_property_count = __ IntPtrSub(
+ __ ChangeInt32ToIntPtr(args.reg_count()), __ IntPtrConstant(1));
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0),
+ excluded_property_count, base);
+}
+
TNode<Object> IntrinsicsGenerator::CreateIterResultObject(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
int arg_count) {
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 6b82d33154..89087346ba 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -17,8 +17,8 @@ namespace interpreter {
V(AsyncFunctionAwaitCaught, async_function_await_caught, 2) \
V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 2) \
V(AsyncFunctionEnter, async_function_enter, 2) \
- V(AsyncFunctionReject, async_function_reject, 3) \
- V(AsyncFunctionResolve, async_function_resolve, 3) \
+ V(AsyncFunctionReject, async_function_reject, 2) \
+ V(AsyncFunctionResolve, async_function_resolve, 2) \
V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \
V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \
V(AsyncGeneratorReject, async_generator_reject, 2) \
@@ -29,6 +29,8 @@ namespace interpreter {
V(GeneratorClose, generator_close, 1) \
V(GetImportMetaObject, get_import_meta_object, 0) \
V(CopyDataProperties, copy_data_properties, 2) \
+ V(CopyDataPropertiesWithExcludedPropertiesOnStack, \
+ copy_data_properties_with_excluded_properties_on_stack, -1) \
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1)
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 26fe890914..b9ccae9a26 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -115,14 +115,15 @@ Builtin BuiltinIndexFromBytecode(Bytecode bytecode,
} // namespace
-Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale) {
+CodeT Interpreter::GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
Builtin builtin = BuiltinIndexFromBytecode(bytecode, operand_scale);
return isolate_->builtins()->code(builtin);
}
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale, Code handler) {
+ OperandScale operand_scale,
+ CodeT handler) {
DCHECK(handler.is_off_heap_trampoline());
DCHECK(handler.kind() == CodeKind::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
@@ -257,8 +258,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
- RCS_SCOPE(parse_info()->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundIgnitionFinalization);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileIgnitionFinalization,
+ RuntimeCallStats::kThreadSpecific);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
return DoFinalizeJobImpl(shared_info, isolate);
@@ -344,16 +345,16 @@ void Interpreter::Initialize() {
// Set the interpreter entry trampoline entry point now that builtins are
// initialized.
- Handle<Code> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
+ Handle<CodeT> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
DCHECK(builtins->is_initialized());
DCHECK(code->is_off_heap_trampoline() ||
- isolate_->heap()->IsImmovable(*code));
+ isolate_->heap()->IsImmovable(FromCodeT(*code)));
interpreter_entry_trampoline_instruction_start_ = code->InstructionStart();
// Initialize the dispatch table.
ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
Builtin builtin = BuiltinIndexFromBytecode(bytecode, operand_scale);
- Code handler = builtins->code(builtin);
+ CodeT handler = builtins->code(builtin);
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
std::string builtin_name(Builtins::name(builtin));
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 2210f78ee3..82fc8a9dea 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -62,12 +62,12 @@ class Interpreter {
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
- V8_EXPORT_PRIVATE Code GetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale);
+ V8_EXPORT_PRIVATE CodeT GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
- Code handler);
+ CodeT handler);
// Disassembler support.
V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 74a8046b2e..81e030e237 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
+#include "src/execution/frames-inl.h"
#include "src/numbers/conversions.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/field-type.h"
@@ -229,8 +230,8 @@ JsonParser<Char>::JsonParser(Isolate* isolate, Handle<String> source)
chars_may_relocate_ = false;
} else {
DisallowGarbageCollection no_gc;
- isolate->heap()->AddGCEpilogueCallback(UpdatePointersCallback,
- v8::kGCTypeAll, this);
+ isolate->main_thread_local_heap()->AddGCEpilogueCallback(
+ UpdatePointersCallback, this);
chars_ = SeqString::cast(*source_).GetChars(no_gc);
chars_may_relocate_ = true;
}
@@ -274,6 +275,17 @@ void JsonParser<Char>::ReportUnexpectedToken(JsonToken token) {
if (isolate()->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(isolate(), script);
}
+
+ StackTraceFrameIterator it(isolate_);
+ if (!it.done() && it.is_javascript()) {
+ FrameSummary summary = it.GetTopValidFrame();
+ script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
+ if (summary.script()->IsScript()) {
+ script->set_origin_options(
+ Script::cast(*summary.script()).origin_options());
+ }
+ }
+
// We should sent compile error event because we compile JSON object in
// separated source file.
isolate()->debug()->OnCompileError(script);
@@ -305,7 +317,8 @@ JsonParser<Char>::~JsonParser() {
// Check that the string shape hasn't changed. Otherwise our GC hooks are
// broken.
SeqString::cast(*source_);
- isolate()->heap()->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+ isolate()->main_thread_local_heap()->RemoveGCEpilogueCallback(
+ UpdatePointersCallback, this);
}
}
@@ -473,14 +486,13 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
descriptor_index)),
isolate_);
} else {
- DisallowGarbageCollection no_gc;
- TransitionsAccessor transitions(isolate(), *map, &no_gc);
+ TransitionsAccessor transitions(isolate(), *map);
expected = transitions.ExpectedTransitionKey();
if (!expected.is_null()) {
// Directly read out the target while reading out the key, otherwise it
// might die while building the string below.
- target = TransitionsAccessor(isolate(), *map, &no_gc)
- .ExpectedTransitionTarget();
+ target =
+ TransitionsAccessor(isolate(), *map).ExpectedTransitionTarget();
}
}
@@ -492,7 +504,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
map = ParentOfDescriptorOwner(isolate_, map, feedback, descriptor);
feedback_descriptors = 0;
}
- if (!TransitionsAccessor(isolate(), map)
+ if (!TransitionsAccessor(isolate(), *map)
.FindTransitionToField(key)
.ToHandle(&target)) {
break;
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 227b01fe74..543f24c678 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -312,8 +312,7 @@ class JsonParser final {
static const int kInitialSpecialStringLength = 32;
- static void UpdatePointersCallback(v8::Isolate* v8_isolate, v8::GCType type,
- v8::GCCallbackFlags flags, void* parser) {
+ static void UpdatePointersCallback(void* parser) {
reinterpret_cast<JsonParser<Char>*>(parser)->UpdatePointers();
}
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 8dd3118447..8416ae131c 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -438,8 +438,8 @@ class CircularStructureMessageBuilder {
private:
void AppendConstructorName(Handle<Object> object) {
builder_.AppendCharacter('\'');
- Handle<String> constructor_name =
- JSReceiver::GetConstructorName(Handle<JSReceiver>::cast(object));
+ Handle<String> constructor_name = JSReceiver::GetConstructorName(
+ builder_.isolate(), Handle<JSReceiver>::cast(object));
builder_.AppendString(constructor_name);
builder_.AppendCharacter('\'');
}
@@ -845,8 +845,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
*map == object->map(cage_base)) {
DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- property = JSObject::FastPropertyAt(object, details.representation(),
- field_index);
+ property = JSObject::FastPropertyAt(
+ isolate_, object, details.representation(), field_index);
} else {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, property,
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 1cbc01193d..e360c661dd 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -87,13 +87,6 @@ void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
idle_time_in_seconds);
}
-void SetTracingController(
- v8::Platform* platform,
- v8::platform::tracing::TracingController* tracing_controller) {
- static_cast<DefaultPlatform*>(platform)->SetTracingController(
- std::unique_ptr<v8::TracingController>(tracing_controller));
-}
-
void NotifyIsolateShutdown(v8::Platform* platform, Isolate* isolate) {
static_cast<DefaultPlatform*>(platform)->NotifyIsolateShutdown(isolate);
}
@@ -128,7 +121,7 @@ DefaultPlatform::~DefaultPlatform() {
namespace {
double DefaultTimeFunction() {
- return base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ return base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
diff --git a/deps/v8/src/libplatform/tracing/recorder.h b/deps/v8/src/libplatform/tracing/recorder.h
index 8b8eb0e0e9..309697f128 100644
--- a/deps/v8/src/libplatform/tracing/recorder.h
+++ b/deps/v8/src/libplatform/tracing/recorder.h
@@ -13,7 +13,7 @@
#error "only include this file if V8_ENABLE_SYSTEM_INSTRUMENTATION"
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <os/signpost.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability"
@@ -47,7 +47,7 @@ class V8_PLATFORM_EXPORT Recorder {
void AddEvent(TraceObject* trace_event);
private:
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
os_log_t v8Provider;
#endif
};
@@ -56,7 +56,7 @@ class V8_PLATFORM_EXPORT Recorder {
} // namespace platform
} // namespace v8
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#pragma clang diagnostic pop
#endif
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 38f1ccb77e..55ca063184 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -106,7 +106,7 @@ void TracingController::Initialize(TraceBuffer* trace_buffer) {
}
int64_t TracingController::CurrentTimestampMicroseconds() {
- return base::TimeTicks::HighResolutionNow().ToInternalValue();
+ return base::TimeTicks::Now().ToInternalValue();
}
int64_t TracingController::CurrentCpuTimestampMicroseconds() {
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 111ae5e864..9e5569455f 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -19,7 +19,7 @@
#include <sys/syscall.h>
#endif
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
@@ -467,7 +467,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#error Unexpected iOS target architecture.
#endif // V8_TARGET_ARCH_ARM64
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
#if V8_HOST_ARCH_X64
state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
diff --git a/deps/v8/src/logging/code-events.h b/deps/v8/src/logging/code-events.h
index 59c7952bd6..2f252d62bc 100644
--- a/deps/v8/src/logging/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -103,8 +103,7 @@ class CodeEventListener {
virtual void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) = 0;
virtual void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
- Address pc, int fp_to_sp_delta,
- bool reuse_code) = 0;
+ Address pc, int fp_to_sp_delta) = 0;
// These events can happen when 1. an assumption made by optimized code fails
// or 2. a weakly embedded object dies.
virtual void CodeDependencyChangeEvent(Handle<Code> code,
@@ -234,9 +233,9 @@ class CodeEventDispatcher : public CodeEventListener {
});
}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) override {
+ int fp_to_sp_delta) override {
DispatchEventToListeners([=](CodeEventListener* listener) {
- listener->CodeDeoptEvent(code, kind, pc, fp_to_sp_delta, reuse_code);
+ listener->CodeDeoptEvent(code, kind, pc, fp_to_sp_delta);
});
}
void CodeDependencyChangeEvent(Handle<Code> code,
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index d3cdccd91a..e9b71d56df 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -5,20 +5,23 @@
#ifndef V8_LOGGING_COUNTERS_DEFINITIONS_H_
#define V8_LOGGING_COUNTERS_DEFINITIONS_H_
+#include "include/v8-internal.h"
+
namespace v8 {
namespace internal {
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
- HR(background_scavenger, V8.GCBackgroundScavenger, 0, 10000, 101) \
HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
- HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 25, 26) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, \
+ kGarbageCollectionReasonMaxValue, kGarbageCollectionReasonMaxValue + 1) \
HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
- HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 25, 26) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, \
+ kGarbageCollectionReasonMaxValue, kGarbageCollectionReasonMaxValue + 1) \
HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101) \
HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101) \
HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101) \
@@ -28,12 +31,10 @@ namespace internal {
HR(gc_finalize_sweep, V8.GCFinalizeMC.Sweep, 0, 10000, 101) \
HR(gc_scavenger_scavenge_main, V8.GCScavenger.ScavengeMain, 0, 10000, 101) \
HR(gc_scavenger_scavenge_roots, V8.GCScavenger.ScavengeRoots, 0, 10000, 101) \
- HR(gc_mark_compactor, V8.GCMarkCompactor, 0, 10000, 101) \
HR(gc_marking_sum, V8.GCMarkingSum, 0, 10000, 101) \
/* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */ \
HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
100000, 50) \
- HR(scavenge_reason, V8.GCScavengeReason, 0, 25, 26) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
@@ -105,7 +106,18 @@ namespace internal {
HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2, \
3) \
/* number of times a cache event is triggered for a wasm module */ \
- HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101)
+ HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101) \
+ SANDBOXED_HISTOGRAM_LIST(HR)
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+#define SANDBOXED_HISTOGRAM_LIST(HR) \
+ /* Number of in-use external pointers in the external pointer table */ \
+ /* Counted after sweeping the table at the end of mark-compact GC */ \
+ HR(sandboxed_external_pointers_count, V8.SandboxedExternalPointersCount, 0, \
+ kMaxSandboxedExternalPointers, 101)
+#else
+#define SANDBOXED_HISTOGRAM_LIST(HR)
+#endif // V8_SANDBOX_IS_AVAILABLE
#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
@@ -132,17 +144,12 @@ namespace internal {
/* Time for lazily compiling Wasm functions. */ \
HT(wasm_lazy_compile_time, V8.WasmLazyCompileTimeMicroSeconds, 100000000, \
MICROSECOND) \
- /* Total time to decompress isolate snapshot. */ \
- HT(snapshot_decompress, V8.SnapshotDecompress, 10000000, MICROSECOND) \
- /* Time to decompress context snapshot. */ \
- HT(context_snapshot_decompress, V8.ContextSnapshotDecompress, 10000000, \
- MICROSECOND) \
HT(wasm_compile_after_deserialize, \
V8.WasmCompileAfterDeserializeMilliSeconds, 1000000, MILLISECOND)
#define NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT) \
/* Total V8 time (including JS and runtime calls, exluding callbacks) */ \
- HT(execute_precise, V8.ExecuteMicroSeconds, 1000000, MICROSECOND)
+ HT(execute, V8.ExecuteMicroSeconds, 1000000, MICROSECOND)
#define TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, thread safe: HT(name, caption, max, unit) */ \
@@ -161,9 +168,6 @@ namespace internal {
V8.GCFinalizeMCReduceMemoryBackground, 10000, MILLISECOND) \
HT(gc_finalize_reduce_memory_foreground, \
V8.GCFinalizeMCReduceMemoryForeground, 10000, MILLISECOND) \
- HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
- HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
- HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
MILLISECOND) \
HT(gc_time_to_global_safepoint, V8.GC.TimeToGlobalSafepoint, 10000000, \
@@ -329,7 +333,6 @@ namespace internal {
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(stack_interrupts, V8.StackInterrupts) \
- SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
diff --git a/deps/v8/src/logging/counters-scopes.h b/deps/v8/src/logging/counters-scopes.h
index 4f5c74b5ea..ea6151cc3d 100644
--- a/deps/v8/src/logging/counters-scopes.h
+++ b/deps/v8/src/logging/counters-scopes.h
@@ -17,25 +17,31 @@ class BaseTimedHistogramScope {
explicit BaseTimedHistogramScope(TimedHistogram* histogram)
: histogram_(histogram) {}
- void Start() {
- if (!histogram_->Enabled()) return;
+ void StartInternal() {
DCHECK(histogram_->ToggleRunningState(true));
timer_.Start();
}
- void Stop() {
- if (!histogram_->Enabled()) return;
+ void StopInternal() {
DCHECK(histogram_->ToggleRunningState(false));
histogram_->AddTimedSample(timer_.Elapsed());
timer_.Stop();
}
- void LogStart(Isolate* isolate) {
+ V8_INLINE void Start() {
+ if (histogram_->Enabled()) StartInternal();
+ }
+
+ V8_INLINE void Stop() {
+ if (histogram_->Enabled()) StopInternal();
+ }
+
+ V8_INLINE void LogStart(Isolate* isolate) {
Logger::CallEventLogger(isolate, histogram_->name(),
v8::LogEventStatus::kStart, true);
}
- void LogEnd(Isolate* isolate) {
+ V8_INLINE void LogEnd(Isolate* isolate) {
Logger::CallEventLogger(isolate, histogram_->name(),
v8::LogEventStatus::kEnd, true);
}
@@ -113,8 +119,9 @@ class V8_NODISCARD LazyTimedHistogramScope : public BaseTimedHistogramScope {
// Helper class for scoping a NestedHistogramTimer.
class V8_NODISCARD NestedTimedHistogramScope : public BaseTimedHistogramScope {
public:
- explicit NestedTimedHistogramScope(NestedTimedHistogram* histogram)
- : BaseTimedHistogramScope(histogram) {
+ explicit NestedTimedHistogramScope(NestedTimedHistogram* histogram,
+ Isolate* isolate = nullptr)
+ : BaseTimedHistogramScope(histogram), isolate_(isolate) {
Start();
}
~NestedTimedHistogramScope() { Stop(); }
@@ -123,24 +130,34 @@ class V8_NODISCARD NestedTimedHistogramScope : public BaseTimedHistogramScope {
friend NestedTimedHistogram;
friend PauseNestedTimedHistogramScope;
- void Start() {
+ void StartInteral() {
previous_scope_ = timed_histogram()->Enter(this);
- if (histogram_->Enabled()) {
- base::TimeTicks now = base::TimeTicks::HighResolutionNow();
- if (previous_scope_) previous_scope_->Pause(now);
- timer_.Start(now);
- }
- LogStart(timed_histogram()->counters()->isolate());
+ base::TimeTicks now = base::TimeTicks::Now();
+ if (previous_scope_) previous_scope_->Pause(now);
+ timer_.Start(now);
}
- void Stop() {
+ void StopInternal() {
timed_histogram()->Leave(previous_scope_);
- if (histogram_->Enabled()) {
- base::TimeTicks now = base::TimeTicks::HighResolutionNow();
- histogram_->AddTimedSample(timer_.Elapsed(now));
- timer_.Stop();
- if (previous_scope_) previous_scope_->Resume(now);
- }
+ base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeDelta elapsed = timer_.Elapsed(now);
+ histogram_->AddTimedSample(elapsed);
+ if (isolate_) RecordLongTaskTime(elapsed);
+#ifdef DEBUG
+ // StopInternal() is called in the destructor and don't access timer_
+ // after that.
+ timer_.Stop();
+#endif
+ if (previous_scope_) previous_scope_->Resume(now);
+ }
+
+ V8_INLINE void Start() {
+ if (histogram_->Enabled()) StartInteral();
+ LogStart(timed_histogram()->counters()->isolate());
+ }
+
+ V8_INLINE void Stop() {
+ if (histogram_->Enabled()) StopInternal();
LogEnd(timed_histogram()->counters()->isolate());
}
@@ -154,11 +171,19 @@ class V8_NODISCARD NestedTimedHistogramScope : public BaseTimedHistogramScope {
timer_.Resume(now);
}
+ void RecordLongTaskTime(base::TimeDelta elapsed) const {
+ if (histogram_ == isolate_->counters()->execute()) {
+ isolate_->GetCurrentLongTaskStats()->v8_execute_us +=
+ elapsed.InMicroseconds();
+ }
+ }
+
NestedTimedHistogram* timed_histogram() {
return static_cast<NestedTimedHistogram*>(histogram_);
}
NestedTimedHistogramScope* previous_scope_;
+ Isolate* isolate_;
};
// Temporarily pause a NestedTimedHistogram when for instance leaving V8 for
@@ -169,13 +194,13 @@ class V8_NODISCARD PauseNestedTimedHistogramScope {
: histogram_(histogram) {
previous_scope_ = histogram_->Enter(nullptr);
if (isEnabled()) {
- previous_scope_->Pause(base::TimeTicks::HighResolutionNow());
+ previous_scope_->Pause(base::TimeTicks::Now());
}
}
~PauseNestedTimedHistogramScope() {
histogram_->Leave(previous_scope_);
if (isEnabled()) {
- previous_scope_->Resume(base::TimeTicks::HighResolutionNow());
+ previous_scope_->Resume(base::TimeTicks::Now());
}
}
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index fb73184c3f..73cb8c668c 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -24,8 +24,26 @@ void StatsTable::SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
}
-int* StatsCounter::FindLocationInStatsTable() const {
- return counters_->FindLocation(name_);
+namespace {
+std::atomic<int> unused_counter_dump{0};
+}
+
+bool StatsCounter::Enabled() { return GetPtr() != &unused_counter_dump; }
+
+std::atomic<int>* StatsCounter::SetupPtrFromStatsTable() {
+ // {Init} must have been called.
+ DCHECK_NOT_NULL(counters_);
+ DCHECK_NOT_NULL(name_);
+ int* location = counters_->FindLocation(name_);
+ std::atomic<int>* ptr =
+ location ? base::AsAtomicPtr(location) : &unused_counter_dump;
+#ifdef DEBUG
+ std::atomic<int>* old_ptr = ptr_.exchange(ptr, std::memory_order_release);
+ DCHECK_IMPLIES(old_ptr, old_ptr == ptr);
+#else
+ ptr_.store(ptr, std::memory_order_release);
+#endif
+ return ptr;
}
void Histogram::AddSample(int sample) {
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index bb662b3e21..2c74a8ecd8 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -101,36 +101,24 @@ class StatsTable {
// This class is thread-safe.
class StatsCounter {
public:
- void Set(int value) {
- if (std::atomic<int>* loc = GetPtr()) {
- loc->store(value, std::memory_order_relaxed);
- }
- }
+ void Set(int value) { GetPtr()->store(value, std::memory_order_relaxed); }
void Increment(int value = 1) {
- if (std::atomic<int>* loc = GetPtr()) {
- loc->fetch_add(value, std::memory_order_relaxed);
- }
+ GetPtr()->fetch_add(value, std::memory_order_relaxed);
}
void Decrement(int value = 1) {
- if (std::atomic<int>* loc = GetPtr()) {
- loc->fetch_sub(value, std::memory_order_relaxed);
- }
+ GetPtr()->fetch_sub(value, std::memory_order_relaxed);
}
- // Is this counter enabled?
- // Returns false if table is full.
- bool Enabled() { return GetPtr() != nullptr; }
+ // Returns true if this counter is enabled (a lookup function was provided and
+ // it returned a non-null pointer).
+ V8_EXPORT_PRIVATE bool Enabled();
// Get the internal pointer to the counter. This is used
// by the code generator to emit code that manipulates a
// given counter without calling the runtime system.
- std::atomic<int>* GetInternalPointer() {
- std::atomic<int>* loc = GetPtr();
- DCHECK_NOT_NULL(loc);
- return loc;
- }
+ std::atomic<int>* GetInternalPointer() { return GetPtr(); }
private:
friend class Counters;
@@ -144,35 +132,22 @@ class StatsCounter {
name_ = name;
}
- V8_EXPORT_PRIVATE int* FindLocationInStatsTable() const;
+ V8_NOINLINE V8_EXPORT_PRIVATE std::atomic<int>* SetupPtrFromStatsTable();
// Reset the cached internal pointer.
- void Reset() {
- lookup_done_.store(false, std::memory_order_release);
- ptr_.store(nullptr, std::memory_order_release);
- }
+ void Reset() { ptr_.store(nullptr, std::memory_order_relaxed); }
// Returns the cached address of this counter location.
std::atomic<int>* GetPtr() {
- // {Init} must have been called.
- DCHECK_NOT_NULL(counters_);
- DCHECK_NOT_NULL(name_);
auto* ptr = ptr_.load(std::memory_order_acquire);
if (V8_LIKELY(ptr)) return ptr;
- if (!lookup_done_.load(std::memory_order_acquire)) {
- ptr = base::AsAtomicPtr(FindLocationInStatsTable());
- ptr_.store(ptr, std::memory_order_release);
- lookup_done_.store(true, std::memory_order_release);
- }
- // Re-load after checking {lookup_done_}.
- return ptr_.load(std::memory_order_acquire);
+ return SetupPtrFromStatsTable();
}
Counters* counters_ = nullptr;
const char* name_ = nullptr;
// A pointer to an atomic, set atomically in {GetPtr}.
std::atomic<std::atomic<int>*> ptr_{nullptr};
- std::atomic<bool> lookup_done_{false};
};
// A Histogram represents a dynamically created histogram in the
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 28a82bb5b9..a0e2223412 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -21,7 +21,6 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/perf-jit.h"
#include "src/execution/isolate.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
@@ -85,11 +84,11 @@ static v8::CodeEventType GetCodeEventTypeForTag(
static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
CodeKind kind = code.kind();
- // We record interpreter trampoline builting copies as having the
+ // We record interpreter trampoline builtin copies as having the
// "interpreted" marker.
if (FLAG_interpreted_frames_native_stack && kind == CodeKind::BUILTIN &&
code.GetCode().is_interpreter_trampoline_builtin() &&
- code.GetCode() !=
+ ToCodeT(code.GetCode()) !=
*BUILTIN_CODE(shared.GetIsolate(), InterpreterEntryTrampoline)) {
kind = CodeKind::INTERPRETED_FUNCTION;
}
@@ -944,7 +943,7 @@ class Ticker : public sampler::Sampler {
void SampleStack(const v8::RegisterState& state) override {
if (!profiler_) return;
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::WasEverUsed() &&
+ if (isolate->was_locker_ever_used() &&
(!isolate->thread_manager()->IsLockedByThread(
perThreadData_->thread_id()) ||
perThreadData_->thread_state() != nullptr))
@@ -1394,7 +1393,7 @@ void Logger::FeedbackVectorEvent(FeedbackVector vector, AbstractCode code) {
<< vector.length();
msg << kNext << reinterpret_cast<void*>(code.InstructionStart());
msg << kNext << vector.optimization_marker();
- msg << kNext << vector.optimization_tier();
+ msg << kNext << vector.maybe_has_optimized_code();
msg << kNext << vector.invocation_count();
msg << kNext << vector.profiler_ticks() << kNext;
@@ -1540,11 +1539,10 @@ void Logger::ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
}
void Logger::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) {
+ int fp_to_sp_delta) {
if (!is_logging() || !FLAG_log_deopt) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*code, pc);
- ProcessDeoptEvent(code, info.position,
- Deoptimizer::MessageFor(kind, reuse_code),
+ ProcessDeoptEvent(code, info.position, Deoptimizer::MessageFor(kind),
DeoptimizeReasonToString(info.deopt_reason));
}
@@ -1613,20 +1611,6 @@ void Logger::MoveEventInternal(LogEventsAndTags event, Address from,
msg.WriteToLogFile();
}
-void Logger::ResourceEvent(const char* name, const char* tag) {
- if (!FLAG_log) return;
- MSG_BUILDER();
- msg << name << kNext << tag << kNext;
-
- uint32_t sec, usec;
- if (base::OS::GetUserTime(&sec, &usec) != -1) {
- msg << sec << kNext << usec << kNext;
- }
- msg.AppendFormatString("%.0f",
- V8::GetCurrentPlatform()->CurrentClockTimeMillis());
- msg.WriteToLogFile();
-}
-
void Logger::SuspectReadEvent(Name name, Object obj) {
if (!FLAG_log_suspect) return;
MSG_BUILDER();
@@ -1896,7 +1880,7 @@ EnumerateCompiledFunctions(Heap* heap) {
obj = iterator.Next()) {
if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
- if (sfi.is_compiled() && !sfi.IsInterpreted()) {
+ if (sfi.is_compiled() && !sfi.HasBytecodeArray()) {
compiled_funcs.emplace_back(
handle(sfi, isolate),
handle(AbstractCode::cast(sfi.abstract_code(isolate)), isolate));
@@ -1912,7 +1896,7 @@ EnumerateCompiledFunctions(Heap* heap) {
Script::cast(function.shared().script()).HasValidSource()) {
compiled_funcs.emplace_back(
handle(function.shared(), isolate),
- handle(AbstractCode::cast(function.code()), isolate));
+ handle(AbstractCode::cast(FromCodeT(function.code())), isolate));
}
}
}
@@ -2168,7 +2152,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
case CodeKind::BASELINE:
- case CodeKind::TURBOPROP:
+ case CodeKind::MAGLEV:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
@@ -2182,7 +2166,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
break;
case CodeKind::BUILTIN:
if (Code::cast(object).is_interpreter_trampoline_builtin() &&
- Code::cast(object) !=
+ ToCodeT(Code::cast(object)) !=
*BUILTIN_CODE(isolate_, InterpreterEntryTrampoline)) {
return;
}
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 34f5bfc9ec..8bb0c5f931 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -148,12 +148,6 @@ class Logger : public CodeEventListener {
void NewEvent(const char* name, void* object, size_t size);
void DeleteEvent(const char* name, void* object);
- // Emits an event with a tag, and some resource usage information.
- // -> (name, tag, <rusage information>).
- // Currently, the resource usage information is a process time stamp
- // and a real time timestamp.
- void ResourceEvent(const char* name, const char* tag);
-
// Emits an event that an undefined property was read from an
// object.
void SuspectReadEvent(Name name, Object obj);
@@ -229,7 +223,7 @@ class Logger : public CodeEventListener {
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) override;
+ int fp_to_sp_delta) override;
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override;
@@ -277,9 +271,10 @@ class Logger : public CodeEventListener {
static void DefaultEventLoggerSentinel(const char* name, int event) {}
- static void CallEventLogger(Isolate* isolate, const char* name,
- v8::LogEventStatus se, bool expose_to_api) {
- if (!isolate->event_logger()) return;
+ V8_INLINE static void CallEventLoggerInternal(Isolate* isolate,
+ const char* name,
+ v8::LogEventStatus se,
+ bool expose_to_api) {
if (isolate->event_logger() == DefaultEventLoggerSentinel) {
LOG(isolate, TimerEvent(se, name));
} else if (expose_to_api) {
@@ -287,6 +282,13 @@ class Logger : public CodeEventListener {
}
}
+ V8_INLINE static void CallEventLogger(Isolate* isolate, const char* name,
+ v8::LogEventStatus se,
+ bool expose_to_api) {
+ if (!isolate->event_logger()) return;
+ CallEventLoggerInternal(isolate, name, se, expose_to_api);
+ }
+
V8_EXPORT_PRIVATE bool is_logging();
bool is_listening_to_code_events() override {
@@ -450,7 +452,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) override {}
+ int fp_to_sp_delta) override {}
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
@@ -519,7 +521,7 @@ class ExternalCodeEventListener : public CodeEventListener {
Handle<SharedFunctionInfo> shared) override {}
void CodeMovingGCEvent() override {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) override {}
+ int fp_to_sp_delta) override {}
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
diff --git a/deps/v8/src/logging/runtime-call-stats-scope.h b/deps/v8/src/logging/runtime-call-stats-scope.h
index ffdf08378d..8ef7780907 100644
--- a/deps/v8/src/logging/runtime-call-stats-scope.h
+++ b/deps/v8/src/logging/runtime-call-stats-scope.h
@@ -30,11 +30,17 @@ RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
stats_->Enter(&timer_, counter_id);
}
-RuntimeCallTimerScope::RuntimeCallTimerScope(LocalIsolate* isolate,
- RuntimeCallCounterId counter_id) {
+RuntimeCallTimerScope::RuntimeCallTimerScope(
+ LocalIsolate* isolate, RuntimeCallCounterId counter_id,
+ RuntimeCallStats::CounterMode mode) {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
DCHECK_NOT_NULL(isolate->runtime_call_stats());
stats_ = isolate->runtime_call_stats();
+ if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
+ counter_id = stats_->CounterIdForThread(counter_id);
+ }
+
+ DCHECK(stats_->IsCounterAppropriateForThread(counter_id));
stats_->Enter(&timer_, counter_id);
}
diff --git a/deps/v8/src/logging/runtime-call-stats.cc b/deps/v8/src/logging/runtime-call-stats.cc
index a326c59c4c..5322b7412f 100644
--- a/deps/v8/src/logging/runtime-call-stats.cc
+++ b/deps/v8/src/logging/runtime-call-stats.cc
@@ -14,8 +14,7 @@
namespace v8 {
namespace internal {
-base::TimeTicks (*RuntimeCallTimer::Now)() =
- &base::TimeTicks::HighResolutionNow;
+base::TimeTicks (*RuntimeCallTimer::Now)() = &base::TimeTicks::Now;
base::TimeTicks RuntimeCallTimer::NowCPUTime() {
base::ThreadTicks ticks = base::ThreadTicks::Now();
@@ -323,8 +322,7 @@ void WorkerThreadRuntimeCallStats::AddToMainTable(
}
WorkerThreadRuntimeCallStatsScope::WorkerThreadRuntimeCallStatsScope(
- WorkerThreadRuntimeCallStats* worker_stats)
- : table_(nullptr) {
+ WorkerThreadRuntimeCallStats* worker_stats) {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
table_ = reinterpret_cast<RuntimeCallStats*>(
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index f1b5be5c3c..ff2893fe16 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -5,6 +5,8 @@
#ifndef V8_LOGGING_RUNTIME_CALL_STATS_H_
#define V8_LOGGING_RUNTIME_CALL_STATS_H_
+#include "src/base/macros.h"
+
#ifdef V8_RUNTIME_CALL_STATS
#include "src/base/atomic-utils.h"
@@ -314,8 +316,9 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
- \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, CompileTask) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateSIMD128Registers) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
@@ -371,6 +374,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopPeeling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmOptimization) \
\
@@ -390,7 +394,6 @@ class RuntimeCallTimer final {
V(CodeGenerationFromStringsCallbacks) \
V(CompileBackgroundBaselinePreVisit) \
V(CompileBackgroundBaselineVisit) \
- V(CompileBackgroundCompileTask) \
V(CompileBaseline) \
V(CompileBaselineFinalization) \
V(CompileBaselinePreVisit) \
@@ -683,14 +686,20 @@ class WorkerThreadRuntimeCallStats final {
// when it is destroyed.
class V8_NODISCARD WorkerThreadRuntimeCallStatsScope final {
public:
+ WorkerThreadRuntimeCallStatsScope() = default;
explicit WorkerThreadRuntimeCallStatsScope(
WorkerThreadRuntimeCallStats* off_thread_stats);
~WorkerThreadRuntimeCallStatsScope();
+ WorkerThreadRuntimeCallStatsScope(WorkerThreadRuntimeCallStatsScope&&) =
+ delete;
+ WorkerThreadRuntimeCallStatsScope(const WorkerThreadRuntimeCallStatsScope&) =
+ delete;
+
RuntimeCallStats* Get() const { return table_; }
private:
- RuntimeCallStats* table_;
+ RuntimeCallStats* table_ = nullptr;
};
#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
@@ -713,7 +722,9 @@ class V8_NODISCARD RuntimeCallTimerScope {
inline RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(LocalIsolate* isolate,
- RuntimeCallCounterId counter_id);
+ RuntimeCallCounterId counter_id,
+ RuntimeCallStats::CounterMode mode =
+ RuntimeCallStats::CounterMode::kExact);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
RuntimeCallCounterId counter_id,
RuntimeCallStats::CounterMode mode =
diff --git a/deps/v8/src/maglev/DEPS b/deps/v8/src/maglev/DEPS
new file mode 100644
index 0000000000..f3fa2ecc66
--- /dev/null
+++ b/deps/v8/src/maglev/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ # Allow Maglev to depend on TurboFan data structures.
+ # TODO(v8:7700): Clean up these dependencies by extracting common code to a
+ # separate directory.
+ "+src/compiler",
+]
diff --git a/deps/v8/src/maglev/OWNERS b/deps/v8/src/maglev/OWNERS
new file mode 100644
index 0000000000..dca7476a04
--- /dev/null
+++ b/deps/v8/src/maglev/OWNERS
@@ -0,0 +1,3 @@
+leszeks@chromium.org
+jgruber@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/maglev/maglev-basic-block.h b/deps/v8/src/maglev/maglev-basic-block.h
new file mode 100644
index 0000000000..f6222944fe
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-basic-block.h
@@ -0,0 +1,107 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
+#define V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
+
+#include <vector>
+
+#include "src/codegen/label.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+using NodeIterator = Node::List::Iterator;
+using NodeConstIterator = Node::List::Iterator;
+
+class BasicBlock {
+ public:
+ explicit BasicBlock(MergePointInterpreterFrameState* state)
+ : control_node_(nullptr), state_(state) {}
+
+ uint32_t first_id() const {
+ if (has_phi()) return phis()->first()->id();
+ return nodes_.is_empty() ? control_node()->id() : nodes_.first()->id();
+ }
+
+ uint32_t FirstNonGapMoveId() const {
+ if (has_phi()) return phis()->first()->id();
+ if (!nodes_.is_empty()) {
+ for (const Node* node : nodes_) {
+ if (node->Is<GapMove>()) continue;
+ return node->id();
+ }
+ }
+ return control_node()->id();
+ }
+
+ Node::List& nodes() { return nodes_; }
+
+ ControlNode* control_node() const { return control_node_; }
+ void set_control_node(ControlNode* control_node) {
+ DCHECK_NULL(control_node_);
+ control_node_ = control_node;
+ }
+
+ bool has_phi() const { return has_state() && state_->has_phi(); }
+
+ bool is_empty_block() const { return is_empty_block_; }
+
+ BasicBlock* empty_block_predecessor() const {
+ DCHECK(is_empty_block());
+ return empty_block_predecessor_;
+ }
+
+ void set_empty_block_predecessor(BasicBlock* predecessor) {
+ DCHECK(nodes_.is_empty());
+ DCHECK(control_node()->Is<Jump>());
+ DCHECK_NULL(state_);
+ is_empty_block_ = true;
+ empty_block_predecessor_ = predecessor;
+ }
+
+ Phi::List* phis() const {
+ DCHECK(has_phi());
+ return state_->phis();
+ }
+
+ BasicBlock* predecessor_at(int i) const {
+ DCHECK_NOT_NULL(state_);
+ return state_->predecessor_at(i);
+ }
+
+ int predecessor_id() const {
+ return control_node()->Cast<UnconditionalControlNode>()->predecessor_id();
+ }
+ void set_predecessor_id(int id) {
+ control_node()->Cast<UnconditionalControlNode>()->set_predecessor_id(id);
+ }
+
+ Label* label() { return &label_; }
+ MergePointInterpreterFrameState* state() const {
+ DCHECK(has_state());
+ return state_;
+ }
+ bool has_state() const { return state_ != nullptr && !is_empty_block(); }
+
+ private:
+ bool is_empty_block_ = false;
+ Node::List nodes_;
+ ControlNode* control_node_;
+ union {
+ MergePointInterpreterFrameState* state_;
+ BasicBlock* empty_block_predecessor_;
+ };
+ Label label_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
new file mode 100644
index 0000000000..ecf8bbccda
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -0,0 +1,135 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
+#define V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/common/globals.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCodeGenState {
+ public:
+ class DeferredCodeInfo {
+ public:
+ virtual void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) = 0;
+ Label deferred_code_label;
+ Label return_label;
+ };
+
+ MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
+ SafepointTableBuilder* safepoint_table_builder)
+ : compilation_unit_(compilation_unit),
+ safepoint_table_builder_(safepoint_table_builder),
+ masm_(isolate(), CodeObjectRequired::kNo) {}
+
+ void SetVregSlots(int slots) { vreg_slots_ = slots; }
+
+ void PushDeferredCode(DeferredCodeInfo* deferred_code) {
+ deferred_code_.push_back(deferred_code);
+ }
+ void EmitDeferredCode() {
+ for (auto& deferred_code : deferred_code_) {
+ masm()->RecordComment("-- Deferred block");
+ masm()->bind(&deferred_code->deferred_code_label);
+ deferred_code->Generate(this, &deferred_code->return_label);
+ masm()->int3();
+ }
+ }
+
+ compiler::NativeContextRef native_context() const {
+ return broker()->target_native_context();
+ }
+ Isolate* isolate() const { return compilation_unit_->isolate(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+ int register_count() const { return compilation_unit_->register_count(); }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+ compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
+ const compiler::BytecodeArrayRef& bytecode() const {
+ return compilation_unit_->bytecode();
+ }
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+ MacroAssembler* masm() { return &masm_; }
+ int vreg_slots() const { return vreg_slots_; }
+ SafepointTableBuilder* safepoint_table_builder() const {
+ return safepoint_table_builder_;
+ }
+ MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
+
+ // TODO(v8:7700): Clean up after all code paths are supported.
+ void set_found_unsupported_code_paths(bool val) {
+ found_unsupported_code_paths_ = val;
+ }
+ bool found_unsupported_code_paths() const {
+ return found_unsupported_code_paths_;
+ }
+
+ private:
+ MaglevCompilationUnit* const compilation_unit_;
+ SafepointTableBuilder* const safepoint_table_builder_;
+
+ MacroAssembler masm_;
+ std::vector<DeferredCodeInfo*> deferred_code_;
+ int vreg_slots_ = 0;
+
+ // Allow marking some codegen paths as unsupported, so that we can test maglev
+ // incrementally.
+ // TODO(v8:7700): Clean up after all code paths are supported.
+ bool found_unsupported_code_paths_ = false;
+};
+
+// Some helpers for codegen.
+// TODO(leszeks): consider moving this to a separate header.
+
+inline MemOperand GetStackSlot(int index) {
+ return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
+}
+
+inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
+ return GetStackSlot(operand.index());
+}
+
+inline Register ToRegister(const compiler::InstructionOperand& operand) {
+ return compiler::AllocatedOperand::cast(operand).GetRegister();
+}
+
+inline Register ToRegister(const ValueLocation& location) {
+ return ToRegister(location.operand());
+}
+
+inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
+ return GetStackSlot(compiler::AllocatedOperand::cast(operand));
+}
+
+inline MemOperand ToMemOperand(const ValueLocation& location) {
+ return ToMemOperand(location.operand());
+}
+
+inline int GetSafepointIndexForStackSlot(int i) {
+ // Safepoint tables also contain slots for all fixed frame slots (both
+ // above and below the fp).
+ return StandardFrameConstants::kFixedSlotCount + i;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
new file mode 100644
index 0000000000..f578d53777
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -0,0 +1,378 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-code-generator.h"
+
+#include "src/codegen/code-desc.h"
+#include "src/codegen/register.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+#define __ masm()->
+
+namespace {
+
+template <typename T, size_t... Is>
+std::array<T, sizeof...(Is)> repeat(T value, std::index_sequence<Is...>) {
+ return {((void)Is, value)...};
+}
+
+template <size_t N, typename T>
+std::array<T, N> repeat(T value) {
+ return repeat<T>(value, std::make_index_sequence<N>());
+}
+
+using RegisterMoves = std::array<Register, Register::kNumRegisters>;
+using StackToRegisterMoves =
+ std::array<compiler::InstructionOperand, Register::kNumRegisters>;
+
+class MaglevCodeGeneratingNodeProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
+ : code_gen_state_(code_gen_state) {}
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ if (FLAG_maglev_break_on_entry) {
+ __ int3();
+ }
+
+ __ EnterFrame(StackFrame::BASELINE);
+
+ // Save arguments in frame.
+ // TODO(leszeks): Consider eliding this frame if we don't make any calls
+ // that could clobber these registers.
+ __ Push(kContextRegister);
+ __ Push(kJSFunctionRegister); // Callee's JS function.
+ __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
+
+ // Extend rsp by the size of the frame.
+ code_gen_state_->SetVregSlots(graph->stack_slots());
+ __ subq(rsp, Immediate(code_gen_state_->vreg_slots() * kSystemPointerSize));
+
+ // Initialize stack slots.
+ // TODO(jgruber): Update logic once the register allocator is further along.
+ {
+ ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
+ __ Move(rax, Immediate(0));
+ __ Move(rcx, Immediate(code_gen_state_->vreg_slots()));
+ __ leaq(rdi, GetStackSlot(code_gen_state_->vreg_slots() - 1));
+ __ repstosq();
+ }
+
+ // We don't emit proper safepoint data yet; instead, define a single
+ // safepoint at the end of the code object, with all-tagged stack slots.
+ // TODO(jgruber): Real safepoint handling.
+ SafepointTableBuilder::Safepoint safepoint =
+ safepoint_table_builder()->DefineSafepoint(masm());
+ for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
+ safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
+ }
+ }
+
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ code_gen_state_->EmitDeferredCode();
+ }
+
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- Block b" << graph_labeller()->BlockId(block);
+ __ RecordComment(ss.str());
+ }
+
+ __ bind(block->label());
+ }
+
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- " << graph_labeller()->NodeId(node) << ": "
+ << PrintNode(graph_labeller(), node);
+ __ RecordComment(ss.str());
+ }
+
+ // Emit Phi moves before visiting the control node.
+ if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
+ EmitBlockEndGapMoves(node->template Cast<UnconditionalControlNode>(),
+ state);
+ }
+
+ node->GenerateCode(code_gen_state_, state);
+
+ if (std::is_base_of<ValueNode, NodeT>::value) {
+ ValueNode* value_node = node->template Cast<ValueNode>();
+ if (value_node->is_spilled()) {
+ compiler::AllocatedOperand source =
+ compiler::AllocatedOperand::cast(value_node->result().operand());
+ // We shouldn't spill nodes which already output to the stack.
+ if (!source.IsStackSlot()) {
+ if (FLAG_code_comments) __ RecordComment("-- Spill:");
+ DCHECK(!source.IsStackSlot());
+ __ movq(GetStackSlot(value_node->spill_slot()), ToRegister(source));
+ } else {
+ // Otherwise, the result source stack slot should be equal to the
+ // spill slot.
+ DCHECK_EQ(source.index(), value_node->spill_slot().index());
+ }
+ }
+ }
+ }
+
+ void EmitSingleParallelMove(Register source, Register target,
+ RegisterMoves& moves) {
+ DCHECK(!moves[target.code()].is_valid());
+ __ movq(target, source);
+ moves[source.code()] = Register::no_reg();
+ }
+
+ bool RecursivelyEmitParallelMoveChain(Register chain_start, Register source,
+ Register target, RegisterMoves& moves) {
+ if (target == chain_start) {
+ // The target of this move is the start of the move chain -- this
+ // means that there is a cycle, and we have to break it by moving
+ // the chain start into a temporary.
+
+ __ RecordComment("-- * Cycle");
+ EmitSingleParallelMove(target, kScratchRegister, moves);
+ EmitSingleParallelMove(source, target, moves);
+ return true;
+ }
+ bool is_cycle = false;
+ if (moves[target.code()].is_valid()) {
+ is_cycle = RecursivelyEmitParallelMoveChain(chain_start, target,
+ moves[target.code()], moves);
+ } else {
+ __ RecordComment("-- * Chain start");
+ }
+ if (is_cycle && source == chain_start) {
+ EmitSingleParallelMove(kScratchRegister, target, moves);
+ __ RecordComment("-- * end cycle");
+ } else {
+ EmitSingleParallelMove(source, target, moves);
+ }
+ return is_cycle;
+ }
+
+ void EmitParallelMoveChain(Register source, RegisterMoves& moves) {
+ Register target = moves[source.code()];
+ if (!target.is_valid()) return;
+
+ DCHECK_NE(source, target);
+ RecursivelyEmitParallelMoveChain(source, source, target, moves);
+ }
+
+ void EmitStackToRegisterGapMove(compiler::InstructionOperand source,
+ Register target) {
+ if (!source.IsAllocated()) return;
+ __ movq(target, GetStackSlot(compiler::AllocatedOperand::cast(source)));
+ }
+
+ void RecordGapMove(compiler::AllocatedOperand source, Register target_reg,
+ RegisterMoves& register_moves,
+ StackToRegisterMoves& stack_to_register_moves) {
+ if (source.IsStackSlot()) {
+ // For stack->reg moves, don't emit the move yet, but instead record the
+ // move in the set of stack-to-register moves, to be executed after the
+ // reg->reg parallel moves.
+ stack_to_register_moves[target_reg.code()] = source;
+ } else {
+ // For reg->reg moves, don't emit the move yet, but instead record the
+ // move in the set of parallel register moves, to be resolved later.
+ Register source_reg = ToRegister(source);
+ if (target_reg != source_reg) {
+ DCHECK(!register_moves[source_reg.code()].is_valid());
+ register_moves[source_reg.code()] = target_reg;
+ }
+ }
+ }
+
+ void RecordGapMove(compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target,
+ RegisterMoves& register_moves,
+ StackToRegisterMoves& stack_to_register_moves) {
+ if (target.IsRegister()) {
+ RecordGapMove(source, ToRegister(target), register_moves,
+ stack_to_register_moves);
+ return;
+ }
+
+ // stack->stack and reg->stack moves should be executed before registers are
+ // clobbered by reg->reg or stack->reg, so emit them immediately.
+ if (source.IsRegister()) {
+ Register source_reg = ToRegister(source);
+ __ movq(GetStackSlot(target), source_reg);
+ } else {
+ __ movq(kScratchRegister, GetStackSlot(source));
+ __ movq(GetStackSlot(target), kScratchRegister);
+ }
+ }
+
+ void EmitBlockEndGapMoves(UnconditionalControlNode* node,
+ const ProcessingState& state) {
+ BasicBlock* target = node->target();
+ if (!target->has_state()) {
+ __ RecordComment("-- Target has no state, must be a fallthrough");
+ return;
+ }
+
+ int predecessor_id = state.block()->predecessor_id();
+
+ // Save register moves in an array, so that we can resolve them as parallel
+ // moves. Note that the mapping is:
+ //
+ // register_moves[source] = target.
+ RegisterMoves register_moves =
+ repeat<Register::kNumRegisters>(Register::no_reg());
+
+ // Save stack to register moves in an array, so that we can execute them
+ // after the parallel moves have read the register values. Note that the
+ // mapping is:
+ //
+ // stack_to_register_moves[target] = source.
+ StackToRegisterMoves stack_to_register_moves;
+
+ __ RecordComment("-- Gap moves:");
+
+ for (auto entry : target->state()->register_state()) {
+ RegisterMerge* merge;
+ if (LoadMergeState(entry.state, &merge)) {
+ compiler::AllocatedOperand source = merge->operand(predecessor_id);
+ Register target_reg = entry.reg;
+
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- * " << source << " → " << target_reg;
+ __ RecordComment(ss.str());
+ }
+ RecordGapMove(source, target_reg, register_moves,
+ stack_to_register_moves);
+ }
+ }
+
+ if (target->has_phi()) {
+ Phi::List* phis = target->phis();
+ for (Phi* phi : *phis) {
+ compiler::AllocatedOperand source = compiler::AllocatedOperand::cast(
+ phi->input(state.block()->predecessor_id()).operand());
+ compiler::AllocatedOperand target =
+ compiler::AllocatedOperand::cast(phi->result().operand());
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- * " << source << " → " << target << " (n"
+ << graph_labeller()->NodeId(phi) << ")";
+ __ RecordComment(ss.str());
+ }
+ RecordGapMove(source, target, register_moves, stack_to_register_moves);
+ }
+ }
+
+#define EMIT_MOVE_FOR_REG(Name) EmitParallelMoveChain(Name, register_moves);
+ ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
+#undef EMIT_MOVE_FOR_REG
+
+#define EMIT_MOVE_FOR_REG(Name) \
+ EmitStackToRegisterGapMove(stack_to_register_moves[Name.code()], Name);
+ ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
+#undef EMIT_MOVE_FOR_REG
+ }
+
+ Isolate* isolate() const { return code_gen_state_->isolate(); }
+ MacroAssembler* masm() const { return code_gen_state_->masm(); }
+ MaglevGraphLabeller* graph_labeller() const {
+ return code_gen_state_->graph_labeller();
+ }
+ SafepointTableBuilder* safepoint_table_builder() const {
+ return code_gen_state_->safepoint_table_builder();
+ }
+
+ private:
+ MaglevCodeGenState* code_gen_state_;
+};
+
+} // namespace
+
+class MaglevCodeGeneratorImpl final {
+ public:
+ static MaybeHandle<Code> Generate(MaglevCompilationUnit* compilation_unit,
+ Graph* graph) {
+ return MaglevCodeGeneratorImpl(compilation_unit, graph).Generate();
+ }
+
+ private:
+ MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
+ : safepoint_table_builder_(compilation_unit->zone()),
+ code_gen_state_(compilation_unit, safepoint_table_builder()),
+ processor_(compilation_unit, &code_gen_state_),
+ graph_(graph) {}
+
+ MaybeHandle<Code> Generate() {
+ EmitCode();
+ if (code_gen_state_.found_unsupported_code_paths()) return {};
+ EmitMetadata();
+ return BuildCodeObject();
+ }
+
+ void EmitCode() { processor_.ProcessGraph(graph_); }
+
+ void EmitMetadata() {
+ // Final alignment before starting on the metadata section.
+ masm()->Align(Code::kMetadataAlignment);
+
+ safepoint_table_builder()->Emit(masm(),
+ stack_slot_count_with_fixed_frame());
+ }
+
+ MaybeHandle<Code> BuildCodeObject() {
+ CodeDesc desc;
+ static constexpr int kNoHandlerTableOffset = 0;
+ masm()->GetCode(isolate(), &desc, safepoint_table_builder(),
+ kNoHandlerTableOffset);
+ return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
+ .set_stack_slots(stack_slot_count_with_fixed_frame())
+ .TryBuild();
+ }
+
+ int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
+ int stack_slot_count_with_fixed_frame() const {
+ return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
+ }
+
+ Isolate* isolate() const {
+ return code_gen_state_.compilation_unit()->isolate();
+ }
+ MacroAssembler* masm() { return code_gen_state_.masm(); }
+ SafepointTableBuilder* safepoint_table_builder() {
+ return &safepoint_table_builder_;
+ }
+
+ SafepointTableBuilder safepoint_table_builder_;
+ MaglevCodeGenState code_gen_state_;
+ GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
+ Graph* const graph_;
+};
+
+// static
+MaybeHandle<Code> MaglevCodeGenerator::Generate(
+ MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ return MaglevCodeGeneratorImpl::Generate(compilation_unit, graph);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-code-generator.h b/deps/v8/src/maglev/maglev-code-generator.h
new file mode 100644
index 0000000000..ea584cd179
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-generator.h
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
+#define V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class Graph;
+class MaglevCompilationUnit;
+
+class MaglevCodeGenerator : public AllStatic {
+ public:
+ static MaybeHandle<Code> Generate(MaglevCompilationUnit* compilation_unit,
+ Graph* graph);
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
new file mode 100644
index 0000000000..630d341a66
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compilation-info.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/persistent-handles.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-compiler.h"
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/objects/js-function-inl.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/locked-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+namespace {
+
+constexpr char kMaglevZoneName[] = "maglev-compilation-job-zone";
+
+class V8_NODISCARD MaglevCompilationHandleScope final {
+ public:
+ MaglevCompilationHandleScope(Isolate* isolate,
+ maglev::MaglevCompilationInfo* info)
+ : info_(info),
+ persistent_(isolate),
+ exported_info_(info),
+ canonical_(isolate, &exported_info_) {
+ info->ReopenHandlesInNewHandleScope(isolate);
+ }
+
+ ~MaglevCompilationHandleScope() {
+ info_->set_persistent_handles(persistent_.Detach());
+ }
+
+ private:
+ maglev::MaglevCompilationInfo* const info_;
+ PersistentHandlesScope persistent_;
+ ExportedMaglevCompilationInfo exported_info_;
+ CanonicalHandleScopeForMaglev canonical_;
+};
+
+} // namespace
+
+MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
+ Handle<JSFunction> function)
+ : zone_(isolate->allocator(), kMaglevZoneName),
+ isolate_(isolate),
+ broker_(new compiler::JSHeapBroker(
+ isolate, zone(), FLAG_trace_heap_broker, CodeKind::MAGLEV)),
+ shared_(function->shared(), isolate),
+ function_(function)
+#define V(Name) , Name##_(FLAG_##Name)
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+{
+ DCHECK(FLAG_maglev);
+
+ MaglevCompilationHandleScope compilation(isolate, this);
+
+ compiler::CompilationDependencies* deps =
+ zone()->New<compiler::CompilationDependencies>(broker(), zone());
+ USE(deps); // The deps register themselves in the heap broker.
+
+ broker()->SetTargetNativeContextRef(
+ handle(function->native_context(), isolate));
+ broker()->InitializeAndStartSerializing();
+ broker()->StopSerializing();
+
+ toplevel_compilation_unit_ =
+ MaglevCompilationUnit::New(zone(), this, function);
+}
+
+MaglevCompilationInfo::~MaglevCompilationInfo() = default;
+
+void MaglevCompilationInfo::set_graph_labeller(
+ MaglevGraphLabeller* graph_labeller) {
+ graph_labeller_.reset(graph_labeller);
+}
+
+void MaglevCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
+ DCHECK(!shared_.is_null());
+ shared_ = handle(*shared_, isolate);
+ DCHECK(!function_.is_null());
+ function_ = handle(*function_, isolate);
+}
+
+void MaglevCompilationInfo::set_persistent_handles(
+ std::unique_ptr<PersistentHandles>&& persistent_handles) {
+ DCHECK_NULL(ph_);
+ ph_ = std::move(persistent_handles);
+ DCHECK_NOT_NULL(ph_);
+}
+
+std::unique_ptr<PersistentHandles>
+MaglevCompilationInfo::DetachPersistentHandles() {
+ DCHECK_NOT_NULL(ph_);
+ return std::move(ph_);
+}
+
+void MaglevCompilationInfo::set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
+ DCHECK_NULL(canonical_handles_);
+ canonical_handles_ = std::move(canonical_handles);
+ DCHECK_NOT_NULL(canonical_handles_);
+}
+
+std::unique_ptr<CanonicalHandlesMap>
+MaglevCompilationInfo::DetachCanonicalHandles() {
+ DCHECK_NOT_NULL(canonical_handles_);
+ return std::move(canonical_handles_);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-info.h b/deps/v8/src/maglev/maglev-compilation-info.h
new file mode 100644
index 0000000000..70490de218
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-info.h
@@ -0,0 +1,137 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
+#define V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
+
+#include <memory>
+
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class PersistentHandles;
+class SharedFunctionInfo;
+class Zone;
+
+namespace compiler {
+class JSHeapBroker;
+}
+
+namespace maglev {
+
+class Graph;
+class MaglevCompilationUnit;
+class MaglevGraphLabeller;
+
+#define MAGLEV_COMPILATION_FLAG_LIST(V) \
+ V(code_comments) \
+ V(maglev) \
+ V(print_maglev_code) \
+ V(print_maglev_graph) \
+ V(trace_maglev_regalloc)
+
+class MaglevCompilationInfo final {
+ public:
+ static std::unique_ptr<MaglevCompilationInfo> New(
+ Isolate* isolate, Handle<JSFunction> function) {
+ // Doesn't use make_unique due to the private ctor.
+ return std::unique_ptr<MaglevCompilationInfo>(
+ new MaglevCompilationInfo(isolate, function));
+ }
+ ~MaglevCompilationInfo();
+
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() { return &zone_; }
+ compiler::JSHeapBroker* broker() const { return broker_.get(); }
+ MaglevCompilationUnit* toplevel_compilation_unit() const {
+ return toplevel_compilation_unit_;
+ }
+ Handle<JSFunction> function() const { return function_; }
+
+ bool has_graph_labeller() const { return !!graph_labeller_; }
+ void set_graph_labeller(MaglevGraphLabeller* graph_labeller);
+ MaglevGraphLabeller* graph_labeller() const {
+ DCHECK(has_graph_labeller());
+ return graph_labeller_.get();
+ }
+
+ void set_graph(Graph* graph) { graph_ = graph; }
+ Graph* graph() const { return graph_; }
+
+ void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
+ MaybeHandle<CodeT> codet() const { return codet_; }
+
+ // Flag accessors (for thread-safe access to global flags).
+ // TODO(v8:7700): Consider caching these.
+#define V(Name) \
+ bool Name() const { return Name##_; }
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+
+ // Must be called from within a MaglevCompilationHandleScope. Transfers owned
+ // handles (e.g. shared_, function_) to the new scope.
+ void ReopenHandlesInNewHandleScope(Isolate* isolate);
+
+ // Persistent and canonical handles are passed back and forth between the
+ // Isolate, this info, and the LocalIsolate.
+ void set_persistent_handles(
+ std::unique_ptr<PersistentHandles>&& persistent_handles);
+ std::unique_ptr<PersistentHandles> DetachPersistentHandles();
+ void set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
+ std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
+
+ private:
+ MaglevCompilationInfo(Isolate* isolate, Handle<JSFunction> function);
+
+ Zone zone_;
+ Isolate* const isolate_;
+ const std::unique_ptr<compiler::JSHeapBroker> broker_;
+ // Must be initialized late since it requires an initialized heap broker.
+ MaglevCompilationUnit* toplevel_compilation_unit_ = nullptr;
+
+ Handle<SharedFunctionInfo> shared_;
+ Handle<JSFunction> function_;
+
+ std::unique_ptr<MaglevGraphLabeller> graph_labeller_;
+
+ // Produced off-thread during ExecuteJobImpl.
+ Graph* graph_ = nullptr;
+
+ // Produced during FinalizeJobImpl.
+ MaybeHandle<CodeT> codet_;
+
+#define V(Name) const bool Name##_;
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+
+ // 1) PersistentHandles created via PersistentHandlesScope inside of
+ // CompilationHandleScope.
+ // 2) Owned by MaglevCompilationInfo.
+ // 3) Owned by the broker's LocalHeap when entering the LocalHeapScope.
+ // 4) Back to MaglevCompilationInfo when exiting the LocalHeapScope.
+ //
+ // TODO(jgruber,v8:7700): Update this comment:
+ //
+ // In normal execution it gets destroyed when PipelineData gets destroyed.
+ // There is a special case in GenerateCodeForTesting where the JSHeapBroker
+ // will not be retired in that same method. In this case, we need to re-attach
+ // the PersistentHandles container to the JSHeapBroker.
+ std::unique_ptr<PersistentHandles> ph_;
+
+ // Canonical handles follow the same path as described by the persistent
+ // handles above. The only difference is that is created in the
+ // CanonicalHandleScope(i.e step 1) is different).
+ std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
new file mode 100644
index 0000000000..f35f418de7
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -0,0 +1,45 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compilation-unit.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/objects/js-function-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
+ Handle<JSFunction> function)
+ : info_(info),
+ bytecode_(
+ MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
+ feedback_(MakeRef(broker(), function->feedback_vector())),
+ bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
+ true),
+ register_count_(bytecode_.register_count()),
+ parameter_count_(bytecode_.parameter_count()) {}
+
+compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
+ return info_->broker();
+}
+
+Isolate* MaglevCompilationUnit::isolate() const { return info_->isolate(); }
+
+Zone* MaglevCompilationUnit::zone() const { return info_->zone(); }
+
+bool MaglevCompilationUnit::has_graph_labeller() const {
+ return info_->has_graph_labeller();
+}
+
+MaglevGraphLabeller* MaglevCompilationUnit::graph_labeller() const {
+ DCHECK(has_graph_labeller());
+ return info_->graph_labeller();
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.h b/deps/v8/src/maglev/maglev-compilation-unit.h
new file mode 100644
index 0000000000..52e1a775d6
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-unit.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
+#define V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/heap-refs.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCompilationInfo;
+class MaglevGraphLabeller;
+
+// Per-unit data, i.e. once per top-level function and once per inlined
+// function.
+class MaglevCompilationUnit : public ZoneObject {
+ public:
+ static MaglevCompilationUnit* New(Zone* zone, MaglevCompilationInfo* data,
+ Handle<JSFunction> function) {
+ return zone->New<MaglevCompilationUnit>(data, function);
+ }
+ MaglevCompilationUnit(MaglevCompilationInfo* data,
+ Handle<JSFunction> function);
+
+ MaglevCompilationInfo* info() const { return info_; }
+ compiler::JSHeapBroker* broker() const;
+ Isolate* isolate() const;
+ Zone* zone() const;
+ int register_count() const { return register_count_; }
+ int parameter_count() const { return parameter_count_; }
+ bool has_graph_labeller() const;
+ MaglevGraphLabeller* graph_labeller() const;
+ const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
+ const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return bytecode_analysis_;
+ }
+
+ private:
+ MaglevCompilationInfo* const info_;
+ const compiler::BytecodeArrayRef bytecode_;
+ const compiler::FeedbackVectorRef feedback_;
+ const compiler::BytecodeAnalysis bytecode_analysis_;
+ const int register_count_;
+ const int parameter_count_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
new file mode 100644
index 0000000000..f4a23d869e
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -0,0 +1,209 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compiler.h"
+
+#include <iomanip>
+#include <ostream>
+#include <type_traits>
+
+#include "src/base/iterator.h"
+#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/heap-refs.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/frames.h"
+#include "src/ic/handler-configuration.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-code-generator.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-builder.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc.h"
+#include "src/maglev/maglev-vreg-allocator.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/js-function.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class NumberingProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ node->set_id(node_id_++);
+ }
+
+ private:
+ uint32_t node_id_;
+};
+
+class UseMarkingProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
+ for (Input& input : *node) {
+ input.node()->mark_use(node->id(), &input);
+ }
+ }
+
+ void Process(Phi* node, const ProcessingState& state) {
+ // Don't mark Phi uses when visiting the node, because of loop phis.
+ // Instead, they'll be visited while processing Jump/JumpLoop.
+ }
+
+ // Specialize the two unconditional jumps to extend their Phis' inputs' live
+ // ranges.
+
+ void Process(JumpLoop* node, const ProcessingState& state) {
+ int i = state.block()->predecessor_id();
+ BasicBlock* target = node->target();
+ if (!target->has_phi()) return;
+ uint32_t use = node->id();
+ for (Phi* phi : *target->phis()) {
+ ValueNode* input = phi->input(i).node();
+ input->mark_use(use, &phi->input(i));
+ }
+ }
+ void Process(Jump* node, const ProcessingState& state) {
+ int i = state.block()->predecessor_id();
+ BasicBlock* target = node->target();
+ if (!target->has_phi()) return;
+ uint32_t use = node->id();
+ for (Phi* phi : *target->phis()) {
+ ValueNode* input = phi->input(i).node();
+ input->mark_use(use, &phi->input(i));
+ }
+ }
+
+ private:
+ void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
+ const InterpreterFrameState* checkpoint_state =
+ state.checkpoint_frame_state();
+ int use_id = node->id();
+
+ for (int i = 0; i < state.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ ValueNode* node = checkpoint_state->get(reg);
+ if (node) node->mark_use(use_id, nullptr);
+ }
+ for (int i = 0; i < state.register_count(); i++) {
+ interpreter::Register reg = interpreter::Register(i);
+ ValueNode* node = checkpoint_state->get(reg);
+ if (node) node->mark_use(use_id, nullptr);
+ }
+ if (checkpoint_state->accumulator()) {
+ checkpoint_state->accumulator()->mark_use(use_id, nullptr);
+ }
+ }
+};
+
+// static
+void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) {
+ MaglevCompiler compiler(toplevel_compilation_unit);
+ compiler.Compile();
+}
+
+void MaglevCompiler::Compile() {
+ compiler::UnparkedScopeIfNeeded unparked_scope(broker());
+
+ // Build graph.
+ if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
+ FLAG_trace_maglev_regalloc) {
+ toplevel_compilation_unit_->info()->set_graph_labeller(
+ new MaglevGraphLabeller());
+ }
+
+ MaglevGraphBuilder graph_builder(toplevel_compilation_unit_);
+
+ graph_builder.Build();
+
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ if (graph_builder.found_unsupported_bytecode()) {
+ return;
+ }
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After graph buiding" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ {
+ GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
+ MaglevVregAllocator>
+ processor(toplevel_compilation_unit_);
+ processor.ProcessGraph(graph_builder.graph());
+ }
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After node processor" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ StraightForwardRegisterAllocator allocator(toplevel_compilation_unit_,
+ graph_builder.graph());
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After register allocation" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ // Stash the compiled graph on the compilation info.
+ toplevel_compilation_unit_->info()->set_graph(graph_builder.graph());
+}
+
+// static
+MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
+ MaglevCompilationUnit* toplevel_compilation_unit) {
+ Graph* const graph = toplevel_compilation_unit->info()->graph();
+ if (graph == nullptr) return {}; // Compilation failed.
+
+ Handle<Code> code;
+ if (!MaglevCodeGenerator::Generate(toplevel_compilation_unit, graph)
+ .ToHandle(&code)) {
+ return {};
+ }
+
+ compiler::JSHeapBroker* const broker = toplevel_compilation_unit->broker();
+ const bool deps_committed_successfully = broker->dependencies()->Commit(code);
+ CHECK(deps_committed_successfully);
+
+ if (FLAG_print_maglev_code) {
+ code->Print();
+ }
+
+ Isolate* const isolate = toplevel_compilation_unit->isolate();
+ return ToCodeT(code, isolate);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
new file mode 100644
index 0000000000..79b71552d1
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -0,0 +1,53 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILER_H_
+#define V8_MAGLEV_MAGLEV_COMPILER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/heap-refs.h"
+#include "src/maglev/maglev-compilation-unit.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class JSHeapBroker;
+}
+
+namespace maglev {
+
+class Graph;
+
+class MaglevCompiler {
+ public:
+ // May be called from any thread.
+ static void Compile(MaglevCompilationUnit* toplevel_compilation_unit);
+
+ // Called on the main thread after Compile has completed.
+ // TODO(v8:7700): Move this to a different class?
+ static MaybeHandle<CodeT> GenerateCode(
+ MaglevCompilationUnit* toplevel_compilation_unit);
+
+ private:
+ explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit)
+ : toplevel_compilation_unit_(toplevel_compilation_unit) {}
+
+ void Compile();
+
+ compiler::JSHeapBroker* broker() const {
+ return toplevel_compilation_unit_->broker();
+ }
+ Zone* zone() { return toplevel_compilation_unit_->zone(); }
+ Isolate* isolate() { return toplevel_compilation_unit_->isolate(); }
+
+ MaglevCompilationUnit* const toplevel_compilation_unit_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILER_H_
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
new file mode 100644
index 0000000000..762de2455a
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -0,0 +1,194 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/persistent-handles.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-compiler.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/objects/js-function-inl.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/locked-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+void JSHeapBroker::AttachLocalIsolateForMaglev(
+ maglev::MaglevCompilationInfo* info, LocalIsolate* local_isolate) {
+ set_canonical_handles(info->DetachCanonicalHandles());
+ DCHECK_NULL(local_isolate_);
+ local_isolate_ = local_isolate;
+ DCHECK_NOT_NULL(local_isolate_);
+ local_isolate_->heap()->AttachPersistentHandles(
+ info->DetachPersistentHandles());
+}
+
+void JSHeapBroker::DetachLocalIsolateForMaglev(
+ maglev::MaglevCompilationInfo* info) {
+ DCHECK_NULL(ph_);
+ DCHECK_NOT_NULL(local_isolate_);
+ std::unique_ptr<PersistentHandles> ph =
+ local_isolate_->heap()->DetachPersistentHandles();
+ local_isolate_ = nullptr;
+ info->set_canonical_handles(DetachCanonicalHandles());
+ info->set_persistent_handles(std::move(ph));
+}
+
+} // namespace compiler
+
+namespace maglev {
+
+namespace {
+
+constexpr char kMaglevCompilerName[] = "Maglev";
+
+// LocalIsolateScope encapsulates the phase where persistent handles are
+// attached to the LocalHeap inside {local_isolate}.
+class V8_NODISCARD LocalIsolateScope final {
+ public:
+ explicit LocalIsolateScope(MaglevCompilationInfo* info,
+ LocalIsolate* local_isolate)
+ : info_(info) {
+ info_->broker()->AttachLocalIsolateForMaglev(info_, local_isolate);
+ }
+
+ ~LocalIsolateScope() { info_->broker()->DetachLocalIsolateForMaglev(info_); }
+
+ private:
+ MaglevCompilationInfo* const info_;
+};
+
+} // namespace
+
+Zone* ExportedMaglevCompilationInfo::zone() const { return info_->zone(); }
+
+void ExportedMaglevCompilationInfo::set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
+ info_->set_canonical_handles(std::move(canonical_handles));
+}
+
+// static
+std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
+ Isolate* isolate, Handle<JSFunction> function) {
+ auto info = maglev::MaglevCompilationInfo::New(isolate, function);
+ return std::unique_ptr<MaglevCompilationJob>(
+ new MaglevCompilationJob(std::move(info)));
+}
+
+MaglevCompilationJob::MaglevCompilationJob(
+ std::unique_ptr<MaglevCompilationInfo>&& info)
+ : OptimizedCompilationJob(nullptr, kMaglevCompilerName),
+ info_(std::move(info)) {
+ // TODO(jgruber, v8:7700): Remove the OptimizedCompilationInfo (which should
+ // be renamed to TurbofanCompilationInfo) from OptimizedCompilationJob.
+ DCHECK(FLAG_maglev);
+}
+
+MaglevCompilationJob::~MaglevCompilationJob() = default;
+
+CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
+ LocalIsolateScope scope{info(), local_isolate};
+ maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit());
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
+ info()->set_codet(maglev::MaglevCompiler::GenerateCode(
+ info()->toplevel_compilation_unit()));
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
+// processing the incoming queue on a worker thread.
+class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
+ public:
+ explicit JobTask(MaglevConcurrentDispatcher* dispatcher)
+ : dispatcher_(dispatcher) {}
+
+ void Run(JobDelegate* delegate) override {
+ LocalIsolate local_isolate(isolate(), ThreadKind::kBackground);
+ DCHECK(local_isolate.heap()->IsParked());
+
+ while (!incoming_queue()->IsEmpty() && !delegate->ShouldYield()) {
+ std::unique_ptr<MaglevCompilationJob> job;
+ if (!incoming_queue()->Dequeue(&job)) break;
+ DCHECK_NOT_NULL(job);
+ RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): Implement.
+ CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ outgoing_queue()->Enqueue(std::move(job));
+ }
+ // TODO(v8:7700):
+ // isolate_->stack_guard()->RequestInstallMaglevCode();
+ }
+
+ size_t GetMaxConcurrency(size_t) const override {
+ return incoming_queue()->size();
+ }
+
+ private:
+ Isolate* isolate() const { return dispatcher_->isolate_; }
+ QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; }
+ QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; }
+
+ MaglevConcurrentDispatcher* const dispatcher_;
+ const Handle<JSFunction> function_;
+};
+
+MaglevConcurrentDispatcher::MaglevConcurrentDispatcher(Isolate* isolate)
+ : isolate_(isolate) {
+ if (FLAG_concurrent_recompilation && FLAG_maglev) {
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible, std::make_unique<JobTask>(this));
+ DCHECK(is_enabled());
+ } else {
+ DCHECK(!is_enabled());
+ }
+}
+
+MaglevConcurrentDispatcher::~MaglevConcurrentDispatcher() {
+ if (is_enabled() && job_handle_->IsValid()) {
+ // Wait for the job handle to complete, so that we know the queue
+ // pointers are safe.
+ job_handle_->Cancel();
+ }
+}
+
+void MaglevConcurrentDispatcher::EnqueueJob(
+ std::unique_ptr<MaglevCompilationJob>&& job) {
+ DCHECK(is_enabled());
+ // TODO(v8:7700): RCS.
+ // RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileMaglev);
+ incoming_queue_.Enqueue(std::move(job));
+ job_handle_->NotifyConcurrencyIncrease();
+}
+
+void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+ while (!outgoing_queue_.IsEmpty()) {
+ std::unique_ptr<MaglevCompilationJob> job;
+ outgoing_queue_.Dequeue(&job);
+ CompilationJob::Status status = job->FinalizeJob(isolate_);
+ // TODO(v8:7700): Use the result.
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
new file mode 100644
index 0000000000..0b2a086e5a
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -0,0 +1,92 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
+#define V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
+
+#ifdef V8_ENABLE_MAGLEV
+
+#include <memory>
+
+#include "src/codegen/compiler.h" // For OptimizedCompilationJob.
+#include "src/utils/locked-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace maglev {
+
+class MaglevCompilationInfo;
+
+// Exports needed functionality without exposing implementation details.
+class ExportedMaglevCompilationInfo final {
+ public:
+ explicit ExportedMaglevCompilationInfo(MaglevCompilationInfo* info)
+ : info_(info) {}
+
+ Zone* zone() const;
+ void set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
+
+ private:
+ MaglevCompilationInfo* const info_;
+};
+
+// The job is a single actual compilation task.
+class MaglevCompilationJob final : public OptimizedCompilationJob {
+ public:
+ static std::unique_ptr<MaglevCompilationJob> New(Isolate* isolate,
+ Handle<JSFunction> function);
+ virtual ~MaglevCompilationJob();
+
+ Status PrepareJobImpl(Isolate* isolate) override;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) override;
+ Status FinalizeJobImpl(Isolate* isolate) override;
+
+ private:
+ explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
+
+ MaglevCompilationInfo* info() const { return info_.get(); }
+
+ const std::unique_ptr<MaglevCompilationInfo> info_;
+};
+
+// The public API for Maglev concurrent compilation.
+// Keep this as minimal as possible.
+class MaglevConcurrentDispatcher final {
+ class JobTask;
+
+ // TODO(jgruber): There's no reason to use locking queues here, we only use
+ // them for simplicity - consider replacing with lock-free data structures.
+ using QueueT = LockedQueue<std::unique_ptr<MaglevCompilationJob>>;
+
+ public:
+ explicit MaglevConcurrentDispatcher(Isolate* isolate);
+ ~MaglevConcurrentDispatcher();
+
+ // Called from the main thread.
+ void EnqueueJob(std::unique_ptr<MaglevCompilationJob>&& job);
+
+ // Called from the main thread.
+ void FinalizeFinishedJobs();
+
+ bool is_enabled() const { return static_cast<bool>(job_handle_); }
+
+ private:
+ Isolate* const isolate_;
+ std::unique_ptr<JobHandle> job_handle_;
+ QueueT incoming_queue_;
+ QueueT outgoing_queue_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ENABLE_MAGLEV
+
+#endif // V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
new file mode 100644
index 0000000000..b38bece1d5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -0,0 +1,616 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-graph-builder.h"
+
+#include "src/compiler/feedback-source.h"
+#include "src/compiler/heap-refs.h"
+#include "src/handles/maybe-handles-inl.h"
+#include "src/ic/handler-configuration.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
+ : compilation_unit_(compilation_unit),
+ iterator_(bytecode().object()),
+ jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
+ // Overallocate merge_states_ by one to allow always looking up the
+ // next offset.
+ merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
+ bytecode().length() + 1)),
+ graph_(Graph::New(zone())),
+ current_interpreter_frame_(*compilation_unit_) {
+ memset(merge_states_, 0,
+ bytecode().length() * sizeof(InterpreterFrameState*));
+ // Default construct basic block refs.
+ // TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
+ for (int i = 0; i < bytecode().length(); ++i) {
+ new (&jump_targets_[i]) BasicBlockRef();
+ }
+
+ CalculatePredecessorCounts();
+
+ for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) {
+ int offset = offset_and_info.first;
+ const compiler::LoopInfo& loop_info = offset_and_info.second;
+
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(offset);
+
+ merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
+ *compilation_unit_, offset, NumPredecessors(offset), liveness,
+ &loop_info);
+ }
+
+ current_block_ = zone()->New<BasicBlock>(nullptr);
+ block_offset_ = -1;
+
+ for (int i = 0; i < parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
+ }
+
+ // TODO(leszeks): Extract out a separate "incoming context/closure" nodes,
+ // to be able to read in the machine register but also use the frame-spilled
+ // slot.
+ interpreter::Register regs[] = {interpreter::Register::current_context(),
+ interpreter::Register::function_closure()};
+ for (interpreter::Register& reg : regs) {
+ current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
+ }
+
+ interpreter::Register new_target_or_generator_register =
+ bytecode().incoming_new_target_or_generator_register();
+
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(0);
+ int register_index = 0;
+ // TODO(leszeks): Don't emit if not needed.
+ ValueNode* undefined_value =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ if (new_target_or_generator_register.is_valid()) {
+ int new_target_index = new_target_or_generator_register.index();
+ for (; register_index < new_target_index; register_index++) {
+ StoreRegister(interpreter::Register(register_index), undefined_value,
+ liveness);
+ }
+ StoreRegister(
+ new_target_or_generator_register,
+ // TODO(leszeks): Expose in Graph.
+ AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
+ liveness);
+ register_index++;
+ }
+ for (; register_index < register_count(); register_index++) {
+ StoreRegister(interpreter::Register(register_index), undefined_value,
+ liveness);
+ }
+
+ BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
+ MergeIntoFrameState(first_block, 0);
+}
+
+// TODO(v8:7700): Clean up after all bytecodes are supported.
+#define MAGLEV_UNIMPLEMENTED(BytecodeName) \
+ do { \
+ std::cerr << "Maglev: Can't compile, bytecode " #BytecodeName \
+ " is not supported\n"; \
+ found_unsupported_bytecode_ = true; \
+ this_field_will_be_unused_once_all_bytecodes_are_supported_ = true; \
+ } while (false)
+
+#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
+ void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
+
+template <Operation kOperation, typename... Args>
+ValueNode* MaglevGraphBuilder::AddNewOperationNode(
+ std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ switch (kOperation) {
+#define CASE(Name) \
+ case Operation::k##Name: \
+ return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
+ OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
+ FeedbackSlot slot_index = GetSlotOperand(0);
+ ValueNode* value = GetAccumulator();
+ ValueNode* node = AddNewOperationNode<kOperation>(
+ {value}, compiler::FeedbackSource{feedback(), slot_index});
+ SetAccumulator(node);
+ MarkPossibleSideEffect();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
+ ValueNode* left = LoadRegister(0);
+ FeedbackSlot slot_index = GetSlotOperand(1);
+ ValueNode* right = GetAccumulator();
+ ValueNode* node = AddNewOperationNode<kOperation>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index});
+ SetAccumulator(node);
+ MarkPossibleSideEffect();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitUnaryOperation() {
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericUnaryOperationNode<kOperation>();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitBinaryOperation() {
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericBinaryOperationNode<kOperation>();
+}
+
+void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
+
+void MaglevGraphBuilder::VisitLdaZero() {
+ SetAccumulator(AddNewNode<SmiConstant>({}, Smi::zero()));
+}
+void MaglevGraphBuilder::VisitLdaSmi() {
+ Smi constant = Smi::FromInt(iterator_.GetImmediateOperand(0));
+ SetAccumulator(AddNewNode<SmiConstant>({}, constant));
+}
+void MaglevGraphBuilder::VisitLdaUndefined() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue));
+}
+void MaglevGraphBuilder::VisitLdaNull() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kNullValue));
+}
+void MaglevGraphBuilder::VisitLdaTheHole() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTheHoleValue));
+}
+void MaglevGraphBuilder::VisitLdaTrue() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTrueValue));
+}
+void MaglevGraphBuilder::VisitLdaFalse() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kFalseValue));
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaConstant)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaCurrentContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableCurrentContextSlot)
+void MaglevGraphBuilder::VisitStar() {
+ StoreRegister(
+ iterator_.GetRegisterOperand(0), GetAccumulator(),
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+}
+void MaglevGraphBuilder::VisitMov() {
+ StoreRegister(
+ iterator_.GetRegisterOperand(1), LoadRegister(0),
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(PushContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(PopContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestReferenceEqual)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndetectable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestTypeOf)
+void MaglevGraphBuilder::VisitLdaGlobal() {
+ // LdaGlobal <name_index> <slot>
+
+ static const int kNameOperandIndex = 0;
+ static const int kSlotOperandIndex = 1;
+
+ compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
+ FeedbackSlot slot_index = GetSlotOperand(kSlotOperandIndex);
+ ValueNode* context = GetContext();
+
+ USE(slot_index); // TODO(v8:7700): Use the feedback info.
+
+ SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
+ MarkPossibleSideEffect();
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaGlobalInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaGlobal)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaCurrentContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupContextSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot)
+void MaglevGraphBuilder::VisitGetNamedProperty() {
+ // GetNamedProperty <object> <name_index> <slot>
+ ValueNode* object = LoadRegister(0);
+ FeedbackNexus nexus = feedback_nexus(2);
+
+ if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
+ EnsureCheckpoint();
+ AddNewNode<SoftDeopt>({});
+ } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ std::vector<MapAndHandler> maps_and_handlers;
+ nexus.ExtractMapsAndHandlers(&maps_and_handlers);
+ DCHECK_EQ(maps_and_handlers.size(), 1);
+ MapAndHandler& map_and_handler = maps_and_handlers[0];
+ if (map_and_handler.second->IsSmi()) {
+ int handler = map_and_handler.second->ToSmi().value();
+ LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
+ if (kind == LoadHandler::Kind::kField &&
+ !LoadHandler::IsWasmStructBits::decode(handler)) {
+ EnsureCheckpoint();
+ AddNewNode<CheckMaps>({object},
+ MakeRef(broker(), map_and_handler.first));
+ SetAccumulator(AddNewNode<LoadField>({object}, handler));
+ return;
+ }
+ }
+ }
+
+ ValueNode* context = GetContext();
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ SetAccumulator(AddNewNode<LoadNamedGeneric>({context, object}, name));
+ MarkPossibleSideEffect();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetKeyedProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaModuleVariable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable)
+
+void MaglevGraphBuilder::VisitSetNamedProperty() {
+ // SetNamedProperty <object> <name_index> <slot>
+ ValueNode* object = LoadRegister(0);
+ FeedbackNexus nexus = feedback_nexus(2);
+
+ if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
+ EnsureCheckpoint();
+ AddNewNode<SoftDeopt>({});
+ } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ std::vector<MapAndHandler> maps_and_handlers;
+ nexus.ExtractMapsAndHandlers(&maps_and_handlers);
+ DCHECK_EQ(maps_and_handlers.size(), 1);
+ MapAndHandler& map_and_handler = maps_and_handlers[0];
+ if (map_and_handler.second->IsSmi()) {
+ int handler = map_and_handler.second->ToSmi().value();
+ StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
+ if (kind == StoreHandler::Kind::kField) {
+ EnsureCheckpoint();
+ AddNewNode<CheckMaps>({object},
+ MakeRef(broker(), map_and_handler.first));
+ ValueNode* value = GetAccumulator();
+ AddNewNode<StoreField>({object, value}, handler);
+ return;
+ }
+ }
+ }
+
+ // TODO(victorgomes): Generic store.
+ MAGLEV_UNIMPLEMENTED(VisitSetNamedProperty);
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineNamedOwnProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SetKeyedProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaInArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnPropertyInLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CollectTypeProfile)
+
+void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
+void MaglevGraphBuilder::VisitSub() {
+ VisitBinaryOperation<Operation::kSubtract>();
+}
+void MaglevGraphBuilder::VisitMul() {
+ VisitBinaryOperation<Operation::kMultiply>();
+}
+void MaglevGraphBuilder::VisitDiv() {
+ VisitBinaryOperation<Operation::kDivide>();
+}
+void MaglevGraphBuilder::VisitMod() {
+ VisitBinaryOperation<Operation::kModulus>();
+}
+void MaglevGraphBuilder::VisitExp() {
+ VisitBinaryOperation<Operation::kExponentiate>();
+}
+void MaglevGraphBuilder::VisitBitwiseOr() {
+ VisitBinaryOperation<Operation::kBitwiseOr>();
+}
+void MaglevGraphBuilder::VisitBitwiseXor() {
+ VisitBinaryOperation<Operation::kBitwiseXor>();
+}
+void MaglevGraphBuilder::VisitBitwiseAnd() {
+ VisitBinaryOperation<Operation::kBitwiseAnd>();
+}
+void MaglevGraphBuilder::VisitShiftLeft() {
+ VisitBinaryOperation<Operation::kShiftLeft>();
+}
+void MaglevGraphBuilder::VisitShiftRight() {
+ VisitBinaryOperation<Operation::kShiftRight>();
+}
+void MaglevGraphBuilder::VisitShiftRightLogical() {
+ VisitBinaryOperation<Operation::kShiftRightLogical>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DivSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ModSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ExpSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOrSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXorSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
+
+void MaglevGraphBuilder::VisitInc() {
+ VisitUnaryOperation<Operation::kIncrement>();
+}
+void MaglevGraphBuilder::VisitDec() {
+ VisitUnaryOperation<Operation::kDecrement>();
+}
+void MaglevGraphBuilder::VisitNegate() {
+ VisitUnaryOperation<Operation::kNegate>();
+}
+void MaglevGraphBuilder::VisitBitwiseNot() {
+ VisitUnaryOperation<Operation::kBitwiseNot>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToBooleanLogicalNot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LogicalNot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertyStrict)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertySloppy)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetSuperConstructor)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallAnyReceiver)
+
+// TODO(leszeks): For all of these:
+// a) Read feedback and implement inlining
+// b) Wrap in a helper.
+void MaglevGraphBuilder::VisitCallProperty() {
+ ValueNode* function = LoadRegister(0);
+
+ interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
+ ValueNode* context = GetContext();
+
+ static constexpr int kTheContext = 1;
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ args.register_count() + kTheContext, function, context);
+ // TODO(leszeks): Move this for loop into the CallProperty constructor,
+ // pre-size the args array.
+ for (int i = 0; i < args.register_count(); ++i) {
+ call_property->set_arg(i, current_interpreter_frame_.get(args[i]));
+ }
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty0() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property =
+ AddNewNode<CallProperty>({function, context, LoadRegister(1)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty1() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ {function, context, LoadRegister(1), LoadRegister(2)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty2() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ {function, context, LoadRegister(1), LoadRegister(2), LoadRegister(3)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver0)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver1)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver2)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallWithSpread)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntime)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntimeForPair)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallJSRuntime)
+MAGLEV_UNIMPLEMENTED_BYTECODE(InvokeIntrinsic)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Construct)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
+
+void MaglevGraphBuilder::VisitTestLessThan() {
+ VisitBinaryOperation<Operation::kLessThan>();
+}
+void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
+ VisitBinaryOperation<Operation::kLessThanOrEqual>();
+}
+void MaglevGraphBuilder::VisitTestGreaterThan() {
+ VisitBinaryOperation<Operation::kGreaterThan>();
+}
+void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
+ VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestIn)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToName)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToNumber)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToNumeric)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToString)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateRegExpLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateArrayFromIterable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEmptyArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateObjectLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEmptyObjectLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CloneObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetTemplateObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateClosure)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateBlockContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateCatchContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateFunctionContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEvalContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateWithContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateMappedArguments)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateUnmappedArguments)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateRestParameter)
+
+void MaglevGraphBuilder::VisitJumpLoop() {
+ int target = iterator_.GetJumpTargetOffset();
+ BasicBlock* block =
+ target == iterator_.current_offset()
+ ? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target])
+ : FinishBlock<JumpLoop>(next_offset(), {},
+ jump_targets_[target].block_ptr());
+
+ merge_states_[target]->MergeLoop(*compilation_unit_,
+ current_interpreter_frame_, block, target);
+ block->set_predecessor_id(0);
+}
+void MaglevGraphBuilder::VisitJump() {
+ BasicBlock* block = FinishBlock<Jump>(
+ next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ DCHECK_LT(next_offset(), bytecode().length());
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpConstant)
+void MaglevGraphBuilder::VisitJumpIfNullConstant() { VisitJumpIfNull(); }
+void MaglevGraphBuilder::VisitJumpIfNotNullConstant() { VisitJumpIfNotNull(); }
+void MaglevGraphBuilder::VisitJumpIfUndefinedConstant() {
+ VisitJumpIfUndefined();
+}
+void MaglevGraphBuilder::VisitJumpIfNotUndefinedConstant() {
+ VisitJumpIfNotUndefined();
+}
+void MaglevGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
+ VisitJumpIfUndefinedOrNull();
+}
+void MaglevGraphBuilder::VisitJumpIfTrueConstant() { VisitJumpIfTrue(); }
+void MaglevGraphBuilder::VisitJumpIfFalseConstant() { VisitJumpIfFalse(); }
+void MaglevGraphBuilder::VisitJumpIfJSReceiverConstant() {
+ VisitJumpIfJSReceiver();
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
+ VisitJumpIfToBooleanTrue();
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
+ VisitJumpIfToBooleanFalse();
+}
+
+void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
+ int target) {
+ if (merge_states_[target] == nullptr) {
+ DCHECK(!bytecode_analysis().IsLoopHeader(target));
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(target);
+ // If there's no target frame state, allocate a new one.
+ merge_states_[target] = zone()->New<MergePointInterpreterFrameState>(
+ *compilation_unit_, current_interpreter_frame_, target,
+ NumPredecessors(target), predecessor, liveness);
+ } else {
+ // If there already is a frame state, merge.
+ merge_states_[target]->Merge(*compilation_unit_, current_interpreter_frame_,
+ predecessor, target);
+ }
+}
+
+void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
+ int false_target) {
+ // TODO(verwaest): Materialize true/false in the respective environments.
+ if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
+ BasicBlock* block = FinishBlock<BranchIfTrue>(next_offset(), {node},
+ &jump_targets_[true_target],
+ &jump_targets_[false_target]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
+ int true_target,
+ int false_target) {
+ // TODO(verwaest): Materialize true/false in the respective environments.
+ if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
+ BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
+ next_offset(), {node}, &jump_targets_[true_target],
+ &jump_targets_[false_target]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
+ BuildBranchIfToBooleanTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
+ next_offset());
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
+ BuildBranchIfToBooleanTrue(GetAccumulator(), next_offset(),
+ iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::VisitJumpIfTrue() {
+ BuildBranchIfTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
+ next_offset());
+}
+void MaglevGraphBuilder::VisitJumpIfFalse() {
+ BuildBranchIfTrue(GetAccumulator(), next_offset(),
+ iterator_.GetJumpTargetOffset());
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNotNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNotUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfUndefinedOrNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfJSReceiver)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnSmiNoFeedback)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInEnumerate)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInPrepare)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInContinue)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInNext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInStep)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SetPendingMessage)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Throw)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
+void MaglevGraphBuilder::VisitReturn() {
+ FinishBlock<Return>(next_offset(), {GetAccumulator()});
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperNotCalledIfHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperAlreadyCalledIfNotHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowIfNotSuperConstructor)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnGeneratorState)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SuspendGenerator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ResumeGenerator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetIterator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Debugger)
+MAGLEV_UNIMPLEMENTED_BYTECODE(IncBlockCounter)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Abort)
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { \
+ StoreRegister( \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
+ GetAccumulator(), \
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset())); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
+void MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
+void MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
+#define DEBUG_BREAK(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
+#undef DEBUG_BREAK
+void MaglevGraphBuilder::VisitIllegal() { UNREACHABLE(); }
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
new file mode 100644
index 0000000000..da86b80841
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -0,0 +1,383 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
+
+#include <type_traits>
+
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/compiler/heap-refs.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/utils/memcopy.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevGraphBuilder {
+ public:
+ explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit);
+
+ void Build() {
+ for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
+ VisitSingleBytecode();
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ if (found_unsupported_bytecode()) break;
+ }
+ }
+
+ Graph* graph() const { return graph_; }
+
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ bool found_unsupported_bytecode() const {
+ return found_unsupported_bytecode_;
+ }
+
+ private:
+ BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
+ DCHECK_NULL(current_block_);
+ current_block_ = zone()->New<BasicBlock>(nullptr);
+ BasicBlock* result = CreateBlock<Jump>({}, &jump_targets_[offset]);
+ result->set_empty_block_predecessor(predecessor);
+ return result;
+ }
+
+ void ProcessMergePoint(int offset) {
+ // First copy the merge state to be the current state.
+ MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
+ current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);
+
+ if (merge_state.predecessor_count() == 1) return;
+
+ // Set up edge-split.
+ int predecessor_index = merge_state.predecessor_count() - 1;
+ BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
+ while (old_jump_targets != nullptr) {
+ BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
+ ControlNode* control = predecessor->control_node();
+ if (control->Is<ConditionalControlNode>()) {
+ // CreateEmptyBlock automatically registers itself with the offset.
+ predecessor = CreateEmptyBlock(offset, predecessor);
+ // Set the old predecessor's (the conditional block) reference to
+ // point to the new empty predecessor block.
+ old_jump_targets =
+ old_jump_targets->SetToBlockAndReturnNext(predecessor);
+ } else {
+ // Re-register the block in the offset's ref list.
+ old_jump_targets =
+ old_jump_targets->MoveToRefList(&jump_targets_[offset]);
+ }
+ predecessor->set_predecessor_id(predecessor_index--);
+ }
+#ifdef DEBUG
+ if (bytecode_analysis().IsLoopHeader(offset)) {
+ // For loops, the JumpLoop block hasn't been generated yet, and so isn't
+ // in the list of jump targets. It's defined to be at index 0, so once
+ // we've processed all the jump targets, the 0 index should be the one
+ // remaining.
+ DCHECK_EQ(predecessor_index, 0);
+ } else {
+ DCHECK_EQ(predecessor_index, -1);
+ }
+#endif
+ if (has_graph_labeller()) {
+ for (Phi* phi : *merge_states_[offset]->phis()) {
+ graph_labeller()->RegisterNode(phi);
+ }
+ }
+ }
+
+ void VisitSingleBytecode() {
+ int offset = iterator_.current_offset();
+ if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
+ if (current_block_ != nullptr) {
+ DCHECK(!current_block_->nodes().is_empty());
+ FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
+
+ merge_states_[offset]->Merge(*compilation_unit_,
+ current_interpreter_frame_,
+ graph()->last_block(), offset);
+ }
+ ProcessMergePoint(offset);
+ StartNewBlock(offset);
+ }
+ DCHECK_NOT_NULL(current_block_);
+ switch (iterator_.current_bytecode()) {
+#define BYTECODE_CASE(name, ...) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(); \
+ break;
+ BYTECODE_LIST(BYTECODE_CASE)
+#undef BYTECODE_CASE
+ }
+ }
+
+#define BYTECODE_VISITOR(name, ...) void Visit##name();
+ BYTECODE_LIST(BYTECODE_VISITOR)
+#undef BYTECODE_VISITOR
+
+ template <typename NodeT>
+ NodeT* AddNode(NodeT* node) {
+ current_block_->nodes().Add(node);
+ return node;
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* NewNode(size_t input_count, Args&&... args) {
+ NodeT* node =
+ Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ return node;
+ }
+
+ template <Operation kOperation, typename... Args>
+ ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
+ Args&&... args);
+
+ template <typename NodeT, typename... Args>
+ NodeT* AddNewNode(size_t input_count, Args&&... args) {
+ return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* NewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ NodeT* node = Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ return node;
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ return AddNode(NewNode<NodeT>(inputs, std::forward<Args>(args)...));
+ }
+
+ ValueNode* GetContext() const {
+ return current_interpreter_frame_.get(
+ interpreter::Register::current_context());
+ }
+
+ FeedbackSlot GetSlotOperand(int operand_index) const {
+ return iterator_.GetSlotOperand(operand_index);
+ }
+
+ template <class T, typename = std::enable_if_t<
+ std::is_convertible<T*, Object*>::value>>
+ typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
+ return MakeRef(broker(),
+ Handle<T>::cast(iterator_.GetConstantForIndexOperand(
+ operand_index, isolate())));
+ }
+
+ void SetAccumulator(ValueNode* node) {
+ current_interpreter_frame_.set_accumulator(node);
+ }
+
+ ValueNode* GetAccumulator() const {
+ return current_interpreter_frame_.accumulator();
+ }
+
+ ValueNode* LoadRegister(int operand_index) {
+ interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
+ return current_interpreter_frame_.get(source);
+ }
+
+ void StoreRegister(interpreter::Register target, ValueNode* value,
+ const compiler::BytecodeLivenessState* liveness) {
+ if (target.index() >= 0 && !liveness->RegisterIsLive(target.index())) {
+ return;
+ }
+ current_interpreter_frame_.set(target, value);
+ AddNewNode<StoreToFrame>({}, value, target);
+ }
+
+ void AddCheckpoint() {
+ // TODO(v8:7700): Verify this calls the initializer list overload.
+ AddNewNode<Checkpoint>({}, iterator_.current_offset(),
+ GetInLiveness()->AccumulatorIsLive(),
+ GetAccumulator());
+ has_valid_checkpoint_ = true;
+ }
+
+ void EnsureCheckpoint() {
+ if (!has_valid_checkpoint_) AddCheckpoint();
+ }
+
+ void MarkPossibleSideEffect() {
+ // If there was a potential side effect, invalidate the previous checkpoint.
+ has_valid_checkpoint_ = false;
+ }
+
+ int next_offset() const {
+ return iterator_.current_offset() + iterator_.current_bytecode_size();
+ }
+ const compiler::BytecodeLivenessState* GetInLiveness() const {
+ return bytecode_analysis().GetInLivenessFor(iterator_.current_offset());
+ }
+ const compiler::BytecodeLivenessState* GetOutLiveness() const {
+ return bytecode_analysis().GetOutLivenessFor(iterator_.current_offset());
+ }
+
+ void StartNewBlock(int offset) {
+ DCHECK_NULL(current_block_);
+ current_block_ = zone()->New<BasicBlock>(merge_states_[offset]);
+ block_offset_ = offset;
+ }
+
+ template <typename ControlNodeT, typename... Args>
+ BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
+ Args&&... args) {
+ current_block_->set_control_node(NodeBase::New<ControlNodeT>(
+ zone(), control_inputs, std::forward<Args>(args)...));
+
+ BasicBlock* block = current_block_;
+ current_block_ = nullptr;
+
+ graph()->Add(block);
+ if (has_graph_labeller()) {
+ graph_labeller()->RegisterBasicBlock(block);
+ }
+ return block;
+ }
+
+ template <typename ControlNodeT, typename... Args>
+ BasicBlock* FinishBlock(int next_block_offset,
+ std::initializer_list<ValueNode*> control_inputs,
+ Args&&... args) {
+ BasicBlock* block =
+ CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
+
+ // Resolve pointers to this basic block.
+ BasicBlockRef* jump_target_refs_head =
+ jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
+ while (jump_target_refs_head != nullptr) {
+ jump_target_refs_head =
+ jump_target_refs_head->SetToBlockAndReturnNext(block);
+ }
+ DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
+
+ // If the next block has merge states, then it's not a simple fallthrough,
+ // and we should reset the checkpoint validity.
+ if (merge_states_[next_block_offset] != nullptr) {
+ has_valid_checkpoint_ = false;
+ }
+ // Start a new block for the fallthrough path, unless it's a merge point, in
+ // which case we merge our state into it. That merge-point could also be a
+ // loop header, in which case the merge state might not exist yet (if the
+ // only predecessors are this path and the JumpLoop).
+ if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
+ if (NumPredecessors(next_block_offset) == 1) {
+ StartNewBlock(next_block_offset);
+ } else {
+ DCHECK_NULL(current_block_);
+ MergeIntoFrameState(block, next_block_offset);
+ }
+ }
+ return block;
+ }
+
+ template <Operation kOperation>
+ void BuildGenericUnaryOperationNode();
+ template <Operation kOperation>
+ void BuildGenericBinaryOperationNode();
+
+ template <Operation kOperation>
+ void VisitUnaryOperation();
+ template <Operation kOperation>
+ void VisitBinaryOperation();
+
+ void MergeIntoFrameState(BasicBlock* block, int target);
+ void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
+ void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,
+ int false_target);
+
+ void CalculatePredecessorCounts() {
+ // Add 1 after the end of the bytecode so we can always write to the offset
+ // after the last bytecode.
+ size_t array_length = bytecode().length() + 1;
+ predecessors_ = zone()->NewArray<uint32_t>(array_length);
+ MemsetUint32(predecessors_, 1, array_length);
+
+ interpreter::BytecodeArrayIterator iterator(bytecode().object());
+ for (; !iterator.done(); iterator.Advance()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ if (interpreter::Bytecodes::IsJump(bytecode)) {
+ predecessors_[iterator.GetJumpTargetOffset()]++;
+ if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ predecessors_[iterator.next_offset()]--;
+ }
+ } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+ for (auto offset : iterator.GetJumpTableTargetOffsets()) {
+ predecessors_[offset.target_offset]++;
+ }
+ } else if (interpreter::Bytecodes::Returns(bytecode) ||
+ interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+ predecessors_[iterator.next_offset()]--;
+ }
+ // TODO(leszeks): Also consider handler entries (the bytecode analysis)
+ // will do this automatically I guess if we merge this into that.
+ }
+ DCHECK_EQ(0, predecessors_[bytecode().length()]);
+ }
+
+ int NumPredecessors(int offset) { return predecessors_[offset]; }
+
+ compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
+ const compiler::FeedbackVectorRef& feedback() const {
+ return compilation_unit_->feedback();
+ }
+ const FeedbackNexus feedback_nexus(int slot_operand_index) const {
+ // TODO(leszeks): Use JSHeapBroker here.
+ return FeedbackNexus(feedback().object(),
+ GetSlotOperand(slot_operand_index));
+ }
+ const compiler::BytecodeArrayRef& bytecode() const {
+ return compilation_unit_->bytecode();
+ }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+ Isolate* isolate() const { return compilation_unit_->isolate(); }
+ Zone* zone() const { return compilation_unit_->zone(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+ int register_count() const { return compilation_unit_->register_count(); }
+ bool has_graph_labeller() const {
+ return compilation_unit_->has_graph_labeller();
+ }
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ MaglevCompilationUnit* const compilation_unit_;
+ interpreter::BytecodeArrayIterator iterator_;
+ uint32_t* predecessors_;
+
+ // Current block information.
+ BasicBlock* current_block_ = nullptr;
+ int block_offset_ = 0;
+ bool has_valid_checkpoint_ = false;
+
+ BasicBlockRef* jump_targets_;
+ MergePointInterpreterFrameState** merge_states_;
+
+ Graph* const graph_;
+ InterpreterFrameState current_interpreter_frame_;
+
+ // Allow marking some bytecodes as unsupported during graph building, so that
+ // we can test maglev incrementally.
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ bool found_unsupported_bytecode_ = false;
+ bool this_field_will_be_unused_once_all_bytecodes_are_supported_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
diff --git a/deps/v8/src/maglev/maglev-graph-labeller.h b/deps/v8/src/maglev/maglev-graph-labeller.h
new file mode 100644
index 0000000000..252b2152ac
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-labeller.h
@@ -0,0 +1,65 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
+
+#include <map>
+
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevGraphLabeller {
+ public:
+ void RegisterNode(const Node* node) {
+ if (node_ids_.emplace(node, next_node_id_).second) {
+ next_node_id_++;
+ }
+ }
+ void RegisterBasicBlock(const BasicBlock* block) {
+ block_ids_[block] = next_block_id_++;
+ if (node_ids_.emplace(block->control_node(), next_node_id_).second) {
+ next_node_id_++;
+ }
+ }
+
+ int BlockId(const BasicBlock* block) { return block_ids_[block]; }
+ int NodeId(const NodeBase* node) { return node_ids_[node]; }
+
+ int max_node_id() const { return next_node_id_ - 1; }
+
+ int max_node_id_width() const { return std::ceil(std::log10(max_node_id())); }
+
+ void PrintNodeLabel(std::ostream& os, const Node* node) {
+ auto node_id_it = node_ids_.find(node);
+
+ if (node_id_it == node_ids_.end()) {
+ os << "<invalid node " << node << ">";
+ return;
+ }
+
+ os << "n" << node_id_it->second;
+ }
+
+ void PrintInput(std::ostream& os, const Input& input) {
+ PrintNodeLabel(os, input.node());
+ os << ":" << input.operand();
+ }
+
+ private:
+ std::map<const BasicBlock*, int> block_ids_;
+ std::map<const NodeBase*, int> node_ids_;
+ int next_block_id_ = 1;
+ int next_node_id_ = 1;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
new file mode 100644
index 0000000000..ccd7bfbad8
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -0,0 +1,446 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-graph-printer.h"
+
+#include <initializer_list>
+#include <iomanip>
+#include <ostream>
+#include <type_traits>
+#include <vector>
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+namespace {
+
+void PrintPaddedId(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ NodeBase* node, std::string padding = " ",
+ int padding_adjustement = 0) {
+ int id = graph_labeller->NodeId(node);
+ int id_width = std::ceil(std::log10(id + 1));
+ int max_width = graph_labeller->max_node_id_width() + 2 + padding_adjustement;
+ int padding_width = std::max(0, max_width - id_width);
+
+ for (int i = 0; i < padding_width; ++i) {
+ os << padding;
+ }
+ os << graph_labeller->NodeId(node) << ": ";
+}
+
+void PrintPadding(std::ostream& os, int size) {
+ os << std::setfill(' ') << std::setw(size) << "";
+}
+
+void PrintPadding(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ int padding_adjustement = 0) {
+ PrintPadding(os,
+ graph_labeller->max_node_id_width() + 2 + padding_adjustement);
+}
+
+enum ConnectionLocation {
+ kTop = 1 << 0,
+ kLeft = 1 << 1,
+ kRight = 1 << 2,
+ kBottom = 1 << 3
+};
+
+struct Connection {
+ void Connect(ConnectionLocation loc) { connected |= loc; }
+
+ void AddHorizontal() {
+ Connect(kLeft);
+ Connect(kRight);
+ }
+
+ void AddVertical() {
+ Connect(kTop);
+ Connect(kBottom);
+ }
+
+ const char* ToString() const {
+ switch (connected) {
+ case 0:
+ return " ";
+ case kTop:
+ return "╵";
+ case kLeft:
+ return "╴";
+ case kRight:
+ return "╶";
+ case kBottom:
+ return "╷";
+ case kTop | kLeft:
+ return "╯";
+ case kTop | kRight:
+ return "╰";
+ case kBottom | kLeft:
+ return "╮";
+ case kBottom | kRight:
+ return "╭";
+ case kTop | kBottom:
+ return "│";
+ case kLeft | kRight:
+ return "─";
+ case kTop | kBottom | kLeft:
+ return "┤";
+ case kTop | kBottom | kRight:
+ return "├";
+ case kLeft | kRight | kTop:
+ return "┴";
+ case kLeft | kRight | kBottom:
+ return "┬";
+ case kTop | kLeft | kRight | kBottom:
+ return "┼";
+ }
+ UNREACHABLE();
+ }
+
+ uint8_t connected = 0;
+};
+
+std::ostream& operator<<(std::ostream& os, const Connection& c) {
+ return os << c.ToString();
+}
+
+// Print the vertical parts of connection arrows, optionally connecting arrows
+// that were only first created on this line (passed in "arrows_starting_here")
+// and should therefore connect rightwards instead of upwards.
+void PrintVerticalArrows(
+ std::ostream& os, const std::vector<BasicBlock*>& targets,
+ const std::set<size_t>& arrows_starting_here = {},
+ const std::set<BasicBlock*>& targets_starting_here = {},
+ bool is_loop = false) {
+ bool saw_start = false;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ Connection c;
+ if (saw_start) {
+ c.AddHorizontal();
+ }
+ if (arrows_starting_here.find(i) != arrows_starting_here.end() ||
+ targets_starting_here.find(targets[i]) != targets_starting_here.end()) {
+ c.Connect(kRight);
+ c.Connect(is_loop ? kTop : kBottom);
+ saw_start = true;
+ }
+
+ // Only add the vertical connection if there was no other connection.
+ if (c.connected == 0 && targets[i] != nullptr) {
+ c.AddVertical();
+ }
+ os << c;
+ }
+}
+
+// Add a target to the target list in the first non-null position from the end.
+// This might have to extend the target list if there is no free spot.
+size_t AddTarget(std::vector<BasicBlock*>& targets, BasicBlock* target) {
+ if (targets.size() == 0 || targets.back() != nullptr) {
+ targets.push_back(target);
+ return targets.size() - 1;
+ }
+
+ size_t i = targets.size();
+ while (i > 0) {
+ if (targets[i - 1] != nullptr) break;
+ i--;
+ }
+ targets[i] = target;
+ return i;
+}
+
+// If the target is not a fallthrough, add i to the target list in the first
+// non-null position from the end. This might have to extend the target list if
+// there is no free spot. Returns true if it was added, false if it was a
+// fallthrough.
+bool AddTargetIfNotNext(std::vector<BasicBlock*>& targets, BasicBlock* target,
+ BasicBlock* next_block,
+ std::set<size_t>* arrows_starting_here = nullptr) {
+ if (next_block == target) return false;
+ size_t index = AddTarget(targets, target);
+ if (arrows_starting_here != nullptr) arrows_starting_here->insert(index);
+ return true;
+}
+
+class MaglevPrintingVisitorOstream : public std::ostream,
+ private std::streambuf {
+ public:
+ MaglevPrintingVisitorOstream(std::ostream& os,
+ std::vector<BasicBlock*>* targets)
+ : std::ostream(this), os_(os), targets_(targets), padding_size_(0) {}
+ ~MaglevPrintingVisitorOstream() override = default;
+
+ static MaglevPrintingVisitorOstream* cast(
+ const std::unique_ptr<std::ostream>& os) {
+ return static_cast<MaglevPrintingVisitorOstream*>(os.get());
+ }
+
+ void set_padding(int padding_size) { padding_size_ = padding_size; }
+
+ protected:
+ int overflow(int c) override;
+
+ private:
+ std::ostream& os_;
+ std::vector<BasicBlock*>* targets_;
+ int padding_size_;
+ bool previous_was_new_line_ = true;
+};
+
+int MaglevPrintingVisitorOstream::overflow(int c) {
+ if (c == EOF) return c;
+
+ if (previous_was_new_line_) {
+ PrintVerticalArrows(os_, *targets_);
+ PrintPadding(os_, padding_size_);
+ }
+ os_.rdbuf()->sputc(c);
+ previous_was_new_line_ = (c == '\n');
+ return c;
+}
+
+} // namespace
+
+MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os)
+ : os_(os),
+ os_for_additional_info_(new MaglevPrintingVisitorOstream(os_, &targets)) {
+}
+
+void MaglevPrintingVisitor::PreProcessGraph(
+ MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ os_ << "Graph (param count: " << compilation_unit->parameter_count()
+ << ", frame size: " << compilation_unit->register_count() << ")\n\n";
+
+ for (BasicBlock* block : *graph) {
+ if (block->control_node()->Is<JumpLoop>()) {
+ loop_headers.insert(block->control_node()->Cast<JumpLoop>()->target());
+ }
+ }
+
+ // Precalculate the maximum number of targets.
+ for (BlockConstIterator block_it = graph->begin(); block_it != graph->end();
+ ++block_it) {
+ BasicBlock* block = *block_it;
+ std::replace(targets.begin(), targets.end(), block,
+ static_cast<BasicBlock*>(nullptr));
+
+ if (loop_headers.find(block) != loop_headers.end()) {
+ AddTarget(targets, block);
+ }
+ ControlNode* node = block->control_node();
+ if (node->Is<JumpLoop>()) {
+ BasicBlock* target = node->Cast<JumpLoop>()->target();
+ std::replace(targets.begin(), targets.end(), target,
+ static_cast<BasicBlock*>(nullptr));
+ } else if (node->Is<UnconditionalControlNode>()) {
+ AddTargetIfNotNext(targets,
+ node->Cast<UnconditionalControlNode>()->target(),
+ *(block_it + 1));
+ } else if (node->Is<ConditionalControlNode>()) {
+ AddTargetIfNotNext(targets,
+ node->Cast<ConditionalControlNode>()->if_true(),
+ *(block_it + 1));
+ AddTargetIfNotNext(targets,
+ node->Cast<ConditionalControlNode>()->if_false(),
+ *(block_it + 1));
+ }
+ }
+ DCHECK(std::all_of(targets.begin(), targets.end(),
+ [](BasicBlock* block) { return block == nullptr; }));
+}
+
+void MaglevPrintingVisitor::PreProcessBasicBlock(
+ MaglevCompilationUnit* compilation_unit, BasicBlock* block) {
+ MaglevGraphLabeller* graph_labeller = compilation_unit->graph_labeller();
+
+ size_t loop_position = static_cast<size_t>(-1);
+ if (loop_headers.erase(block) > 0) {
+ loop_position = AddTarget(targets, block);
+ }
+ {
+ bool saw_start = false;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ Connection c;
+ if (saw_start) {
+ c.AddHorizontal();
+ }
+ // If this is one of the arrows pointing to this block, terminate the
+ // line by connecting it rightwards.
+ if (targets[i] == block) {
+ c.Connect(kRight);
+ // If this is the loop header, go down instead of up and don't clear
+ // the target.
+ if (i == loop_position) {
+ c.Connect(kBottom);
+ } else {
+ c.Connect(kTop);
+ targets[i] = nullptr;
+ }
+ saw_start = true;
+ } else if (c.connected == 0 && targets[i] != nullptr) {
+ // If this is another arrow, connect it, but only if that doesn't
+ // clobber any existing drawing.
+ c.AddVertical();
+ }
+ os_ << c;
+ }
+ os_ << (saw_start ? "►" : " ");
+ }
+
+ int block_id = graph_labeller->BlockId(block);
+ os_ << "Block b" << block_id << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
+}
+
+void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, phi);
+ os_ << "Phi (";
+ // Manually walk Phi inputs to print just the node labels, without
+ // input locations (which are shown in the predecessor block's gap
+ // moves).
+ for (int i = 0; i < phi->input_count(); ++i) {
+ if (i > 0) os_ << ", ";
+ os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ }
+ os_ << ") → " << phi->result().operand() << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, node);
+ os_ << PrintNode(graph_labeller, node) << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void MaglevPrintingVisitor::Process(ControlNode* control_node,
+ const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ bool has_fallthrough = false;
+
+ if (control_node->Is<JumpLoop>()) {
+ BasicBlock* target = control_node->Cast<JumpLoop>()->target();
+
+ PrintVerticalArrows(os_, targets, {}, {target}, true);
+ os_ << "◄─";
+ PrintPaddedId(os_, graph_labeller, control_node, "─", -2);
+ std::replace(targets.begin(), targets.end(), target,
+ static_cast<BasicBlock*>(nullptr));
+
+ } else if (control_node->Is<UnconditionalControlNode>()) {
+ BasicBlock* target =
+ control_node->Cast<UnconditionalControlNode>()->target();
+
+ std::set<size_t> arrows_starting_here;
+ has_fallthrough |= !AddTargetIfNotNext(targets, target, state.next_block(),
+ &arrows_starting_here);
+ PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintPaddedId(os_, graph_labeller, control_node,
+ has_fallthrough ? " " : "─");
+
+ } else if (control_node->Is<ConditionalControlNode>()) {
+ BasicBlock* true_target =
+ control_node->Cast<ConditionalControlNode>()->if_true();
+ BasicBlock* false_target =
+ control_node->Cast<ConditionalControlNode>()->if_false();
+
+ std::set<size_t> arrows_starting_here;
+ has_fallthrough |= !AddTargetIfNotNext(
+ targets, false_target, state.next_block(), &arrows_starting_here);
+ has_fallthrough |= !AddTargetIfNotNext(
+ targets, true_target, state.next_block(), &arrows_starting_here);
+ PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintPaddedId(os_, graph_labeller, control_node, "─");
+
+ } else {
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, control_node);
+ }
+
+ os_ << PrintNode(graph_labeller, control_node) << "\n";
+
+ bool printed_phis = false;
+ if (control_node->Is<UnconditionalControlNode>()) {
+ BasicBlock* target =
+ control_node->Cast<UnconditionalControlNode>()->target();
+ if (target->has_phi()) {
+ printed_phis = true;
+ PrintVerticalArrows(os_, targets);
+ PrintPadding(os_, graph_labeller, -1);
+ os_ << (has_fallthrough ? "│" : " ");
+ os_ << " with gap moves:\n";
+ int pid = state.block()->predecessor_id();
+ for (Phi* phi : *target->phis()) {
+ PrintVerticalArrows(os_, targets);
+ PrintPadding(os_, graph_labeller, -1);
+ os_ << (has_fallthrough ? "│" : " ");
+ os_ << " - ";
+ graph_labeller->PrintInput(os_, phi->input(pid));
+ os_ << " → " << graph_labeller->NodeId(phi) << ": Phi "
+ << phi->result().operand() << "\n";
+ }
+ }
+ }
+
+ PrintVerticalArrows(os_, targets);
+ if (has_fallthrough) {
+ PrintPadding(os_, graph_labeller, -1);
+ if (printed_phis) {
+ os_ << "▼";
+ } else {
+ os_ << "↓";
+ }
+ }
+ os_ << "\n";
+
+ // TODO(leszeks): Allow MaglevPrintingVisitorOstream to print the arrowhead
+ // so that it overlaps the fallthrough arrow.
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
+ Graph* const graph) {
+ GraphProcessor<MaglevPrintingVisitor> printer(compilation_unit, os);
+ printer.ProcessGraph(graph);
+}
+
+void PrintNode::Print(std::ostream& os) const {
+ node_->Print(os, graph_labeller_);
+}
+
+std::ostream& operator<<(std::ostream& os, const PrintNode& printer) {
+ printer.Print(os);
+ return os;
+}
+
+void PrintNodeLabel::Print(std::ostream& os) const {
+ graph_labeller_->PrintNodeLabel(os, node_);
+}
+
+std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer) {
+ printer.Print(os);
+ return os;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-graph-printer.h b/deps/v8/src/maglev/maglev-graph-printer.h
new file mode 100644
index 0000000000..d416293d08
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-printer.h
@@ -0,0 +1,85 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
+
+#include <memory>
+#include <ostream>
+#include <set>
+#include <vector>
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class ControlNode;
+class Graph;
+class MaglevCompilationUnit;
+class MaglevGraphLabeller;
+class Node;
+class NodeBase;
+class Phi;
+class ProcessingState;
+
+class MaglevPrintingVisitor {
+ public:
+ // Could be interesting to print checkpoints too.
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ explicit MaglevPrintingVisitor(std::ostream& os);
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
+ void Process(Phi* phi, const ProcessingState& state);
+ void Process(Node* node, const ProcessingState& state);
+ void Process(ControlNode* node, const ProcessingState& state);
+
+ std::ostream& os() { return *os_for_additional_info_; }
+
+ private:
+ std::ostream& os_;
+ std::unique_ptr<std::ostream> os_for_additional_info_;
+ std::set<BasicBlock*> loop_headers;
+ std::vector<BasicBlock*> targets;
+};
+
+void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
+ Graph* const graph);
+
+class PrintNode {
+ public:
+ PrintNode(MaglevGraphLabeller* graph_labeller, const NodeBase* node)
+ : graph_labeller_(graph_labeller), node_(node) {}
+
+ void Print(std::ostream& os) const;
+
+ private:
+ MaglevGraphLabeller* graph_labeller_;
+ const NodeBase* node_;
+};
+
+std::ostream& operator<<(std::ostream& os, const PrintNode& printer);
+
+class PrintNodeLabel {
+ public:
+ PrintNodeLabel(MaglevGraphLabeller* graph_labeller, const Node* node)
+ : graph_labeller_(graph_labeller), node_(node) {}
+
+ void Print(std::ostream& os) const;
+
+ private:
+ MaglevGraphLabeller* graph_labeller_;
+ const Node* node_;
+};
+
+std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer);
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
diff --git a/deps/v8/src/maglev/maglev-graph-processor.h b/deps/v8/src/maglev/maglev-graph-processor.h
new file mode 100644
index 0000000000..892fe6071b
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-processor.h
@@ -0,0 +1,423 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
+
+#include "src/compiler/bytecode-analysis.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+// The GraphProcessor takes a NodeProcessor, and applies it to each Node in the
+// Graph by calling NodeProcessor::Process on each Node.
+//
+// The GraphProcessor also keeps track of the current ProcessingState, including
+// the inferred corresponding InterpreterFrameState and (optionally) the state
+// at the most recent Checkpoint, and passes this to the Process method.
+//
+// It expects a NodeProcessor class with:
+//
+// // True if the GraphProcessor should snapshot Checkpoint states for
+// // deopting nodes.
+// static constexpr bool kNeedsCheckpointStates;
+//
+// // A function that processes the graph before the nodes are walked.
+// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
+//
+// // A function that processes the graph after the nodes are walked.
+// void PostProcessGraph(MaglevCompilationUnit*, Graph* graph);
+//
+// // A function that processes each basic block before its nodes are walked.
+// void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
+//
+// // Process methods for each Node type. The GraphProcessor switches over
+// // the Node's opcode, casts it to the appropriate FooNode, and dispatches
+// // to NodeProcessor::Process. It's then up to the NodeProcessor to provide
+// // either distinct Process methods per Node type, or using templates or
+// // overloading as appropriate to group node processing.
+// void Process(FooNode* node, const ProcessingState& state) {}
+//
+template <typename NodeProcessor>
+class GraphProcessor;
+
+class ProcessingState {
+ public:
+ explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
+ BlockConstIterator block_it,
+ const InterpreterFrameState* interpreter_frame_state,
+ const Checkpoint* checkpoint,
+ const InterpreterFrameState* checkpoint_frame_state)
+ : compilation_unit_(compilation_unit),
+ block_it_(block_it),
+ interpreter_frame_state_(interpreter_frame_state),
+ checkpoint_(checkpoint),
+ checkpoint_frame_state_(checkpoint_frame_state) {}
+
+ // Disallow copies, since the underlying frame states stay mutable.
+ ProcessingState(const ProcessingState&) = delete;
+ ProcessingState& operator=(const ProcessingState&) = delete;
+
+ BasicBlock* block() const { return *block_it_; }
+ BasicBlock* next_block() const { return *(block_it_ + 1); }
+
+ const InterpreterFrameState* interpreter_frame_state() const {
+ DCHECK_NOT_NULL(interpreter_frame_state_);
+ return interpreter_frame_state_;
+ }
+
+ const Checkpoint* checkpoint() const {
+ DCHECK_NOT_NULL(checkpoint_);
+ return checkpoint_;
+ }
+
+ const InterpreterFrameState* checkpoint_frame_state() const {
+ DCHECK_NOT_NULL(checkpoint_frame_state_);
+ return checkpoint_frame_state_;
+ }
+
+ int register_count() const { return compilation_unit_->register_count(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ private:
+ MaglevCompilationUnit* compilation_unit_;
+ BlockConstIterator block_it_;
+ const InterpreterFrameState* interpreter_frame_state_;
+ const Checkpoint* checkpoint_;
+ const InterpreterFrameState* checkpoint_frame_state_;
+};
+
+template <typename NodeProcessor>
+class GraphProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates =
+ NodeProcessor::kNeedsCheckpointStates;
+
+ template <typename... Args>
+ explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
+ Args&&... args)
+ : compilation_unit_(compilation_unit),
+ node_processor_(std::forward<Args>(args)...),
+ current_frame_state_(*compilation_unit_) {
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_.emplace(*compilation_unit_);
+ }
+ }
+
+ void ProcessGraph(Graph* graph) {
+ graph_ = graph;
+
+ node_processor_.PreProcessGraph(compilation_unit_, graph);
+
+ for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
+ BasicBlock* block = *block_it_;
+
+ node_processor_.PreProcessBasicBlock(compilation_unit_, block);
+
+ if (block->has_state()) {
+ current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_->last_checkpoint_block_it = block_it_;
+ checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
+ }
+ }
+
+ if (block->has_phi()) {
+ for (Phi* phi : *block->phis()) {
+ node_processor_.Process(phi, GetCurrentState());
+ }
+ }
+
+ for (node_it_ = block->nodes().begin(); node_it_ != block->nodes().end();
+ ++node_it_) {
+ Node* node = *node_it_;
+ ProcessNodeBase(node, GetCurrentState());
+ }
+
+ ProcessNodeBase(block->control_node(), GetCurrentState());
+ }
+
+ node_processor_.PostProcessGraph(compilation_unit_, graph);
+ }
+
+ NodeProcessor& node_processor() { return node_processor_; }
+ const NodeProcessor& node_processor() const { return node_processor_; }
+
+ private:
+ ProcessingState GetCurrentState() {
+ return ProcessingState(
+ compilation_unit_, block_it_, &current_frame_state_,
+ kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
+ kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
+ : nullptr);
+ }
+
+ void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+#define CASE(OPCODE) \
+ case Opcode::k##OPCODE: \
+ PreProcess(node->Cast<OPCODE>(), state); \
+ node_processor_.Process(node->Cast<OPCODE>(), state); \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+ }
+
+ void PreProcess(NodeBase* node, const ProcessingState& state) {}
+
+ void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
+ current_frame_state_.set_accumulator(checkpoint->accumulator());
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_->latest_checkpoint = checkpoint;
+ if (checkpoint->is_used()) {
+ checkpoint_state_->checkpoint_frame_state.CopyFrom(
+ *compilation_unit_, current_frame_state_);
+ checkpoint_state_->last_checkpoint_block_it = block_it_;
+ checkpoint_state_->last_checkpoint_node_it = node_it_;
+ ClearDeadCheckpointNodes();
+ }
+ }
+ }
+
+ void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
+ current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
+ }
+
+ void PreProcess(SoftDeopt* node, const ProcessingState& state) {
+ PreProcessDeoptingNode();
+ }
+
+ void PreProcess(CheckMaps* node, const ProcessingState& state) {
+ PreProcessDeoptingNode();
+ }
+
+ void PreProcessDeoptingNode() {
+ if (!kNeedsCheckpointStates) return;
+
+ Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
+ if (checkpoint->is_used()) {
+ DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
+ DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
+ return;
+ }
+ DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
+ checkpoint != *checkpoint_state_->last_checkpoint_node_it);
+
+ // TODO(leszeks): The following code is _ugly_, should figure out how to
+ // clean it up.
+
+ // Go to the previous state checkpoint (either on the Checkpoint that
+ // provided the current checkpoint snapshot, or on a BasicBlock).
+ BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
+ NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
+ if (node_it.is_null()) {
+ // There was no recent enough Checkpoint node, and the block iterator
+ // points at a basic block with a state snapshot. Copy that snapshot and
+ // start iterating from there.
+ BasicBlock* block = *block_it;
+ DCHECK(block->has_state());
+ checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
+ *block->state());
+
+ // Start iterating from the first node in the block.
+ node_it = block->nodes().begin();
+ } else {
+ // The node iterator should point at the previous Checkpoint node. We
+ // don't need that Checkpoint state snapshot anymore, we're making a new
+ // one, so we can just reuse the snapshot as-is without copying it.
+ DCHECK_NE(*node_it, checkpoint);
+ DCHECK((*node_it)->Is<Checkpoint>());
+ DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
+
+ // Advance it by one since we don't need to check this node anymore.
+ ++node_it;
+ }
+
+ // Now walk forward to the checkpoint, and apply any StoreToFrame operations
+ // along the way into the snapshotted checkpoint state.
+ BasicBlock* block = *block_it;
+ while (true) {
+ // Check if we've run out of nodes in this block, and advance to the
+ // next block if so.
+ while (node_it == block->nodes().end()) {
+ DCHECK_NE(block_it, graph_->end());
+
+ // We should only end up visiting blocks with fallthrough to the next
+ // block -- otherwise, the block should have had a frame state snapshot,
+ // as either a merge block or a non-fallthrough jump target.
+ if ((*block_it)->control_node()->Is<Jump>()) {
+ DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
+ *(block_it + 1));
+ } else {
+ DCHECK_IMPLIES((*block_it)
+ ->control_node()
+ ->Cast<ConditionalControlNode>()
+ ->if_true() != *(block_it + 1),
+ (*block_it)
+ ->control_node()
+ ->Cast<ConditionalControlNode>()
+ ->if_false() != *(block_it + 1));
+ }
+
+ // Advance to the next block (which the above DCHECKs confirm is the
+ // unconditional fallthrough from the previous block), and update the
+ // cached block pointer.
+ block_it++;
+ block = *block_it;
+
+ // We should never visit a block with state (aside from the very first
+ // block we visit), since then that should have been our start point
+ // to start with.
+ DCHECK(!(*block_it)->has_state());
+ node_it = (*block_it)->nodes().begin();
+ }
+
+ // We should never reach the current node, the "until" checkpoint node
+ // should be before it.
+ DCHECK_NE(node_it, node_it_);
+
+ Node* node = *node_it;
+
+ // Break once we hit the given Checkpoint node. This could be right at
+ // the start of the iteration, if the BasicBlock held the snapshot and the
+ // Checkpoint was the first node in it.
+ if (node == checkpoint) break;
+
+ // Update the state from the current node, if it's a state update.
+ if (node->Is<StoreToFrame>()) {
+ StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
+ checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
+ store_to_frame->value());
+ } else {
+ // Any checkpoints we meet along the way should be unused, otherwise
+ // they should have provided the most recent state snapshot.
+ DCHECK_IMPLIES(node->Is<Checkpoint>(),
+ !node->Cast<Checkpoint>()->is_used());
+ }
+
+ // Continue to the next node.
+ ++node_it;
+ }
+
+ checkpoint_state_->last_checkpoint_block_it = block_it;
+ checkpoint_state_->last_checkpoint_node_it = node_it;
+ checkpoint_state_->checkpoint_frame_state.set_accumulator(
+ checkpoint->accumulator());
+ ClearDeadCheckpointNodes();
+ checkpoint->SetUsed();
+ }
+
+ // Walk the checkpointed state, and null out any values that are dead at this
+ // checkpoint.
+ // TODO(leszeks): Consider doing this on checkpoint copy, not as a
+ // post-process step.
+ void ClearDeadCheckpointNodes() {
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(
+ checkpoint_state_->latest_checkpoint->bytecode_position());
+ for (int i = 0; i < register_count(); ++i) {
+ if (!liveness->RegisterIsLive(i)) {
+ checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
+ nullptr);
+ }
+ }
+
+ // The accumulator is on the checkpoint node itself, and should have already
+ // been nulled out during graph building if it's dead.
+ DCHECK_EQ(
+ !liveness->AccumulatorIsLive(),
+ checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
+ }
+
+ int register_count() const { return compilation_unit_->register_count(); }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+
+ MaglevCompilationUnit* const compilation_unit_;
+ NodeProcessor node_processor_;
+ Graph* graph_;
+ BlockConstIterator block_it_;
+ NodeConstIterator node_it_;
+ InterpreterFrameState current_frame_state_;
+
+ // The CheckpointState field only exists if the node processor needs
+ // checkpoint states.
+ struct CheckpointState {
+ explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
+ : checkpoint_frame_state(compilation_unit) {}
+ Checkpoint* latest_checkpoint = nullptr;
+ BlockConstIterator last_checkpoint_block_it;
+ NodeConstIterator last_checkpoint_node_it;
+ InterpreterFrameState checkpoint_frame_state;
+ };
+ base::Optional<CheckpointState> checkpoint_state_;
+};
+
+// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
+// them iteratively.
+template <typename... Processors>
+class NodeMultiProcessor;
+
+template <>
+class NodeMultiProcessor<> {
+ public:
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+ void Process(NodeBase* node, const ProcessingState& state) {}
+};
+
+template <typename Processor, typename... Processors>
+class NodeMultiProcessor<Processor, Processors...>
+ : NodeMultiProcessor<Processors...> {
+ using Base = NodeMultiProcessor<Processors...>;
+
+ public:
+ static constexpr bool kNeedsCheckpointStates =
+ Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
+
+ template <typename Node>
+ void Process(Node* node, const ProcessingState& state) {
+ processor_.Process(node, state);
+ Base::Process(node, state);
+ }
+ void PreProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
+ processor_.PreProcessGraph(unit, graph);
+ Base::PreProcessGraph(unit, graph);
+ }
+ void PostProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
+ // Post process in reverse order because that kind of makes sense.
+ Base::PostProcessGraph(unit, graph);
+ processor_.PostProcessGraph(unit, graph);
+ }
+ void PreProcessBasicBlock(MaglevCompilationUnit* unit, BasicBlock* block) {
+ processor_.PreProcessBasicBlock(unit, block);
+ Base::PreProcessBasicBlock(unit, block);
+ }
+
+ private:
+ Processor processor_;
+};
+
+template <typename... Processors>
+using GraphMultiProcessor = GraphProcessor<NodeMultiProcessor<Processors...>>;
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
diff --git a/deps/v8/src/maglev/maglev-graph.h b/deps/v8/src/maglev/maglev-graph.h
new file mode 100644
index 0000000000..d2fa0726e5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph.h
@@ -0,0 +1,60 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_H_
+
+#include <vector>
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/zone/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+using BlockConstIterator =
+ std::vector<BasicBlock*, ZoneAllocator<BasicBlock*>>::const_iterator;
+using BlockConstReverseIterator =
+ std::vector<BasicBlock*,
+ ZoneAllocator<BasicBlock*>>::const_reverse_iterator;
+
+class Graph final : public ZoneObject {
+ public:
+ static Graph* New(Zone* zone) { return zone->New<Graph>(zone); }
+
+ // Shouldn't be used directly; public so that Zone::New can access it.
+ explicit Graph(Zone* zone) : blocks_(zone) {}
+
+ BasicBlock* operator[](int i) { return blocks_[i]; }
+ const BasicBlock* operator[](int i) const { return blocks_[i]; }
+
+ int num_blocks() const { return static_cast<int>(blocks_.size()); }
+
+ BlockConstIterator begin() const { return blocks_.begin(); }
+ BlockConstIterator end() const { return blocks_.end(); }
+ BlockConstReverseIterator rbegin() const { return blocks_.rbegin(); }
+ BlockConstReverseIterator rend() const { return blocks_.rend(); }
+
+ BasicBlock* last_block() const { return blocks_.back(); }
+
+ void Add(BasicBlock* block) { blocks_.push_back(block); }
+
+ uint32_t stack_slots() const { return stack_slots_; }
+ void set_stack_slots(uint32_t stack_slots) {
+ DCHECK_EQ(kMaxUInt32, stack_slots_);
+ DCHECK_NE(kMaxUInt32, stack_slots);
+ stack_slots_ = stack_slots;
+ }
+
+ private:
+ uint32_t stack_slots_ = kMaxUInt32;
+ ZoneVector<BasicBlock*> blocks_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_H_
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
new file mode 100644
index 0000000000..5a907607f9
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -0,0 +1,400 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
+#define V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
+
+#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+#include "src/maglev/maglev-register-frame-array.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class MergePointInterpreterFrameState;
+
+class InterpreterFrameState {
+ public:
+ explicit InterpreterFrameState(const MaglevCompilationUnit& info)
+ : frame_(info) {}
+
+ InterpreterFrameState(const MaglevCompilationUnit& info,
+ const InterpreterFrameState& state)
+ : accumulator_(state.accumulator_), frame_(info) {
+ frame_.CopyFrom(info, state.frame_, nullptr);
+ }
+
+ void CopyFrom(const MaglevCompilationUnit& info,
+ const InterpreterFrameState& state) {
+ accumulator_ = state.accumulator_;
+ frame_.CopyFrom(info, state.frame_, nullptr);
+ }
+
+ inline void CopyFrom(const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state);
+
+ void set_accumulator(ValueNode* value) { accumulator_ = value; }
+ ValueNode* accumulator() const { return accumulator_; }
+
+ void set(interpreter::Register reg, ValueNode* value) {
+ DCHECK_IMPLIES(reg.is_parameter(),
+ reg == interpreter::Register::current_context() ||
+ reg == interpreter::Register::function_closure() ||
+ reg.ToParameterIndex() >= 0);
+ frame_[reg] = value;
+ }
+ ValueNode* get(interpreter::Register reg) const {
+ DCHECK_IMPLIES(reg.is_parameter(),
+ reg == interpreter::Register::current_context() ||
+ reg == interpreter::Register::function_closure() ||
+ reg.ToParameterIndex() >= 0);
+ return frame_[reg];
+ }
+
+ const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
+
+ private:
+ ValueNode* accumulator_ = nullptr;
+ RegisterFrameArray<ValueNode*> frame_;
+};
+
+class MergePointRegisterState {
+ public:
+ class Iterator {
+ public:
+ struct Entry {
+ RegisterState& state;
+ Register reg;
+ };
+ explicit Iterator(RegisterState* value_pointer,
+ RegList::Iterator reg_iterator)
+ : current_value_(value_pointer), reg_iterator_(reg_iterator) {}
+ Entry operator*() { return {*current_value_, *reg_iterator_}; }
+ void operator++() {
+ ++current_value_;
+ ++reg_iterator_;
+ }
+ bool operator!=(const Iterator& other) const {
+ return current_value_ != other.current_value_;
+ }
+
+ private:
+ RegisterState* current_value_;
+ RegList::Iterator reg_iterator_;
+ };
+
+ bool is_initialized() const { return values_[0].GetPayload().is_initialized; }
+
+ Iterator begin() {
+ return Iterator(values_, kAllocatableGeneralRegisters.begin());
+ }
+ Iterator end() {
+ return Iterator(values_ + kAllocatableGeneralRegisterCount,
+ kAllocatableGeneralRegisters.end());
+ }
+
+ private:
+ RegisterState values_[kAllocatableGeneralRegisterCount] = {{}};
+};
+
+class MergePointInterpreterFrameState {
+ public:
+ void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
+ int merge_offset, interpreter::Register reg,
+ ValueNode* value) {
+#ifdef DEBUG
+ const auto& analysis = compilation_unit.bytecode_analysis();
+ if (!analysis.IsLoopHeader(merge_offset)) return;
+ auto& assignments = analysis.GetLoopInfoFor(merge_offset).assignments();
+ if (reg.is_parameter()) {
+ if (!assignments.ContainsParameter(reg.ToParameterIndex())) return;
+ } else {
+ DCHECK(
+ analysis.GetInLivenessFor(merge_offset)->RegisterIsLive(reg.index()));
+ if (!assignments.ContainsLocal(reg.index())) return;
+ }
+ DCHECK(value->Is<Phi>());
+#endif
+ }
+
+ MergePointInterpreterFrameState(
+ const MaglevCompilationUnit& info, const InterpreterFrameState& state,
+ int merge_offset, int predecessor_count, BasicBlock* predecessor,
+ const compiler::BytecodeLivenessState* liveness)
+ : predecessor_count_(predecessor_count),
+ predecessors_so_far_(1),
+ live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness),
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
+ int live_index = 0;
+ ForEachRegister(info, [&](interpreter::Register reg) {
+ live_registers_and_accumulator_[live_index++] = state.get(reg);
+ });
+ if (liveness_->AccumulatorIsLive()) {
+ live_registers_and_accumulator_[live_index++] = state.accumulator();
+ }
+ predecessors_[0] = predecessor;
+ }
+
+ MergePointInterpreterFrameState(
+ const MaglevCompilationUnit& info, int merge_offset,
+ int predecessor_count, const compiler::BytecodeLivenessState* liveness,
+ const compiler::LoopInfo* loop_info)
+ : predecessor_count_(predecessor_count),
+ predecessors_so_far_(1),
+ live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness),
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
+ int live_index = 0;
+ auto& assignments = loop_info->assignments();
+ ForEachParameter(info, [&](interpreter::Register reg) {
+ ValueNode* value = nullptr;
+ if (assignments.ContainsParameter(reg.ToParameterIndex())) {
+ value = NewLoopPhi(info.zone(), reg, merge_offset, value);
+ }
+ live_registers_and_accumulator_[live_index++] = value;
+ });
+ ForEachLocal([&](interpreter::Register reg) {
+ ValueNode* value = nullptr;
+ if (assignments.ContainsLocal(reg.index())) {
+ value = NewLoopPhi(info.zone(), reg, merge_offset, value);
+ }
+ live_registers_and_accumulator_[live_index++] = value;
+ });
+ DCHECK(!liveness_->AccumulatorIsLive());
+
+#ifdef DEBUG
+ predecessors_[0] = nullptr;
+#endif
+ }
+
+ // Merges an unmerged framestate with a possibly merged framestate into |this|
+ // framestate.
+ void Merge(const MaglevCompilationUnit& compilation_unit,
+ const InterpreterFrameState& unmerged, BasicBlock* predecessor,
+ int merge_offset) {
+ DCHECK_GT(predecessor_count_, 1);
+ DCHECK_LT(predecessors_so_far_, predecessor_count_);
+ predecessors_[predecessors_so_far_] = predecessor;
+
+ ForEachValue(
+ compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
+ CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
+
+ value = MergeValue(compilation_unit.zone(), reg, value,
+ unmerged.get(reg), merge_offset);
+ });
+ predecessors_so_far_++;
+ DCHECK_LE(predecessors_so_far_, predecessor_count_);
+ }
+
+ MergePointRegisterState& register_state() { return register_state_; }
+
+ // Merges an unmerged framestate with a possibly merged framestate into |this|
+ // framestate.
+ void MergeLoop(const MaglevCompilationUnit& compilation_unit,
+ const InterpreterFrameState& loop_end_state,
+ BasicBlock* loop_end_block, int merge_offset) {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_NULL(predecessors_[0]);
+ predecessors_[0] = loop_end_block;
+
+ ForEachValue(
+ compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
+ CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
+
+ MergeLoopValue(compilation_unit.zone(), reg, value,
+ loop_end_state.get(reg), merge_offset);
+ });
+ DCHECK(!liveness_->AccumulatorIsLive());
+ }
+
+ bool has_phi() const { return !phis_.is_empty(); }
+ Phi::List* phis() { return &phis_; }
+
+ void SetPhis(Phi::List&& phis) {
+ // Move the collected phis to the live interpreter frame.
+ DCHECK(phis_.is_empty());
+ phis_.MoveTail(&phis, phis.begin());
+ }
+
+ int predecessor_count() const { return predecessor_count_; }
+
+ BasicBlock* predecessor_at(int i) const {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_LT(i, predecessor_count_);
+ return predecessors_[i];
+ }
+
+ private:
+ friend void InterpreterFrameState::CopyFrom(
+ const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state);
+
+ ValueNode* MergeValue(Zone* zone, interpreter::Register owner,
+ ValueNode* merged, ValueNode* unmerged,
+ int merge_offset) {
+ // If the merged node is null, this is a pre-created loop header merge
+ // frame will null values for anything that isn't a loop Phi.
+ if (merged == nullptr) {
+ DCHECK_NULL(predecessors_[0]);
+ DCHECK_EQ(predecessors_so_far_, 1);
+ return unmerged;
+ }
+
+ Phi* result = merged->TryCast<Phi>();
+ if (result != nullptr && result->merge_offset() == merge_offset) {
+ // It's possible that merged == unmerged at this point since loop-phis are
+ // not dropped if they are only assigned to themselves in the loop.
+ DCHECK_EQ(result->owner(), owner);
+ result->set_input(predecessors_so_far_, unmerged);
+ return result;
+ }
+
+ if (merged == unmerged) return merged;
+
+ // Up to this point all predecessors had the same value for this interpreter
+ // frame slot. Now that we find a distinct value, insert a copy of the first
+ // value for each predecessor seen so far, in addition to the new value.
+ // TODO(verwaest): Unclear whether we want this for Maglev: Instead of
+ // letting the register allocator remove phis, we could always merge through
+ // the frame slot. In that case we only need the inputs for representation
+ // selection, and hence could remove duplicate inputs. We'd likely need to
+ // attach the interpreter register to the phi in that case?
+ result = Node::New<Phi>(zone, predecessor_count_, owner, merge_offset);
+
+ for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
+ result->set_input(predecessors_so_far_, unmerged);
+
+ phis_.Add(result);
+ return result;
+ }
+
+ void MergeLoopValue(Zone* zone, interpreter::Register owner,
+ ValueNode* merged, ValueNode* unmerged,
+ int merge_offset) {
+ Phi* result = merged->TryCast<Phi>();
+ if (result == nullptr || result->merge_offset() != merge_offset) {
+ DCHECK_EQ(merged, unmerged);
+ return;
+ }
+ DCHECK_EQ(result->owner(), owner);
+ // The loop jump is defined to unconditionally be index 0.
+#ifdef DEBUG
+ DCHECK_NULL(result->input(0).node());
+#endif
+ result->set_input(0, unmerged);
+ }
+
+ ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
+ ValueNode* initial_value) {
+ DCHECK_EQ(predecessors_so_far_, 1);
+ // Create a new loop phi, which for now is empty.
+ Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
+#ifdef DEBUG
+ result->set_input(0, nullptr);
+#endif
+ phis_.Add(result);
+ return result;
+ }
+ static int SizeFor(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness) {
+ return info.parameter_count() + liveness->live_value_count();
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(Function&& f) const {
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(Function&& f) {
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachParameter(info, f);
+ ForEachLocal(f);
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachParameter(info, f);
+ ForEachLocal(f);
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
+ int live_index = 0;
+ ForEachRegister(info, [&](interpreter::Register reg) {
+ f(reg, live_registers_and_accumulator_[live_index++]);
+ });
+ if (liveness_->AccumulatorIsLive()) {
+ f(interpreter::Register::virtual_accumulator(),
+ live_registers_and_accumulator_[live_index++]);
+ live_index++;
+ }
+ DCHECK_EQ(live_index, SizeFor(info, liveness_));
+ }
+
+ int predecessor_count_;
+ int predecessors_so_far_;
+ Phi::List phis_;
+ ValueNode** live_registers_and_accumulator_;
+ const compiler::BytecodeLivenessState* liveness_ = nullptr;
+ BasicBlock** predecessors_;
+
+ MergePointRegisterState register_state_;
+};
+
+void InterpreterFrameState::CopyFrom(
+ const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state) {
+ int live_index = 0;
+ state.ForEachRegister(info, [&](interpreter::Register reg) {
+ frame_[reg] = state.live_registers_and_accumulator_[live_index++];
+ });
+ if (state.liveness_->AccumulatorIsLive()) {
+ accumulator_ = state.live_registers_and_accumulator_[live_index++];
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
new file mode 100644
index 0000000000..929a748330
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -0,0 +1,922 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-ir.h"
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/ic/handler-configuration.h"
+#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-vreg-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+const char* ToString(Opcode opcode) {
+#define DEF_NAME(Name) #Name,
+ static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
+#undef DEF_NAME
+ return names[static_cast<int>(opcode)];
+}
+
+#define __ code_gen_state->masm()->
+
+// TODO(v8:7700): Clean up after all code paths are supported.
+static bool g_this_field_will_be_unused_once_all_code_paths_are_supported;
+#define UNSUPPORTED() \
+ do { \
+ std::cerr << "Maglev: Can't compile, unsuppored codegen path.\n"; \
+ code_gen_state->set_found_unsupported_code_paths(true); \
+ g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
+ } while (false)
+
+namespace {
+
+// ---
+// Vreg allocation helpers.
+// ---
+
+int GetVirtualRegister(Node* node) {
+ return compiler::UnallocatedOperand::cast(node->result().operand())
+ .virtual_register();
+}
+
+void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
+ node->result().SetUnallocated(
+ compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ vreg_state->AllocateVirtualRegister());
+}
+
+void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
+ Register reg) {
+ node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
+ reg.code(),
+ vreg_state->AllocateVirtualRegister());
+}
+
+// TODO(victorgomes): Use this for smi binary operation and remove attribute
+// [[maybe_unused]].
+[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
+ Node* node) {
+ node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
+}
+
+void UseRegister(Input& input) {
+ input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ compiler::UnallocatedOperand::USED_AT_START,
+ GetVirtualRegister(input.node()));
+}
+void UseAny(Input& input) {
+ input.SetUnallocated(
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ compiler::UnallocatedOperand::USED_AT_START,
+ GetVirtualRegister(input.node()));
+}
+void UseFixed(Input& input, Register reg) {
+ input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
+ GetVirtualRegister(input.node()));
+}
+
+// ---
+// Code gen helpers.
+// ---
+
+void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
+ // TODO(leszeks): Consider special casing the value. (Toon: could possibly
+ // be done through Input directly?)
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input.operand());
+
+ if (operand.IsRegister()) {
+ __ Push(operand.GetRegister());
+ } else {
+ DCHECK(operand.IsStackSlot());
+ __ Push(GetStackSlot(operand));
+ }
+}
+
+// ---
+// Deferred code handling.
+// ---
+
+// Base case provides an error.
+template <typename T, typename Enable = void>
+struct CopyForDeferredHelper {
+ template <typename U>
+ struct No_Copy_Helper_Implemented_For_Type;
+ static void Copy(MaglevCompilationUnit* compilation_unit,
+ No_Copy_Helper_Implemented_For_Type<T>);
+};
+
+// Helper for copies by value.
+template <typename T, typename Enable = void>
+struct CopyForDeferredByValue {
+ static T Copy(MaglevCompilationUnit* compilation_unit, T node) {
+ return node;
+ }
+};
+
+// Node pointers are copied by value.
+template <typename T>
+struct CopyForDeferredHelper<
+ T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
+ : public CopyForDeferredByValue<T*> {};
+// Arithmetic values and enums are copied by value.
+template <typename T>
+struct CopyForDeferredHelper<
+ T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
+ : public CopyForDeferredByValue<T> {};
+template <typename T>
+struct CopyForDeferredHelper<
+ T, typename std::enable_if<std::is_enum<T>::value>::type>
+ : public CopyForDeferredByValue<T> {};
+// MaglevCompilationUnits are copied by value.
+template <>
+struct CopyForDeferredHelper<MaglevCompilationUnit*>
+ : public CopyForDeferredByValue<MaglevCompilationUnit*> {};
+// Machine registers are copied by value.
+template <>
+struct CopyForDeferredHelper<Register>
+ : public CopyForDeferredByValue<Register> {};
+
+// InterpreterFrameState is cloned.
+template <>
+struct CopyForDeferredHelper<const InterpreterFrameState*> {
+ static const InterpreterFrameState* Copy(
+ MaglevCompilationUnit* compilation_unit,
+ const InterpreterFrameState* frame_state) {
+ return compilation_unit->zone()->New<InterpreterFrameState>(
+ *compilation_unit, *frame_state);
+ }
+};
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit,
+ std::forward<T>(value));
+}
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
+}
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, const T& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
+}
+
+template <typename Function, typename FunctionPointer = Function>
+struct FunctionArgumentsTupleHelper
+ : FunctionArgumentsTupleHelper<Function,
+ decltype(&FunctionPointer::operator())> {};
+
+template <typename T, typename C, typename R, typename... A>
+struct FunctionArgumentsTupleHelper<T, R (C::*)(A...) const> {
+ using FunctionPointer = R (*)(A...);
+ using Tuple = std::tuple<A...>;
+ static constexpr size_t kSize = sizeof...(A);
+};
+
+template <typename T>
+struct StripFirstTwoTupleArgs;
+
+template <typename T1, typename T2, typename... T>
+struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
+ using Stripped = std::tuple<T...>;
+};
+
+template <typename Function>
+class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
+ public:
+ using FunctionPointer =
+ typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
+ using Tuple = typename StripFirstTwoTupleArgs<
+ typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;
+ static constexpr size_t kSize = FunctionArgumentsTupleHelper<Function>::kSize;
+
+ template <typename... InArgs>
+ explicit DeferredCodeInfoImpl(MaglevCompilationUnit* compilation_unit,
+ FunctionPointer function, InArgs&&... args)
+ : function(function),
+ args(CopyForDeferred(compilation_unit, std::forward<InArgs>(args))...) {
+ }
+
+ DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
+ DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;
+
+ void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) override {
+ DoCall(code_gen_state, return_label, std::make_index_sequence<kSize - 2>{});
+ }
+
+ private:
+ template <size_t... I>
+ auto DoCall(MaglevCodeGenState* code_gen_state, Label* return_label,
+ std::index_sequence<I...>) {
+ // TODO(leszeks): This could be replaced with std::apply in C++17.
+ return function(code_gen_state, return_label, std::get<I>(args)...);
+ }
+
+ FunctionPointer function;
+ Tuple args;
+};
+
+template <typename Function, typename... Args>
+void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ Function&& deferred_code_gen, Args&&... args) {
+ using DeferredCodeInfoT = DeferredCodeInfoImpl<Function>;
+ DeferredCodeInfoT* deferred_code =
+ code_gen_state->compilation_unit()->zone()->New<DeferredCodeInfoT>(
+ code_gen_state->compilation_unit(), deferred_code_gen,
+ std::forward<Args>(args)...);
+
+ code_gen_state->PushDeferredCode(deferred_code);
+ if (FLAG_code_comments) {
+ __ RecordComment("-- Jump to deferred code");
+ }
+ __ j(cond, &deferred_code->deferred_code_label);
+ __ bind(&deferred_code->return_label);
+}
+
+// ---
+// Deopt
+// ---
+
+void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
+ int deopt_bytecode_position,
+ const InterpreterFrameState* checkpoint_state) {
+ DCHECK(node->properties().can_deopt());
+ // TODO(leszeks): Extract to separate call, or at the very least defer.
+
+ // TODO(leszeks): Stack check.
+ MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
+ int maglev_frame_size = code_gen_state->vreg_slots();
+
+ ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
+ __ RecordComment("Push registers and load accumulator");
+ int num_saved_slots = 0;
+ // TODO(verwaest): We probably shouldn't be spilling all values that go
+ // through deopt :)
+ for (int i = 0; i < compilation_unit->register_count(); ++i) {
+ ValueNode* node = checkpoint_state->get(interpreter::Register(i));
+ if (node == nullptr) continue;
+ __ Push(ToMemOperand(node->spill_slot()));
+ num_saved_slots++;
+ }
+ ValueNode* accumulator = checkpoint_state->accumulator();
+ if (accumulator) {
+ __ movq(kInterpreterAccumulatorRegister,
+ ToMemOperand(accumulator->spill_slot()));
+ }
+
+ __ RecordComment("Load registers from extra pushed slots");
+ int slot = 0;
+ for (int i = 0; i < compilation_unit->register_count(); ++i) {
+ ValueNode* node = checkpoint_state->get(interpreter::Register(i));
+ if (node == nullptr) continue;
+ __ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
+ kSystemPointerSize));
+ __ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
+ i * kSystemPointerSize),
+ kScratchRegister);
+ }
+ DCHECK_EQ(slot, num_saved_slots);
+
+ __ RecordComment("Materialize bytecode array and offset");
+ __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
+ compilation_unit->bytecode().object());
+ __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Smi::FromInt(deopt_bytecode_position +
+ (BytecodeArray::kHeaderSize - kHeapObjectTag)));
+
+ // Reset rsp to bytecode sized frame.
+ __ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
+ (2 + compilation_unit->register_count())) *
+ kSystemPointerSize));
+ __ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
+}
+
+void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
+ const ProcessingState& state) {
+ EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
+ state.checkpoint_frame_state());
+}
+
+// ---
+// Print
+// ---
+
+void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {
+ if (!node->has_inputs()) return;
+
+ os << " [";
+ for (int i = 0; i < node->input_count(); i++) {
+ if (i != 0) os << ", ";
+ graph_labeller->PrintInput(os, node->input(i));
+ }
+ os << "]";
+}
+
+void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {}
+
+void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const ValueNode* node) {
+ os << " → " << node->result().operand();
+ if (node->has_valid_live_range()) {
+ os << ", live range: [" << node->live_range().start << "-"
+ << node->live_range().end << "]";
+ }
+}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const UnconditionalControlNode* node) {
+ os << " b" << graph_labeller->BlockId(node->target());
+}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const ConditionalControlNode* node) {
+ os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
+ << graph_labeller->BlockId(node->if_false());
+}
+
+template <typename NodeT>
+void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeT* node) {
+ os << node->opcode();
+ node->PrintParams(os, graph_labeller);
+ PrintInputs(os, graph_labeller, node);
+ PrintResult(os, graph_labeller, node);
+ PrintTargets(os, graph_labeller, node);
+}
+
+} // namespace
+
+void NodeBase::Print(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return PrintImpl(os, graph_labeller, this->Cast<Name>());
+ NODE_BASE_LIST(V)
+#undef V
+ }
+ UNREACHABLE();
+}
+
+// ---
+// Nodes
+// ---
+void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ Move(ToRegister(result()), Immediate(value()));
+}
+void SmiConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {}
+void Checkpoint::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << PrintNodeLabel(graph_labeller, accumulator()) << ")";
+}
+
+void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ EmitDeopt(code_gen_state, this, state);
+}
+
+void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << object_ << ")";
+}
+
+void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Make this nicer.
+ result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
+ (StandardFrameConstants::kExpressionsOffset -
+ UnoptimizedFrameConstants::kRegisterFileFromFp) /
+ kSystemPointerSize +
+ source().index(),
+ vreg_state->AllocateVirtualRegister());
+}
+void InitialValue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // No-op, the value is already in the appropriate slot.
+}
+void InitialValue::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source().ToString() << ")";
+}
+
+void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(context(), kContextRegister);
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void LoadGlobal::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
+
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+
+ // TODO(jgruber): Detect properly.
+ const int ic_kind =
+ static_cast<int>(FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
+
+ __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
+ LoadGlobalNoFeedbackDescriptor::kName),
+ name().object());
+ __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
+ LoadGlobalNoFeedbackDescriptor::kICKind),
+ Immediate(Smi::FromInt(ic_kind)));
+
+ // TODO(jgruber): Implement full LoadGlobal handling.
+ __ CallBuiltin(Builtin::kLoadGlobalIC_NoFeedback);
+}
+void LoadGlobal::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name() << ")";
+}
+
+void RegisterInput::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsFixed(vreg_state, this, input());
+}
+void RegisterInput::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // Nothing to be done, the value is already in the register.
+}
+void RegisterInput::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << input() << ")";
+}
+
+void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ if (!has_valid_live_range()) return;
+
+ Register reg = ToRegister(result());
+ __ LoadRoot(reg, index());
+}
+void RootConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << RootsTable::name(index()) << ")";
+}
+
+void CheckMaps::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(actual_map_input());
+ set_temporaries_needed(1);
+}
+void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register object = ToRegister(actual_map_input());
+ RegList temps = temporaries();
+ Register map_tmp = temps.PopFirst();
+
+ __ LoadMap(map_tmp, object);
+ __ Cmp(map_tmp, map().object());
+
+ // TODO(leszeks): Encode as a bit on CheckMaps.
+ if (map().object()->is_migration_target()) {
+ JumpToDeferredIf(
+ not_equal, code_gen_state,
+ [](MaglevCodeGenState* code_gen_state, Label* return_label,
+ Register object, CheckMaps* node, int checkpoint_position,
+ const InterpreterFrameState* checkpoint_state_snapshot,
+ Register map_tmp) {
+ Label deopt;
+
+ // If the map is not deprecated, deopt straight away.
+ __ movl(kScratchRegister,
+ FieldOperand(map_tmp, Map::kBitField3Offset));
+ __ testl(kScratchRegister,
+ Immediate(Map::Bits3::IsDeprecatedBit::kMask));
+ __ j(zero, &deopt);
+
+ // Otherwise, try migrating the object. If the migration returns Smi
+ // zero, then it failed and we should deopt.
+ __ Push(object);
+ __ Move(kContextRegister,
+ code_gen_state->broker()->target_native_context().object());
+ // TODO(verwaest): We're calling so we need to spill around it.
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ __ cmpl(kReturnRegister0, Immediate(0));
+ __ j(equal, &deopt);
+
+ // The migrated object is returned on success, retry the map check.
+ __ Move(object, kReturnRegister0);
+ __ LoadMap(map_tmp, object);
+ __ Cmp(map_tmp, node->map().object());
+ __ j(equal, return_label);
+
+ __ bind(&deopt);
+ EmitDeopt(code_gen_state, node, checkpoint_position,
+ checkpoint_state_snapshot);
+ },
+ object, this, state.checkpoint()->bytecode_position(),
+ state.checkpoint_frame_state(), map_tmp);
+ } else {
+ Label is_ok;
+ __ j(equal, &is_ok);
+ EmitDeopt(code_gen_state, this, state);
+ __ bind(&is_ok);
+ }
+}
+void CheckMaps::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *map().object() << ")";
+}
+
+void LoadField::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(object_input());
+ DefineAsRegister(vreg_state, this);
+}
+void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // os << "kField, is in object = "
+ // << LoadHandler::IsInobjectBits::decode(raw_handler)
+ // << ", is double = " << LoadHandler::IsDoubleBits::decode(raw_handler)
+ // << ", field index = " <<
+ // LoadHandler::FieldIndexBits::decode(raw_handler);
+
+ Register object = ToRegister(object_input());
+ int handler = this->handler();
+
+ if (LoadHandler::IsInobjectBits::decode(handler)) {
+ Operand input_field_operand = FieldOperand(
+ object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
+ __ DecompressAnyTagged(ToRegister(result()), input_field_operand);
+ if (LoadHandler::IsDoubleBits::decode(handler)) {
+ // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
+ UNSUPPORTED();
+ }
+ } else {
+ // TODO(leszeks): Handle out-of-object properties.
+ UNSUPPORTED();
+ }
+}
+void LoadField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << std::hex << handler() << std::dec << ")";
+}
+
+void StoreField::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(object_input());
+ UseRegister(value_input());
+}
+void StoreField::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register value = ToRegister(value_input());
+
+ if (StoreHandler::IsInobjectBits::decode(this->handler())) {
+ Operand operand = FieldOperand(
+ object,
+ StoreHandler::FieldIndexBits::decode(this->handler()) * kTaggedSize);
+ __ StoreTaggedField(operand, value);
+ } else {
+ // TODO(victorgomes): Out-of-object properties.
+ UNSUPPORTED();
+ }
+}
+
+void StoreField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << std::hex << handler() << std::dec << ")";
+}
+
+void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ using D = LoadNoFeedbackDescriptor;
+ UseFixed(context(), kContextRegister);
+ UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ using D = LoadNoFeedbackDescriptor;
+ const int ic_kind = static_cast<int>(FeedbackSlotKind::kLoadProperty);
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+ DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
+ __ Move(D::GetRegisterParameter(D::kName), name().object());
+ __ Move(D::GetRegisterParameter(D::kICKind),
+ Immediate(Smi::FromInt(ic_kind)));
+ __ CallBuiltin(Builtin::kLoadIC_NoFeedback);
+}
+void LoadNamedGeneric::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {}
+void StoreToFrame::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << target().ToString() << " ← "
+ << PrintNodeLabel(graph_labeller, value()) << ")";
+}
+
+void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ if (source().IsAnyRegister()) {
+ Register source_reg = ToRegister(source());
+ if (target().IsAnyRegister()) {
+ __ movq(ToRegister(target()), source_reg);
+ } else {
+ __ movq(ToMemOperand(target()), source_reg);
+ }
+ } else {
+ MemOperand source_op = ToMemOperand(source());
+ if (target().IsAnyRegister()) {
+ __ movq(ToRegister(target()), source_op);
+ } else {
+ __ movq(kScratchRegister, source_op);
+ __ movq(ToMemOperand(target()), kScratchRegister);
+ }
+ }
+}
+void GapMove::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source() << " → " << target() << ")";
+}
+
+namespace {
+
+constexpr Builtin BuiltinFor(Operation operation) {
+ switch (operation) {
+#define CASE(name) \
+ case Operation::k##name: \
+ return Builtin::k##name##_WithFeedback;
+ OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
+
+} // namespace
+
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
+ MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
+ __ Move(kContextRegister, code_gen_state->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+}
+
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
+ MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
+ UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
+ DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
+ __ Move(kContextRegister, code_gen_state->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+}
+
+#define DEF_OPERATION(Name) \
+ void Name::AllocateVreg(MaglevVregAllocationState* vreg_state, \
+ const ProcessingState& state) { \
+ Base::AllocateVreg(vreg_state, state); \
+ } \
+ void Name::GenerateCode(MaglevCodeGenState* code_gen_state, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(code_gen_state, state); \
+ }
+GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
+#undef DEF_OPERATION
+
+void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ // Phi inputs are processed in the post-process, once loop phis' inputs'
+ // v-regs are allocated.
+ result().SetUnallocated(
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ vreg_state->AllocateVirtualRegister());
+}
+// TODO(verwaest): Remove after switching the register allocator.
+void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
+ for (Input& input : *this) {
+ UseAny(input);
+ }
+}
+void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
+}
+void Phi::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << owner().ToString() << ")";
+}
+
+void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kFunction));
+ UseFixed(context(), kContextRegister);
+ for (int i = 0; i < num_args(); i++) {
+ UseAny(arg(i));
+ }
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
+
+ DCHECK_EQ(ToRegister(function()),
+ CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kFunction));
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+
+ for (int i = num_args() - 1; i >= 0; --i) {
+ PushInput(code_gen_state, arg(i));
+ }
+
+ uint32_t arg_count = num_args();
+ __ Move(CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kActualArgumentsCount),
+ Immediate(arg_count));
+
+ // TODO(leszeks): This doesn't collect feedback yet, either pass in the
+ // feedback vector by Handle.
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
+}
+
+void CallUndefinedReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void CallUndefinedReceiver::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+
+// ---
+// Control nodes
+// ---
+void Return::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(value_input(), kReturnRegister0);
+}
+void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+
+ __ LeaveFrame(StackFrame::BASELINE);
+ __ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
+ kScratchRegister);
+}
+
+void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // Avoid emitting a jump to the next block.
+ if (target() != state.next_block()) {
+ __ jmp(target()->label());
+ }
+}
+
+void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ jmp(target()->label());
+}
+
+void BranchIfTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(condition_input());
+}
+void BranchIfTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register value = ToRegister(condition_input());
+
+ auto* next_block = state.next_block();
+
+ // We don't have any branch probability information, so try to jump
+ // over whatever the next block emitted is.
+ if (if_false() == next_block) {
+ // Jump over the false block if true, otherwise fall through into it.
+ __ JumpIfRoot(value, RootIndex::kTrueValue, if_true()->label());
+ } else {
+ // Jump to the false block if true.
+ __ JumpIfNotRoot(value, RootIndex::kTrueValue, if_false()->label());
+ // Jump to the true block if it's not the next block.
+ if (if_true() != next_block) {
+ __ jmp(if_true()->label());
+ }
+ }
+}
+
+void BranchIfCompare::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void BranchIfCompare::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ USE(operation_);
+ UNREACHABLE();
+}
+
+void BranchIfToBooleanTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(condition_input(),
+ ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
+}
+void BranchIfToBooleanTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(condition_input()),
+ ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
+
+ // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
+ // the original value into kInterpreterAccumulatorRegister, so we don't have
+ // to worry about it getting clobbered.
+ __ CallBuiltin(Builtin::kToBooleanForBaselineJump);
+ __ SmiCompare(kReturnRegister1, Smi::zero());
+
+ auto* next_block = state.next_block();
+
+ // We don't have any branch probability information, so try to jump
+ // over whatever the next block emitted is.
+ if (if_false() == next_block) {
+ // Jump over the false block if non zero, otherwise fall through into it.
+ __ j(not_equal, if_true()->label());
+ } else {
+ // Jump to the false block if zero.
+ __ j(equal, if_false()->label());
+ // Fall through or jump to the true block.
+ if (if_true() != next_block) {
+ __ jmp(if_true()->label());
+ }
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
new file mode 100644
index 0000000000..398f9254d9
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -0,0 +1,1461 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_IR_H_
+#define V8_MAGLEV_MAGLEV_IR_H_
+
+#include "src/base/bit-field.h"
+#include "src/base/macros.h"
+#include "src/base/small-vector.h"
+#include "src/base/threaded-list.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
+#include "src/common/operation.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/heap-refs.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/objects/smi.h"
+#include "src/roots/roots.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class ProcessingState;
+class MaglevCodeGenState;
+class MaglevGraphLabeller;
+class MaglevVregAllocationState;
+
+// Nodes are either
+// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
+// 2. Control nodes that store the control flow at the end of basic blocks, and
+// form a separate node hierarchy to non-control nodes.
+//
+// The macro lists below must match the node class hierarchy.
+
+#define GENERIC_OPERATIONS_NODE_LIST(V) \
+ V(GenericAdd) \
+ V(GenericSubtract) \
+ V(GenericMultiply) \
+ V(GenericDivide) \
+ V(GenericModulus) \
+ V(GenericExponentiate) \
+ V(GenericBitwiseAnd) \
+ V(GenericBitwiseOr) \
+ V(GenericBitwiseXor) \
+ V(GenericShiftLeft) \
+ V(GenericShiftRight) \
+ V(GenericShiftRightLogical) \
+ V(GenericBitwiseNot) \
+ V(GenericNegate) \
+ V(GenericIncrement) \
+ V(GenericDecrement) \
+ V(GenericEqual) \
+ V(GenericStrictEqual) \
+ V(GenericLessThan) \
+ V(GenericLessThanOrEqual) \
+ V(GenericGreaterThan) \
+ V(GenericGreaterThanOrEqual)
+
+#define VALUE_NODE_LIST(V) \
+ V(CallProperty) \
+ V(CallUndefinedReceiver) \
+ V(Constant) \
+ V(InitialValue) \
+ V(LoadField) \
+ V(LoadGlobal) \
+ V(LoadNamedGeneric) \
+ V(Phi) \
+ V(RegisterInput) \
+ V(RootConstant) \
+ V(SmiConstant) \
+ GENERIC_OPERATIONS_NODE_LIST(V)
+
+#define NODE_LIST(V) \
+ V(Checkpoint) \
+ V(CheckMaps) \
+ V(GapMove) \
+ V(SoftDeopt) \
+ V(StoreField) \
+ V(StoreToFrame) \
+ VALUE_NODE_LIST(V)
+
+#define CONDITIONAL_CONTROL_NODE_LIST(V) \
+ V(BranchIfTrue) \
+ V(BranchIfToBooleanTrue)
+
+#define UNCONDITIONAL_CONTROL_NODE_LIST(V) \
+ V(Jump) \
+ V(JumpLoop)
+
+#define CONTROL_NODE_LIST(V) \
+ V(Return) \
+ CONDITIONAL_CONTROL_NODE_LIST(V) \
+ UNCONDITIONAL_CONTROL_NODE_LIST(V)
+
+#define NODE_BASE_LIST(V) \
+ NODE_LIST(V) \
+ CONTROL_NODE_LIST(V)
+
+// Define the opcode enum.
+#define DEF_OPCODES(type) k##type,
+enum class Opcode : uint8_t { NODE_BASE_LIST(DEF_OPCODES) };
+#undef DEF_OPCODES
+#define PLUS_ONE(type) +1
+static constexpr int kOpcodeCount = NODE_BASE_LIST(PLUS_ONE);
+static constexpr Opcode kFirstOpcode = static_cast<Opcode>(0);
+static constexpr Opcode kLastOpcode = static_cast<Opcode>(kOpcodeCount - 1);
+#undef PLUS_ONE
+
+const char* ToString(Opcode opcode);
+inline std::ostream& operator<<(std::ostream& os, Opcode opcode) {
+ return os << ToString(opcode);
+}
+
+#define V(Name) Opcode::k##Name,
+static constexpr Opcode kFirstValueNodeOpcode =
+ std::min({VALUE_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastValueNodeOpcode =
+ std::max({VALUE_NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kFirstConditionalControlNodeOpcode =
+ std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastConditionalControlNodeOpcode =
+ std::max({CONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kLastUnconditionalControlNodeOpcode =
+ std::max({UNCONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});
+static constexpr Opcode kFirstUnconditionalControlNodeOpcode =
+ std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
+
+static constexpr Opcode kFirstControlNodeOpcode =
+ std::min({CONTROL_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastControlNodeOpcode =
+ std::max({CONTROL_NODE_LIST(V) kFirstOpcode});
+#undef V
+
+constexpr bool IsValueNode(Opcode opcode) {
+ return kFirstValueNodeOpcode <= opcode && opcode <= kLastValueNodeOpcode;
+}
+constexpr bool IsConditionalControlNode(Opcode opcode) {
+ return kFirstConditionalControlNodeOpcode <= opcode &&
+ opcode <= kLastConditionalControlNodeOpcode;
+}
+constexpr bool IsUnconditionalControlNode(Opcode opcode) {
+ return kFirstUnconditionalControlNodeOpcode <= opcode &&
+ opcode <= kLastUnconditionalControlNodeOpcode;
+}
+
+// Forward-declare NodeBase sub-hierarchies.
+class Node;
+class ControlNode;
+class ConditionalControlNode;
+class UnconditionalControlNode;
+class ValueNode;
+
+#define DEF_FORWARD_DECLARATION(type, ...) class type;
+NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
+#undef DEF_FORWARD_DECLARATION
+
+using NodeIdT = uint32_t;
+static constexpr uint32_t kInvalidNodeId = 0;
+
+class OpProperties {
+ public:
+ bool is_call() const { return kIsCallBit::decode(bitfield_); }
+ bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
+ bool can_read() const { return kCanReadBit::decode(bitfield_); }
+ bool can_write() const { return kCanWriteBit::decode(bitfield_); }
+ bool non_memory_side_effects() const {
+ return kNonMemorySideEffectsBit::decode(bitfield_);
+ }
+
+ bool is_pure() const { return (bitfield_ | kPureMask) == kPureValue; }
+ bool is_required_when_unused() const {
+ return can_write() || non_memory_side_effects();
+ }
+
+ constexpr OpProperties operator|(const OpProperties& that) {
+ return OpProperties(bitfield_ | that.bitfield_);
+ }
+
+ static constexpr OpProperties Pure() { return OpProperties(kPureValue); }
+ static constexpr OpProperties Call() {
+ return OpProperties(kIsCallBit::encode(true));
+ }
+ static constexpr OpProperties Deopt() {
+ return OpProperties(kCanDeoptBit::encode(true));
+ }
+ static constexpr OpProperties Reading() {
+ return OpProperties(kCanReadBit::encode(true));
+ }
+ static constexpr OpProperties Writing() {
+ return OpProperties(kCanWriteBit::encode(true));
+ }
+ static constexpr OpProperties NonMemorySideEffects() {
+ return OpProperties(kNonMemorySideEffectsBit::encode(true));
+ }
+ static constexpr OpProperties AnySideEffects() {
+ return Reading() | Writing() | NonMemorySideEffects();
+ }
+
+ private:
+ using kIsCallBit = base::BitField<bool, 0, 1>;
+ using kCanDeoptBit = kIsCallBit::Next<bool, 1>;
+ using kCanReadBit = kCanDeoptBit::Next<bool, 1>;
+ using kCanWriteBit = kCanReadBit::Next<bool, 1>;
+ using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
+
+ static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask |
+ kNonMemorySideEffectsBit::kMask;
+ static const uint32_t kPureValue = kCanReadBit::encode(false) |
+ kCanWriteBit::encode(false) |
+ kNonMemorySideEffectsBit::encode(false);
+
+ constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+
+ uint32_t bitfield_;
+};
+
+class ValueLocation {
+ public:
+ ValueLocation() = default;
+
+ template <typename... Args>
+ void SetUnallocated(Args&&... args) {
+ DCHECK(operand_.IsInvalid());
+ operand_ = compiler::UnallocatedOperand(args...);
+ }
+
+ template <typename... Args>
+ void SetAllocated(Args&&... args) {
+ DCHECK(operand_.IsUnallocated());
+ operand_ = compiler::AllocatedOperand(args...);
+ }
+
+ // Only to be used on inputs that inherit allocation.
+ template <typename... Args>
+ void InjectAllocated(Args&&... args) {
+ operand_ = compiler::AllocatedOperand(args...);
+ }
+
+ template <typename... Args>
+ void SetConstant(Args&&... args) {
+ DCHECK(operand_.IsUnallocated());
+ operand_ = compiler::ConstantOperand(args...);
+ }
+
+ Register AssignedRegister() const {
+ return Register::from_code(
+ compiler::AllocatedOperand::cast(operand_).register_code());
+ }
+
+ const compiler::InstructionOperand& operand() const { return operand_; }
+ const compiler::InstructionOperand& operand() { return operand_; }
+
+ private:
+ compiler::InstructionOperand operand_;
+};
+
+class Input : public ValueLocation {
+ public:
+ explicit Input(ValueNode* node) : node_(node) {}
+
+ ValueNode* node() const { return node_; }
+
+ NodeIdT next_use_id() const { return next_use_id_; }
+
+ // Used in ValueNode::mark_use
+ NodeIdT* get_next_use_id_address() { return &next_use_id_; }
+
+ private:
+ ValueNode* node_;
+ NodeIdT next_use_id_ = kInvalidNodeId;
+};
+
+// Dummy type for the initial raw allocation.
+struct NodeWithInlineInputs {};
+
+namespace detail {
+// Helper for getting the static opcode of a Node subclass. This is in a
+// "detail" namespace rather than in NodeBase because we can't template
+// specialize outside of namespace scopes before C++17.
+template <class T>
+struct opcode_of_helper;
+
+#define DEF_OPCODE_OF(Name) \
+ template <> \
+ struct opcode_of_helper<Name> { \
+ static constexpr Opcode value = Opcode::k##Name; \
+ };
+NODE_BASE_LIST(DEF_OPCODE_OF)
+#undef DEF_OPCODE_OF
+} // namespace detail
+
+class NodeBase : public ZoneObject {
+ protected:
+ template <class T>
+ static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
+
+ public:
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs,
+ Args&&... args) {
+ Derived* node =
+ Allocate<Derived>(zone, inputs.size(), std::forward<Args>(args)...);
+
+ int i = 0;
+ for (ValueNode* input : inputs) {
+ DCHECK_NOT_NULL(input);
+ node->set_input(i++, input);
+ }
+
+ return node;
+ }
+
+ // Inputs must be initialized manually.
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
+ Derived* node =
+ Allocate<Derived>(zone, input_count, std::forward<Args>(args)...);
+ return node;
+ }
+
+ // Overwritten by subclasses.
+ static constexpr OpProperties kProperties = OpProperties::Pure();
+ inline const OpProperties& properties() const;
+
+ constexpr Opcode opcode() const { return OpcodeField::decode(bit_field_); }
+
+ template <class T>
+ constexpr bool Is() const;
+
+ template <class T>
+ constexpr T* Cast() {
+ DCHECK(Is<T>());
+ return static_cast<T*>(this);
+ }
+ template <class T>
+ constexpr const T* Cast() const {
+ DCHECK(Is<T>());
+ return static_cast<const T*>(this);
+ }
+ template <class T>
+ constexpr T* TryCast() {
+ return Is<T>() ? static_cast<T*>(this) : nullptr;
+ }
+
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const {
+ return InputCountField::decode(bit_field_);
+ }
+
+ Input& input(int index) { return *input_address(index); }
+ const Input& input(int index) const { return *input_address(index); }
+
+ // Input iterators, use like:
+ //
+ // for (Input& input : *node) { ... }
+ auto begin() { return std::make_reverse_iterator(input_address(-1)); }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ constexpr NodeIdT id() const {
+ DCHECK_NE(id_, kInvalidNodeId);
+ return id_;
+ }
+ void set_id(NodeIdT id) {
+ DCHECK_EQ(id_, kInvalidNodeId);
+ DCHECK_NE(id, kInvalidNodeId);
+ id_ = id;
+ }
+
+ int num_temporaries_needed() const {
+#ifdef DEBUG
+ if (kTemporariesState == kUnset) {
+ DCHECK_EQ(num_temporaries_needed_, 0);
+ } else {
+ DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
+ }
+#endif // DEBUG
+ return num_temporaries_needed_;
+ }
+
+ RegList temporaries() const {
+ DCHECK_EQ(kTemporariesState, kHasTemporaries);
+ return temporaries_;
+ }
+
+ void assign_temporaries(RegList list) {
+#ifdef DEBUG
+ if (kTemporariesState == kUnset) {
+ DCHECK_EQ(num_temporaries_needed_, 0);
+ } else {
+ DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
+ }
+ kTemporariesState = kHasTemporaries;
+#endif // DEBUG
+ temporaries_ = list;
+ }
+
+ void Print(std::ostream& os, MaglevGraphLabeller*) const;
+
+ protected:
+ NodeBase(Opcode opcode, size_t input_count)
+ : bit_field_(OpcodeField::encode(opcode) |
+ InputCountField::encode(input_count)) {}
+
+ Input* input_address(int index) {
+ DCHECK_LT(index, input_count());
+ return reinterpret_cast<Input*>(this) - (index + 1);
+ }
+ const Input* input_address(int index) const {
+ DCHECK_LT(index, input_count());
+ return reinterpret_cast<const Input*>(this) - (index + 1);
+ }
+
+ void set_input(int index, ValueNode* input) {
+ new (input_address(index)) Input(input);
+ }
+
+ private:
+ template <class Derived, typename... Args>
+ static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
+ const size_t size = sizeof(Derived) + input_count * sizeof(Input);
+ intptr_t raw_buffer =
+ reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
+ void* node_buffer =
+ reinterpret_cast<void*>(raw_buffer + input_count * sizeof(Input));
+ Derived* node =
+ new (node_buffer) Derived(input_count, std::forward<Args>(args)...);
+ return node;
+ }
+
+ protected:
+ // Bitfield specification.
+ using OpcodeField = base::BitField<Opcode, 0, 6>;
+ STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
+ using InputCountField = OpcodeField::Next<uint16_t, 16>;
+ // Subclasses may use the remaining bits.
+ template <class T, int size>
+ using NextBitField = InputCountField::Next<T, size>;
+
+ void set_temporaries_needed(int value) {
+#ifdef DEBUG
+ DCHECK_EQ(kTemporariesState, kUnset);
+ kTemporariesState = kNeedsTemporaries;
+#endif // DEBUG
+ num_temporaries_needed_ = value;
+ }
+
+ uint32_t bit_field_;
+
+ private:
+ NodeIdT id_ = kInvalidNodeId;
+
+ union {
+ int num_temporaries_needed_ = 0;
+ RegList temporaries_;
+ };
+#ifdef DEBUG
+ enum {
+ kUnset,
+ kNeedsTemporaries,
+ kHasTemporaries
+ } kTemporariesState = kUnset;
+#endif // DEBUG
+
+ NodeBase() = delete;
+ NodeBase(const NodeBase&) = delete;
+ NodeBase(NodeBase&&) = delete;
+ NodeBase& operator=(const NodeBase&) = delete;
+ NodeBase& operator=(NodeBase&&) = delete;
+};
+
+template <class T>
+constexpr bool NodeBase::Is() const {
+ return opcode() == opcode_of<T>;
+}
+
+// Specialized sub-hierarchy type checks.
+template <>
+constexpr bool NodeBase::Is<ValueNode>() const {
+ return IsValueNode(opcode());
+}
+template <>
+constexpr bool NodeBase::Is<ConditionalControlNode>() const {
+ return IsConditionalControlNode(opcode());
+}
+template <>
+constexpr bool NodeBase::Is<UnconditionalControlNode>() const {
+ return IsUnconditionalControlNode(opcode());
+}
+
+// The Node class hierarchy contains all non-control nodes.
+class Node : public NodeBase {
+ public:
+ using List = base::ThreadedList<Node>;
+
+ inline ValueLocation& result();
+
+ protected:
+ explicit Node(Opcode opcode, size_t input_count)
+ : NodeBase(opcode, input_count) {}
+
+ private:
+ Node** next() { return &next_; }
+ Node* next_ = nullptr;
+ friend List;
+ friend base::ThreadedListTraits<Node>;
+};
+
+// All non-control nodes with a result.
+class ValueNode : public Node {
+ public:
+ ValueLocation& result() { return result_; }
+ const ValueLocation& result() const { return result_; }
+
+ const compiler::InstructionOperand& hint() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ DCHECK(spill_or_hint_.IsInvalid() || spill_or_hint_.IsUnallocated());
+ return spill_or_hint_;
+ }
+
+ void SetNoSpillOrHint() {
+ DCHECK_EQ(state_, kLastUse);
+#ifdef DEBUG
+ state_ = kSpillOrHint;
+#endif // DEBUG
+ spill_or_hint_ = compiler::InstructionOperand();
+ }
+
+ bool is_spilled() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ return spill_or_hint_.IsStackSlot();
+ }
+
+ void Spill(compiler::AllocatedOperand operand) {
+#ifdef DEBUG
+ if (state_ == kLastUse) {
+ state_ = kSpillOrHint;
+ } else {
+ DCHECK(!is_spilled());
+ }
+#endif // DEBUG
+ DCHECK(operand.IsAnyStackSlot());
+ spill_or_hint_ = operand;
+ }
+
+ compiler::AllocatedOperand spill_slot() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ DCHECK(is_spilled());
+ return compiler::AllocatedOperand::cast(spill_or_hint_);
+ }
+
+ void mark_use(NodeIdT id, Input* use) {
+ DCHECK_EQ(state_, kLastUse);
+ DCHECK_NE(id, kInvalidNodeId);
+ DCHECK_LT(start_id(), id);
+ DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_);
+ end_id_ = id;
+ *last_uses_next_use_id_ = id;
+ if (use) {
+ last_uses_next_use_id_ = use->get_next_use_id_address();
+ }
+ }
+
+ struct LiveRange {
+ NodeIdT start = kInvalidNodeId;
+ NodeIdT end = kInvalidNodeId;
+ };
+
+ bool has_valid_live_range() const { return end_id_ != 0; }
+ LiveRange live_range() const { return {start_id(), end_id_}; }
+ NodeIdT next_use() const { return next_use_; }
+
+ // The following metods should only be used during register allocation, to
+ // mark the _current_ state of this Node according to the register allocator.
+ void set_next_use(NodeIdT use) { next_use_ = use; }
+
+ // A node is dead once it has no more upcoming uses.
+ bool is_dead() const { return next_use_ == kInvalidNodeId; }
+
+ void AddRegister(Register reg) { registers_with_result_.set(reg); }
+ void RemoveRegister(Register reg) { registers_with_result_.clear(reg); }
+ RegList ClearRegisters() {
+ return std::exchange(registers_with_result_, kEmptyRegList);
+ }
+
+ int num_registers() const { return registers_with_result_.Count(); }
+ bool has_register() const { return registers_with_result_ != kEmptyRegList; }
+
+ compiler::AllocatedOperand allocation() const {
+ if (has_register()) {
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ registers_with_result_.first().code());
+ }
+ DCHECK(is_spilled());
+ return compiler::AllocatedOperand::cast(spill_or_hint_);
+ }
+
+ protected:
+ explicit ValueNode(Opcode opcode, size_t input_count)
+ : Node(opcode, input_count),
+ last_uses_next_use_id_(&next_use_)
+#ifdef DEBUG
+ ,
+ state_(kLastUse)
+#endif // DEBUG
+ {
+ }
+
+ // Rename for better pairing with `end_id`.
+ NodeIdT start_id() const { return id(); }
+
+ NodeIdT end_id_ = kInvalidNodeId;
+ NodeIdT next_use_ = kInvalidNodeId;
+ ValueLocation result_;
+ RegList registers_with_result_ = kEmptyRegList;
+ union {
+ // Pointer to the current last use's next_use_id field. Most of the time
+ // this will be a pointer to an Input's next_use_id_ field, but it's
+ // initialized to this node's next_use_ to track the first use.
+ NodeIdT* last_uses_next_use_id_;
+ compiler::InstructionOperand spill_or_hint_;
+ };
+#ifdef DEBUG
+ // TODO(leszeks): Consider spilling into kSpill and kHint.
+ enum { kLastUse, kSpillOrHint } state_;
+#endif // DEBUG
+};
+
+ValueLocation& Node::result() {
+ DCHECK(Is<ValueNode>());
+ return Cast<ValueNode>()->result();
+}
+
+template <class Derived>
+class NodeT : public Node {
+ STATIC_ASSERT(!IsValueNode(opcode_of<Derived>));
+
+ public:
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ const OpProperties& properties() const { return Derived::kProperties; }
+
+ protected:
+ explicit NodeT(size_t input_count) : Node(opcode_of<Derived>, input_count) {}
+};
+
+template <size_t InputCount, class Derived>
+class FixedInputNodeT : public NodeT<Derived> {
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(this->input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit FixedInputNodeT(size_t input_count) : NodeT<Derived>(kInputCount) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+template <class Derived>
+class ValueNodeT : public ValueNode {
+ STATIC_ASSERT(IsValueNode(opcode_of<Derived>));
+
+ public:
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ const OpProperties& properties() const { return Derived::kProperties; }
+
+ protected:
+ explicit ValueNodeT(size_t input_count)
+ : ValueNode(opcode_of<Derived>, input_count) {}
+};
+
+template <size_t InputCount, class Derived>
+class FixedInputValueNodeT : public ValueNodeT<Derived> {
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(this->input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit FixedInputValueNodeT(size_t input_count)
+ : ValueNodeT<Derived>(InputCount) {
+ DCHECK_EQ(input_count, InputCount);
+ USE(input_count);
+ }
+};
+
+template <class Derived, Operation kOperation>
+class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
+ using Base = FixedInputValueNodeT<1, Derived>;
+
+ public:
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ static constexpr int kOperandIndex = 0;
+ Input& operand_input() { return Node::input(kOperandIndex); }
+ compiler::FeedbackSource feedback() const { return feedback_; }
+
+ protected:
+ explicit UnaryWithFeedbackNode(size_t input_count,
+ const compiler::FeedbackSource& feedback)
+ : Base(input_count), feedback_(feedback) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ const compiler::FeedbackSource feedback_;
+};
+
+template <class Derived, Operation kOperation>
+class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
+ using Base = FixedInputValueNodeT<2, Derived>;
+
+ public:
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+ compiler::FeedbackSource feedback() const { return feedback_; }
+
+ protected:
+ BinaryWithFeedbackNode(size_t input_count,
+ const compiler::FeedbackSource& feedback)
+ : Base(input_count), feedback_(feedback) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ const compiler::FeedbackSource feedback_;
+};
+
+#define DEF_OPERATION_NODE(Name, Super, OpName) \
+ class Name : public Super<Name, Operation::k##OpName> { \
+ using Base = Super<Name, Operation::k##OpName>; \
+ \
+ public: \
+ Name(size_t input_count, const compiler::FeedbackSource& feedback) \
+ : Base(input_count, feedback) {} \
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
+ };
+
+#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
+ DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
+#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
+ DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
+UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
+ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
+COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
+#undef DEF_UNARY_WITH_FEEDBACK_NODE
+#undef DEF_BINARY_WITH_FEEDBACK_NODE
+
+class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
+ using Base = FixedInputValueNodeT<0, InitialValue>;
+
+ public:
+ explicit InitialValue(size_t input_count, interpreter::Register source)
+ : Base(input_count), source_(source) {}
+
+ interpreter::Register source() const { return source_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const interpreter::Register source_;
+};
+
+class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
+ using Base = FixedInputValueNodeT<0, RegisterInput>;
+
+ public:
+ explicit RegisterInput(size_t input_count, Register input)
+ : Base(input_count), input_(input) {}
+
+ Register input() const { return input_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const Register input_;
+};
+
+class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
+ using Base = FixedInputValueNodeT<0, SmiConstant>;
+
+ public:
+ explicit SmiConstant(size_t input_count, Smi value)
+ : Base(input_count), value_(value) {}
+
+ Smi value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const Smi value_;
+};
+
+class Constant : public FixedInputValueNodeT<0, Constant> {
+ using Base = FixedInputValueNodeT<0, Constant>;
+
+ public:
+ explicit Constant(size_t input_count, const compiler::HeapObjectRef& object)
+ : Base(input_count), object_(object) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::HeapObjectRef object_;
+};
+
+class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
+ using Base = FixedInputValueNodeT<0, RootConstant>;
+
+ public:
+ explicit RootConstant(size_t input_count, RootIndex index)
+ : Base(input_count), index_(index) {}
+
+ RootIndex index() const { return index_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const RootIndex index_;
+};
+
+class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
+ using Base = FixedInputNodeT<0, Checkpoint>;
+
+ public:
+ explicit Checkpoint(size_t input_count, int bytecode_position,
+ bool accumulator_is_live, ValueNode* accumulator)
+ : Base(input_count),
+ bytecode_position_(bytecode_position),
+ accumulator_(accumulator_is_live ? accumulator : nullptr) {}
+
+ int bytecode_position() const { return bytecode_position_; }
+ bool is_used() const { return IsUsedBit::decode(bit_field_); }
+ void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
+ ValueNode* accumulator() const { return accumulator_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ using IsUsedBit = NextBitField<bool, 1>;
+
+ const int bytecode_position_;
+ ValueNode* const accumulator_;
+};
+
+class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
+ using Base = FixedInputNodeT<0, SoftDeopt>;
+
+ public:
+ explicit SoftDeopt(size_t input_count) : Base(input_count) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Deopt();
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
+ using Base = FixedInputNodeT<1, CheckMaps>;
+
+ public:
+ explicit CheckMaps(size_t input_count, const compiler::MapRef& map)
+ : Base(input_count), map_(map) {}
+
+ // TODO(verwaest): This just calls in deferred code, so probably we'll need to
+ // mark that to generate stack maps. Mark as call so we at least clear the
+ // registers since we currently don't properly spill either.
+ static constexpr OpProperties kProperties =
+ OpProperties::Deopt() | OpProperties::Call();
+
+ compiler::MapRef map() const { return map_; }
+
+ static constexpr int kActualMapIndex = 0;
+ Input& actual_map_input() { return input(kActualMapIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::MapRef map_;
+};
+
+class LoadField : public FixedInputValueNodeT<1, LoadField> {
+ using Base = FixedInputValueNodeT<1, LoadField>;
+
+ public:
+ explicit LoadField(size_t input_count, int handler)
+ : Base(input_count), handler_(handler) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ int handler() const { return handler_; }
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return input(kObjectIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int handler_;
+};
+
+class StoreField : public FixedInputNodeT<2, StoreField> {
+ using Base = FixedInputNodeT<2, StoreField>;
+
+ public:
+ explicit StoreField(size_t input_count, int handler)
+ : Base(input_count), handler_(handler) {}
+
+ int handler() const { return handler_; }
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kValueIndex = 1;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int handler_;
+};
+
+class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
+ using Base = FixedInputValueNodeT<1, LoadGlobal>;
+
+ public:
+ explicit LoadGlobal(size_t input_count, const compiler::NameRef& name)
+ : Base(input_count), name_(name) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& context() { return input(0); }
+ const compiler::NameRef& name() const { return name_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::NameRef name_;
+};
+
+class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
+ using Base = FixedInputValueNodeT<2, LoadNamedGeneric>;
+
+ public:
+ explicit LoadNamedGeneric(size_t input_count, const compiler::NameRef& name)
+ : Base(input_count), name_(name) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ compiler::NameRef name() const { return name_; }
+
+ static constexpr int kContextIndex = 0;
+ static constexpr int kObjectIndex = 1;
+ Input& context() { return input(kContextIndex); }
+ Input& object_input() { return input(kObjectIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::NameRef name_;
+};
+
+class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
+ using Base = FixedInputNodeT<0, StoreToFrame>;
+
+ public:
+ StoreToFrame(size_t input_count, ValueNode* value,
+ interpreter::Register target)
+ : Base(input_count), value_(value), target_(target) {}
+
+ interpreter::Register target() const { return target_; }
+ ValueNode* value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ ValueNode* const value_;
+ const interpreter::Register target_;
+};
+
+class GapMove : public FixedInputNodeT<0, GapMove> {
+ using Base = FixedInputNodeT<0, GapMove>;
+
+ public:
+ GapMove(size_t input_count, compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target)
+ : Base(input_count), source_(source), target_(target) {}
+
+ compiler::AllocatedOperand source() const { return source_; }
+ compiler::AllocatedOperand target() const { return target_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ compiler::AllocatedOperand source_;
+ compiler::AllocatedOperand target_;
+};
+
+// TODO(verwaest): It may make more sense to buffer phis in merged_states until
+// we set up the interpreter frame state for code generation. At that point we
+// can generate correctly-sized phis.
+class Phi : public ValueNodeT<Phi> {
+ using Base = ValueNodeT<Phi>;
+
+ public:
+ using List = base::ThreadedList<Phi>;
+
+ // TODO(jgruber): More intuitive constructors, if possible.
+ Phi(size_t input_count, interpreter::Register owner, int merge_offset)
+ : Base(input_count), owner_(owner), merge_offset_(merge_offset) {}
+
+ interpreter::Register owner() const { return owner_; }
+ int merge_offset() const { return merge_offset_; }
+
+ using Node::set_input;
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void AllocateVregInPostProcess(MaglevVregAllocationState*);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ Phi** next() { return &next_; }
+
+ const interpreter::Register owner_;
+ Phi* next_ = nullptr;
+ const int merge_offset_;
+ friend List;
+ friend base::ThreadedListTraits<Phi>;
+};
+
+class CallProperty : public ValueNodeT<CallProperty> {
+ using Base = ValueNodeT<CallProperty>;
+
+ public:
+ explicit CallProperty(size_t input_count) : Base(input_count) {}
+
+ // This ctor is used when for variable input counts.
+ // Inputs must be initialized manually.
+ CallProperty(size_t input_count, ValueNode* function, ValueNode* context)
+ : Base(input_count) {
+ set_input(0, function);
+ set_input(1, context);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& function() { return input(0); }
+ const Input& function() const { return input(0); }
+ Input& context() { return input(1); }
+ const Input& context() const { return input(1); }
+ int num_args() const { return input_count() - 2; }
+ Input& arg(int i) { return input(i + 2); }
+ void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CallUndefinedReceiver : public ValueNodeT<CallUndefinedReceiver> {
+ using Base = ValueNodeT<CallUndefinedReceiver>;
+
+ public:
+ explicit CallUndefinedReceiver(size_t input_count) : Base(input_count) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& function() { return input(0); }
+ const Input& function() const { return input(0); }
+ Input& context() { return input(1); }
+ const Input& context() const { return input(1); }
+ int num_args() const { return input_count() - 2; }
+ Input& arg(int i) { return input(i + 2); }
+ void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+// Represents either a direct BasicBlock pointer, or an entry in a list of
+// unresolved BasicBlockRefs which will be mutated (in place) at some point into
+// direct BasicBlock pointers.
+class BasicBlockRef {
+ struct BasicBlockRefBuilder;
+
+ public:
+ BasicBlockRef() : next_ref_(nullptr) {
+#ifdef DEBUG
+ state_ = kRefList;
+#endif
+ }
+ explicit BasicBlockRef(BasicBlock* block) : block_ptr_(block) {
+#ifdef DEBUG
+ state_ = kBlockPointer;
+#endif
+ }
+
+ // Refs can't be copied or moved, since they are referenced by `this` pointer
+ // in the ref list.
+ BasicBlockRef(const BasicBlockRef&) = delete;
+ BasicBlockRef(BasicBlockRef&&) = delete;
+ BasicBlockRef& operator=(const BasicBlockRef&) = delete;
+ BasicBlockRef& operator=(BasicBlockRef&&) = delete;
+
+ // Construct a new ref-list mode BasicBlockRef and add it to the given ref
+ // list.
+ explicit BasicBlockRef(BasicBlockRef* ref_list_head) : BasicBlockRef() {
+ BasicBlockRef* old_next_ptr = MoveToRefList(ref_list_head);
+ USE(old_next_ptr);
+ DCHECK_NULL(old_next_ptr);
+ }
+
+ // Change this ref to a direct basic block pointer, returning the old "next"
+ // pointer of the current ref.
+ BasicBlockRef* SetToBlockAndReturnNext(BasicBlock* block) {
+ DCHECK_EQ(state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ block_ptr_ = block;
+#ifdef DEBUG
+ state_ = kBlockPointer;
+#endif
+ return old_next_ptr;
+ }
+
+ // Reset this ref list to null, returning the old ref list (i.e. the old
+ // "next" pointer).
+ BasicBlockRef* Reset() {
+ DCHECK_EQ(state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ next_ref_ = nullptr;
+ return old_next_ptr;
+ }
+
+ // Move this ref to the given ref list, returning the old "next" pointer of
+ // the current ref.
+ BasicBlockRef* MoveToRefList(BasicBlockRef* ref_list_head) {
+ DCHECK_EQ(state_, kRefList);
+ DCHECK_EQ(ref_list_head->state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ next_ref_ = ref_list_head->next_ref_;
+ ref_list_head->next_ref_ = this;
+ return old_next_ptr;
+ }
+
+ BasicBlock* block_ptr() const {
+ DCHECK_EQ(state_, kBlockPointer);
+ return block_ptr_;
+ }
+
+ BasicBlockRef* next_ref() const {
+ DCHECK_EQ(state_, kRefList);
+ return next_ref_;
+ }
+
+ bool has_ref() const {
+ DCHECK_EQ(state_, kRefList);
+ return next_ref_ != nullptr;
+ }
+
+ private:
+ union {
+ BasicBlock* block_ptr_;
+ BasicBlockRef* next_ref_;
+ };
+#ifdef DEBUG
+ enum { kBlockPointer, kRefList } state_;
+#endif // DEBUG
+};
+
+class ControlNode : public NodeBase {
+ public:
+ // A "hole" in control flow is a control node that unconditionally interrupts
+ // linear control flow (either by jumping or by exiting).
+ //
+ // A "post-dominating" hole is a hole that is guaranteed to be be reached in
+ // control flow after this node (i.e. it is a hole that is a post-dominator
+ // of this node).
+ ControlNode* next_post_dominating_hole() const {
+ return next_post_dominating_hole_;
+ }
+ void set_next_post_dominating_hole(ControlNode* node) {
+ DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
+ node->Is<JumpLoop>());
+ next_post_dominating_hole_ = node;
+ }
+
+ protected:
+ explicit ControlNode(Opcode opcode, size_t input_count)
+ : NodeBase(opcode, input_count) {}
+
+ private:
+ ControlNode* next_post_dominating_hole_ = nullptr;
+};
+
+class UnconditionalControlNode : public ControlNode {
+ public:
+ BasicBlock* target() const { return target_.block_ptr(); }
+ int predecessor_id() const { return predecessor_id_; }
+ void set_predecessor_id(int id) { predecessor_id_ = id; }
+
+ protected:
+ explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlockRef* target_refs)
+ : ControlNode(opcode, input_count), target_(target_refs) {}
+ explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlock* target)
+ : ControlNode(opcode, input_count), target_(target) {}
+
+ private:
+ const BasicBlockRef target_;
+ int predecessor_id_ = 0;
+};
+
+template <class Derived>
+class UnconditionalControlNodeT : public UnconditionalControlNode {
+ STATIC_ASSERT(IsUnconditionalControlNode(opcode_of<Derived>));
+ static constexpr size_t kInputCount = 0;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit UnconditionalControlNodeT(size_t input_count,
+ BasicBlockRef* target_refs)
+ : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target_refs) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+ explicit UnconditionalControlNodeT(size_t input_count, BasicBlock* target)
+ : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+class ConditionalControlNode : public ControlNode {
+ public:
+ ConditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : ControlNode(opcode, input_count),
+ if_true_(if_true_refs),
+ if_false_(if_false_refs) {}
+
+ BasicBlock* if_true() const { return if_true_.block_ptr(); }
+ BasicBlock* if_false() const { return if_false_.block_ptr(); }
+
+ private:
+ BasicBlockRef if_true_;
+ BasicBlockRef if_false_;
+};
+
+template <size_t InputCount, class Derived>
+class ConditionalControlNodeT : public ConditionalControlNode {
+ STATIC_ASSERT(IsConditionalControlNode(opcode_of<Derived>));
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit ConditionalControlNodeT(size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : ConditionalControlNode(opcode_of<Derived>, kInputCount, if_true_refs,
+ if_false_refs) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+class Jump : public UnconditionalControlNodeT<Jump> {
+ using Base = UnconditionalControlNodeT<Jump>;
+
+ public:
+ explicit Jump(size_t input_count, BasicBlockRef* target_refs)
+ : Base(input_count, target_refs) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
+ using Base = UnconditionalControlNodeT<JumpLoop>;
+
+ public:
+ explicit JumpLoop(size_t input_count, BasicBlock* target)
+ : Base(input_count, target) {}
+
+ explicit JumpLoop(size_t input_count, BasicBlockRef* ref)
+ : Base(input_count, ref) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Return : public ControlNode {
+ public:
+ explicit Return(size_t input_count)
+ : ControlNode(opcode_of<Return>, input_count) {}
+
+ Input& value_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
+ using Base = ConditionalControlNodeT<1, BranchIfTrue>;
+
+ public:
+ explicit BranchIfTrue(size_t input_count, BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs) {}
+
+ Input& condition_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfToBooleanTrue
+ : public ConditionalControlNodeT<1, BranchIfToBooleanTrue> {
+ using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;
+
+ public:
+ explicit BranchIfToBooleanTrue(size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& condition_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfCompare
+ : public ConditionalControlNodeT<2, BranchIfToBooleanTrue> {
+ using Base = ConditionalControlNodeT<2, BranchIfToBooleanTrue>;
+
+ public:
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return NodeBase::input(kLeftIndex); }
+ Input& right_input() { return NodeBase::input(kRightIndex); }
+
+ explicit BranchIfCompare(size_t input_count, Operation operation,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs), operation_(operation) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ Operation operation_;
+};
+
+const OpProperties& NodeBase::properties() const {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return Name::kProperties;
+ NODE_BASE_LIST(V)
+#undef V
+ }
+ UNREACHABLE();
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_IR_H_
diff --git a/deps/v8/src/maglev/maglev-regalloc-data.h b/deps/v8/src/maglev/maglev-regalloc-data.h
new file mode 100644
index 0000000000..f46b701147
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc-data.h
@@ -0,0 +1,83 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
+#define V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
+
+#include "src/base/pointer-with-payload.h"
+#include "src/codegen/register.h"
+#include "src/compiler/backend/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class ValueNode;
+
+#define COUNT(V) +1
+static constexpr int kAllocatableGeneralRegisterCount =
+ ALLOCATABLE_GENERAL_REGISTERS(COUNT);
+#undef COUNT
+
+struct RegisterStateFlags {
+ // TODO(v8:7700): Use the good old Flags mechanism.
+ static constexpr int kIsMergeShift = 0;
+ static constexpr int kIsInitializedShift = 1;
+
+ const bool is_initialized = false;
+ const bool is_merge = false;
+
+ explicit constexpr operator uintptr_t() const {
+ return (is_initialized ? 1 << kIsInitializedShift : 0) |
+ (is_merge ? 1 << kIsMergeShift : 0);
+ }
+ constexpr explicit RegisterStateFlags(uintptr_t state)
+ : is_initialized((state & (1 << kIsInitializedShift)) != 0),
+ is_merge((state & (1 << kIsMergeShift)) != 0) {}
+ constexpr RegisterStateFlags(bool is_initialized, bool is_merge)
+ : is_initialized(is_initialized), is_merge(is_merge) {}
+};
+constexpr bool operator==(const RegisterStateFlags& left,
+ const RegisterStateFlags& right) {
+ return left.is_initialized == right.is_initialized &&
+ left.is_merge == right.is_merge;
+}
+
+typedef base::PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
+
+struct RegisterMerge {
+ compiler::AllocatedOperand* operands() {
+ return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
+ }
+ compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
+
+ ValueNode* node;
+};
+
+inline bool LoadMergeState(RegisterState state, RegisterMerge** merge) {
+ DCHECK(state.GetPayload().is_initialized);
+ if (state.GetPayload().is_merge) {
+ *merge = static_cast<RegisterMerge*>(state.GetPointer());
+ return true;
+ }
+ *merge = nullptr;
+ return false;
+}
+
+inline bool LoadMergeState(RegisterState state, ValueNode** node,
+ RegisterMerge** merge) {
+ DCHECK(state.GetPayload().is_initialized);
+ if (LoadMergeState(state, merge)) {
+ *node = (*merge)->node;
+ return true;
+ }
+ *node = static_cast<ValueNode*>(state.GetPointer());
+ return false;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
new file mode 100644
index 0000000000..897f2a2d0e
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -0,0 +1,875 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-regalloc.h"
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+namespace {
+
+constexpr RegisterStateFlags initialized_node{true, false};
+constexpr RegisterStateFlags initialized_merge{true, true};
+
+using BlockReverseIterator = std::vector<BasicBlock>::reverse_iterator;
+
+// A target is a fallthrough of a control node if its ID is the next ID
+// after the control node.
+//
+// TODO(leszeks): Consider using the block iterator instead.
+bool IsTargetOfNodeFallthrough(ControlNode* node, BasicBlock* target) {
+ return node->id() + 1 == target->first_id();
+}
+
+ControlNode* NearestPostDominatingHole(ControlNode* node) {
+ // Conditional control nodes don't cause holes themselves. So, the nearest
+ // post-dominating hole is the conditional control node's next post-dominating
+ // hole.
+ if (node->Is<ConditionalControlNode>()) {
+ return node->next_post_dominating_hole();
+ }
+
+ // If the node is a Jump, it may be a hole, but only if it is not a
+ // fallthrough (jump to the immediately next block). Otherwise, it will point
+ // to the nearest post-dominating hole in its own "next" field.
+ if (Jump* jump = node->TryCast<Jump>()) {
+ if (IsTargetOfNodeFallthrough(jump, jump->target())) {
+ return jump->next_post_dominating_hole();
+ }
+ }
+
+ return node;
+}
+
+bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
+ DCHECK_NOT_NULL(node);
+
+ // TODO(leszeks): We shouldn't have any dead nodes passed into here.
+ if (node->is_dead()) return false;
+
+ // If we're looping, a value can only be live if it was live before the loop.
+ if (target->control_node()->id() <= source->id()) {
+ // Gap moves may already be inserted in the target, so skip over those.
+ return node->id() < target->FirstNonGapMoveId();
+ }
+ // TODO(verwaest): This should be true but isn't because we don't yet
+ // eliminate dead code.
+ // DCHECK_GT(node->next_use, source->id());
+ // TODO(verwaest): Since we don't support deopt yet we can only deal with
+ // direct branches. Add support for holes.
+ return node->live_range().end >= target->first_id();
+}
+
+} // namespace
+
+StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
+ MaglevCompilationUnit* compilation_unit, Graph* graph)
+ : compilation_unit_(compilation_unit) {
+ ComputePostDominatingHoles(graph);
+ AllocateRegisters(graph);
+ graph->set_stack_slots(top_of_stack_);
+}
+
+StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;
+
+// Compute, for all forward control nodes (i.e. excluding Return and JumpLoop) a
+// tree of post-dominating control flow holes.
+//
+// Control flow which interrupts linear control flow fallthrough for basic
+// blocks is considered to introduce a control flow "hole".
+//
+// A──────┐ │
+// │ Jump │ │
+// └──┬───┘ │
+// { │ B──────┐ │
+// Control flow { │ │ Jump │ │ Linear control flow
+// hole after A { │ └─┬────┘ │
+// { ▼ ▼ Fallthrough │
+// C──────┐ │
+// │Return│ │
+// └──────┘ ▼
+//
+// It is interesting, for each such hole, to know what the next hole will be
+// that we will unconditionally reach on our way to an exit node. Such
+// subsequent holes are in "post-dominators" of the current block.
+//
+// As an example, consider the following CFG, with the annotated holes. The
+// post-dominating hole tree is the transitive closure of the post-dominator
+// tree, up to nodes which are holes (in this example, A, D, F and H).
+//
+// CFG Immediate Post-dominating
+// post-dominators holes
+// A──────┐
+// │ Jump │ A A
+// └──┬───┘ │ │
+// { │ B──────┐ │ │
+// Control flow { │ │ Jump │ │ B │ B
+// hole after A { │ └─┬────┘ │ │ │ │
+// { ▼ ▼ │ │ │ │
+// C──────┐ │ │ │ │
+// │Branch│ └►C◄┘ │ C │
+// └┬────┬┘ │ │ │ │
+// ▼ │ │ │ │ │
+// D──────┐│ │ │ │ │
+// │ Jump ││ D │ │ D │ │
+// └──┬───┘▼ │ │ │ │ │ │
+// { │ E──────┐ │ │ │ │ │ │
+// Control flow { │ │ Jump │ │ │ E │ │ │ E │
+// hole after D { │ └─┬────┘ │ │ │ │ │ │ │ │
+// { ▼ ▼ │ │ │ │ │ │ │ │
+// F──────┐ │ ▼ │ │ │ ▼ │ │
+// │ Jump │ └►F◄┘ └─┴►F◄┴─┘
+// └─────┬┘ │ │
+// { │ G──────┐ │ │
+// Control flow { │ │ Jump │ │ G │ G
+// hole after F { │ └─┬────┘ │ │ │ │
+// { ▼ ▼ │ │ │ │
+// H──────┐ ▼ │ ▼ │
+// │Return│ H◄┘ H◄┘
+// └──────┘
+//
+// Since we only care about forward control, loop jumps are treated the same as
+// returns -- they terminate the post-dominating hole chain.
+//
+void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
+ Graph* graph) {
+ // For all blocks, find the list of jumps that jump over code unreachable from
+ // the block. Such a list of jumps terminates in return or jumploop.
+ for (BasicBlock* block : base::Reversed(*graph)) {
+ ControlNode* control = block->control_node();
+ if (auto node = control->TryCast<Jump>()) {
+ // If the current control node is a jump, prepend it to the list of jumps
+ // at the target.
+ control->set_next_post_dominating_hole(
+ NearestPostDominatingHole(node->target()->control_node()));
+ } else if (auto node = control->TryCast<ConditionalControlNode>()) {
+ ControlNode* first =
+ NearestPostDominatingHole(node->if_true()->control_node());
+ ControlNode* second =
+ NearestPostDominatingHole(node->if_false()->control_node());
+
+ // Either find the merge-point of both branches, or the highest reachable
+ // control-node of the longest branch after the last node of the shortest
+ // branch.
+
+ // As long as there's no merge-point.
+ while (first != second) {
+ // Walk the highest branch to find where it goes.
+ if (first->id() > second->id()) std::swap(first, second);
+
+ // If the first branch returns or jumps back, we've found highest
+ // reachable control-node of the longest branch (the second control
+ // node).
+ if (first->Is<Return>() || first->Is<JumpLoop>()) {
+ control->set_next_post_dominating_hole(second);
+ break;
+ }
+
+ // Continue one step along the highest branch. This may cross over the
+ // lowest branch in case it returns or loops. If labelled blocks are
+ // involved such swapping of which branch is the highest branch can
+ // occur multiple times until a return/jumploop/merge is discovered.
+ first = first->next_post_dominating_hole();
+ }
+
+ // Once the branches merged, we've found the gap-chain that's relevant for
+ // the control node.
+ control->set_next_post_dominating_hole(first);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::PrintLiveRegs() const {
+ bool first = true;
+ for (Register reg : used_registers()) {
+ ValueNode* node = GetRegisterValue(reg);
+ if (first) {
+ first = false;
+ } else {
+ printing_visitor_->os() << ", ";
+ }
+ printing_visitor_->os() << reg << "=v" << node->id();
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_.reset(new MaglevPrintingVisitor(std::cout));
+ printing_visitor_->PreProcessGraph(compilation_unit_, graph);
+ }
+
+ for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
+ BasicBlock* block = *block_it_;
+
+ // Restore mergepoint state.
+ if (block->has_state()) {
+ InitializeRegisterValues(block->state()->register_state());
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->PreProcessBasicBlock(compilation_unit_, block);
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+
+ ControlNode* control = NearestPostDominatingHole(block->control_node());
+ if (!control->Is<JumpLoop>()) {
+ printing_visitor_->os() << "\n[holes:";
+ while (true) {
+ if (control->Is<Jump>()) {
+ BasicBlock* target = control->Cast<Jump>()->target();
+ printing_visitor_->os()
+ << " " << control->id() << "-" << target->first_id();
+ control = control->next_post_dominating_hole();
+ DCHECK_NOT_NULL(control);
+ continue;
+ } else if (control->Is<Return>()) {
+ printing_visitor_->os() << " " << control->id() << ".";
+ break;
+ } else if (control->Is<JumpLoop>()) {
+ printing_visitor_->os() << " " << control->id() << "↰";
+ break;
+ }
+ UNREACHABLE();
+ }
+ printing_visitor_->os() << "]";
+ }
+ printing_visitor_->os() << std::endl;
+ }
+
+ // Activate phis.
+ if (block->has_phi()) {
+ // Firstly, make the phi live, and try to assign it to an input
+ // location.
+ for (Phi* phi : *block->phis()) {
+ phi->SetNoSpillOrHint();
+ TryAllocateToInput(phi);
+ }
+ // Secondly try to assign the phi to a free register.
+ for (Phi* phi : *block->phis()) {
+ if (phi->result().operand().IsAllocated()) continue;
+ compiler::InstructionOperand allocation = TryAllocateRegister(phi);
+ if (allocation.IsAllocated()) {
+ phi->result().SetAllocated(
+ compiler::AllocatedOperand::cast(allocation));
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (new reg) " << phi->result().operand() << std::endl;
+ }
+ }
+ }
+ // Finally just use a stack slot.
+ for (Phi* phi : *block->phis()) {
+ if (phi->result().operand().IsAllocated()) continue;
+ AllocateSpillSlot(phi);
+ // TODO(verwaest): Will this be used at all?
+ phi->result().SetAllocated(phi->spill_slot());
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (stack) " << phi->result().operand() << std::endl;
+ }
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+ printing_visitor_->os() << std::endl;
+ }
+ }
+
+ node_it_ = block->nodes().begin();
+ for (; node_it_ != block->nodes().end(); ++node_it_) {
+ AllocateNode(*node_it_);
+ }
+ AllocateControlNode(block->control_node(), block);
+ }
+}
+
+void StraightForwardRegisterAllocator::UpdateInputUse(uint32_t use,
+ const Input& input) {
+ ValueNode* node = input.node();
+
+ // The value was already cleared through a previous input.
+ if (node->is_dead()) return;
+
+ // Update the next use.
+ node->set_next_use(input.next_use_id());
+
+ // If a value is dead, make sure it's cleared.
+ if (node->is_dead()) {
+ FreeRegisters(node);
+
+ // If the stack slot is a local slot, free it so it can be reused.
+ if (node->is_spilled()) {
+ compiler::AllocatedOperand slot = node->spill_slot();
+ if (slot.index() > 0) free_slots_.push_back(slot.index());
+ }
+ return;
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
+ for (Input& input : *node) AssignInput(input);
+ AssignTemporaries(node);
+ for (Input& input : *node) UpdateInputUse(node->id(), input);
+
+ if (node->properties().is_call()) SpillAndClearRegisters();
+ // TODO(verwaest): This isn't a good idea :)
+ if (node->properties().can_deopt()) SpillRegisters();
+
+ // Allocate node output.
+ if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
+ nullptr));
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+ printing_visitor_->os() << "\n";
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
+ DCHECK(!node->Is<Phi>());
+
+ node->SetNoSpillOrHint();
+
+ compiler::UnallocatedOperand operand =
+ compiler::UnallocatedOperand::cast(node->result().operand());
+
+ if (operand.basic_policy() == compiler::UnallocatedOperand::FIXED_SLOT) {
+ DCHECK(node->Is<InitialValue>());
+ DCHECK_LT(operand.fixed_slot_index(), 0);
+ // Set the stack slot to exactly where the value is.
+ compiler::AllocatedOperand location(compiler::AllocatedOperand::STACK_SLOT,
+ MachineRepresentation::kTagged,
+ operand.fixed_slot_index());
+ node->result().SetAllocated(location);
+ node->Spill(location);
+ return;
+ }
+
+ switch (operand.extended_policy()) {
+ case compiler::UnallocatedOperand::FIXED_REGISTER: {
+ Register r = Register::from_code(operand.fixed_register_index());
+ node->result().SetAllocated(ForceAllocate(r, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
+ node->result().SetAllocated(AllocateRegister(node));
+ break;
+
+ case compiler::UnallocatedOperand::SAME_AS_INPUT: {
+ Input& input = node->input(operand.input_index());
+ Register r = input.AssignedRegister();
+ node->result().SetAllocated(ForceAllocate(r, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ case compiler::UnallocatedOperand::NONE:
+ case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
+ case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
+ UNREACHABLE();
+ }
+
+ // Immediately kill the register use if the node doesn't have a valid
+ // live-range.
+ // TODO(verwaest): Remove once we can avoid allocating such registers.
+ if (!node->has_valid_live_range() &&
+ node->result().operand().IsAnyRegister()) {
+ DCHECK(node->has_register());
+ FreeRegisters(node);
+ DCHECK(!node->has_register());
+ DCHECK(node->is_dead());
+ }
+}
+
+void StraightForwardRegisterAllocator::DropRegisterValue(Register reg) {
+ // The register should not already be free.
+ DCHECK(!free_registers_.has(reg));
+
+ ValueNode* node = GetRegisterValue(reg);
+
+ // Remove the register from the node's list.
+ node->RemoveRegister(reg);
+
+ // Return if the removed value already has another register or is spilled.
+ if (node->has_register() || node->is_spilled()) return;
+
+ // Try to move the value to another register.
+ if (free_registers_ != kEmptyRegList) {
+ Register target_reg = free_registers_.PopFirst();
+ SetRegister(target_reg, node);
+ // Emit a gapmove.
+ compiler::AllocatedOperand source(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ reg.code());
+ compiler::AllocatedOperand target(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ target_reg.code());
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "gap move: " << PrintNodeLabel(graph_labeller(), node) << ": "
+ << target << " ← " << source << std::endl;
+ }
+ AddMoveBeforeCurrentNode(source, target);
+ return;
+ }
+
+ // If all else fails, spill the value.
+ Spill(node);
+}
+
+void StraightForwardRegisterAllocator::InitializeConditionalBranchRegisters(
+ ConditionalControlNode* control_node, BasicBlock* target) {
+ if (target->is_empty_block()) {
+ // Jumping over an empty block, so we're in fact merging.
+ Jump* jump = target->control_node()->Cast<Jump>();
+ target = jump->target();
+ return MergeRegisterValues(control_node, target, jump->predecessor_id());
+ }
+ if (target->has_state()) {
+ // Not a fall-through branch, copy the state over.
+ return InitializeBranchTargetRegisterValues(control_node, target);
+ }
+ // Clear dead fall-through registers.
+ DCHECK_EQ(control_node->id() + 1, target->first_id());
+ RegList registers = used_registers();
+ while (registers != kEmptyRegList) {
+ Register reg = registers.PopFirst();
+ ValueNode* node = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(node, control_node, target)) {
+ FreeRegisters(node);
+ // Update the registers we're visiting to avoid revisiting this node.
+ registers.clear(free_registers_);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
+ BasicBlock* block) {
+ for (Input& input : *node) AssignInput(input);
+ AssignTemporaries(node);
+ for (Input& input : *node) UpdateInputUse(node->id(), input);
+
+ if (node->properties().is_call()) SpillAndClearRegisters();
+
+ // Inject allocation into target phis.
+ if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
+ BasicBlock* target = unconditional->target();
+ if (target->has_phi()) {
+ Phi::List* phis = target->phis();
+ for (Phi* phi : *phis) {
+ Input& input = phi->input(block->predecessor_id());
+ input.InjectAllocated(input.node()->allocation());
+ }
+ for (Phi* phi : *phis) {
+ UpdateInputUse(phi->id(), phi->input(block->predecessor_id()));
+ }
+ }
+ }
+
+ // TODO(verwaest): This isn't a good idea :)
+ if (node->properties().can_deopt()) SpillRegisters();
+
+ // Merge register values. Values only flowing into phis and not being
+ // independently live will be killed as part of the merge.
+ if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
+ // Empty blocks are immediately merged at the control of their predecessor.
+ if (!block->is_empty_block()) {
+ MergeRegisterValues(unconditional, unconditional->target(),
+ block->predecessor_id());
+ }
+ } else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
+ InitializeConditionalBranchRegisters(conditional, conditional->if_true());
+ InitializeConditionalBranchRegisters(conditional, conditional->if_false());
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
+ nullptr));
+ }
+}
+
+void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
+ // Try allocate phis to a register used by any of the inputs.
+ for (Input& input : *phi) {
+ if (input.operand().IsRegister()) {
+ Register reg = input.AssignedRegister();
+ if (free_registers_.has(reg)) {
+ phi->result().SetAllocated(ForceAllocate(reg, phi));
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (reuse) " << input.operand() << std::endl;
+ }
+ return;
+ }
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
+ compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
+ GapMove* gap_move =
+ Node::New<GapMove>(compilation_unit_->zone(), {}, source, target);
+ if (compilation_unit_->has_graph_labeller()) {
+ graph_labeller()->RegisterNode(gap_move);
+ }
+ if (*node_it_ == nullptr) {
+ // We're at the control node, so append instead.
+ (*block_it_)->nodes().Add(gap_move);
+ node_it_ = (*block_it_)->nodes().end();
+ } else {
+ DCHECK_NE(node_it_, (*block_it_)->nodes().end());
+ node_it_.InsertBefore(gap_move);
+ }
+}
+
+void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
+ if (node->is_spilled()) return;
+ AllocateSpillSlot(node);
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "spill: " << node->spill_slot() << " ← "
+ << PrintNodeLabel(graph_labeller(), node) << std::endl;
+ }
+}
+
+void StraightForwardRegisterAllocator::AssignInput(Input& input) {
+ compiler::UnallocatedOperand operand =
+ compiler::UnallocatedOperand::cast(input.operand());
+ ValueNode* node = input.node();
+ compiler::AllocatedOperand location = node->allocation();
+
+ switch (operand.extended_policy()) {
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ input.SetAllocated(location);
+ break;
+
+ case compiler::UnallocatedOperand::FIXED_REGISTER: {
+ Register reg = Register::from_code(operand.fixed_register_index());
+ input.SetAllocated(ForceAllocate(reg, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
+ if (location.IsAnyRegister()) {
+ input.SetAllocated(location);
+ } else {
+ input.SetAllocated(AllocateRegister(node));
+ }
+ break;
+
+ case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
+ case compiler::UnallocatedOperand::SAME_AS_INPUT:
+ case compiler::UnallocatedOperand::NONE:
+ case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
+ UNREACHABLE();
+ }
+
+ compiler::AllocatedOperand allocated =
+ compiler::AllocatedOperand::cast(input.operand());
+ if (location != allocated) {
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "gap move: " << allocated << " ← " << location << std::endl;
+ }
+ AddMoveBeforeCurrentNode(location, allocated);
+ }
+}
+
+void StraightForwardRegisterAllocator::SpillRegisters() {
+ for (Register reg : used_registers()) {
+ ValueNode* node = GetRegisterValue(reg);
+ Spill(node);
+ }
+}
+
+void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
+ while (used_registers() != kEmptyRegList) {
+ Register reg = used_registers().first();
+ ValueNode* node = GetRegisterValue(reg);
+ Spill(node);
+ FreeRegisters(node);
+ DCHECK(!used_registers().has(reg));
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
+ DCHECK(!node->is_spilled());
+ uint32_t free_slot;
+ if (free_slots_.empty()) {
+ free_slot = top_of_stack_++;
+ } else {
+ free_slot = free_slots_.back();
+ free_slots_.pop_back();
+ }
+ node->Spill(compiler::AllocatedOperand(compiler::AllocatedOperand::STACK_SLOT,
+ MachineRepresentation::kTagged,
+ free_slot));
+}
+
+void StraightForwardRegisterAllocator::FreeSomeRegister() {
+ int furthest_use = 0;
+ Register best = Register::no_reg();
+ for (Register reg : used_registers()) {
+ ValueNode* value = GetRegisterValue(reg);
+ // The cheapest register to clear is a register containing a value that's
+ // contained in another register as well.
+ if (value->num_registers() > 1) {
+ best = reg;
+ break;
+ }
+ int use = value->next_use();
+ if (use > furthest_use) {
+ furthest_use = use;
+ best = reg;
+ }
+ }
+ DCHECK(best.is_valid());
+ FreeRegister(best);
+}
+
+compiler::AllocatedOperand StraightForwardRegisterAllocator::AllocateRegister(
+ ValueNode* node) {
+ if (free_registers_ == kEmptyRegList) FreeSomeRegister();
+ compiler::InstructionOperand allocation = TryAllocateRegister(node);
+ DCHECK(allocation.IsAllocated());
+ return compiler::AllocatedOperand::cast(allocation);
+}
+
+compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
+ Register reg, ValueNode* node) {
+ if (free_registers_.has(reg)) {
+ // If it's already free, remove it from the free list.
+ free_registers_.clear(reg);
+ } else if (GetRegisterValue(reg) == node) {
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ reg.code());
+ } else {
+ DropRegisterValue(reg);
+ }
+#ifdef DEBUG
+ DCHECK(!free_registers_.has(reg));
+#endif
+ SetRegister(reg, node);
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged, reg.code());
+}
+
+void StraightForwardRegisterAllocator::SetRegister(Register reg,
+ ValueNode* node) {
+ DCHECK(!free_registers_.has(reg));
+ register_values_[reg.code()] = node;
+ node->AddRegister(reg);
+}
+
+compiler::InstructionOperand
+StraightForwardRegisterAllocator::TryAllocateRegister(ValueNode* node) {
+ if (free_registers_ == kEmptyRegList) return compiler::InstructionOperand();
+ Register reg = free_registers_.PopFirst();
+
+ // Allocation succeeded. This might have found an existing allocation.
+ // Simply update the state anyway.
+ SetRegister(reg, node);
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged, reg.code());
+}
+
+void StraightForwardRegisterAllocator::AssignTemporaries(NodeBase* node) {
+ int num_temporaries_needed = node->num_temporaries_needed();
+ int num_free_registers = free_registers_.Count();
+
+ // Free extra registers if necessary.
+ for (int i = num_free_registers; i < num_temporaries_needed; ++i) {
+ FreeSomeRegister();
+ }
+
+ DCHECK_GE(free_registers_.Count(), num_temporaries_needed);
+ node->assign_temporaries(free_registers_);
+}
+
+void StraightForwardRegisterAllocator::InitializeRegisterValues(
+ MergePointRegisterState& target_state) {
+ // First clear the register state.
+ while (used_registers() != kEmptyRegList) {
+ Register reg = used_registers().first();
+ ValueNode* node = GetRegisterValue(reg);
+ FreeRegisters(node);
+ DCHECK(!used_registers().has(reg));
+ }
+
+ // All registers should be free by now.
+ DCHECK_EQ(free_registers_, kAllocatableGeneralRegisters);
+
+ // Then fill it in with target information.
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+ if (node != nullptr) {
+ free_registers_.clear(reg);
+ SetRegister(reg, node);
+ } else {
+ DCHECK(!entry.state.GetPayload().is_merge);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::EnsureInRegister(
+ MergePointRegisterState& target_state, ValueNode* incoming) {
+#ifdef DEBUG
+ bool found = false;
+ for (auto entry : target_state) {
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+ if (node == incoming) found = true;
+ }
+ DCHECK(found);
+#endif
+}
+
+void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
+ ControlNode* source, BasicBlock* target) {
+ MergePointRegisterState& target_state = target->state()->register_state();
+ DCHECK(!target_state.is_initialized());
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+ ValueNode* node = nullptr;
+ if (!free_registers_.has(reg)) {
+ node = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(node, source, target)) node = nullptr;
+ }
+ entry.state = {node, initialized_node};
+ }
+}
+
+void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
+ BasicBlock* target,
+ int predecessor_id) {
+ MergePointRegisterState& target_state = target->state()->register_state();
+ if (!target_state.is_initialized()) {
+ // This is the first block we're merging, initialize the values.
+ return InitializeBranchTargetRegisterValues(control, target);
+ }
+
+ int predecessor_count = target->state()->predecessor_count();
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+
+ compiler::AllocatedOperand register_info = {
+ compiler::LocationOperand::REGISTER, MachineRepresentation::kTagged,
+ reg.code()};
+
+ ValueNode* incoming = nullptr;
+ if (!free_registers_.has(reg)) {
+ incoming = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(incoming, control, target)) {
+ incoming = nullptr;
+ }
+ }
+
+ if (incoming == node) {
+ // We're using the same register as the target already has. If registers
+ // are merged, add input information.
+ if (merge) merge->operand(predecessor_id) = register_info;
+ continue;
+ }
+
+ if (merge) {
+ // The register is already occupied with a different node. Figure out
+ // where that node is allocated on the incoming branch.
+ merge->operand(predecessor_id) = node->allocation();
+
+ // If there's a value in the incoming state, that value is either
+ // already spilled or in another place in the merge state.
+ if (incoming != nullptr && incoming->is_spilled()) {
+ EnsureInRegister(target_state, incoming);
+ }
+ continue;
+ }
+
+ DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
+ if (node == nullptr && !incoming->is_spilled()) {
+ // If the register is unallocated at the merge point, and the incoming
+ // value isn't spilled, that means we must have seen it already in a
+ // different register.
+ EnsureInRegister(target_state, incoming);
+ continue;
+ }
+
+ const size_t size = sizeof(RegisterMerge) +
+ predecessor_count * sizeof(compiler::AllocatedOperand);
+ void* buffer = compilation_unit_->zone()->Allocate<void*>(size);
+ merge = new (buffer) RegisterMerge();
+ merge->node = node == nullptr ? incoming : node;
+
+ // If the register is unallocated at the merge point, allocation so far
+ // is the spill slot for the incoming value. Otherwise all incoming
+ // branches agree that the current node is in the register info.
+ compiler::AllocatedOperand info_so_far =
+ node == nullptr
+ ? compiler::AllocatedOperand::cast(incoming->spill_slot())
+ : register_info;
+
+ // Initialize the entire array with info_so_far since we don't know in
+ // which order we've seen the predecessors so far. Predecessors we
+ // haven't seen yet will simply overwrite their entry later.
+ for (int i = 0; i < predecessor_count; i++) {
+ merge->operand(i) = info_so_far;
+ }
+ // If the register is unallocated at the merge point, fill in the
+ // incoming value. Otherwise find the merge-point node in the incoming
+ // state.
+ if (node == nullptr) {
+ merge->operand(predecessor_id) = register_info;
+ } else {
+ merge->operand(predecessor_id) = node->allocation();
+ }
+ entry.state = {merge, initialized_merge};
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
new file mode 100644
index 0000000000..c198d2f8fc
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -0,0 +1,112 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGALLOC_H_
+#define V8_MAGLEV_MAGLEV_REGALLOC_H_
+
+#include "src/codegen/reglist.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCompilationUnit;
+class MaglevPrintingVisitor;
+class MergePointRegisterState;
+
+class StraightForwardRegisterAllocator {
+ public:
+ StraightForwardRegisterAllocator(MaglevCompilationUnit* compilation_unit,
+ Graph* graph);
+ ~StraightForwardRegisterAllocator();
+
+ int stack_slots() const { return top_of_stack_; }
+
+ private:
+ std::vector<int> future_register_uses_[Register::kNumRegisters];
+ ValueNode* register_values_[Register::kNumRegisters];
+
+ int top_of_stack_ = 0;
+ RegList free_registers_ = kAllocatableGeneralRegisters;
+ std::vector<uint32_t> free_slots_;
+
+ RegList used_registers() const {
+ // Only allocatable registers should be free.
+ DCHECK_EQ(free_registers_, free_registers_ & kAllocatableGeneralRegisters);
+ return kAllocatableGeneralRegisters ^ free_registers_;
+ }
+
+ void ComputePostDominatingHoles(Graph* graph);
+ void AllocateRegisters(Graph* graph);
+
+ void PrintLiveRegs() const;
+
+ void UpdateInputUse(uint32_t use, const Input& input);
+
+ void AllocateControlNode(ControlNode* node, BasicBlock* block);
+ void AllocateNode(Node* node);
+ void AllocateNodeResult(ValueNode* node);
+ void AssignInput(Input& input);
+ void AssignTemporaries(NodeBase* node);
+ void TryAllocateToInput(Phi* phi);
+
+ void FreeRegisters(ValueNode* node) {
+ RegList list = node->ClearRegisters();
+ DCHECK_EQ(free_registers_ & list, kEmptyRegList);
+ free_registers_ |= list;
+ }
+ void FreeRegister(Register reg) { free_registers_.set(reg); }
+
+ ValueNode* GetRegisterValue(Register reg) const {
+ DCHECK(!free_registers_.has(reg));
+ ValueNode* node = register_values_[reg.code()];
+ DCHECK_NOT_NULL(node);
+ return node;
+ }
+
+ void FreeSomeRegister();
+ void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target);
+
+ void AllocateSpillSlot(ValueNode* node);
+ void Spill(ValueNode* node);
+ void SpillAndClearRegisters();
+ void SpillRegisters();
+
+ compiler::AllocatedOperand AllocateRegister(ValueNode* node);
+ compiler::AllocatedOperand ForceAllocate(Register reg, ValueNode* node);
+ void SetRegister(Register reg, ValueNode* node);
+ void DropRegisterValue(Register reg);
+ compiler::InstructionOperand TryAllocateRegister(ValueNode* node);
+
+ void InitializeRegisterValues(MergePointRegisterState& target_state);
+ void EnsureInRegister(MergePointRegisterState& target_state,
+ ValueNode* incoming);
+
+ void InitializeBranchTargetRegisterValues(ControlNode* source,
+ BasicBlock* target);
+ void InitializeConditionalBranchRegisters(ConditionalControlNode* source,
+ BasicBlock* target);
+ void MergeRegisterValues(ControlNode* control, BasicBlock* target,
+ int predecessor_id);
+
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ MaglevCompilationUnit* compilation_unit_;
+ std::unique_ptr<MaglevPrintingVisitor> printing_visitor_;
+ BlockConstIterator block_it_;
+ NodeIterator node_it_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGALLOC_H_
diff --git a/deps/v8/src/maglev/maglev-register-frame-array.h b/deps/v8/src/maglev/maglev-register-frame-array.h
new file mode 100644
index 0000000000..c3032533f6
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-register-frame-array.h
@@ -0,0 +1,113 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
+#define V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
+
+#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+// Vector of values associated with a bytecode's register frame. Indexable by
+// interpreter register.
+template <typename T>
+class RegisterFrameArray {
+ public:
+ explicit RegisterFrameArray(const MaglevCompilationUnit& info) {
+ // The first local is at index zero, parameters are behind it with
+ // negative indices, and the unoptimized frame header is between the two,
+ // so the entire frame state including parameters is the distance from the
+ // last parameter to the last local frame register, plus one to include both
+ // ends.
+ interpreter::Register last_local =
+ interpreter::Register(info.register_count() - 1);
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
+ DCHECK_LT(last_param.index(), 0);
+ T* frame =
+ info.zone()->NewArray<T>(last_local.index() - last_param.index() + 1);
+
+ // Set frame_start_ to a "butterfly" pointer into the middle of the above
+ // Zone-allocated array. Parameters are at a negative index, so we have to
+ // subtract it from the above frame pointer.
+ frame_start_ = frame - last_param.index();
+ }
+
+ // Disallow copy (use CopyFrom instead).
+ RegisterFrameArray(const RegisterFrameArray& other) V8_NOEXCEPT = delete;
+ RegisterFrameArray& operator=(const RegisterFrameArray& other)
+ V8_NOEXCEPT = delete;
+
+ // Allow move.
+ RegisterFrameArray(RegisterFrameArray&& other) V8_NOEXCEPT = default;
+ RegisterFrameArray& operator=(RegisterFrameArray&& other)
+ V8_NOEXCEPT = default;
+
+ void CopyFrom(const MaglevCompilationUnit& info,
+ const RegisterFrameArray& other,
+ const compiler::BytecodeLivenessState* liveness) {
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
+ int end = 1;
+ if (!liveness) {
+ interpreter::Register last_local =
+ interpreter::Register(info.register_count() - 1);
+ end = last_local.index();
+ }
+ // All parameters are live.
+ for (int index = last_param.index(); index <= end; ++index) {
+ interpreter::Register reg(index);
+ (*this)[reg] = other[reg];
+ }
+ if (liveness) {
+ for (int index : *liveness) {
+ interpreter::Register reg(index);
+ (*this)[reg] = other[reg];
+ }
+ }
+ }
+
+ T& operator[](interpreter::Register reg) { return frame_start_[reg.index()]; }
+
+ const T& operator[](interpreter::Register reg) const {
+ return frame_start_[reg.index()];
+ }
+
+ private:
+ static int DataSize(int register_count, int parameter_count) {
+ // The first local is at index zero, parameters are behind it with
+ // negative indices, and the unoptimized frame header is between the two,
+ // so the entire frame state including parameters is the distance from the
+ // last parameter to the last local frame register, plus one to include both
+ // ends.
+ interpreter::Register last_local =
+ interpreter::Register(register_count - 1);
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(parameter_count - 1);
+ return last_local.index() - last_param.index() + 1;
+ }
+
+ T* data_begin(int parameter_count) const {
+ return frame_start_ +
+ interpreter::Register::FromParameterIndex(parameter_count - 1)
+ .index();
+ }
+
+ // Butterfly pointer for registers, pointing into the middle of a
+ // Zone-allocated Node array.
+ // |
+ // v
+ // [Parameters] [Unoptimized Frame Header] [Locals]
+ T* frame_start_ = nullptr;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
diff --git a/deps/v8/src/maglev/maglev-vreg-allocator.h b/deps/v8/src/maglev/maglev-vreg-allocator.h
new file mode 100644
index 0000000000..19d5517f70
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-vreg-allocator.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
+#define V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class ProcessingState;
+
+class MaglevVregAllocationState {
+ public:
+ int AllocateVirtualRegister() { return next_virtual_register_++; }
+ int num_allocated_registers() const { return next_virtual_register_; }
+
+ private:
+ int next_virtual_register_ = 0;
+};
+
+class MaglevVregAllocator {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ for (BasicBlock* block : *graph) {
+ if (!block->has_phi()) continue;
+ for (Phi* phi : *block->phis()) {
+ phi->AllocateVregInPostProcess(&state_);
+ }
+ }
+ }
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+#define DEF_PROCESS_NODE(NAME) \
+ void Process(NAME* node, const ProcessingState& state) { \
+ node->AllocateVreg(&state_, state); \
+ }
+ NODE_BASE_LIST(DEF_PROCESS_NODE)
+#undef DEF_PROCESS_NODE
+
+ private:
+ MaglevVregAllocationState state_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
new file mode 100644
index 0000000000..6397d02e60
--- /dev/null
+++ b/deps/v8/src/maglev/maglev.cc
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev.h"
+
+#include "src/common/globals.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
+ Handle<JSFunction> function) {
+ DCHECK(FLAG_maglev);
+ auto info = maglev::MaglevCompilationInfo::New(isolate, function);
+ maglev::MaglevCompilationUnit* const unit = info->toplevel_compilation_unit();
+ maglev::MaglevCompiler::Compile(unit);
+ return maglev::MaglevCompiler::GenerateCode(unit);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev.h b/deps/v8/src/maglev/maglev.h
new file mode 100644
index 0000000000..e55df23b15
--- /dev/null
+++ b/deps/v8/src/maglev/maglev.h
@@ -0,0 +1,28 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_H_
+#define V8_MAGLEV_MAGLEV_H_
+
+#ifdef V8_ENABLE_MAGLEV
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class JSFunction;
+
+class Maglev : public AllStatic {
+ public:
+ static MaybeHandle<CodeT> Compile(Isolate* isolate,
+ Handle<JSFunction> function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ENABLE_MAGLEV
+#endif // V8_MAGLEV_MAGLEV_H_
diff --git a/deps/v8/src/numbers/conversions-inl.h b/deps/v8/src/numbers/conversions-inl.h
index 5eddfe8abe..8bfeb4787d 100644
--- a/deps/v8/src/numbers/conversions-inl.h
+++ b/deps/v8/src/numbers/conversions-inl.h
@@ -193,12 +193,12 @@ bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
int32_t NumberToInt32(Object number) {
if (number.IsSmi()) return Smi::ToInt(number);
- return DoubleToInt32(number.Number());
+ return DoubleToInt32(HeapNumber::cast(number).value());
}
uint32_t NumberToUint32(Object number) {
if (number.IsSmi()) return Smi::ToInt(number);
- return DoubleToUint32(number.Number());
+ return DoubleToUint32(HeapNumber::cast(number).value());
}
uint32_t PositiveNumberToUint32(Object number) {
@@ -207,8 +207,7 @@ uint32_t PositiveNumberToUint32(Object number) {
if (value <= 0) return 0;
return value;
}
- DCHECK(number.IsHeapNumber());
- double value = number.Number();
+ double value = HeapNumber::cast(number).value();
// Catch all values smaller than 1 and use the double-negation trick for NANs.
if (!(value >= 1)) return 0;
uint32_t max = std::numeric_limits<uint32_t>::max();
@@ -218,7 +217,7 @@ uint32_t PositiveNumberToUint32(Object number) {
int64_t NumberToInt64(Object number) {
if (number.IsSmi()) return Smi::ToInt(number);
- double d = number.Number();
+ double d = HeapNumber::cast(number).value();
if (std::isnan(d)) return 0;
if (d >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
return std::numeric_limits<int64_t>::max();
@@ -235,8 +234,7 @@ uint64_t PositiveNumberToUint64(Object number) {
if (value <= 0) return 0;
return value;
}
- DCHECK(number.IsHeapNumber());
- double value = number.Number();
+ double value = HeapNumber::cast(number).value();
// Catch all values smaller than 1 and use the double-negation trick for NANs.
if (!(value >= 1)) return 0;
uint64_t max = std::numeric_limits<uint64_t>::max();
@@ -257,7 +255,6 @@ bool TryNumberToSize(Object number, size_t* result) {
}
return false;
} else {
- DCHECK(number.IsHeapNumber());
double value = HeapNumber::cast(number).value();
// If value is compared directly to the limit, the limit will be
// casted to a double and could end up as limit + 1,
diff --git a/deps/v8/src/numbers/integer-literal-inl.h b/deps/v8/src/numbers/integer-literal-inl.h
new file mode 100644
index 0000000000..561f9880de
--- /dev/null
+++ b/deps/v8/src/numbers/integer-literal-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_NUMBERS_INTEGER_LITERAL_INL_H_
+#define V8_NUMBERS_INTEGER_LITERAL_INL_H_
+
+#include "src/numbers/integer-literal.h"
+
+namespace v8 {
+namespace internal {
+
+inline std::string IntegerLiteral::ToString() const {
+ if (negative_) return std::string("-") + std::to_string(absolute_value_);
+ return std::to_string(absolute_value_);
+}
+
+inline IntegerLiteral operator<<(const IntegerLiteral& x,
+ const IntegerLiteral& y) {
+ DCHECK(!y.is_negative());
+ DCHECK_LT(y.absolute_value(), sizeof(uint64_t) * kBitsPerByte);
+ return IntegerLiteral(x.is_negative(), x.absolute_value()
+ << y.absolute_value());
+}
+
+inline IntegerLiteral operator+(const IntegerLiteral& x,
+ const IntegerLiteral& y) {
+ if (x.is_negative() == y.is_negative()) {
+ DCHECK_GE(x.absolute_value() + y.absolute_value(), x.absolute_value());
+ return IntegerLiteral(x.is_negative(),
+ x.absolute_value() + y.absolute_value());
+ }
+ if (x.absolute_value() >= y.absolute_value()) {
+ return IntegerLiteral(x.is_negative(),
+ x.absolute_value() - y.absolute_value());
+ }
+ return IntegerLiteral(!x.is_negative(),
+ y.absolute_value() - x.absolute_value());
+}
+
+} // namespace internal
+} // namespace v8
+#endif // V8_NUMBERS_INTEGER_LITERAL_INL_H_
diff --git a/deps/v8/src/numbers/integer-literal.h b/deps/v8/src/numbers/integer-literal.h
new file mode 100644
index 0000000000..5ac3ae76ee
--- /dev/null
+++ b/deps/v8/src/numbers/integer-literal.h
@@ -0,0 +1,106 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_NUMBERS_INTEGER_LITERAL_H_
+#define V8_NUMBERS_INTEGER_LITERAL_H_
+
+#include "src/base/optional.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class IntegerLiteral {
+ public:
+ IntegerLiteral(bool negative, uint64_t absolute_value)
+ : negative_(negative), absolute_value_(absolute_value) {
+ if (absolute_value == 0) negative_ = false;
+ }
+
+ template <typename T>
+ explicit IntegerLiteral(T value) : IntegerLiteral(value, true) {}
+
+ bool is_negative() const { return negative_; }
+ uint64_t absolute_value() const { return absolute_value_; }
+
+ template <typename T>
+ bool IsRepresentableAs() const {
+ static_assert(std::is_integral<T>::value, "Integral type required");
+ static_assert(sizeof(T) <= sizeof(uint64_t),
+ "Types with more than 64 bits are not supported");
+ return Compare(IntegerLiteral(std::numeric_limits<T>::min(), false)) >= 0 &&
+ Compare(IntegerLiteral(std::numeric_limits<T>::max(), false)) <= 0;
+ }
+
+ template <typename T>
+ T To() const {
+ static_assert(std::is_integral<T>::value, "Integral type required");
+ DCHECK(IsRepresentableAs<T>());
+ uint64_t v = absolute_value_;
+ if (negative_) v = ~v + 1;
+ return static_cast<T>(v);
+ }
+
+ template <typename T>
+ base::Optional<T> TryTo() const {
+ static_assert(std::is_integral<T>::value, "Integral type required");
+ if (!IsRepresentableAs<T>()) return base::nullopt;
+ return To<T>();
+ }
+
+ int Compare(const IntegerLiteral& other) const {
+ if (absolute_value_ == other.absolute_value_) {
+ if (absolute_value_ == 0 || negative_ == other.negative_) return 0;
+ return negative_ ? -1 : 1;
+ } else if (absolute_value_ < other.absolute_value_) {
+ return other.negative_ ? 1 : -1;
+ } else {
+ return negative_ ? -1 : 1;
+ }
+ }
+
+ std::string ToString() const;
+
+ private:
+ template <typename T>
+ explicit IntegerLiteral(T value, bool perform_dcheck) : negative_(false) {
+ static_assert(std::is_integral<T>::value, "Integral type required");
+ absolute_value_ = static_cast<uint64_t>(value);
+ if (value < T(0)) {
+ negative_ = true;
+ absolute_value_ = ~absolute_value_ + 1;
+ }
+ if (perform_dcheck) DCHECK_EQ(To<T>(), value);
+ }
+
+ bool negative_;
+ uint64_t absolute_value_;
+};
+
+inline bool operator==(const IntegerLiteral& x, const IntegerLiteral& y) {
+ return x.Compare(y) == 0;
+}
+
+inline bool operator!=(const IntegerLiteral& x, const IntegerLiteral& y) {
+ return x.Compare(y) != 0;
+}
+
+inline std::ostream& operator<<(std::ostream& stream,
+ const IntegerLiteral& literal) {
+ return stream << literal.ToString();
+}
+
+inline IntegerLiteral operator|(const IntegerLiteral& x,
+ const IntegerLiteral& y) {
+ DCHECK(!x.is_negative());
+ DCHECK(!y.is_negative());
+ return IntegerLiteral(false, x.absolute_value() | y.absolute_value());
+}
+
+IntegerLiteral operator<<(const IntegerLiteral& x, const IntegerLiteral& y);
+IntegerLiteral operator+(const IntegerLiteral& x, const IntegerLiteral& y);
+
+} // namespace internal
+} // namespace v8
+#endif // V8_NUMBERS_INTEGER_LITERAL_H_
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
index 07e6fe44f0..0f894f9669 100644
--- a/deps/v8/src/objects/all-objects-inl.h
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -14,6 +14,7 @@
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint-inl.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-table-inl.h"
@@ -46,6 +47,8 @@
#include "src/objects/js-proxy-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
+#include "src/objects/js-shadow-realms-inl.h"
+#include "src/objects/js-struct-inl.h"
#include "src/objects/js-temporal-objects-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
@@ -72,7 +75,6 @@
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/string-table-inl.h"
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 5dca72929a..605f5aa042 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -10,7 +10,7 @@
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/sandbox.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/trap-handler/trap-handler.h"
@@ -55,13 +55,15 @@ enum class AllocationStatus {
kOtherFailure // Failed for an unknown reason
};
-// Attempts to allocate memory inside the virtual memory cage currently fall
-// back to allocating memory outside of the cage if necessary. Once this
-// fallback is no longer allowed/possible, these cases will become allocation
-// failures instead. To track the frequency of such events, the outcome of
-// memory allocation attempts inside the cage is reported to UMA.
+// Attempts to allocate memory inside the sandbox currently fall back to
+// allocating memory outside of the sandbox if necessary. Once this fallback is
+// no longer allowed/possible, these cases will become allocation failures
+// instead. To track the frequency of such events, the outcome of memory
+// allocation attempts inside the sandbox is reported to UMA.
//
// See caged_memory_allocation_outcome in counters-definitions.h
+// This class and the entry in counters-definitions.h use the term "cage"
+// instead of "sandbox" for historical reasons.
enum class CagedMemoryAllocationOutcome {
kSuccess, // Allocation succeeded inside the cage
kOutsideCage, // Allocation failed inside the cage but succeeded outside
@@ -107,18 +109,17 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
static_cast<int>(status));
}
-// When the virtual memory cage is active, this function records the outcome of
-// attempts to allocate memory inside the cage which fall back to allocating
-// memory outside of the cage. Passing a value of nullptr for the result
-// indicates that the memory could not be allocated at all.
-void RecordCagedMemoryAllocationResult(Isolate* isolate, void* result) {
- // This metric is only meaningful when the virtual memory cage is active.
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (GetProcessWideVirtualMemoryCage()->is_initialized()) {
+// When the sandbox is active, this function records the outcome of attempts to
+// allocate memory inside the sandbox which fall back to allocating memory
+// outside of the sandbox. Passing a value of nullptr for the result indicates
+// that the memory could not be allocated at all.
+void RecordSandboxMemoryAllocationResult(Isolate* isolate, void* result) {
+ // This metric is only meaningful when the sandbox is active.
+#ifdef V8_SANDBOX
+ if (GetProcessWideSandbox()->is_initialized()) {
CagedMemoryAllocationOutcome outcome;
if (result) {
- bool allocation_in_cage =
- GetProcessWideVirtualMemoryCage()->Contains(result);
+ bool allocation_in_cage = GetProcessWideSandbox()->Contains(result);
outcome = allocation_in_cage ? CagedMemoryAllocationOutcome::kSuccess
: CagedMemoryAllocationOutcome::kOutsideCage;
} else {
@@ -210,11 +211,11 @@ BackingStore::~BackingStore() {
// TODO(saelo) here and elsewhere in this file, replace with
// GetArrayBufferPageAllocator once the fallback to the platform page
// allocator is no longer allowed.
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
- page_allocator = GetVirtualMemoryCagePageAllocator();
+#ifdef V8_SANDBOX
+ if (GetProcessWideSandbox()->Contains(buffer_start_)) {
+ page_allocator = GetSandboxPageAllocator();
} else {
- DCHECK(kAllowBackingStoresOutsideCage);
+ DCHECK(kAllowBackingStoresOutsideSandbox);
}
#endif
@@ -240,11 +241,10 @@ BackingStore::~BackingStore() {
auto region =
GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
- bool pages_were_freed =
- region.size() == 0 /* no need to free any pages */ ||
- FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
- region.size());
- CHECK(pages_were_freed);
+ if (!region.is_empty()) {
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
+ }
Clear();
return;
}
@@ -256,11 +256,10 @@ BackingStore::~BackingStore() {
auto region =
GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
- bool pages_were_freed =
- region.size() == 0 /* no need to free any pages */ ||
- FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
- region.size());
- CHECK(pages_were_freed);
+ if (!region.is_empty()) {
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
+ }
Clear();
return;
}
@@ -327,7 +326,6 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
}
}
- DCHECK(IsValidBackingStorePointer(buffer_start));
auto result = new BackingStore(buffer_start, // start
byte_length, // length
byte_length, // max length
@@ -428,18 +426,18 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
void* allocation_base = nullptr;
PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- page_allocator = GetVirtualMemoryCagePageAllocator();
+#ifdef V8_SANDBOX
+ page_allocator = GetSandboxPageAllocator();
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
if (allocation_base) return true;
// We currently still allow falling back to the platform page allocator if
- // the cage page allocator fails. This will eventually be removed.
+ // the sandbox page allocator fails. This will eventually be removed.
// TODO(chromium:1218005) once we forbid the fallback, we should have a
// single API, e.g. GetArrayBufferPageAllocator(), that returns the correct
- // page allocator to use here depending on whether the virtual memory cage
- // is enabled or not.
- if (!kAllowBackingStoresOutsideCage) return false;
+ // page allocator to use here depending on whether the sandbox is enabled
+ // or not.
+ if (!kAllowBackingStoresOutsideSandbox) return false;
page_allocator = GetPlatformPageAllocator();
#endif
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
@@ -449,13 +447,11 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
if (!gc_retry(allocate_pages)) {
// Page allocator could not reserve enough pages.
RecordStatus(isolate, AllocationStatus::kOtherFailure);
- RecordCagedMemoryAllocationResult(isolate, nullptr);
+ RecordSandboxMemoryAllocationResult(isolate, nullptr);
TRACE_BS("BSw:try failed to allocate pages\n");
return {};
}
- DCHECK(IsValidBackingStorePointer(allocation_base));
-
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
#if V8_ENABLE_WEBASSEMBLY
@@ -478,15 +474,17 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
committed_byte_length);
+ FreePages(page_allocator, allocation_base, reservation_size);
// SetPermissions put us over the process memory limit.
- V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateMemory()");
+ // We return an empty result so that the caller can throw an exception.
+ return {};
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
- RecordCagedMemoryAllocationResult(isolate, allocation_base);
+ RecordSandboxMemoryAllocationResult(isolate, allocation_base);
ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
@@ -544,13 +542,13 @@ std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
}
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
- size_t new_pages) {
+ size_t new_pages,
+ size_t max_pages) {
// Note that we could allocate uninitialized to save initialization cost here,
// but since Wasm memories are allocated by the page allocator, the zeroing
// cost is already built-in.
- // TODO(titzer): should we use a suitable maximum here?
auto new_backing_store = BackingStore::AllocateWasmMemory(
- isolate, new_pages, new_pages,
+ isolate, new_pages, max_pages,
is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);
if (!new_backing_store ||
@@ -752,7 +750,6 @@ BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
- DCHECK(IsValidBackingStorePointer(allocation_base));
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
allocation_length, // max length
@@ -774,7 +771,6 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
void* allocation_base, size_t allocation_length,
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
- DCHECK(IsValidBackingStorePointer(allocation_base));
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index beaa9e8f30..6fb1e5fc43 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -122,7 +122,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Allocate a new, larger, backing store for this Wasm memory and copy the
// contents of this backing store into it.
std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate,
- size_t new_pages);
+ size_t new_pages,
+ size_t max_pages);
// Attach the given memory object to this backing store. The memory object
// will be updated if this backing store is grown.
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index aa9ff9d30b..869f0edaf6 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -1034,7 +1034,7 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
if (obj->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, obj,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(obj),
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(obj),
ToPrimitiveHint::kNumber),
BigInt);
}
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/call-site-info-inl.h
index 66fc551997..b0e78af81f 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/call-site-info-inl.h
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_STACK_FRAME_INFO_INL_H_
-#define V8_OBJECTS_STACK_FRAME_INFO_INL_H_
-
-#include "src/objects/stack-frame-info.h"
+#ifndef V8_OBJECTS_CALL_SITE_INFO_INL_H_
+#define V8_OBJECTS_CALL_SITE_INFO_INL_H_
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/call-site-info.h"
#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
@@ -17,22 +16,22 @@
namespace v8 {
namespace internal {
-#include "torque-generated/src/objects/stack-frame-info-tq-inl.inc"
+#include "torque-generated/src/objects/call-site-info-tq-inl.inc"
-TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
-NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
+TQ_OBJECT_CONSTRUCTORS_IMPL(CallSiteInfo)
+NEVER_READ_ONLY_SPACE_IMPL(CallSiteInfo)
#if V8_ENABLE_WEBASSEMBLY
-BOOL_GETTER(StackFrameInfo, flags, IsWasm, IsWasmBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsAsmJsWasm, IsAsmJsWasmBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsAsmJsAtNumberConversion,
+BOOL_GETTER(CallSiteInfo, flags, IsWasm, IsWasmBit::kShift)
+BOOL_GETTER(CallSiteInfo, flags, IsAsmJsWasm, IsAsmJsWasmBit::kShift)
+BOOL_GETTER(CallSiteInfo, flags, IsAsmJsAtNumberConversion,
IsAsmJsAtNumberConversionBit::kShift)
#endif // V8_ENABLE_WEBASSEMBLY
-BOOL_GETTER(StackFrameInfo, flags, IsStrict, IsStrictBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsConstructor, IsConstructorBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsAsync, IsAsyncBit::kShift)
+BOOL_GETTER(CallSiteInfo, flags, IsStrict, IsStrictBit::kShift)
+BOOL_GETTER(CallSiteInfo, flags, IsConstructor, IsConstructorBit::kShift)
+BOOL_GETTER(CallSiteInfo, flags, IsAsync, IsAsyncBit::kShift)
-DEF_GETTER(StackFrameInfo, code_object, HeapObject) {
+DEF_GETTER(CallSiteInfo, code_object, HeapObject) {
HeapObject value = TorqueGeneratedClass::code_object(cage_base);
// The |code_object| field can contain many types of objects, but only CodeT
// values have to be converted to Code.
@@ -42,7 +41,7 @@ DEF_GETTER(StackFrameInfo, code_object, HeapObject) {
return value;
}
-void StackFrameInfo::set_code_object(HeapObject code, WriteBarrierMode mode) {
+void CallSiteInfo::set_code_object(HeapObject code, WriteBarrierMode mode) {
// The |code_object| field can contain many types of objects, but only Code
// values have to be converted to CodeT.
if (V8_EXTERNAL_CODE_SPACE_BOOL && code.IsCode()) {
@@ -57,4 +56,4 @@ void StackFrameInfo::set_code_object(HeapObject code, WriteBarrierMode mode) {
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_STACK_FRAME_INFO_INL_H_
+#endif // V8_OBJECTS_CALL_SITE_INFO_INL_H_
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/call-site-info.cc
index 62f97afd19..75abced310 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/call-site-info.cc
@@ -2,63 +2,67 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects/stack-frame-info.h"
+#include "src/objects/call-site-info.h"
#include "src/base/strings.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/shared-function-info.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/strings/string-builder-inl.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
-bool StackFrameInfo::IsPromiseAll() const {
+bool CallSiteInfo::IsPromiseAll() const {
if (!IsAsync()) return false;
JSFunction fun = JSFunction::cast(function());
return fun == fun.native_context().promise_all();
}
-bool StackFrameInfo::IsPromiseAny() const {
+bool CallSiteInfo::IsPromiseAny() const {
if (!IsAsync()) return false;
JSFunction fun = JSFunction::cast(function());
return fun == fun.native_context().promise_any();
}
-bool StackFrameInfo::IsNative() const {
+bool CallSiteInfo::IsNative() const {
if (auto script = GetScript()) {
return script->type() == Script::TYPE_NATIVE;
}
return false;
}
-bool StackFrameInfo::IsEval() const {
+bool CallSiteInfo::IsEval() const {
if (auto script = GetScript()) {
return script->compilation_type() == Script::COMPILATION_TYPE_EVAL;
}
return false;
}
-bool StackFrameInfo::IsUserJavaScript() const {
+bool CallSiteInfo::IsUserJavaScript() const {
#if V8_ENABLE_WEBASSEMBLY
if (IsWasm()) return false;
#endif // V8_ENABLE_WEBASSEMBLY
return GetSharedFunctionInfo().IsUserJavaScript();
}
-bool StackFrameInfo::IsMethodCall() const {
+bool CallSiteInfo::IsMethodCall() const {
#if V8_ENABLE_WEBASSEMBLY
if (IsWasm()) return false;
#endif // V8_ENABLE_WEBASSEMBLY
return !IsToplevel() && !IsConstructor();
}
-bool StackFrameInfo::IsToplevel() const {
+bool CallSiteInfo::IsToplevel() const {
return receiver_or_instance().IsJSGlobalProxy() ||
receiver_or_instance().IsNullOrUndefined();
}
// static
-int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
+int CallSiteInfo::GetLineNumber(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
@@ -78,7 +82,7 @@ int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
}
// static
-int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
+int CallSiteInfo::GetColumnNumber(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
int position = GetSourcePosition(info);
#if V8_ENABLE_WEBASSEMBLY
@@ -100,7 +104,7 @@ int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
}
// static
-int StackFrameInfo::GetEnclosingLineNumber(Handle<StackFrameInfo> info) {
+int CallSiteInfo::GetEnclosingLineNumber(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
@@ -125,7 +129,7 @@ int StackFrameInfo::GetEnclosingLineNumber(Handle<StackFrameInfo> info) {
}
// static
-int StackFrameInfo::GetEnclosingColumnNumber(Handle<StackFrameInfo> info) {
+int CallSiteInfo::GetEnclosingColumnNumber(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
@@ -151,28 +155,28 @@ int StackFrameInfo::GetEnclosingColumnNumber(Handle<StackFrameInfo> info) {
return Script::GetColumnNumber(script, position) + 1;
}
-int StackFrameInfo::GetScriptId() const {
+int CallSiteInfo::GetScriptId() const {
if (auto script = GetScript()) {
return script->id();
}
return Message::kNoScriptIdInfo;
}
-Object StackFrameInfo::GetScriptName() const {
+Object CallSiteInfo::GetScriptName() const {
if (auto script = GetScript()) {
return script->name();
}
return ReadOnlyRoots(GetIsolate()).null_value();
}
-Object StackFrameInfo::GetScriptNameOrSourceURL() const {
+Object CallSiteInfo::GetScriptNameOrSourceURL() const {
if (auto script = GetScript()) {
return script->GetNameOrSourceURL();
}
return ReadOnlyRoots(GetIsolate()).null_value();
}
-Object StackFrameInfo::GetScriptSource() const {
+Object CallSiteInfo::GetScriptSource() const {
if (auto script = GetScript()) {
if (script->HasValidSource()) {
return script->source();
@@ -181,7 +185,7 @@ Object StackFrameInfo::GetScriptSource() const {
return ReadOnlyRoots(GetIsolate()).null_value();
}
-Object StackFrameInfo::GetScriptSourceMappingURL() const {
+Object CallSiteInfo::GetScriptSourceMappingURL() const {
if (auto script = GetScript()) {
return script->source_mapping_url();
}
@@ -242,8 +246,8 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
// static
-Handle<PrimitiveHeapObject> StackFrameInfo::GetEvalOrigin(
- Handle<StackFrameInfo> info) {
+Handle<PrimitiveHeapObject> CallSiteInfo::GetEvalOrigin(
+ Handle<CallSiteInfo> info) {
auto isolate = info->GetIsolate();
Handle<Script> script;
if (!GetScript(isolate, info).ToHandle(&script) ||
@@ -254,7 +258,8 @@ Handle<PrimitiveHeapObject> StackFrameInfo::GetEvalOrigin(
}
// static
-Handle<Object> StackFrameInfo::GetFunctionName(Handle<StackFrameInfo> info) {
+Handle<PrimitiveHeapObject> CallSiteInfo::GetFunctionName(
+ Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
@@ -277,6 +282,24 @@ Handle<Object> StackFrameInfo::GetFunctionName(Handle<StackFrameInfo> info) {
return isolate->factory()->null_value();
}
+// static
+Handle<String> CallSiteInfo::GetFunctionDebugName(Handle<CallSiteInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
+ if (info->IsWasm()) {
+ return GetWasmFunctionDebugName(isolate,
+ handle(info->GetWasmInstance(), isolate),
+ info->GetWasmFunctionIndex());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ Handle<JSFunction> function(JSFunction::cast(info->function()), isolate);
+ Handle<String> name = JSFunction::GetDebugName(function);
+ if (name->length() == 0 && info->IsEval()) {
+ name = isolate->factory()->eval_string();
+ }
+ return name;
+}
+
namespace {
PrimitiveHeapObject InferMethodNameFromFastObject(Isolate* isolate,
@@ -370,7 +393,7 @@ PrimitiveHeapObject InferMethodName(Isolate* isolate, JSReceiver receiver,
} // namespace
// static
-Handle<Object> StackFrameInfo::GetMethodName(Handle<StackFrameInfo> info) {
+Handle<Object> CallSiteInfo::GetMethodName(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
Handle<Object> receiver_or_instance(info->receiver_or_instance(), isolate);
#if V8_ENABLE_WEBASSEMBLY
@@ -436,7 +459,7 @@ Handle<Object> StackFrameInfo::GetMethodName(Handle<StackFrameInfo> info) {
}
// static
-Handle<Object> StackFrameInfo::GetTypeName(Handle<StackFrameInfo> info) {
+Handle<Object> CallSiteInfo::GetTypeName(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
if (!info->IsMethodCall()) {
return isolate->factory()->null_value();
@@ -448,22 +471,22 @@ Handle<Object> StackFrameInfo::GetTypeName(Handle<StackFrameInfo> info) {
if (receiver->IsJSProxy()) {
return isolate->factory()->Proxy_string();
}
- return JSReceiver::GetConstructorName(receiver);
+ return JSReceiver::GetConstructorName(isolate, receiver);
}
#if V8_ENABLE_WEBASSEMBLY
-uint32_t StackFrameInfo::GetWasmFunctionIndex() const {
+uint32_t CallSiteInfo::GetWasmFunctionIndex() const {
DCHECK(IsWasm());
return Smi::ToInt(Smi::cast(function()));
}
-WasmInstanceObject StackFrameInfo::GetWasmInstance() const {
+WasmInstanceObject CallSiteInfo::GetWasmInstance() const {
DCHECK(IsWasm());
return WasmInstanceObject::cast(receiver_or_instance());
}
// static
-Handle<Object> StackFrameInfo::GetWasmModuleName(Handle<StackFrameInfo> info) {
+Handle<Object> CallSiteInfo::GetWasmModuleName(Handle<CallSiteInfo> info) {
Isolate* isolate = info->GetIsolate();
if (info->IsWasm()) {
Handle<String> name;
@@ -479,7 +502,7 @@ Handle<Object> StackFrameInfo::GetWasmModuleName(Handle<StackFrameInfo> info) {
#endif // V8_ENABLE_WEBASSEMBLY
// static
-int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
+int CallSiteInfo::GetSourcePosition(Handle<CallSiteInfo> info) {
if (info->flags() & kIsSourcePositionComputed) {
return info->code_offset_or_source_position();
}
@@ -493,8 +516,8 @@ int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
}
// static
-bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
- MessageLocation* location) {
+bool CallSiteInfo::ComputeLocation(Handle<CallSiteInfo> info,
+ MessageLocation* location) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
@@ -523,8 +546,7 @@ bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
}
// static
-int StackFrameInfo::ComputeSourcePosition(Handle<StackFrameInfo> info,
- int offset) {
+int CallSiteInfo::ComputeSourcePosition(Handle<CallSiteInfo> info, int offset) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
@@ -541,7 +563,7 @@ int StackFrameInfo::ComputeSourcePosition(Handle<StackFrameInfo> info,
return AbstractCode::cast(info->code_object()).SourcePosition(offset);
}
-base::Optional<Script> StackFrameInfo::GetScript() const {
+base::Optional<Script> CallSiteInfo::GetScript() const {
#if V8_ENABLE_WEBASSEMBLY
if (IsWasm()) {
return GetWasmInstance().module_object().script();
@@ -552,7 +574,7 @@ base::Optional<Script> StackFrameInfo::GetScript() const {
return base::nullopt;
}
-SharedFunctionInfo StackFrameInfo::GetSharedFunctionInfo() const {
+SharedFunctionInfo CallSiteInfo::GetSharedFunctionInfo() const {
#if V8_ENABLE_WEBASSEMBLY
DCHECK(!IsWasm());
#endif // V8_ENABLE_WEBASSEMBLY
@@ -560,8 +582,8 @@ SharedFunctionInfo StackFrameInfo::GetSharedFunctionInfo() const {
}
// static
-MaybeHandle<Script> StackFrameInfo::GetScript(Isolate* isolate,
- Handle<StackFrameInfo> info) {
+MaybeHandle<Script> CallSiteInfo::GetScript(Isolate* isolate,
+ Handle<CallSiteInfo> info) {
if (auto script = info->GetScript()) {
return handle(*script, isolate);
}
@@ -574,13 +596,13 @@ bool IsNonEmptyString(Handle<Object> object) {
return (object->IsString() && String::cast(*object).length() > 0);
}
-void AppendFileLocation(Isolate* isolate, Handle<StackFrameInfo> frame,
+void AppendFileLocation(Isolate* isolate, Handle<CallSiteInfo> frame,
IncrementalStringBuilder* builder) {
Handle<Object> script_name_or_source_url(frame->GetScriptNameOrSourceURL(),
isolate);
if (!script_name_or_source_url->IsString() && frame->IsEval()) {
builder->AppendString(
- Handle<String>::cast(StackFrameInfo::GetEvalOrigin(frame)));
+ Handle<String>::cast(CallSiteInfo::GetEvalOrigin(frame)));
// Expecting source position to follow.
builder->AppendCStringLiteral(", ");
}
@@ -594,12 +616,12 @@ void AppendFileLocation(Isolate* isolate, Handle<StackFrameInfo> frame,
builder->AppendCStringLiteral("<anonymous>");
}
- int line_number = StackFrameInfo::GetLineNumber(frame);
+ int line_number = CallSiteInfo::GetLineNumber(frame);
if (line_number != Message::kNoLineNumberInfo) {
builder->AppendCharacter(':');
builder->AppendInt(line_number);
- int column_number = StackFrameInfo::GetColumnNumber(frame);
+ int column_number = CallSiteInfo::GetColumnNumber(frame);
if (column_number != Message::kNoColumnInfo) {
builder->AppendCharacter(':');
builder->AppendInt(column_number);
@@ -644,11 +666,11 @@ bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
return true;
}
-void AppendMethodCall(Isolate* isolate, Handle<StackFrameInfo> frame,
+void AppendMethodCall(Isolate* isolate, Handle<CallSiteInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> type_name = StackFrameInfo::GetTypeName(frame);
- Handle<Object> method_name = StackFrameInfo::GetMethodName(frame);
- Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
+ Handle<Object> type_name = CallSiteInfo::GetTypeName(frame);
+ Handle<Object> method_name = CallSiteInfo::GetMethodName(frame);
+ Handle<Object> function_name = CallSiteInfo::GetFunctionName(frame);
if (IsNonEmptyString(function_name)) {
Handle<String> function_string = Handle<String>::cast(function_name);
@@ -684,16 +706,16 @@ void AppendMethodCall(Isolate* isolate, Handle<StackFrameInfo> frame,
}
}
-void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
+void SerializeJSStackFrame(Isolate* isolate, Handle<CallSiteInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
+ Handle<Object> function_name = CallSiteInfo::GetFunctionName(frame);
if (frame->IsAsync()) {
builder->AppendCStringLiteral("async ");
if (frame->IsPromiseAll() || frame->IsPromiseAny()) {
builder->AppendCStringLiteral("Promise.");
builder->AppendString(Handle<String>::cast(function_name));
builder->AppendCStringLiteral(" (index ");
- builder->AppendInt(StackFrameInfo::GetSourcePosition(frame));
+ builder->AppendInt(CallSiteInfo::GetSourcePosition(frame));
builder->AppendCharacter(')');
return;
}
@@ -719,10 +741,10 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
}
#if V8_ENABLE_WEBASSEMBLY
-void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
+void SerializeWasmStackFrame(Isolate* isolate, Handle<CallSiteInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> module_name = StackFrameInfo::GetWasmModuleName(frame);
- Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
+ Handle<Object> module_name = CallSiteInfo::GetWasmModuleName(frame);
+ Handle<Object> function_name = CallSiteInfo::GetFunctionName(frame);
const bool has_name = !module_name->IsNull() || !function_name->IsNull();
if (has_name) {
if (module_name->IsNull()) {
@@ -752,7 +774,7 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
char buffer[16];
SNPrintF(base::ArrayVector(buffer), "0x%x",
- StackFrameInfo::GetColumnNumber(frame) - 1);
+ CallSiteInfo::GetColumnNumber(frame) - 1);
builder->AppendCString(buffer);
if (has_name) builder->AppendCharacter(')');
@@ -761,8 +783,8 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
} // namespace
-void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
- IncrementalStringBuilder* builder) {
+void SerializeCallSiteInfo(Isolate* isolate, Handle<CallSiteInfo> frame,
+ IncrementalStringBuilder* builder) {
#if V8_ENABLE_WEBASSEMBLY
if (frame->IsWasm() && !frame->IsAsmJsWasm()) {
SerializeWasmStackFrame(isolate, frame, builder);
@@ -772,10 +794,10 @@ void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
SerializeJSStackFrame(isolate, frame, builder);
}
-MaybeHandle<String> SerializeStackFrameInfo(Isolate* isolate,
- Handle<StackFrameInfo> frame) {
+MaybeHandle<String> SerializeCallSiteInfo(Isolate* isolate,
+ Handle<CallSiteInfo> frame) {
IncrementalStringBuilder builder(isolate);
- SerializeStackFrameInfo(isolate, frame, &builder);
+ SerializeCallSiteInfo(isolate, frame, &builder);
return builder.Finish();
}
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/call-site-info.h
index dad792bee1..42d8788351 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/call-site-info.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_STACK_FRAME_INFO_H_
-#define V8_OBJECTS_STACK_FRAME_INFO_H_
+#ifndef V8_OBJECTS_CALL_SITE_INFO_H_
+#define V8_OBJECTS_CALL_SITE_INFO_H_
#include "src/objects/struct.h"
#include "torque-generated/bit-fields.h"
@@ -18,12 +18,12 @@ class MessageLocation;
class WasmInstanceObject;
class StructBodyDescriptor;
-#include "torque-generated/src/objects/stack-frame-info-tq.inc"
+#include "torque-generated/src/objects/call-site-info-tq.inc"
-class StackFrameInfo
- : public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
+class CallSiteInfo : public TorqueGeneratedCallSiteInfo<CallSiteInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
+ DEFINE_TORQUE_GENERATED_CALL_SITE_INFO_FLAGS()
#if V8_ENABLE_WEBASSEMBLY
inline bool IsWasm() const;
@@ -36,6 +36,7 @@ class StackFrameInfo
inline bool IsAsync() const;
bool IsEval() const;
bool IsUserJavaScript() const;
+ bool IsSubjectToDebugging() const;
bool IsMethodCall() const;
bool IsToplevel() const;
bool IsPromiseAll() const;
@@ -45,77 +46,73 @@ class StackFrameInfo
DECL_ACCESSORS(code_object, HeapObject)
// Dispatched behavior.
- DECL_VERIFIER(StackFrameInfo)
+ DECL_VERIFIER(CallSiteInfo)
// Used to signal that the requested field is unknown.
static constexpr int kUnknown = kNoSourcePosition;
- V8_EXPORT_PRIVATE static int GetLineNumber(Handle<StackFrameInfo> info);
- V8_EXPORT_PRIVATE static int GetColumnNumber(Handle<StackFrameInfo> info);
+ V8_EXPORT_PRIVATE static int GetLineNumber(Handle<CallSiteInfo> info);
+ V8_EXPORT_PRIVATE static int GetColumnNumber(Handle<CallSiteInfo> info);
- static int GetEnclosingLineNumber(Handle<StackFrameInfo> info);
- static int GetEnclosingColumnNumber(Handle<StackFrameInfo> info);
+ static int GetEnclosingLineNumber(Handle<CallSiteInfo> info);
+ static int GetEnclosingColumnNumber(Handle<CallSiteInfo> info);
// Returns the script ID if one is attached,
// Message::kNoScriptIdInfo otherwise.
+ static MaybeHandle<Script> GetScript(Isolate* isolate,
+ Handle<CallSiteInfo> info);
int GetScriptId() const;
Object GetScriptName() const;
Object GetScriptNameOrSourceURL() const;
Object GetScriptSource() const;
Object GetScriptSourceMappingURL() const;
- static Handle<PrimitiveHeapObject> GetEvalOrigin(Handle<StackFrameInfo> info);
- V8_EXPORT_PRIVATE static Handle<Object> GetFunctionName(
- Handle<StackFrameInfo> info);
- static Handle<Object> GetMethodName(Handle<StackFrameInfo> info);
- static Handle<Object> GetTypeName(Handle<StackFrameInfo> info);
+ static Handle<PrimitiveHeapObject> GetEvalOrigin(Handle<CallSiteInfo> info);
+ V8_EXPORT_PRIVATE static Handle<PrimitiveHeapObject> GetFunctionName(
+ Handle<CallSiteInfo> info);
+ static Handle<String> GetFunctionDebugName(Handle<CallSiteInfo> info);
+ static Handle<Object> GetMethodName(Handle<CallSiteInfo> info);
+ static Handle<Object> GetTypeName(Handle<CallSiteInfo> info);
#if V8_ENABLE_WEBASSEMBLY
// These methods are only valid for Wasm and asm.js Wasm frames.
uint32_t GetWasmFunctionIndex() const;
WasmInstanceObject GetWasmInstance() const;
- static Handle<Object> GetWasmModuleName(Handle<StackFrameInfo> info);
+ static Handle<Object> GetWasmModuleName(Handle<CallSiteInfo> info);
#endif // V8_ENABLE_WEBASSEMBLY
// Returns the 0-based source position, which is the offset into the
// Script in case of JavaScript and Asm.js, and the bytecode offset
// in the module in case of actual Wasm. In case of async promise
// combinator frames, this returns the index of the promise.
- static int GetSourcePosition(Handle<StackFrameInfo> info);
+ static int GetSourcePosition(Handle<CallSiteInfo> info);
// Attempts to fill the |location| based on the |info|, and avoids
// triggering source position table building for JavaScript frames.
- static bool ComputeLocation(Handle<StackFrameInfo> info,
+ static bool ComputeLocation(Handle<CallSiteInfo> info,
MessageLocation* location);
using BodyDescriptor = StructBodyDescriptor;
private:
- // Bit position in the flag, from least significant bit position.
- DEFINE_TORQUE_GENERATED_STACK_FRAME_INFO_FLAGS()
- friend class StackTraceBuilder;
-
- static int ComputeSourcePosition(Handle<StackFrameInfo> info, int offset);
+ static int ComputeSourcePosition(Handle<CallSiteInfo> info, int offset);
base::Optional<Script> GetScript() const;
SharedFunctionInfo GetSharedFunctionInfo() const;
- static MaybeHandle<Script> GetScript(Isolate* isolate,
- Handle<StackFrameInfo> info);
-
- TQ_OBJECT_CONSTRUCTORS(StackFrameInfo)
+ TQ_OBJECT_CONSTRUCTORS(CallSiteInfo)
};
class IncrementalStringBuilder;
-void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
- IncrementalStringBuilder* builder);
+void SerializeCallSiteInfo(Isolate* isolate, Handle<CallSiteInfo> frame,
+ IncrementalStringBuilder* builder);
V8_EXPORT_PRIVATE
-MaybeHandle<String> SerializeStackFrameInfo(Isolate* isolate,
- Handle<StackFrameInfo> frame);
+MaybeHandle<String> SerializeCallSiteInfo(Isolate* isolate,
+ Handle<CallSiteInfo> frame);
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_STACK_FRAME_INFO_H_
+#endif // V8_OBJECTS_CALL_SITE_INFO_H_
diff --git a/deps/v8/src/objects/stack-frame-info.tq b/deps/v8/src/objects/call-site-info.tq
index 5e60628aa5..fda832618b 100644
--- a/deps/v8/src/objects/stack-frame-info.tq
+++ b/deps/v8/src/objects/call-site-info.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-bitfield struct StackFrameInfoFlags extends uint31 {
+bitfield struct CallSiteInfoFlags extends uint31 {
is_wasm: bool: 1 bit;
is_asm_js_wasm: bool: 1 bit; // Implies that is_wasm bit is set.
is_strict: bool: 1 bit;
@@ -14,11 +14,11 @@ bitfield struct StackFrameInfoFlags extends uint31 {
is_source_position_computed: bool: 1 bit;
}
-extern class StackFrameInfo extends Struct {
+extern class CallSiteInfo extends Struct {
receiver_or_instance: JSAny;
function: JSFunction|Smi;
code_object: HeapObject;
code_offset_or_source_position: Smi;
- flags: SmiTagged<StackFrameInfoFlags>;
+ flags: SmiTagged<CallSiteInfoFlags>;
parameters: FixedArray;
}
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index a4fc2439f4..36f04a424e 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -36,6 +36,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(BytecodeArray)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakArrayList)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(CodeDataContainer)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -170,11 +171,11 @@ INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
}
// Same as RELEASE_ACQUIRE_ACCESSORS_CHECKED2 macro but with Code as a host and
-// using main_cage_base() for computing the base.
+// using main_cage_base(kRelaxedLoad) for computing the base.
#define RELEASE_ACQUIRE_CODE_ACCESSORS_CHECKED2(name, type, offset, \
get_condition, set_condition) \
type Code::name(AcquireLoadTag tag) const { \
- PtrComprCageBase cage_base = main_cage_base(); \
+ PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad); \
return Code::name(cage_base, tag); \
} \
type Code::name(PtrComprCageBase cage_base, AcquireLoadTag) const { \
@@ -235,17 +236,27 @@ PtrComprCageBase Code::main_cage_base() const {
#endif
}
-void Code::set_main_cage_base(Address cage_base) {
+PtrComprCageBase Code::main_cage_base(RelaxedLoadTag) const {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ Address cage_base_hi =
+ Relaxed_ReadField<Tagged_t>(kMainCageBaseUpper32BitsOffset);
+ return PtrComprCageBase(cage_base_hi << 32);
+#else
+ return GetPtrComprCageBase(*this);
+#endif
+}
+
+void Code::set_main_cage_base(Address cage_base, RelaxedStoreTag) {
#ifdef V8_EXTERNAL_CODE_SPACE
Tagged_t cage_base_hi = static_cast<Tagged_t>(cage_base >> 32);
- WriteField<Tagged_t>(kMainCageBaseUpper32BitsOffset, cage_base_hi);
+ Relaxed_WriteField<Tagged_t>(kMainCageBaseUpper32BitsOffset, cage_base_hi);
#else
UNREACHABLE();
#endif
}
CodeDataContainer Code::GCSafeCodeDataContainer(AcquireLoadTag) const {
- PtrComprCageBase cage_base = main_cage_base();
+ PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad);
HeapObject object =
TaggedField<HeapObject, kCodeDataContainerOffset>::Acquire_Load(cage_base,
*this);
@@ -265,6 +276,25 @@ inline CodeT ToCodeT(Code code) {
#endif
}
+inline Handle<CodeT> ToCodeT(Handle<Code> code, Isolate* isolate) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return handle(ToCodeT(*code), isolate);
+#else
+ return code;
+#endif
+}
+
+inline MaybeHandle<CodeT> ToCodeT(MaybeHandle<Code> maybe_code,
+ Isolate* isolate) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ Handle<Code> code;
+ if (maybe_code.ToHandle(&code)) return ToCodeT(code, isolate);
+ return {};
+#else
+ return maybe_code;
+#endif
+}
+
inline Code FromCodeT(CodeT code) {
#ifdef V8_EXTERNAL_CODE_SPACE
return code.code();
@@ -281,6 +311,19 @@ inline Code FromCodeT(CodeT code, RelaxedLoadTag) {
#endif
}
+inline Handle<Code> FromCodeT(Handle<CodeT> code, Isolate* isolate) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return handle(FromCodeT(*code), isolate);
+#else
+ return code;
+#endif
+}
+
+inline Handle<AbstractCode> ToAbstractCode(Handle<CodeT> code,
+ Isolate* isolate) {
+ return Handle<AbstractCode>::cast(FromCodeT(code, isolate));
+}
+
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
#ifdef V8_EXTERNAL_CODE_SPACE
return code;
@@ -296,7 +339,7 @@ void Code::WipeOutHeader() {
WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- set_main_cage_base(kNullAddress);
+ set_main_cage_base(kNullAddress, kRelaxedStore);
}
}
@@ -536,29 +579,35 @@ void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
RELAXED_WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
- DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
+ DCHECK_IMPLIES(stack_slots != 0, uses_safepoint_table());
+ DCHECK_IMPLIES(!uses_safepoint_table(), stack_slots == 0);
}
inline bool Code::is_interpreter_trampoline_builtin() const {
- // Check for kNoBuiltinId first to abort early when the current Code object
- // is not a builtin.
- return builtin_id() != Builtin::kNoBuiltinId &&
- (builtin_id() == Builtin::kInterpreterEntryTrampoline ||
- builtin_id() == Builtin::kInterpreterEnterAtBytecode ||
- builtin_id() == Builtin::kInterpreterEnterAtNextBytecode);
+ return IsInterpreterTrampolineBuiltin(builtin_id());
}
inline bool Code::is_baseline_trampoline_builtin() const {
- return builtin_id() != Builtin::kNoBuiltinId &&
- (builtin_id() == Builtin::kBaselineOutOfLinePrologue ||
- builtin_id() == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
- builtin_id() == Builtin::kBaselineOrInterpreterEnterAtNextBytecode);
+ return IsBaselineTrampolineBuiltin(builtin_id());
}
inline bool Code::is_baseline_leave_frame_builtin() const {
return builtin_id() == Builtin::kBaselineLeaveFrame;
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+// Note, must be in sync with Code::checks_optimization_marker().
+inline bool CodeDataContainer::checks_optimization_marker() const {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ bool checks_marker = (builtin_id() == Builtin::kCompileLazy ||
+ builtin_id() == Builtin::kInterpreterEntryTrampoline ||
+ CodeKindCanTierUp(kind()));
+ return checks_marker ||
+ (CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
+}
+#endif // V8_EXTERNAL_CODE_SPACE
+
+// Note, must be in sync with CodeDataContainer::checks_optimization_marker().
inline bool Code::checks_optimization_marker() const {
bool checks_marker = (builtin_id() == Builtin::kCompileLazy ||
builtin_id() == Builtin::kInterpreterEntryTrampoline ||
@@ -577,6 +626,8 @@ inline bool Code::is_turbofanned() const {
return IsTurbofannedField::decode(flags);
}
+bool Code::is_maglevved() const { return kind() == CodeKind::MAGLEV; }
+
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
int32_t flags =
@@ -607,21 +658,6 @@ inline void Code::set_is_promise_rejection(bool value) {
container.set_kind_specific_flags(updated, kRelaxedStore);
}
-inline bool Code::is_exception_caught() const {
- DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags =
- code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
- return IsExceptionCaughtField::decode(flags);
-}
-
-inline void Code::set_is_exception_caught(bool value) {
- DCHECK(kind() == CodeKind::BUILTIN);
- CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags(kRelaxedLoad);
- int32_t updated = IsExceptionCaughtField::update(previous, value);
- container.set_kind_specific_flags(updated, kRelaxedStore);
-}
-
inline bool Code::is_off_heap_trampoline() const {
const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
return IsOffHeapTrampoline::decode(flags);
@@ -629,7 +665,6 @@ inline bool Code::is_off_heap_trampoline() const {
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
if (is_promise_rejection()) return HandlerTable::PROMISE;
- if (is_exception_caught()) return HandlerTable::CAUGHT;
return HandlerTable::UNCAUGHT;
}
@@ -659,50 +694,46 @@ void Code::set_inlined_bytecode_size(unsigned size) {
RELAXED_WRITE_UINT_FIELD(*this, kInlinedBytecodeSizeOffset, size);
}
-bool Code::has_safepoint_info() const {
- return is_turbofanned() || is_wasm_code();
+bool Code::uses_safepoint_table() const {
+ return is_turbofanned() || is_maglevved() || is_wasm_code();
}
int Code::stack_slots() const {
- DCHECK(has_safepoint_info());
const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
- return StackSlotsField::decode(flags);
+ const int slots = StackSlotsField::decode(flags);
+ DCHECK_IMPLIES(!uses_safepoint_table(), slots == 0);
+ return slots;
}
-bool Code::marked_for_deoptimization() const {
+bool CodeDataContainer::marked_for_deoptimization() const {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // kind field is not available on CodeDataContainer when external code space
+ // is not enabled.
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags =
- code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
- return MarkedForDeoptimizationField::decode(flags);
+#endif // V8_EXTERNAL_CODE_SPACE
+ int32_t flags = kind_specific_flags(kRelaxedLoad);
+ return Code::MarkedForDeoptimizationField::decode(flags);
}
-void Code::set_marked_for_deoptimization(bool flag) {
+bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags(kRelaxedLoad);
- int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- container.set_kind_specific_flags(updated, kRelaxedStore);
+ return code_data_container(kAcquireLoad).marked_for_deoptimization();
}
-int Code::deoptimization_count() const {
+void CodeDataContainer::set_marked_for_deoptimization(bool flag) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // kind field is not available on CodeDataContainer when external code space
+ // is not enabled.
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags =
- code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
- int count = DeoptCountField::decode(flags);
- DCHECK_GE(count, 0);
- return count;
+#endif // V8_EXTERNAL_CODE_SPACE
+ DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
+ int32_t previous = kind_specific_flags(kRelaxedLoad);
+ int32_t updated = Code::MarkedForDeoptimizationField::update(previous, flag);
+ set_kind_specific_flags(updated, kRelaxedStore);
}
-void Code::increment_deoptimization_count() {
- DCHECK(CodeKindCanDeoptimize(kind()));
- CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t flags = container.kind_specific_flags(kRelaxedLoad);
- int32_t count = DeoptCountField::decode(flags);
- DCHECK_GE(count, 0);
- CHECK_LE(count + 1, DeoptCountField::kMax);
- int32_t updated = DeoptCountField::update(flags, count + 1);
- container.set_kind_specific_flags(updated, kRelaxedStore);
+void Code::set_marked_for_deoptimization(bool flag) {
+ code_data_container(kAcquireLoad).set_marked_for_deoptimization(flag);
}
bool Code::embedded_objects_cleared() const {
@@ -857,26 +888,25 @@ static_assert(!V8_EXTERNAL_CODE_SPACE_BOOL,
"for big endian architectures");
#endif
-DEF_GETTER(CodeDataContainer, raw_code, Object) {
+Object CodeDataContainer::raw_code() const {
+ PtrComprCageBase cage_base = code_cage_base();
+ return CodeDataContainer::raw_code(cage_base);
+}
+
+Object CodeDataContainer::raw_code(PtrComprCageBase cage_base) const {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Given the fields layout we can write the Code reference as a full word
- // (see the static asserts above).
- Address* p = reinterpret_cast<Address*>(address() + kCodeOffset);
- Object value = Object(*p);
+ Object value = TaggedField<Object, kCodeOffset>::load(cage_base, *this);
return value;
}
void CodeDataContainer::set_raw_code(Object value, WriteBarrierMode mode) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Given the fields layout we can write the Code reference as a full word
- // (see the static asserts above).
- Address* p = reinterpret_cast<Address*>(address() + kCodeOffset);
- *p = value.ptr();
+ TaggedField<Object, kCodeOffset>::store(*this, value);
CONDITIONAL_WRITE_BARRIER(*this, kCodeOffset, value, mode);
}
Object CodeDataContainer::raw_code(RelaxedLoadTag tag) const {
- PtrComprCageBase cage_base = code_cage_base();
+ PtrComprCageBase cage_base = code_cage_base(tag);
return CodeDataContainer::raw_code(cage_base, tag);
}
@@ -892,7 +922,7 @@ ACCESSORS(CodeDataContainer, next_code_link, Object, kNextCodeLinkOffset)
PtrComprCageBase CodeDataContainer::code_cage_base() const {
#ifdef V8_EXTERNAL_CODE_SPACE
- CHECK(!V8_HEAP_SANDBOX_BOOL);
+ // TODO(v8:10391): consider protecting this value with the sandbox.
Address code_cage_base_hi =
ReadField<Tagged_t>(kCodeCageBaseUpper32BitsOffset);
return PtrComprCageBase(code_cage_base_hi << 32);
@@ -903,7 +933,6 @@ PtrComprCageBase CodeDataContainer::code_cage_base() const {
void CodeDataContainer::set_code_cage_base(Address code_cage_base) {
#ifdef V8_EXTERNAL_CODE_SPACE
- CHECK(!V8_HEAP_SANDBOX_BOOL);
Tagged_t code_cage_base_hi = static_cast<Tagged_t>(code_cage_base >> 32);
WriteField<Tagged_t>(kCodeCageBaseUpper32BitsOffset, code_cage_base_hi);
#else
@@ -911,9 +940,31 @@ void CodeDataContainer::set_code_cage_base(Address code_cage_base) {
#endif
}
+PtrComprCageBase CodeDataContainer::code_cage_base(RelaxedLoadTag) const {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // TODO(v8:10391): consider protecting this value with the sandbox.
+ Address code_cage_base_hi =
+ Relaxed_ReadField<Tagged_t>(kCodeCageBaseUpper32BitsOffset);
+ return PtrComprCageBase(code_cage_base_hi << 32);
+#else
+ return GetPtrComprCageBase(*this);
+#endif
+}
+
+void CodeDataContainer::set_code_cage_base(Address code_cage_base,
+ RelaxedStoreTag) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ Tagged_t code_cage_base_hi = static_cast<Tagged_t>(code_cage_base >> 32);
+ Relaxed_WriteField<Tagged_t>(kCodeCageBaseUpper32BitsOffset,
+ code_cage_base_hi);
+#else
+ UNREACHABLE();
+#endif
+}
+
void CodeDataContainer::AllocateExternalPointerEntries(Isolate* isolate) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- InitExternalPointerField(kCodeEntryPointOffset, isolate);
+ InitExternalPointerField(kCodeEntryPointOffset, isolate, kCodeEntryPointTag);
}
Code CodeDataContainer::code() const {
@@ -926,7 +977,7 @@ Code CodeDataContainer::code(PtrComprCageBase cage_base) const {
}
Code CodeDataContainer::code(RelaxedLoadTag tag) const {
- PtrComprCageBase cage_base = code_cage_base();
+ PtrComprCageBase cage_base = code_cage_base(tag);
return CodeDataContainer::code(cage_base, tag);
}
@@ -938,7 +989,7 @@ Code CodeDataContainer::code(PtrComprCageBase cage_base,
DEF_GETTER(CodeDataContainer, code_entry_point, Address) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Isolate* isolate = GetIsolateForSandbox(*this);
return ReadExternalPointerField(kCodeEntryPointOffset, isolate,
kCodeEntryPointTag);
}
@@ -967,12 +1018,62 @@ Address CodeDataContainer::InstructionStart() const {
return code_entry_point();
}
+Address CodeDataContainer::raw_instruction_start() {
+ return code_entry_point();
+}
+
+Address CodeDataContainer::entry() const { return code_entry_point(); }
+
void CodeDataContainer::clear_padding() {
memset(reinterpret_cast<void*>(address() + kUnalignedSize), 0,
kSize - kUnalignedSize);
}
+RELAXED_UINT16_ACCESSORS(CodeDataContainer, flags, kFlagsOffset)
+
+// Ensure builtin_id field fits into int16_t, so that we can rely on sign
+// extension to convert int16_t{-1} to kNoBuiltinId.
+// If the asserts fail, update the code that use kBuiltinIdOffset below.
+STATIC_ASSERT(static_cast<int>(Builtin::kNoBuiltinId) == -1);
+STATIC_ASSERT(Builtins::kBuiltinCount < std::numeric_limits<int16_t>::max());
+
+void CodeDataContainer::initialize_flags(CodeKind kind, Builtin builtin_id) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ uint16_t value = KindField::encode(kind);
+ set_flags(value, kRelaxedStore);
+
+ WriteField<int16_t>(kBuiltinIdOffset, static_cast<int16_t>(builtin_id));
+}
+
#ifdef V8_EXTERNAL_CODE_SPACE
+
+CodeKind CodeDataContainer::kind() const {
+ return KindField::decode(flags(kRelaxedLoad));
+}
+
+Builtin CodeDataContainer::builtin_id() const {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // Rely on sign-extension when converting int16_t to int to preserve
+ // kNoBuiltinId value.
+ STATIC_ASSERT(static_cast<int>(static_cast<int16_t>(Builtin::kNoBuiltinId)) ==
+ static_cast<int>(Builtin::kNoBuiltinId));
+ int value = ReadField<int16_t>(kBuiltinIdOffset);
+ return static_cast<Builtin>(value);
+}
+
+bool CodeDataContainer::is_builtin() const {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ return builtin_id() != Builtin::kNoBuiltinId;
+}
+
+bool CodeDataContainer::is_optimized_code() const {
+ return CodeKindIsOptimizedJSFunction(kind());
+}
+
+inline bool CodeDataContainer::is_interpreter_trampoline_builtin() const {
+ return IsInterpreterTrampolineBuiltin(builtin_id());
+}
+
//
// A collection of getters and predicates that forward queries to associated
// Code object.
@@ -986,10 +1087,8 @@ void CodeDataContainer::clear_padding() {
return FromCodeT(*this).name(cage_base); \
}
-DEF_PRIMITIVE_FORWARDING_CDC_GETTER(kind, CodeKind)
-DEF_PRIMITIVE_FORWARDING_CDC_GETTER(builtin_id, Builtin)
-DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_builtin, bool)
-DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_interpreter_trampoline_builtin, bool)
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_turbofanned, bool)
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_off_heap_trampoline, bool)
DEF_FORWARDING_CDC_GETTER(deoptimization_data, FixedArray)
DEF_FORWARDING_CDC_GETTER(bytecode_or_interpreter_data, HeapObject)
diff --git a/deps/v8/src/objects/code-kind.cc b/deps/v8/src/objects/code-kind.cc
index 5c4ab5d299..84b7436ef1 100644
--- a/deps/v8/src/objects/code-kind.cc
+++ b/deps/v8/src/objects/code-kind.cc
@@ -24,8 +24,6 @@ const char* CodeKindToMarker(CodeKind kind) {
return "~";
case CodeKind::BASELINE:
return "^";
- case CodeKind::TURBOPROP:
- return "+";
case CodeKind::TURBOFAN:
return "*";
default:
diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h
index 888e04c42b..dbb007df31 100644
--- a/deps/v8/src/objects/code-kind.h
+++ b/deps/v8/src/objects/code-kind.h
@@ -15,36 +15,35 @@ namespace internal {
// The order of INTERPRETED_FUNCTION to TURBOFAN is important. We use it to
// check the relative ordering of the tiers when fetching / installing optimized
// code.
-#define CODE_KIND_LIST(V) \
- V(BYTECODE_HANDLER) \
- V(FOR_TESTING) \
- V(BUILTIN) \
- V(REGEXP) \
- V(WASM_FUNCTION) \
- V(WASM_TO_CAPI_FUNCTION) \
- V(WASM_TO_JS_FUNCTION) \
- V(JS_TO_WASM_FUNCTION) \
- V(JS_TO_JS_FUNCTION) \
- V(C_WASM_ENTRY) \
- V(INTERPRETED_FUNCTION) \
- V(BASELINE) \
- V(TURBOPROP) \
+#define CODE_KIND_LIST(V) \
+ V(BYTECODE_HANDLER) \
+ V(FOR_TESTING) \
+ V(BUILTIN) \
+ V(REGEXP) \
+ V(WASM_FUNCTION) \
+ V(WASM_TO_CAPI_FUNCTION) \
+ V(WASM_TO_JS_FUNCTION) \
+ V(JS_TO_WASM_FUNCTION) \
+ V(JS_TO_JS_FUNCTION) \
+ V(C_WASM_ENTRY) \
+ V(INTERPRETED_FUNCTION) \
+ V(BASELINE) \
+ V(MAGLEV) \
V(TURBOFAN)
-enum class CodeKind {
+enum class CodeKind : uint8_t {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
};
-STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::TURBOPROP &&
- CodeKind::INTERPRETED_FUNCTION < CodeKind::BASELINE);
-STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOPROP);
-STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN &&
- CodeKind::TURBOPROP < CodeKind::TURBOFAN);
+STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::BASELINE);
+STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN);
#define V(...) +1
static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
#undef V
+// Unlikely, but just to be safe:
+STATIC_ASSERT(kCodeKindCount <= std::numeric_limits<uint8_t>::max());
const char* CodeKindToString(CodeKind kind);
@@ -58,6 +57,10 @@ inline constexpr bool CodeKindIsBaselinedJSFunction(CodeKind kind) {
return kind == CodeKind::BASELINE;
}
+inline constexpr bool CodeKindIsStaticallyCompiled(CodeKind kind) {
+ return kind == CodeKind::BYTECODE_HANDLER || kind == CodeKind::BUILTIN;
+}
+
inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) {
STATIC_ASSERT(static_cast<int>(CodeKind::INTERPRETED_FUNCTION) + 1 ==
static_cast<int>(CodeKind::BASELINE));
@@ -66,14 +69,16 @@ inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) {
}
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
- STATIC_ASSERT(static_cast<int>(CodeKind::TURBOPROP) + 1 ==
+ STATIC_ASSERT(static_cast<int>(CodeKind::MAGLEV) + 1 ==
static_cast<int>(CodeKind::TURBOFAN));
- return base::IsInRange(kind, CodeKind::TURBOPROP, CodeKind::TURBOFAN);
+ return base::IsInRange(kind, CodeKind::MAGLEV, CodeKind::TURBOFAN);
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
- return CodeKindIsUnoptimizedJSFunction(kind) ||
- CodeKindIsOptimizedJSFunction(kind);
+ STATIC_ASSERT(static_cast<int>(CodeKind::BASELINE) + 1 ==
+ static_cast<int>(CodeKind::MAGLEV));
+ return base::IsInRange(kind, CodeKind::INTERPRETED_FUNCTION,
+ CodeKind::TURBOFAN);
}
inline constexpr bool CodeKindIsBuiltinOrJSFunction(CodeKind kind) {
@@ -85,47 +90,22 @@ inline constexpr bool CodeKindCanDeoptimize(CodeKind kind) {
}
inline constexpr bool CodeKindCanOSR(CodeKind kind) {
- return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
-}
-
-inline bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
- return !FLAG_turboprop_as_toptier && kind == CodeKind::TURBOPROP;
+ return kind == CodeKind::TURBOFAN;
}
inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
- return CodeKindIsUnoptimizedJSFunction(kind) ||
- CodeKindIsOptimizedAndCanTierUp(kind);
+ return CodeKindIsUnoptimizedJSFunction(kind);
}
// The optimization marker field on the feedback vector has a dual purpose of
// controlling the tier-up workflow, and caching the produced code object for
// access from multiple closures.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
- return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
+ return kind == CodeKind::TURBOFAN;
}
-inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
- if (kind == CodeKind::TURBOFAN) return OptimizationTier::kTopTier;
- if (kind == CodeKind::TURBOPROP) {
- return FLAG_turboprop_as_toptier ? OptimizationTier::kTopTier
- : OptimizationTier::kMidTier;
- }
- return OptimizationTier::kNone;
-}
-
-inline CodeKind CodeKindForTopTier() {
- if (V8_UNLIKELY(FLAG_turboprop_as_toptier)) {
- return CodeKind::TURBOPROP;
- }
- return CodeKind::TURBOFAN;
-}
-
-inline CodeKind CodeKindForOSR() {
- if (V8_UNLIKELY(FLAG_turboprop)) {
- return CodeKind::TURBOPROP;
- }
- return CodeKind::TURBOFAN;
-}
+inline CodeKind CodeKindForTopTier() { return CodeKind::TURBOFAN; }
+inline CodeKind CodeKindForOSR() { return CodeKind::TURBOFAN; }
// The dedicated CodeKindFlag enum represents all code kinds in a format
// suitable for bit sets.
@@ -147,10 +127,10 @@ using CodeKinds = base::Flags<CodeKindFlag>;
DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)
static constexpr CodeKinds kJSFunctionCodeKindsMask{
- CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
- CodeKindFlag::TURBOPROP | CodeKindFlag::BASELINE};
+ CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::BASELINE |
+ CodeKindFlag::MAGLEV | CodeKindFlag::TURBOFAN};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
- CodeKindFlag::TURBOFAN | CodeKindFlag::TURBOPROP};
+ CodeKindFlag::MAGLEV | CodeKindFlag::TURBOFAN};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 328b3cae7c..96bda038db 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -12,7 +12,7 @@
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/isolate-utils.h"
+#include "src/execution/isolate-utils-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
@@ -123,8 +123,9 @@ void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
} else if (RelocInfo::IsCodeTargetMode(mode)) {
// Rewrite code handles to direct pointers to the first instruction in the
// code object.
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code code = Code::cast(*p);
+ Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
+ DCHECK(p->IsCodeT(GetPtrComprCageBaseSlow(*p)));
+ Code code = FromCodeT(CodeT::cast(*p));
it.rinfo()->set_target_address(code.raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
@@ -584,7 +585,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
os << "\n";
- if (has_safepoint_info()) {
+ if (uses_safepoint_table()) {
SafepointTable table(isolate, current_pc, *this);
table.Print(os);
os << "\n";
@@ -646,8 +647,7 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << reinterpret_cast<const void*>(current_address) << " @ "
<< std::setw(4) << iterator.current_offset() << " : ";
interpreter::BytecodeDecoder::Decode(
- os, reinterpret_cast<byte*>(current_address),
- static_cast<int>(parameter_count()));
+ os, reinterpret_cast<byte*>(current_address));
if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
Address jump_target = base_address + iterator.GetJumpTargetOffset();
os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 28c483cb1d..2ae72478e1 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -53,6 +53,18 @@ class CodeDataContainer : public HeapObject {
// is deterministic.
inline void clear_padding();
+ //
+ // A collection of getters and predicates that are used by respective methods
+ // on Code object. They are defined here mostly because they operate on the
+ // writable state of the respective Code object.
+ //
+
+ inline bool can_have_weak_objects() const;
+ inline void set_can_have_weak_objects(bool value);
+
+ inline bool marked_for_deoptimization() const;
+ inline void set_marked_for_deoptimization(bool flag);
+
// Back-reference to the Code object.
// Available only when V8_EXTERNAL_CODE_SPACE is defined.
DECL_GETTER(code, Code)
@@ -68,6 +80,8 @@ class CodeDataContainer : public HeapObject {
// the full value is not guaranteed.
inline PtrComprCageBase code_cage_base() const;
inline void set_code_cage_base(Address code_cage_base);
+ inline PtrComprCageBase code_cage_base(RelaxedLoadTag) const;
+ inline void set_code_cage_base(Address code_cage_base, RelaxedStoreTag);
// Cached value of code().InstructionStart().
// Available only when V8_EXTERNAL_CODE_SPACE is defined.
@@ -82,9 +96,20 @@ class CodeDataContainer : public HeapObject {
inline void AllocateExternalPointerEntries(Isolate* isolate);
+ // Initializes internal flags field which stores cached values of some
+ // properties of the respective Code object.
+ // Available only when V8_EXTERNAL_CODE_SPACE is enabled.
+ inline void initialize_flags(CodeKind kind, Builtin builtin_id);
+
// Alias for code_entry_point to make it API compatible with Code.
inline Address InstructionStart() const;
+ // Alias for code_entry_point to make it API compatible with Code.
+ inline Address raw_instruction_start();
+
+ // Alias for code_entry_point to make it API compatible with Code.
+ inline Address entry() const;
+
#ifdef V8_EXTERNAL_CODE_SPACE
//
// A collection of getters and predicates that forward queries to associated
@@ -94,8 +119,32 @@ class CodeDataContainer : public HeapObject {
inline CodeKind kind() const;
inline Builtin builtin_id() const;
inline bool is_builtin() const;
+
+ inline bool is_optimized_code() const;
+ inline bool is_wasm_code() const;
+
+ // Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin() const;
+ // Testers for baseline builtins.
+ inline bool is_baseline_trampoline_builtin() const;
+ inline bool is_baseline_leave_frame_builtin() const;
+
+ // Tells whether the code checks the optimization marker in the function's
+ // feedback vector.
+ inline bool checks_optimization_marker() const;
+
+ // Tells whether the outgoing parameters of this code are tagged pointers.
+ inline bool has_tagged_outgoing_params() const;
+
+ // [is_turbofanned]: Tells whether the code object was generated by the
+ // TurboFan optimizing compiler.
+ inline bool is_turbofanned() const;
+
+ // [is_off_heap_trampoline]: For kind BUILTIN tells whether
+ // this is a trampoline to an off-heap builtin.
+ inline bool is_off_heap_trampoline() const;
+
DECL_GETTER(deoptimization_data, FixedArray)
DECL_GETTER(bytecode_or_interpreter_data, HeapObject)
DECL_GETTER(source_position_table, ByteArray)
@@ -110,23 +159,25 @@ class CodeDataContainer : public HeapObject {
DECL_VERIFIER(CodeDataContainer)
// Layout description.
-#define CODE_DATA_FIELDS(V) \
- /* Strong pointer fields. */ \
- V(kPointerFieldsStrongEndOffset, 0) \
- /* Weak pointer fields. */ \
- V(kNextCodeLinkOffset, kTaggedSize) \
- V(kPointerFieldsWeakEndOffset, 0) \
- /* Strong Code pointer fields. */ \
- V(kCodeOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
- V(kCodePointerFieldsStrongEndOffset, 0) \
- /* Raw data fields. */ \
- V(kCodeCageBaseUpper32BitsOffset, \
- V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
- V(kCodeEntryPointOffset, \
- V8_EXTERNAL_CODE_SPACE_BOOL ? kExternalPointerSize : 0) \
- V(kKindSpecificFlagsOffset, kInt32Size) \
- V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
- /* Total size. */ \
+#define CODE_DATA_FIELDS(V) \
+ /* Strong pointer fields. */ \
+ V(kPointerFieldsStrongEndOffset, 0) \
+ /* Weak pointer fields. */ \
+ V(kNextCodeLinkOffset, kTaggedSize) \
+ V(kPointerFieldsWeakEndOffset, 0) \
+ /* Strong Code pointer fields. */ \
+ V(kCodeOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
+ V(kCodePointerFieldsStrongEndOffset, 0) \
+ /* Raw data fields. */ \
+ V(kCodeCageBaseUpper32BitsOffset, \
+ V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
+ V(kCodeEntryPointOffset, \
+ V8_EXTERNAL_CODE_SPACE_BOOL ? kExternalPointerSize : 0) \
+ V(kFlagsOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kUInt16Size : 0) \
+ V(kBuiltinIdOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kInt16Size : 0) \
+ V(kKindSpecificFlagsOffset, kInt32Size) \
+ V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_DATA_FIELDS)
@@ -134,11 +185,27 @@ class CodeDataContainer : public HeapObject {
class BodyDescriptor;
+ // Flags layout.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(KindField, CodeKind, 4, _) \
+ /* The other 12 bits are still free. */
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+ STATIC_ASSERT(FLAGS_BIT_FIELDS_Ranges::kBitsCount == 4);
+ STATIC_ASSERT(!V8_EXTERNAL_CODE_SPACE_BOOL ||
+ (FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
+ FIELD_SIZE(CodeDataContainer::kFlagsOffset) * kBitsPerByte));
+
private:
DECL_ACCESSORS(raw_code, Object)
DECL_RELAXED_GETTER(raw_code, Object)
inline void set_code_entry_point(Isolate* isolate, Address value);
+ // When V8_EXTERNAL_CODE_SPACE is enabled the flags field contains cached
+ // values of some flags of the from the respective Code object.
+ DECL_RELAXED_UINT16_ACCESSORS(flags)
+
friend Factory;
friend FactoryBase<Factory>;
friend FactoryBase<LocalFactory>;
@@ -369,6 +436,10 @@ class Code : public HeapObject {
// TurboFan optimizing compiler.
inline bool is_turbofanned() const;
+ // TODO(jgruber): Reconsider these predicates; we should probably merge them
+ // and rename to something appropriate.
+ inline bool is_maglevved() const;
+
// [can_have_weak_objects]: If CodeKindIsOptimizedJSFunction(kind), tells
// whether the embedded objects in code should be treated weakly.
inline bool can_have_weak_objects() const;
@@ -384,10 +455,12 @@ class Code : public HeapObject {
inline unsigned inlined_bytecode_size() const;
inline void set_inlined_bytecode_size(unsigned size);
- inline bool has_safepoint_info() const;
+ // [uses_safepoint_table]: Whether this Code object uses safepoint tables
+ // (note the table may still be empty, see has_safepoint_table).
+ inline bool uses_safepoint_table() const;
- // [stack_slots]: If {has_safepoint_info()}, the number of stack slots
- // reserved in the code prologue.
+ // [stack_slots]: If {uses_safepoint_table()}, the number of stack slots
+ // reserved in the code prologue; otherwise 0.
inline int stack_slots() const;
// [marked_for_deoptimization]: If CodeKindCanDeoptimize(kind), tells whether
@@ -395,13 +468,6 @@ class Code : public HeapObject {
inline bool marked_for_deoptimization() const;
inline void set_marked_for_deoptimization(bool flag);
- // [deoptimization_count]: If CodeKindCanDeoptimize(kind). In turboprop we
- // retain the deoptimized code on soft deopts for a certain number of soft
- // deopts. This field keeps track of the number of deoptimizations we have
- // seen so far.
- inline int deoptimization_count() const;
- inline void increment_deoptimization_count();
-
// [embedded_objects_cleared]: If CodeKindIsOptimizedJSFunction(kind), tells
// whether the embedded objects in the code marked for deoptimization were
// cleared. Note that embedded_objects_cleared() implies
@@ -420,12 +486,6 @@ class Code : public HeapObject {
// Use GetBuiltinCatchPrediction to access this.
inline void set_is_promise_rejection(bool flag);
- // [is_exception_caught]: For kind BUILTIN tells whether the
- // exception thrown by the code will be caught internally or
- // uncaught if both this and is_promise_rejection is set.
- // Use GetBuiltinCatchPrediction to access this.
- inline void set_is_exception_caught(bool flag);
-
// [is_off_heap_trampoline]: For kind BUILTIN tells whether
// this is a trampoline to an off-heap builtin.
inline bool is_off_heap_trampoline() const;
@@ -448,7 +508,8 @@ class Code : public HeapObject {
// This field contains cage base value which is used for decompressing
// the references to non-Code objects (map, deoptimization_data, etc.).
inline PtrComprCageBase main_cage_base() const;
- inline void set_main_cage_base(Address cage_base);
+ inline PtrComprCageBase main_cage_base(RelaxedLoadTag) const;
+ inline void set_main_cage_base(Address cage_base, RelaxedStoreTag);
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic. Depending on the V8 build mode there could be no padding.
@@ -639,12 +700,10 @@ class Code : public HeapObject {
V(EmbeddedObjectsClearedField, bool, 1, _) \
V(DeoptAlreadyCountedField, bool, 1, _) \
V(CanHaveWeakObjectsField, bool, 1, _) \
- V(IsPromiseRejectionField, bool, 1, _) \
- V(IsExceptionCaughtField, bool, 1, _) \
- V(DeoptCountField, int, 4, _)
+ V(IsPromiseRejectionField, bool, 1, _)
DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
- STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 10);
+ STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 5);
STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) *
kBitsPerByte);
@@ -664,7 +723,6 @@ class Code : public HeapObject {
inline CodeDataContainer GCSafeCodeDataContainer(AcquireLoadTag) const;
bool is_promise_rejection() const;
- bool is_exception_caught() const;
enum BytecodeToPCPosition {
kPcAtStartOfBytecode,
@@ -698,12 +756,14 @@ class Code::OptimizedCodeIterator {
// Helper functions for converting Code objects to CodeDataContainer and back
// when V8_EXTERNAL_CODE_SPACE is enabled.
inline CodeT ToCodeT(Code code);
+inline Handle<CodeT> ToCodeT(Handle<Code> code, Isolate* isolate);
inline Code FromCodeT(CodeT code);
inline Code FromCodeT(CodeT code, RelaxedLoadTag);
inline Code FromCodeT(CodeT code, AcquireLoadTag);
inline Code FromCodeT(CodeT code, PtrComprCageBase);
inline Code FromCodeT(CodeT code, PtrComprCageBase, RelaxedLoadTag);
inline Code FromCodeT(CodeT code, PtrComprCageBase, AcquireLoadTag);
+inline Handle<CodeT> FromCodeT(Handle<Code> code, Isolate* isolate);
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code);
class AbstractCode : public HeapObject {
diff --git a/deps/v8/src/objects/compilation-cache-table.cc b/deps/v8/src/objects/compilation-cache-table.cc
index b934554116..84ec6c70c5 100644
--- a/deps/v8/src/objects/compilation-cache-table.cc
+++ b/deps/v8/src/objects/compilation-cache-table.cc
@@ -389,7 +389,7 @@ void CompilationCacheTable::Age(Isolate* isolate) {
} else if (key.IsFixedArray()) {
// The ageing mechanism for script and eval caches.
SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
- if (info.IsInterpreted() && info.GetBytecodeArray(isolate).IsOld()) {
+ if (info.HasBytecodeArray() && info.GetBytecodeArray(isolate).IsOld()) {
RemoveEntry(entry_index);
}
}
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index fee92ae98b..7661ccafb3 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -39,6 +39,9 @@ void ScriptContextTable::set_used(int used, ReleaseStoreTag tag) {
set(kUsedSlotIndex, Smi::FromInt(used), tag);
}
+ACCESSORS(ScriptContextTable, names_to_context_index, NameToIndexHashTable,
+ kHashTableOffset)
+
// static
Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
Handle<ScriptContextTable> table,
@@ -109,9 +112,7 @@ void NativeContext::set(int index, Object value, WriteBarrierMode mode,
Context::set(index, value, mode, tag);
}
-void Context::set_scope_info(ScopeInfo scope_info, WriteBarrierMode mode) {
- set(SCOPE_INFO_INDEX, scope_info, mode);
-}
+ACCESSORS(Context, scope_info, ScopeInfo, kScopeInfoOffset)
Object Context::unchecked_previous() const { return get(PREVIOUS_INDEX); }
@@ -124,10 +125,6 @@ void Context::set_previous(Context context, WriteBarrierMode mode) {
set(PREVIOUS_INDEX, context, mode);
}
-ScopeInfo Context::scope_info() const {
- return ScopeInfo::cast(get(SCOPE_INFO_INDEX));
-}
-
Object Context::next_context_link() const {
return get(Context::NEXT_CONTEXT_LINK);
}
@@ -271,13 +268,14 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
}
DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Isolate* isolate = GetIsolateForSandbox(*this);
return reinterpret_cast<MicrotaskQueue*>(ReadExternalPointerField(
kMicrotaskQueueOffset, isolate, kNativeContextMicrotaskQueueTag));
}
void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kMicrotaskQueueOffset, isolate);
+ InitExternalPointerField(kMicrotaskQueueOffset, isolate,
+ kNativeContextMicrotaskQueueTag);
}
void NativeContext::set_microtask_queue(Isolate* isolate,
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index cdf9edce24..2f514ac1e7 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -14,15 +14,40 @@
namespace v8 {
namespace internal {
+void ScriptContextTable::AddLocalNamesFromContext(
+ Isolate* isolate, Handle<ScriptContextTable> script_context_table,
+ Handle<Context> script_context, bool ignore_duplicates,
+ int script_context_index) {
+ ReadOnlyRoots roots(isolate);
+ PtrComprCageBase cage_base(isolate);
+ Handle<NameToIndexHashTable> names_table(
+ script_context_table->names_to_context_index(cage_base), isolate);
+ Handle<ScopeInfo> scope_info(script_context->scope_info(cage_base), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ names_table = names_table->EnsureCapacity(isolate, names_table, local_count);
+ for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
+ Handle<Name> name(it->name(cage_base), isolate);
+ if (ignore_duplicates) {
+ int32_t hash = NameToIndexShape::Hash(roots, name);
+ if (names_table->FindEntry(cage_base, roots, name, hash).is_found()) {
+ continue;
+ }
+ }
+ names_table = NameToIndexHashTable::Add(isolate, names_table, name,
+ script_context_index);
+ }
+ script_context_table->set_names_to_context_index(*names_table);
+}
+
Handle<ScriptContextTable> ScriptContextTable::Extend(
- Handle<ScriptContextTable> table, Handle<Context> script_context) {
+ Isolate* isolate, Handle<ScriptContextTable> table,
+ Handle<Context> script_context, bool ignore_duplicates) {
Handle<ScriptContextTable> result;
int used = table->used(kAcquireLoad);
int length = table->length();
CHECK(used >= 0 && length > 0 && used < length);
if (used + kFirstContextSlotIndex == length) {
CHECK(length < Smi::kMaxValue / 2);
- Isolate* isolate = script_context->GetIsolate();
Handle<FixedArray> copy =
isolate->factory()->CopyFixedArrayAndGrow(table, length);
copy->set_map(ReadOnlyRoots(isolate).script_context_table_map());
@@ -31,6 +56,8 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
result = table;
}
DCHECK(script_context->IsScriptContext());
+ ScriptContextTable::AddLocalNamesFromContext(isolate, result, script_context,
+ ignore_duplicates, used);
result->set(used + kFirstContextSlotIndex, *script_context, kReleaseStore);
result->set_used(used + 1, kReleaseStore);
return result;
@@ -46,21 +73,20 @@ void Context::Initialize(Isolate* isolate) {
}
}
-bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
- String name, VariableLookupResult* result) {
+bool ScriptContextTable::Lookup(Handle<String> name,
+ VariableLookupResult* result) {
DisallowGarbageCollection no_gc;
- // Static variables cannot be in script contexts.
- for (int i = 0; i < table.used(kAcquireLoad); i++) {
- Context context = table.get_context(i);
- DCHECK(context.IsScriptContext());
- int slot_index =
- ScopeInfo::ContextSlotIndex(context.scope_info(), name, result);
-
- if (slot_index >= 0) {
- result->context_index = i;
- result->slot_index = slot_index;
- return true;
- }
+ int index = names_to_context_index().Lookup(name);
+ if (index == -1) return false;
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, used(kAcquireLoad));
+ Context context = get_context(index);
+ DCHECK(context.IsScriptContext());
+ int slot_index = context.scope_info().ContextSlotIndex(name, result);
+ if (slot_index >= 0) {
+ result->context_index = index;
+ result->slot_index = slot_index;
+ return true;
}
return false;
}
@@ -217,7 +243,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
ScriptContextTable script_contexts =
context->global_object().native_context().script_context_table();
VariableLookupResult r;
- if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) {
+ if (script_contexts.Lookup(name, &r)) {
Context script_context = script_contexts.get_context(r.context_index);
if (FLAG_trace_contexts) {
PrintF("=> found property in script context %d: %p\n",
@@ -286,8 +312,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// for the context index.
ScopeInfo scope_info = context->scope_info();
VariableLookupResult lookup_result;
- int slot_index =
- ScopeInfo::ContextSlotIndex(scope_info, *name, &lookup_result);
+ int slot_index = scope_info.ContextSlotIndex(name, &lookup_result);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
// Re-direct lookup to the ScriptContextTable in case we find a hole in
@@ -401,12 +426,11 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
return Handle<Object>::null();
}
-void NativeContext::AddOptimizedCode(Code code) {
+void NativeContext::AddOptimizedCode(CodeT code) {
DCHECK(CodeKindCanDeoptimize(code.kind()));
DCHECK(code.next_code_link().IsUndefined());
code.set_next_code_link(OptimizedCodeListHead());
- set(OPTIMIZED_CODE_LIST, ToCodeT(code), UPDATE_WEAK_WRITE_BARRIER,
- kReleaseStore);
+ set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
}
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 46228428b9..c1a14a4501 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -122,6 +122,7 @@ enum ContextLookupFlags {
V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
fast_template_instantiations_cache) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
+ V(FUNCTION_PROTOTYPE_INDEX, JSObject, function_prototype) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
generator_function_function) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
@@ -207,6 +208,10 @@ enum ContextLookupFlags {
temporal_time_zone_function) \
V(JS_TEMPORAL_ZONED_DATE_TIME_FUNCTION_INDEX, JSFunction, \
temporal_zoned_date_time_function) \
+ V(TEMPORAL_INSTANT_FIXED_ARRAY_FROM_ITERABLE_FUNCTION_INDEX, JSFunction, \
+ temporal_instant_fixed_array_from_iterable) \
+ V(STRING_FIXED_ARRAY_FROM_ITERABLE_FUNCTION_INDEX, JSFunction, \
+ string_fixed_array_from_iterable) \
/* Context maps */ \
V(NATIVE_CONTEXT_MAP_INDEX, Map, native_context_map) \
V(FUNCTION_CONTEXT_MAP_INDEX, Map, function_context_map) \
@@ -229,6 +234,7 @@ enum ContextLookupFlags {
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(OBJECT_FUNCTION_PROTOTYPE_INDEX, JSObject, object_function_prototype) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(PROMISE_HOOK_INIT_FUNCTION_INDEX, Object, promise_hook_init_function) \
V(PROMISE_HOOK_BEFORE_FUNCTION_INDEX, Object, promise_hook_before_function) \
@@ -325,6 +331,8 @@ enum ContextLookupFlags {
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
V(AGGREGATE_ERROR_FUNCTION_INDEX, JSFunction, aggregate_error_function) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PARSE_FLOAT_FUN_INDEX, JSFunction, global_parse_float_fun) \
+ V(GLOBAL_PARSE_INT_FUN_INDEX, JSFunction, global_parse_int_fun) \
V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
V(MAP_DELETE_INDEX, JSFunction, map_delete) \
V(MAP_GET_INDEX, JSFunction, map_get) \
@@ -358,6 +366,7 @@ enum ContextLookupFlags {
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKMAP_DELETE_INDEX, JSFunction, weakmap_delete) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
+ V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map) \
V(RETAINED_MAPS, Object, retained_maps) \
V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
@@ -382,23 +391,34 @@ class ScriptContextTable : public FixedArray {
inline Context get_context(int i) const;
inline Context get_context(int i, AcquireLoadTag tag) const;
+ DECL_ACCESSORS(names_to_context_index, NameToIndexHashTable)
+
+ // Adds local names from `script_context` to the hash table.
+ static void AddLocalNamesFromContext(
+ Isolate* isolate, Handle<ScriptContextTable> script_context_table,
+ Handle<Context> script_context, bool ignore_duplicates,
+ int script_context_index);
+
// Lookup a variable `name` in a ScriptContextTable.
// If it returns true, the variable is found and `result` contains
// valid information about its location.
// If it returns false, `result` is untouched.
V8_WARN_UNUSED_RESULT
- V8_EXPORT_PRIVATE static bool Lookup(Isolate* isolate,
- ScriptContextTable table, String name,
- VariableLookupResult* result);
+ V8_EXPORT_PRIVATE bool Lookup(Handle<String> name,
+ VariableLookupResult* result);
V8_WARN_UNUSED_RESULT
V8_EXPORT_PRIVATE static Handle<ScriptContextTable> Extend(
- Handle<ScriptContextTable> table, Handle<Context> script_context);
+ Isolate* isolate, Handle<ScriptContextTable> table,
+ Handle<Context> script_context, bool ignore_duplicates = false);
- static const int kUsedSlotIndex = 0;
- static const int kFirstContextSlotIndex = 1;
+ static const int kHashTableIndex = 0;
+ static const int kUsedSlotIndex = 1;
+ static const int kFirstContextSlotIndex = 2;
static const int kMinLength = kFirstContextSlotIndex;
+ static const int kHashTableOffset = OffsetOfElementAt(kHashTableIndex);
+
OBJECT_CONSTRUCTORS(ScriptContextTable, FixedArray);
};
@@ -559,8 +579,7 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
static const int kInvalidContext = 1;
// Direct slot access.
- inline void set_scope_info(ScopeInfo scope_info,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ DECL_ACCESSORS(scope_info, ScopeInfo)
inline Object unchecked_previous() const;
inline Context previous() const;
@@ -573,7 +592,6 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
HeapObject object, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
JSObject extension_object() const;
JSReceiver extension_receiver() const;
- V8_EXPORT_PRIVATE inline ScopeInfo scope_info() const;
// Find the module context (assuming there is one) and return the associated
// module object.
@@ -752,7 +770,7 @@ class NativeContext : public Context {
// The native context stores a list of all optimized code and a list of all
// deoptimized code, which are needed by the deoptimizer.
- V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
+ V8_EXPORT_PRIVATE void AddOptimizedCode(CodeT code);
inline void SetOptimizedCodeListHead(Object head);
inline Object OptimizedCodeListHead();
inline void SetDeoptimizedCodeListHead(Object head);
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index e9e7c08f4c..f4013fcc12 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -52,6 +52,39 @@ BytecodeArray DebugInfo::DebugBytecodeArray() {
return BytecodeArray::cast(debug_bytecode_array(kAcquireLoad));
}
+TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
+NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
+
+Script StackFrameInfo::script() const {
+ HeapObject object = shared_or_script();
+ if (object.IsSharedFunctionInfo()) {
+ object = SharedFunctionInfo::cast(object).script();
+ }
+ return Script::cast(object);
+}
+
+BIT_FIELD_ACCESSORS(StackFrameInfo, flags, bytecode_offset_or_source_position,
+ StackFrameInfo::BytecodeOffsetOrSourcePositionBits)
+BIT_FIELD_ACCESSORS(StackFrameInfo, flags, is_constructor,
+ StackFrameInfo::IsConstructorBit)
+
+NEVER_READ_ONLY_SPACE_IMPL(ErrorStackData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ErrorStackData)
+
+bool ErrorStackData::HasFormattedStack() const {
+ return !call_site_infos_or_formatted_stack().IsFixedArray();
+}
+
+ACCESSORS_RELAXED_CHECKED(ErrorStackData, formatted_stack, Object,
+ kCallSiteInfosOrFormattedStackOffset,
+ !limit_or_stack_frame_infos().IsSmi())
+
+bool ErrorStackData::HasCallSiteInfos() const { return !HasFormattedStack(); }
+
+ACCESSORS_RELAXED_CHECKED(ErrorStackData, call_site_infos, FixedArray,
+ kCallSiteInfosOrFormattedStackOffset,
+ !HasFormattedStack())
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index d3caa12233..efe5d68543 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -7,6 +7,7 @@
#include "src/base/platform/mutex.h"
#include "src/debug/debug-evaluate.h"
#include "src/handles/handles-inl.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/utils/ostreams.h"
@@ -395,5 +396,66 @@ void CoverageInfo::CoverageInfoPrint(std::ostream& os,
}
}
+// static
+int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
+ if (info->shared_or_script().IsScript()) {
+ return info->bytecode_offset_or_source_position();
+ }
+ Isolate* isolate = info->GetIsolate();
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(info->shared_or_script()), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
+ int source_position = shared->abstract_code(isolate).SourcePosition(
+ info->bytecode_offset_or_source_position());
+ info->set_shared_or_script(shared->script());
+ info->set_bytecode_offset_or_source_position(source_position);
+ return source_position;
+}
+
+// static
+void ErrorStackData::EnsureStackFrameInfos(Isolate* isolate,
+ Handle<ErrorStackData> error_stack) {
+ if (!error_stack->limit_or_stack_frame_infos().IsSmi()) {
+ return;
+ }
+ int limit = Smi::cast(error_stack->limit_or_stack_frame_infos()).value();
+ Handle<FixedArray> call_site_infos(error_stack->call_site_infos(), isolate);
+ Handle<FixedArray> stack_frame_infos =
+ isolate->factory()->NewFixedArray(call_site_infos->length());
+ int index = 0;
+ for (int i = 0; i < call_site_infos->length(); ++i) {
+ Handle<CallSiteInfo> call_site_info(
+ CallSiteInfo::cast(call_site_infos->get(i)), isolate);
+ if (call_site_info->IsAsync()) {
+ break;
+ }
+ Handle<Script> script;
+ if (!CallSiteInfo::GetScript(isolate, call_site_info).ToHandle(&script) ||
+ !script->IsSubjectToDebugging()) {
+ continue;
+ }
+ Handle<StackFrameInfo> stack_frame_info =
+ isolate->factory()->NewStackFrameInfo(
+ script, CallSiteInfo::GetSourcePosition(call_site_info),
+ CallSiteInfo::GetFunctionDebugName(call_site_info),
+ call_site_info->IsConstructor());
+ stack_frame_infos->set(index++, *stack_frame_info);
+ }
+ stack_frame_infos =
+ FixedArray::ShrinkOrEmpty(isolate, stack_frame_infos, index);
+ if (limit < 0 && -limit < index) {
+ // Negative limit encodes cap to be applied to |stack_frame_infos|.
+ stack_frame_infos =
+ FixedArray::ShrinkOrEmpty(isolate, stack_frame_infos, -limit);
+ } else if (limit >= 0 && limit < call_site_infos->length()) {
+ // Positive limit means we need to cap the |call_site_infos|
+ // to that number before exposing them to the world.
+ call_site_infos =
+ FixedArray::ShrinkOrEmpty(isolate, call_site_infos, limit);
+ error_stack->set_call_site_infos(*call_site_infos);
+ }
+ error_stack->set_limit_or_stack_frame_infos(*stack_frame_infos);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 00aae07e46..efe056ac76 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -203,6 +203,54 @@ class BreakPoint : public TorqueGeneratedBreakPoint<BreakPoint, Struct> {
TQ_OBJECT_CONSTRUCTORS(BreakPoint)
};
+class StackFrameInfo
+ : public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ static int GetSourcePosition(Handle<StackFrameInfo> info);
+
+ // The script for the stack frame.
+ inline Script script() const;
+
+ // The bytecode offset or source position for the stack frame.
+ DECL_INT_ACCESSORS(bytecode_offset_or_source_position)
+
+ // Indicates that the frame corresponds to a 'new' invocation.
+ DECL_BOOLEAN_ACCESSORS(is_constructor)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(StackFrameInfo)
+
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_STACK_FRAME_INFO_FLAGS()
+
+ using BodyDescriptor = StructBodyDescriptor;
+
+ private:
+ TQ_OBJECT_CONSTRUCTORS(StackFrameInfo)
+};
+
+class ErrorStackData
+ : public TorqueGeneratedErrorStackData<ErrorStackData, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ inline bool HasFormattedStack() const;
+ DECL_ACCESSORS(formatted_stack, Object)
+ inline bool HasCallSiteInfos() const;
+ DECL_ACCESSORS(call_site_infos, FixedArray)
+
+ static void EnsureStackFrameInfos(Isolate* isolate,
+ Handle<ErrorStackData> error_stack);
+
+ DECL_VERIFIER(ErrorStackData)
+
+ using BodyDescriptor = StructBodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(ErrorStackData)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index d00b4abf4c..2ce08a8e2a 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -67,3 +67,63 @@ extern class CoverageInfo extends HeapObject {
const slot_count: int32;
slots[slot_count]: CoverageInfoSlot;
}
+
+bitfield struct StackFrameInfoFlags extends uint31 {
+ is_constructor: bool: 1 bit;
+ bytecode_offset_or_source_position: int32: 30 bit;
+}
+
+extern class StackFrameInfo extends Struct {
+ // In case this field holds a SharedFunctionInfo, the
+ // |bytecode_offset_or_source_position| part of the
+ // |flags| bit field below contains the bytecode offset
+ // within that SharedFunctionInfo. Otherwise if this
+ // is a Script, the |bytecode_offset_or_source_position|
+ // holds the source position within the Script.
+ shared_or_script: SharedFunctionInfo|Script;
+ function_name: String;
+ flags: SmiTagged<StackFrameInfoFlags>;
+}
+
+// This struct is used by V8 as error_data_symbol on JSError
+// instances when the inspector asks V8 to keep (detailed)
+// stack traces in addition to the (simple) stack traces that
+// are collected by V8 for error.stack.
+//
+// This can have one of the following forms:
+//
+// (1) A pair of FixedArray<CallSiteInfo> and positive limit
+// if the stack information is not formatted yet and the
+// inspector did not yet request any information about the
+// error's stack trace. The positive limit specifies the cap
+// for the number of call sites exposed to error.stack.
+// (2) A pair of FixedArray<CallSiteInfo> and negative limit
+// is similar to the above, except that the limit should be
+// applied to the inspector StackFrameInfo list once computed
+// rather than the number of call sites exposed to error.stack.
+// (3) A FixedArray<CallSiteInfo> and FixedArray<StackFrameInfo>
+// pair indicates that the inspector already asked for the
+// detailed stack information, but the error.stack property
+// was not yet formatted. If any limit (negative or positive)
+// was stored in the second field before, it was applied to the
+// appropriate FixedArray now.
+// (4) A valid JavaScript object and FixedArray<StackFrameInfo>
+// once error.stack was accessed.
+//
+// Memorizing the limits is important to ensure that the fact that
+// the inspector is active doesn't influence the script execution
+// (i.e. the observable limit of call sites in error.stack is the
+// same independent of whether the inspector is active or not).
+extern class ErrorStackData extends Struct {
+ // This holds either the FixedArray of CallSiteInfo instances or
+ // the formatted stack value (usually a string) that's returned
+ // from the error.stack property.
+ call_site_infos_or_formatted_stack: FixedArray|JSAny;
+ // This holds either the FixedArray of StackFrameInfo instances
+ // for the inspector stack trace or a stack trace limit, which
+ // if positive specifies how many of the CallSiteInfo instances
+ // in the first field are to be revealed via error.stack or if
+ // negative specifies the (negated) limit for the inspector
+ // stack traces.
+ limit_or_stack_frame_infos: Smi|FixedArray;
+}
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 387ae8d276..3756fafa33 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -5,12 +5,11 @@
#ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_INL_H_
#define V8_OBJECTS_DESCRIPTOR_ARRAY_INL_H_
-#include "src/objects/descriptor-array.h"
-
#include "src/execution/isolate.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
+#include "src/objects/descriptor-array.h"
#include "src/objects/field-type.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/lookup-cache-inl.h"
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index 4599710f8b..a7d8306c3d 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -220,6 +220,11 @@ inline bool IsBigIntTypedArrayElementsKind(ElementsKind kind) {
kind == RAB_GSAB_BIGUINT64_ELEMENTS;
}
+inline bool IsFloatTypedArrayElementsKind(ElementsKind kind) {
+ return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS ||
+ kind == RAB_GSAB_FLOAT32_ELEMENTS || kind == RAB_GSAB_FLOAT64_ELEMENTS;
+}
+
inline bool IsWasmArrayElementsKind(ElementsKind kind) {
return kind == WASM_ARRAY_ELEMENTS;
}
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 22fd0ada9f..8a318c06ec 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -3407,13 +3407,29 @@ class TypedElementsAccessor
DisallowGarbageCollection no_gc;
JSTypedArray typed_array = JSTypedArray::cast(*receiver);
- if (typed_array.WasDetached()) return Just<int64_t>(-1);
+ // If this is called via Array.prototype.indexOf (not
+ // TypedArray.prototype.indexOf), it's possible that the TypedArray is
+ // detached / out of bounds here.
+ if V8_UNLIKELY (typed_array.WasDetached()) return Just<int64_t>(-1);
+ bool out_of_bounds = false;
+ size_t typed_array_length =
+ typed_array.GetLengthOrOutOfBounds(out_of_bounds);
+ if V8_UNLIKELY (out_of_bounds) {
+ return Just<int64_t>(-1);
+ }
+
+ // Prototype has no elements, and not searching for the hole --- limit
+ // search to backing store length.
+ if (typed_array_length < length) {
+ length = typed_array_length;
+ }
ElementType typed_search_value;
ElementType* data_ptr =
reinterpret_cast<ElementType*>(typed_array.DataPtr());
- if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+
+ if (IsBigIntTypedArrayElementsKind(Kind)) {
if (!value->IsBigInt()) return Just<int64_t>(-1);
bool lossless;
typed_search_value = FromHandle(value, &lossless);
@@ -3423,7 +3439,7 @@ class TypedElementsAccessor
double search_value = value->Number();
if (!std::isfinite(search_value)) {
// Integral types cannot represent +Inf or NaN.
- if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ if (!IsFloatTypedArrayElementsKind(Kind)) {
return Just<int64_t>(-1);
}
if (std::isnan(search_value)) {
@@ -3440,12 +3456,6 @@ class TypedElementsAccessor
}
}
- // Prototype has no elements, and not searching for the hole --- limit
- // search to backing store length.
- if (typed_array.length() < length) {
- length = typed_array.length();
- }
-
auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
for (size_t k = start_from; k < length; ++k) {
ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
@@ -3461,12 +3471,13 @@ class TypedElementsAccessor
JSTypedArray typed_array = JSTypedArray::cast(*receiver);
DCHECK(!typed_array.WasDetached());
+ DCHECK(!typed_array.IsOutOfBounds());
ElementType typed_search_value;
ElementType* data_ptr =
reinterpret_cast<ElementType*>(typed_array.DataPtr());
- if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (IsBigIntTypedArrayElementsKind(Kind)) {
if (!value->IsBigInt()) return Just<int64_t>(-1);
bool lossless;
typed_search_value = FromHandle(value, &lossless);
@@ -3493,7 +3504,14 @@ class TypedElementsAccessor
}
}
- DCHECK_LT(start_from, typed_array.length());
+ size_t typed_array_length = typed_array.GetLength();
+ if (start_from >= typed_array_length) {
+ // This can happen if the TypedArray got resized when we did ToInteger
+ // on the last parameter of lastIndexOf.
+ DCHECK(typed_array.IsVariableLength());
+ start_from = typed_array_length - 1;
+ }
+
size_t k = start_from;
auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
do {
@@ -3509,7 +3527,7 @@ class TypedElementsAccessor
DCHECK(!typed_array.WasDetached());
- size_t len = typed_array.length();
+ size_t len = typed_array.GetLength();
if (len == 0) return;
ElementType* data = static_cast<ElementType*>(typed_array.DataPtr());
@@ -3589,6 +3607,7 @@ class TypedElementsAccessor
}
}
+ // TODO(v8:11111): Update this once we have external RAB / GSAB array types.
static bool HasSimpleRepresentation(ExternalArrayType type) {
return !(type == kExternalFloat32Array || type == kExternalFloat64Array ||
type == kExternalUint8ClampedArray);
@@ -3620,9 +3639,9 @@ class TypedElementsAccessor
CHECK(!source.WasDetached());
CHECK(!destination.WasDetached());
- DCHECK_LE(offset, destination.length());
- DCHECK_LE(length, destination.length() - offset);
- DCHECK_LE(length, source.length());
+ DCHECK_LE(offset, destination.GetLength());
+ DCHECK_LE(length, destination.GetLength() - offset);
+ DCHECK_LE(length, source.GetLength());
ExternalArrayType source_type = source.type();
ExternalArrayType destination_type = destination.type();
@@ -3683,6 +3702,7 @@ class TypedElementsAccessor
source_shared || destination_shared ? kShared : kUnshared); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
break;
@@ -3717,12 +3737,15 @@ class TypedElementsAccessor
static bool TryCopyElementsFastNumber(Context context, JSArray source,
JSTypedArray destination, size_t length,
size_t offset) {
- if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
+ if (IsBigIntTypedArrayElementsKind(Kind)) return false;
Isolate* isolate = source.GetIsolate();
DisallowGarbageCollection no_gc;
DisallowJavascriptExecution no_js(isolate);
CHECK(!destination.WasDetached());
+ bool out_of_bounds = false;
+ CHECK(destination.GetLengthOrOutOfBounds(out_of_bounds) >= length);
+ CHECK(!out_of_bounds);
size_t current_length;
DCHECK(source.length().IsNumber() &&
@@ -3730,7 +3753,7 @@ class TypedElementsAccessor
length <= current_length);
USE(current_length);
- size_t dest_length = destination.length();
+ size_t dest_length = destination.GetLength();
DCHECK(length + offset <= dest_length);
USE(dest_length);
@@ -3808,15 +3831,16 @@ class TypedElementsAccessor
LookupIterator it(isolate, source, i);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
- if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (IsBigIntTypedArrayElementsKind(Kind)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
BigInt::FromObject(isolate, elem));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::ToNumber(isolate, elem));
}
-
- if (V8_UNLIKELY(destination->WasDetached())) {
+ bool out_of_bounds = false;
+ size_t new_length = destination->GetLengthOrOutOfBounds(out_of_bounds);
+ if (V8_UNLIKELY(out_of_bounds || destination->WasDetached())) {
const char* op = "set";
const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
@@ -3824,8 +3848,14 @@ class TypedElementsAccessor
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(message, operation));
}
- // The spec says we store the length, then get each element, so we don't
- // need to check changes to length.
+ if (V8_UNLIKELY(new_length <= offset + i)) {
+ // Proceed with the loop so that we call get getters for the source even
+ // though we don't set the values in the target.
+ // TODO(v8:11111): Maybe change this, depending on how
+ // https://github.com/tc39/proposal-resizablearraybuffer/issues/86 is
+ // resolved.
+ continue;
+ }
SetImpl(destination, InternalIndex(offset + i), *elem);
}
return *isolate->factory()->undefined_value();
@@ -3838,15 +3868,18 @@ class TypedElementsAccessor
Handle<JSObject> destination,
size_t length, size_t offset) {
Isolate* isolate = destination->GetIsolate();
+ if (length == 0) return *isolate->factory()->undefined_value();
+
Handle<JSTypedArray> destination_ta =
Handle<JSTypedArray>::cast(destination);
- DCHECK_LE(offset + length, destination_ta->length());
-
- if (length == 0) return *isolate->factory()->undefined_value();
// All conversions from TypedArrays can be done without allocation.
if (source->IsJSTypedArray()) {
CHECK(!destination_ta->WasDetached());
+ bool out_of_bounds = false;
+ CHECK_LE(offset + length,
+ destination_ta->GetLengthOrOutOfBounds(out_of_bounds));
+ CHECK(!out_of_bounds);
Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
ElementsKind source_kind = source_ta->GetElementsKind();
bool source_is_bigint =
@@ -3856,12 +3889,16 @@ class TypedElementsAccessor
// If we have to copy more elements than we have in the source, we need to
// do special handling and conversion; that happens in the slow case.
if (source_is_bigint == target_is_bigint && !source_ta->WasDetached() &&
- length + offset <= source_ta->length()) {
+ length + offset <= source_ta->GetLength()) {
CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
return *isolate->factory()->undefined_value();
}
} else if (source->IsJSArray()) {
CHECK(!destination_ta->WasDetached());
+ bool out_of_bounds = false;
+ CHECK_LE(offset + length,
+ destination_ta->GetLengthOrOutOfBounds(out_of_bounds));
+ CHECK(!out_of_bounds);
// Fast cases for packed numbers kinds where we don't need to allocate.
Handle<JSArray> source_js_array = Handle<JSArray>::cast(source);
size_t current_length;
@@ -3876,7 +3913,8 @@ class TypedElementsAccessor
}
}
// Final generic case that handles prototype chain lookups, getters, proxies
- // and observable side effects via valueOf, etc.
+ // and observable side effects via valueOf, etc. In this case, it's possible
+ // that the length getter detached / resized the underlying buffer.
return CopyElementsHandleSlow(source, destination_ta, length, offset);
}
};
@@ -5187,6 +5225,7 @@ void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
context, source, destination, length, offset)); \
break;
TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAYS_CASE)
#undef TYPED_ARRAYS_CASE
default:
UNREACHABLE();
@@ -5206,6 +5245,7 @@ void CopyTypedArrayElementsToTypedArray(Address raw_source,
length, offset); \
break;
TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAYS_CASE)
#undef TYPED_ARRAYS_CASE
default:
UNREACHABLE();
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 983a0b0ad4..e4d63e5b32 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -5,13 +5,14 @@
#ifndef V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
#define V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
-#include "src/objects/embedder-data-slot.h"
-
#include "src/base/memory.h"
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/embedder-data-array.h"
+#include "src/objects/embedder-data-slot.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/sandbox/external-pointer-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,16 +28,17 @@ EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
: SlotBase(FIELD_ADDR(
object, object.GetEmbedderFieldOffset(embedder_field_index))) {}
-void EmbedderDataSlot::AllocateExternalPointerEntry(Isolate* isolate) {
-#ifdef V8_HEAP_SANDBOX
- // TODO(v8:10391, saelo): Use InitExternalPointerField() once
- // ExternalPointer_t is 4-bytes.
- uint32_t index = isolate->external_pointer_table().allocate();
- // Object slots don't support storing raw values, so we just "reinterpret
- // cast" the index value to Object.
- Object index_as_object(index);
- ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(index_as_object);
- ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi::zero());
+EmbedderDataSlot::EmbedderDataSlot(const EmbedderDataSlotSnapshot& snapshot)
+ : SlotBase(reinterpret_cast<Address>(&snapshot)) {}
+
+void EmbedderDataSlot::Initialize(Object initial_value) {
+ // TODO(v8) initialize the slot with Smi::zero() instead. This'll also
+ // guarantee that we don't need a write barrier.
+ DCHECK(initial_value.IsSmi() ||
+ ReadOnlyHeap::Contains(HeapObject::cast(initial_value)));
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(initial_value);
+#ifdef V8_COMPRESS_POINTERS
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Smi::zero());
#endif
}
@@ -74,8 +76,7 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
#ifdef V8_COMPRESS_POINTERS
- // See gc_safe_store() for the reasons behind two stores and why the second is
- // only done if !V8_HEAP_SANDBOX_BOOL
+ // See gc_safe_store() for the reasons behind two stores.
ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
.Relaxed_Store(Smi::zero());
#endif
@@ -87,12 +88,14 @@ bool EmbedderDataSlot::ToAlignedPointer(Isolate* isolate,
// are accessed this way only from the main thread via API during "mutator"
// phase which is propely synched with GC (concurrent marker may still look
// at the tagged part of the embedder slot but read-only access is ok).
- Address raw_value;
-#ifdef V8_HEAP_SANDBOX
- uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
- raw_value = isolate->external_pointer_table().get(index) &
- ~kEmbedderDataSlotPayloadTag;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ // The raw part must always contain a valid external pointer table index.
+ *out_pointer = reinterpret_cast<void*>(
+ ReadExternalPointerField(address() + kExternalPointerOffset, isolate,
+ kEmbedderDataSlotPayloadTag));
+ return true;
#else
+ Address raw_value;
if (COMPRESS_POINTERS_BOOL) {
// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
@@ -102,46 +105,25 @@ bool EmbedderDataSlot::ToAlignedPointer(Isolate* isolate,
} else {
raw_value = *location();
}
-#endif
*out_pointer = reinterpret_cast<void*>(raw_value);
return HAS_SMI_TAG(raw_value);
-}
-
-bool EmbedderDataSlot::ToAlignedPointerSafe(Isolate* isolate,
- void** out_pointer) const {
-#ifdef V8_HEAP_SANDBOX
- uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
- Address raw_value;
- if (isolate->external_pointer_table().is_valid_index(index)) {
- raw_value = isolate->external_pointer_table().get(index) &
- ~kEmbedderDataSlotPayloadTag;
- *out_pointer = reinterpret_cast<void*>(raw_value);
- return true;
- }
- return false;
-#else
- return ToAlignedPointer(isolate, out_pointer);
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
if (!HAS_SMI_TAG(value)) return false;
-#ifdef V8_HEAP_SANDBOX
- if (V8_HEAP_SANDBOX_BOOL) {
- AllocateExternalPointerEntry(isolate);
- // Raw payload contains the table index. Object slots don't support loading
- // of raw values, so we just "reinterpret cast" Object value to index.
- Object index_as_object =
- ObjectSlot(address() + kRawPayloadOffset).Relaxed_Load();
- uint32_t index = static_cast<uint32_t>(index_as_object.ptr());
- isolate->external_pointer_table().set(index,
- value | kEmbedderDataSlotPayloadTag);
- return true;
- }
-#endif
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ DCHECK_EQ(0, value & kExternalPointerTagMask);
+ // This also mark the entry as alive until the next GC.
+ InitExternalPointerField(address() + kExternalPointerOffset, isolate, value,
+ kEmbedderDataSlotPayloadTag);
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi::zero());
+ return true;
+#else
gc_safe_store(isolate, value);
return true;
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
@@ -190,6 +172,36 @@ void EmbedderDataSlot::gc_safe_store(Isolate* isolate, Address value) {
#endif
}
+// static
+void EmbedderDataSlot::PopulateEmbedderDataSnapshot(
+ Map map, JSObject js_object, int entry_index,
+ EmbedderDataSlotSnapshot& snapshot) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(sizeof(EmbedderDataSlotSnapshot) == sizeof(AtomicTagged_t) * 2);
+#else // !V8_COMPRESS_POINTERS
+ STATIC_ASSERT(sizeof(EmbedderDataSlotSnapshot) == sizeof(AtomicTagged_t));
+#endif // !V8_COMPRESS_POINTERS
+ STATIC_ASSERT(sizeof(EmbedderDataSlotSnapshot) == kEmbedderDataSlotSize);
+
+ const Address field_base =
+ FIELD_ADDR(js_object, js_object.GetEmbedderFieldOffset(entry_index));
+
+#if defined(V8_TARGET_BIG_ENDIAN) && defined(V8_COMPRESS_POINTERS)
+ const int index = 1;
+#else
+ const int index = 0;
+#endif
+
+ reinterpret_cast<AtomicTagged_t*>(&snapshot)[index] =
+ AsAtomicTagged::Relaxed_Load(
+ reinterpret_cast<AtomicTagged_t*>(field_base + kTaggedPayloadOffset));
+#ifdef V8_COMPRESS_POINTERS
+ reinterpret_cast<AtomicTagged_t*>(&snapshot)[1 - index] =
+ AsAtomicTagged::Relaxed_Load(
+ reinterpret_cast<AtomicTagged_t*>(field_base + kRawPayloadOffset));
+#endif // V8_COMPRESS_POINTERS
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 6213b7b333..3a9e761710 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -32,30 +32,78 @@ class Object;
class EmbedderDataSlot
: public SlotBase<EmbedderDataSlot, Address, kTaggedSize> {
public:
- EmbedderDataSlot() : SlotBase(kNullAddress) {}
- V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
- V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
-
-#if defined(V8_TARGET_BIG_ENDIAN) && defined(V8_COMPRESS_POINTERS)
+#if defined(V8_SANDBOXED_EXTERNAL_POINTERS)
+ // When the sandbox is enabled, an EmbedderDataSlot always contains a valid
+ // external pointer table index (initially, zero) in it's "raw" part and a
+ // valid tagged value in its 32-bit "tagged" part.
+ //
+ // Layout (sandbox):
+ // +-----------------------------------+-----------------------------------+
+ // | Tagged (Smi/CompressedPointer) | External Pointer Table Index |
+ // +-----------------------------------+-----------------------------------+
+ // ^ ^
+ // kTaggedPayloadOffset kRawPayloadOffset
+ // kExternalPointerOffset
+ static constexpr int kTaggedPayloadOffset = 0;
+ static constexpr int kRawPayloadOffset = kTaggedSize;
+ static constexpr int kExternalPointerOffset = kRawPayloadOffset;
+#elif defined(V8_COMPRESS_POINTERS) && defined(V8_TARGET_BIG_ENDIAN)
+ // The raw payload is located in the other "tagged" part of the full pointer
+ // and cotains the upper part of an aligned address. The raw part is not
+ // expected to look like a tagged value.
+ //
+ // Layout (big endian pointer compression):
+ // +-----------------------------------+-----------------------------------+
+ // | External Pointer (high word) | Tagged (Smi/CompressedPointer) |
+ // | | OR External Pointer (low word) |
+ // +-----------------------------------+-----------------------------------+
+ // ^ ^
+ // kRawPayloadOffset kTaggedayloadOffset
+ // kExternalPointerOffset
+ static constexpr int kExternalPointerOffset = 0;
+ static constexpr int kRawPayloadOffset = 0;
static constexpr int kTaggedPayloadOffset = kTaggedSize;
+#elif defined(V8_COMPRESS_POINTERS) && defined(V8_TARGET_LITTLE_ENDIAN)
+ // Layout (little endian pointer compression):
+ // +-----------------------------------+-----------------------------------+
+ // | Tagged (Smi/CompressedPointer) | External Pointer (high word) |
+ // | OR External Pointer (low word) | |
+ // +-----------------------------------+-----------------------------------+
+ // ^ ^
+ // kTaggedPayloadOffset kRawPayloadOffset
+ // kExternalPointerOffset
+ static constexpr int kExternalPointerOffset = 0;
+ static constexpr int kTaggedPayloadOffset = 0;
+ static constexpr int kRawPayloadOffset = kTaggedSize;
#else
+ // Layout (no pointer compression):
+ // +-----------------------------------------------------------------------+
+ // | Tagged (Smi/Pointer) OR External Pointer |
+ // +-----------------------------------------------------------------------+
+ // ^
+ // kTaggedPayloadOffset
+ // kExternalPointerOffset
static constexpr int kTaggedPayloadOffset = 0;
-#endif
+ static constexpr int kExternalPointerOffset = 0;
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
-#ifdef V8_COMPRESS_POINTERS
- // The raw payload is located in the other "tagged" part of the full pointer
- // and cotains the upper part of aligned address. The raw part is not expected
- // to look like a tagged value.
- // When V8_HEAP_SANDBOX is defined the raw payload contains an index into the
- // external pointer table.
- static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
-#endif
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
+ using EmbedderDataSlotSnapshot = Address;
+ V8_INLINE static void PopulateEmbedderDataSnapshot(Map map,
+ JSObject js_object,
+ int entry_index,
+ EmbedderDataSlotSnapshot&);
+
+ EmbedderDataSlot() : SlotBase(kNullAddress) {}
+ V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
+ V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
+ V8_INLINE explicit EmbedderDataSlot(const EmbedderDataSlotSnapshot& snapshot);
+
// Opaque type used for storing raw embedder data.
using RawData = Address;
- V8_INLINE void AllocateExternalPointerEntry(Isolate* isolate);
+ V8_INLINE void Initialize(Object initial_value);
V8_INLINE Object load_tagged() const;
V8_INLINE void store_smi(Smi value);
@@ -72,23 +120,11 @@ class EmbedderDataSlot
// the pointer-like value. Note, that some Smis could still look like an
// aligned pointers.
// Returns true on success.
- // When V8 heap sandbox is enabled, calling this method when the raw part of
- // the slot does not contain valid external pointer table index is undefined
- // behaviour and most likely result in crashes.
+ // When sandboxed external pointers are enabled, calling this method when the
+ // raw part of the slot does not contain valid external pointer table index
+ // is undefined behaviour and most likely result in crashes.
V8_INLINE bool ToAlignedPointer(Isolate* isolate, void** out_result) const;
- // Same as ToAlignedPointer() but with a workaround for V8 heap sandbox.
- // When V8 heap sandbox is enabled, this method doesn't crash when the raw
- // part of the slot contains "undefined" instead of a correct external table
- // entry index (see Factory::InitializeJSObjectBody() for details).
- // Returns true when the external pointer table index was pointing to a valid
- // entry, otherwise false.
- //
- // Call this function if you are not sure whether the slot contains valid
- // external pointer or not.
- V8_INLINE bool ToAlignedPointerSafe(Isolate* isolate,
- void** out_result) const;
-
// Returns true if the pointer was successfully stored or false it the pointer
// was improperly aligned.
V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(Isolate* isolate,
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 46d92b8447..441e53312c 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_FEEDBACK_CELL_INL_H_
#define V8_OBJECTS_FEEDBACK_CELL_INL_H_
+#include "src/execution/tiering-manager.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/feedback-vector-inl.h"
@@ -48,11 +49,7 @@ void FeedbackCell::reset_feedback_vector(
}
void FeedbackCell::SetInitialInterruptBudget() {
- if (FLAG_lazy_feedback_allocation) {
- set_interrupt_budget(FLAG_budget_for_feedback_vector_allocation);
- } else {
- set_interrupt_budget(FLAG_interrupt_budget);
- }
+ set_interrupt_budget(TieringManager::InitialInterruptBudget());
}
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 86a14dbc81..a8493528af 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -80,16 +80,16 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kDefineOwnKeyed:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kDefineNamedOwn:
+ case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
return 2;
case FeedbackSlotKind::kInvalid:
@@ -124,19 +124,18 @@ void FeedbackVector::clear_invocation_count(RelaxedStoreTag tag) {
set_invocation_count(0, tag);
}
-Code FeedbackVector::optimized_code() const {
+CodeT FeedbackVector::optimized_code() const {
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
- Code code;
+ CodeT code;
if (slot->GetHeapObject(&heap_object)) {
- code = FromCodeT(CodeT::cast(heap_object));
+ code = CodeT::cast(heap_object);
}
- // It is possible that the maybe_optimized_code slot is cleared but the
- // optimization tier hasn't been updated yet. We update the tier when we
- // execute the function next time / when we create new closure.
- DCHECK_IMPLIES(!code.is_null(), OptimizationTierBits::decode(flags()) ==
- GetTierForCodeKind(code.kind()));
+ // It is possible that the maybe_optimized_code slot is cleared but the flags
+ // haven't been updated yet. We update them when we execute the function next
+ // time / when we create new closure.
+ DCHECK_IMPLIES(!code.is_null(), maybe_has_optimized_code());
return code;
}
@@ -144,22 +143,21 @@ OptimizationMarker FeedbackVector::optimization_marker() const {
return OptimizationMarkerBits::decode(flags());
}
-OptimizationTier FeedbackVector::optimization_tier() const {
- OptimizationTier tier = OptimizationTierBits::decode(flags());
- // It is possible that the optimization tier bits aren't updated when the code
- // was cleared due to a GC.
- DCHECK_IMPLIES(tier == OptimizationTier::kNone,
- maybe_optimized_code(kAcquireLoad)->IsCleared());
- return tier;
-}
-
bool FeedbackVector::has_optimized_code() const {
+ DCHECK_IMPLIES(!optimized_code().is_null(), maybe_has_optimized_code());
return !optimized_code().is_null();
}
+bool FeedbackVector::maybe_has_optimized_code() const {
+ return MaybeHasOptimizedCodeBit::decode(flags());
+}
+
+void FeedbackVector::set_maybe_has_optimized_code(bool value) {
+ set_flags(MaybeHasOptimizedCodeBit::update(flags(), value));
+}
+
bool FeedbackVector::has_optimization_marker() const {
- return optimization_marker() != OptimizationMarker::kLogFirstExecution &&
- optimization_marker() != OptimizationMarker::kNone;
+ return optimization_marker() != OptimizationMarker::kNone;
}
// Conversion from an integer index to either a slot or an ic slot.
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 15a0851c57..f4f517f73b 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -158,21 +158,21 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "LoadKeyed";
case FeedbackSlotKind::kHasKeyed:
return "HasKeyed";
- case FeedbackSlotKind::kStoreNamedSloppy:
- return "StoreNamedSloppy";
- case FeedbackSlotKind::kStoreNamedStrict:
- return "StoreNamedStrict";
- case FeedbackSlotKind::kStoreOwnNamed:
- return "StoreOwnNamed";
- case FeedbackSlotKind::kDefineOwnKeyed:
- return "DefineOwnKeyed";
+ case FeedbackSlotKind::kSetNamedSloppy:
+ return "SetNamedSloppy";
+ case FeedbackSlotKind::kSetNamedStrict:
+ return "SetNamedStrict";
+ case FeedbackSlotKind::kDefineNamedOwn:
+ return "DefineNamedOwn";
+ case FeedbackSlotKind::kDefineKeyedOwn:
+ return "DefineKeyedOwn";
case FeedbackSlotKind::kStoreGlobalSloppy:
return "StoreGlobalSloppy";
case FeedbackSlotKind::kStoreGlobalStrict:
return "StoreGlobalStrict";
- case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedSloppy:
return "StoreKeyedSloppy";
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetKeyedStrict:
return "StoreKeyedStrict";
case FeedbackSlotKind::kStoreInArrayLiteral:
return "StoreInArrayLiteral";
@@ -180,8 +180,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "BinaryOp";
case FeedbackSlotKind::kCompareOp:
return "CompareOp";
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
- return "StoreDataPropertyInLiteral";
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
+ return "DefineKeyedOwnPropertyInLiteral";
case FeedbackSlotKind::kLiteral:
return "Literal";
case FeedbackSlotKind::kTypeProfile:
@@ -261,10 +261,8 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared);
- DCHECK_EQ(vector->optimization_marker(),
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone);
- DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
+ DCHECK_EQ(vector->optimization_marker(), OptimizationMarker::kNone);
+ DCHECK(!vector->maybe_has_optimized_code());
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
DCHECK(vector->maybe_optimized_code()->IsCleared());
@@ -303,14 +301,14 @@ Handle<FeedbackVector> FeedbackVector::New(
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kDefineOwnKeyed:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kDefineNamedOwn:
+ case FeedbackSlotKind::kDefineKeyedOwn:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile:
case FeedbackSlotKind::kInstanceOf:
vector->Set(slot, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
@@ -390,61 +388,31 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
// static
void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
- Handle<Code> code,
- FeedbackCell feedback_cell) {
+ Handle<CodeT> code) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- // We should set optimized code only when there is no valid optimized code or
- // we are tiering up.
+ // We should set optimized code only when there is no valid optimized code.
DCHECK(!vector->has_optimized_code() ||
vector->optimized_code().marked_for_deoptimization() ||
- (vector->optimized_code().kind() == CodeKind::TURBOPROP &&
- code->kind() == CodeKind::TURBOFAN) ||
FLAG_stress_concurrent_inlining_attach_code);
// TODO(mythria): We could see a CompileOptimized marker here either from
// tests that use %OptimizeFunctionOnNextCall, --always-opt or because we
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
// kCompileOptimized or kCompileOptimizedConcurrent.
- vector->set_maybe_optimized_code(HeapObjectReference::Weak(ToCodeT(*code)),
+ vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
kReleaseStore);
int32_t state = vector->flags();
- state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
+ state = MaybeHasOptimizedCodeBit::update(state, true);
vector->set_flags(state);
- // With FLAG_turboprop, we would have an interrupt budget necessary for
- // tiering up to Turboprop code. Once we install turboprop code, set it to a
- // higher value as required for tiering up from Turboprop to TurboFan.
- if (FLAG_turboprop) {
- FeedbackVector::SetInterruptBudget(feedback_cell);
- }
-}
-
-// static
-void FeedbackVector::SetInterruptBudget(FeedbackCell feedback_cell) {
- DCHECK(feedback_cell.value().IsFeedbackVector());
- FeedbackVector vector = FeedbackVector::cast(feedback_cell.value());
- // Set the interrupt budget as required for tiering up to next level. Without
- // Turboprop, this is used only to tier up to TurboFan and hence always set to
- // FLAG_interrupt_budget. With Turboprop, we use this budget to both tier up
- // to Turboprop and TurboFan. When there is no optimized code, set it to
- // FLAG_interrupt_budget required for tiering up to Turboprop. When there is
- // optimized code, set it to a higher value required for tiering up from
- // Turboprop to TurboFan.
- if (FLAG_turboprop && vector.has_optimized_code()) {
- feedback_cell.set_interrupt_budget(
- FLAG_interrupt_budget *
- FLAG_interrupt_budget_scale_factor_for_top_tier);
- } else {
- feedback_cell.set_interrupt_budget(FLAG_interrupt_budget);
- }
}
-void FeedbackVector::ClearOptimizedCode(FeedbackCell feedback_cell) {
+void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code());
- DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
+ DCHECK(maybe_has_optimized_code());
set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
kReleaseStore);
- ClearOptimizationTier(feedback_cell);
+ set_maybe_has_optimized_code(false);
}
void FeedbackVector::ClearOptimizationMarker() {
@@ -457,31 +425,16 @@ void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
set_flags(state);
}
-void FeedbackVector::ClearOptimizationTier(FeedbackCell feedback_cell) {
- int32_t state = flags();
- state = OptimizationTierBits::update(state, OptimizationTier::kNone);
- set_flags(state);
- // We are discarding the optimized code, adjust the interrupt budget
- // so we have the correct budget required for the tier up.
- if (FLAG_turboprop) {
- FeedbackVector::SetInterruptBudget(feedback_cell);
- }
-}
-
void FeedbackVector::InitializeOptimizationState() {
- int32_t state = 0;
- state = OptimizationMarkerBits::update(
- state, FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone);
- state = OptimizationTierBits::update(state, OptimizationTier::kNone);
- set_flags(state);
+ set_flags(OptimizationMarkerBits::encode(OptimizationMarker::kNone) |
+ MaybeHasOptimizedCodeBit::encode(false));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
- FeedbackCell feedback_cell, SharedFunctionInfo shared, const char* reason) {
+ SharedFunctionInfo shared, const char* reason) {
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
if (slot->IsCleared()) {
- ClearOptimizationTier(feedback_cell);
+ set_maybe_has_optimized_code(false);
return;
}
@@ -491,7 +444,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
if (!code.deopt_already_counted()) {
code.set_deopt_already_counted(true);
}
- ClearOptimizedCode(feedback_cell);
+ ClearOptimizedCode();
}
}
@@ -605,17 +558,17 @@ void FeedbackNexus::ConfigureUninitialized() {
SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
}
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kDefineOwnKeyed:
+ case FeedbackSlotKind::kDefineNamedOwn:
+ case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral: {
SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER,
UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
@@ -644,13 +597,13 @@ bool FeedbackNexus::Clear() {
feedback_updated = true;
break;
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kDefineOwnKeyed:
+ case FeedbackSlotKind::kDefineNamedOwn:
+ case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
@@ -660,7 +613,7 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kCall:
case FeedbackSlotKind::kInstanceOf:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
case FeedbackSlotKind::kCloneObject:
if (!IsCleared()) {
ConfigureUninitialized();
@@ -741,13 +694,13 @@ InlineCacheState FeedbackNexus::ic_state() const {
return InlineCacheState::UNINITIALIZED;
}
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kDefineOwnKeyed:
+ case FeedbackSlotKind::kDefineNamedOwn:
+ case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed: {
@@ -774,7 +727,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
}
if (heap_object.IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
- IsKeyedHasICKind(kind()) || IsDefineOwnICKind(kind()));
+ IsKeyedHasICKind(kind()) || IsDefineKeyedOwnICKind(kind()));
Object extra_object = extra->GetHeapObjectAssumeStrong();
WeakFixedArray extra_array = WeakFixedArray::cast(extra_object);
return extra_array.length() > 2 ? InlineCacheState::POLYMORPHIC
@@ -840,7 +793,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
}
return InlineCacheState::MONOMORPHIC;
}
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral: {
if (feedback == UninitializedSentinel()) {
return InlineCacheState::UNINITIALIZED;
} else if (feedback->IsWeakOrCleared()) {
@@ -1009,8 +962,9 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
count = SpeculationModeField::update(count, mode);
MaybeObject feedback = GetFeedback();
- // We can skip the write barrier for {feedback} because it's not changing.
- SetFeedback(feedback, SKIP_WRITE_BARRIER, Smi::FromInt(count),
+ // We could've skipped WB here (since we set the slot to the same value again)
+ // but we don't to make WB verification happy.
+ SetFeedback(feedback, UPDATE_WRITE_BARRIER, Smi::FromInt(count),
SKIP_WRITE_BARRIER);
}
@@ -1047,7 +1001,7 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
const MaybeObjectHandle& handler) {
DCHECK(handler.is_null() || IC::IsHandler(*handler));
- if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
+ if (kind() == FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral) {
SetFeedback(HeapObjectReference::Weak(*receiver_map), UPDATE_WRITE_BARRIER,
*name);
} else {
@@ -1106,7 +1060,7 @@ int FeedbackNexus::ExtractMapsAndFeedback(
MaybeObject maybe_handler = it.handler();
if (!maybe_handler->IsCleared()) {
DCHECK(IC::IsHandler(maybe_handler) ||
- IsStoreDataPropertyInLiteralKind(kind()));
+ IsDefineKeyedOwnPropertyInLiteralKind(kind()));
MaybeObjectHandle handler = config()->NewHandle(maybe_handler);
maps_and_feedback->push_back(MapAndHandler(map, handler));
found++;
@@ -1119,7 +1073,7 @@ int FeedbackNexus::ExtractMapsAndFeedback(
int FeedbackNexus::ExtractMapsAndHandlers(
std::vector<MapAndHandler>* maps_and_handlers,
TryUpdateHandler map_handler) const {
- DCHECK(!IsStoreDataPropertyInLiteralKind(kind()));
+ DCHECK(!IsDefineKeyedOwnPropertyInLiteralKind(kind()));
DisallowGarbageCollection no_gc;
int found = 0;
@@ -1153,13 +1107,13 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Name FeedbackNexus::GetName() const {
if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
- IsKeyedHasICKind(kind()) || IsKeyedDefineOwnICKind(kind())) {
+ IsKeyedHasICKind(kind()) || IsDefineKeyedOwnICKind(kind())) {
MaybeObject feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback->GetHeapObjectAssumeStrong());
}
}
- if (IsStoreDataPropertyInLiteralKind(kind())) {
+ if (IsDefineKeyedOwnPropertyInLiteralKind(kind())) {
MaybeObject extra = GetFeedbackExtra();
if (IsPropertyNameFeedback(extra)) {
return Name::cast(extra->GetHeapObjectAssumeStrong());
@@ -1236,7 +1190,8 @@ KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(Builtin builtin) {
KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()) ||
- IsStoreDataPropertyInLiteralKind(kind()) || IsDefineOwnICKind(kind()));
+ IsDefineKeyedOwnPropertyInLiteralKind(kind()) ||
+ IsDefineKeyedOwnICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
if (GetKeyType() == IcCheckType::kProperty) return mode;
@@ -1271,7 +1226,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
mode = StoreHandler::GetKeyedAccessStoreMode(*maybe_code_handler);
if (mode != STANDARD_STORE) return mode;
continue;
- } else if (IsDefineOwnICKind(kind())) {
+ } else if (IsDefineKeyedOwnICKind(kind())) {
mode = StoreHandler::GetKeyedAccessStoreMode(*maybe_code_handler);
if (mode != STANDARD_STORE) return mode;
continue;
@@ -1300,17 +1255,18 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()) ||
- IsStoreDataPropertyInLiteralKind(kind()) || IsDefineOwnICKind(kind()));
+ IsDefineKeyedOwnPropertyInLiteralKind(kind()) ||
+ IsDefineKeyedOwnICKind(kind()));
auto pair = GetFeedbackPair();
MaybeObject feedback = pair.first;
if (feedback == MegamorphicSentinel()) {
return static_cast<IcCheckType>(
Smi::ToInt(pair.second->template cast<Object>()));
}
- MaybeObject maybe_name =
- IsStoreDataPropertyInLiteralKind(kind()) || IsDefineOwnICKind(kind())
- ? pair.second
- : feedback;
+ MaybeObject maybe_name = IsDefineKeyedOwnPropertyInLiteralKind(kind()) ||
+ IsDefineKeyedOwnICKind(kind())
+ ? pair.second
+ : feedback;
return IsPropertyNameFeedback(maybe_name) ? IcCheckType::kProperty
: IcCheckType::kElement;
}
@@ -1459,12 +1415,13 @@ void FeedbackNexus::ResetTypeProfile() {
FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
: done_(false), index_(-1), state_(kOther) {
- DCHECK(IsLoadICKind(nexus->kind()) || IsStoreICKind(nexus->kind()) ||
- IsKeyedLoadICKind(nexus->kind()) ||
- IsKeyedStoreICKind(nexus->kind()) || IsStoreOwnICKind(nexus->kind()) ||
- IsStoreDataPropertyInLiteralKind(nexus->kind()) ||
- IsStoreInArrayLiteralICKind(nexus->kind()) ||
- IsKeyedHasICKind(nexus->kind()) || IsDefineOwnICKind(nexus->kind()));
+ DCHECK(
+ IsLoadICKind(nexus->kind()) || IsStoreICKind(nexus->kind()) ||
+ IsKeyedLoadICKind(nexus->kind()) || IsKeyedStoreICKind(nexus->kind()) ||
+ IsDefineNamedOwnICKind(nexus->kind()) ||
+ IsDefineKeyedOwnPropertyInLiteralKind(nexus->kind()) ||
+ IsStoreInArrayLiteralICKind(nexus->kind()) ||
+ IsKeyedHasICKind(nexus->kind()) || IsDefineKeyedOwnICKind(nexus->kind()));
DisallowGarbageCollection no_gc;
auto pair = nexus->GetFeedbackPair();
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index bab91fd497..359134baa0 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -36,9 +36,9 @@ enum class FeedbackSlotKind : uint8_t {
// Sloppy kinds come first, for easy language mode testing.
kStoreGlobalSloppy,
- kStoreNamedSloppy,
- kStoreKeyedSloppy,
- kLastSloppyKind = kStoreKeyedSloppy,
+ kSetNamedSloppy,
+ kSetKeyedSloppy,
+ kLastSloppyKind = kSetKeyedSloppy,
// Strict and language mode unaware kinds.
kCall,
@@ -48,14 +48,14 @@ enum class FeedbackSlotKind : uint8_t {
kLoadKeyed,
kHasKeyed,
kStoreGlobalStrict,
- kStoreNamedStrict,
- kStoreOwnNamed,
- kDefineOwnKeyed,
- kStoreKeyedStrict,
+ kSetNamedStrict,
+ kDefineNamedOwn,
+ kDefineKeyedOwn,
+ kSetKeyedStrict,
kStoreInArrayLiteral,
kBinaryOp,
kCompareOp,
- kStoreDataPropertyInLiteral,
+ kDefineKeyedOwnPropertyInLiteral,
kTypeProfile,
kLiteral,
kForIn,
@@ -95,29 +95,25 @@ inline bool IsStoreGlobalICKind(FeedbackSlotKind kind) {
}
inline bool IsStoreICKind(FeedbackSlotKind kind) {
- return kind == FeedbackSlotKind::kStoreNamedSloppy ||
- kind == FeedbackSlotKind::kStoreNamedStrict;
+ return kind == FeedbackSlotKind::kSetNamedSloppy ||
+ kind == FeedbackSlotKind::kSetNamedStrict;
}
-inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
- return kind == FeedbackSlotKind::kStoreOwnNamed;
+inline bool IsDefineNamedOwnICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kDefineNamedOwn;
}
-inline bool IsKeyedDefineOwnICKind(FeedbackSlotKind kind) {
- return kind == FeedbackSlotKind::kDefineOwnKeyed;
+inline bool IsDefineKeyedOwnICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kDefineKeyedOwn;
}
-inline bool IsDefineOwnICKind(FeedbackSlotKind kind) {
- return IsKeyedDefineOwnICKind(kind);
-}
-
-inline bool IsStoreDataPropertyInLiteralKind(FeedbackSlotKind kind) {
- return kind == FeedbackSlotKind::kStoreDataPropertyInLiteral;
+inline bool IsDefineKeyedOwnPropertyInLiteralKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral;
}
inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
- return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
- kind == FeedbackSlotKind::kStoreKeyedStrict;
+ return kind == FeedbackSlotKind::kSetKeyedSloppy ||
+ kind == FeedbackSlotKind::kSetKeyedStrict;
}
inline bool IsStoreInArrayLiteralICKind(FeedbackSlotKind kind) {
@@ -144,14 +140,14 @@ inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
}
inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
- DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
+ DCHECK(IsStoreICKind(kind) || IsDefineNamedOwnICKind(kind) ||
IsStoreGlobalICKind(kind) || IsKeyedStoreICKind(kind) ||
- IsDefineOwnICKind(kind));
+ IsDefineKeyedOwnICKind(kind));
STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
FeedbackSlotKind::kLastSloppyKind);
- STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <=
+ STATIC_ASSERT(FeedbackSlotKind::kSetKeyedSloppy <=
FeedbackSlotKind::kLastSloppyKind);
- STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
+ STATIC_ASSERT(FeedbackSlotKind::kSetNamedSloppy <=
FeedbackSlotKind::kLastSloppyKind);
return (kind <= FeedbackSlotKind::kLastSloppyKind) ? LanguageMode::kSloppy
: LanguageMode::kStrict;
@@ -203,24 +199,18 @@ class FeedbackVector
public:
NEVER_READ_ONLY_SPACE
DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
- STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <
+ STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <=
OptimizationMarkerBits::kMax);
- STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
- OptimizationTierBits::kMax);
static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
using TorqueGeneratedFeedbackVector<FeedbackVector,
HeapObject>::maybe_optimized_code;
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
- static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
+ static constexpr uint32_t kHasCompileOptimizedMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
- static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
- kNoneOrMidTierMask << OptimizationTierBits::kShift |
- kHasCompileOptimizedOrLogFirstExecutionMarker;
static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
- OptimizationTierBits::kMask |
- kHasCompileOptimizedOrLogFirstExecutionMarker;
+ MaybeHasOptimizedCodeBit::kMask | kHasCompileOptimizedMarker;
inline bool is_empty() const;
@@ -236,29 +226,28 @@ class FeedbackVector
DECL_RELAXED_INT32_ACCESSORS(invocation_count)
inline void clear_invocation_count(RelaxedStoreTag tag);
- inline Code optimized_code() const;
+ inline CodeT optimized_code() const;
+ // Whether maybe_optimized_code contains a cached Code object.
inline bool has_optimized_code() const;
+ // Similar to above, but represented internally as a bit that can be
+ // efficiently checked by generated code. May lag behind the actual state of
+ // the world, thus 'maybe'.
+ inline bool maybe_has_optimized_code() const;
+ inline void set_maybe_has_optimized_code(bool value);
+
inline bool has_optimization_marker() const;
inline OptimizationMarker optimization_marker() const;
- inline OptimizationTier optimization_tier() const;
- void ClearOptimizedCode(FeedbackCell feedback_cell);
- void EvictOptimizedCodeMarkedForDeoptimization(FeedbackCell feedback_cell,
- SharedFunctionInfo shared,
+ void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
- static void SetOptimizedCode(Handle<FeedbackVector> vector, Handle<Code> code,
- FeedbackCell feedback_cell);
+ static void SetOptimizedCode(Handle<FeedbackVector> vector,
+ Handle<CodeT> code);
+ void ClearOptimizedCode();
void SetOptimizationMarker(OptimizationMarker marker);
- void ClearOptimizationTier(FeedbackCell feedback_cell);
void InitializeOptimizationState();
// Clears the optimization marker in the feedback vector.
void ClearOptimizationMarker();
- // Sets the interrupt budget based on the optimized code available on the
- // feedback vector. This function expects that the feedback cell contains a
- // feedback vector.
- static void SetInterruptBudget(FeedbackCell feedback_cell);
-
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
@@ -308,7 +297,7 @@ class FeedbackVector
DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
DEFINE_SLOT_KIND_PREDICATE(IsStoreIC)
- DEFINE_SLOT_KIND_PREDICATE(IsStoreOwnIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsDefineNamedOwnIC)
DEFINE_SLOT_KIND_PREDICATE(IsStoreGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC)
DEFINE_SLOT_KIND_PREDICATE(IsTypeProfile)
@@ -422,22 +411,22 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
FeedbackSlotKind GetStoreICSlot(LanguageMode language_mode) {
STATIC_ASSERT(LanguageModeSize == 2);
- return is_strict(language_mode) ? FeedbackSlotKind::kStoreNamedStrict
- : FeedbackSlotKind::kStoreNamedSloppy;
+ return is_strict(language_mode) ? FeedbackSlotKind::kSetNamedStrict
+ : FeedbackSlotKind::kSetNamedSloppy;
}
FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
return AddSlot(GetStoreICSlot(language_mode));
}
- FeedbackSlot AddStoreOwnICSlot() {
- return AddSlot(FeedbackSlotKind::kStoreOwnNamed);
+ FeedbackSlot AddDefineNamedOwnICSlot() {
+ return AddSlot(FeedbackSlotKind::kDefineNamedOwn);
}
- // Identical to StoreOwnKeyed, but will throw if a private field already
+ // Similar to DefinedNamedOwn, but will throw if a private field already
// exists.
- FeedbackSlot AddKeyedDefineOwnICSlot() {
- return AddSlot(FeedbackSlotKind::kDefineOwnKeyed);
+ FeedbackSlot AddDefineKeyedOwnICSlot() {
+ return AddSlot(FeedbackSlotKind::kDefineKeyedOwn);
}
FeedbackSlot AddStoreGlobalICSlot(LanguageMode language_mode) {
@@ -449,8 +438,8 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
FeedbackSlotKind GetKeyedStoreICSlotKind(LanguageMode language_mode) {
STATIC_ASSERT(LanguageModeSize == 2);
- return is_strict(language_mode) ? FeedbackSlotKind::kStoreKeyedStrict
- : FeedbackSlotKind::kStoreKeyedSloppy;
+ return is_strict(language_mode) ? FeedbackSlotKind::kSetKeyedStrict
+ : FeedbackSlotKind::kSetKeyedSloppy;
}
FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
@@ -477,8 +466,8 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
- FeedbackSlot AddStoreDataPropertyInLiteralICSlot() {
- return AddSlot(FeedbackSlotKind::kStoreDataPropertyInLiteral);
+ FeedbackSlot AddDefineKeyedOwnPropertyInLiteralICSlot() {
+ return AddSlot(FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral);
}
FeedbackSlot AddTypeProfileSlot();
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index bc2adf0718..68365f67fe 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -3,12 +3,14 @@
// found in the LICENSE file.
type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
-type OptimizationTier extends uint16 constexpr 'OptimizationTier';
bitfield struct FeedbackVectorFlags extends uint32 {
optimization_marker: OptimizationMarker: 3 bit;
- optimization_tier: OptimizationTier: 2 bit;
- global_ticks_at_last_runtime_profiler_interrupt: uint32: 24 bit;
+ // Whether the maybe_optimized_code field contains a code object. 'maybe',
+ // because they flag may lag behind the actual state of the world (it will be
+ // updated in time).
+ maybe_has_optimized_code: bool: 1 bit;
+ all_your_bits_are_belong_to_jgruber: uint32: 28 bit;
}
@generateBodyDescriptor
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 21081bebba..5dea891511 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -5,13 +5,12 @@
#ifndef V8_OBJECTS_FIXED_ARRAY_INL_H_
#define V8_OBJECTS_FIXED_ARRAY_INL_H_
-#include "src/objects/fixed-array.h"
-
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/numbers/conversions.h"
#include "src/objects/bigint.h"
#include "src/objects/compressed-slots.h"
+#include "src/objects/fixed-array.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/objects-inl.h"
@@ -84,7 +83,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +90,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index ae0f011027..f0c3bfc097 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,30 +134,20 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
-#if !defined(_WIN32)
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
-#endif
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
inline void set_undefined(Isolate* isolate, int index);
+ inline void set_undefined(ReadOnlyRoots ro_roots, int index);
inline void set_null(int index);
inline void set_null(Isolate* isolate, int index);
+ inline void set_null(ReadOnlyRoots ro_roots, int index);
inline void set_the_hole(int index);
inline void set_the_hole(Isolate* isolate, int index);
+ inline void set_the_hole(ReadOnlyRoots ro_roots, int index);
inline ObjectSlot GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
@@ -211,6 +201,7 @@ class FixedArray
// Dispatched behavior.
DECL_PRINTER(FixedArray)
+ DECL_VERIFIER(FixedArray)
int AllocatedSize();
@@ -227,10 +218,6 @@ class FixedArray
private:
STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
- inline void set_undefined(ReadOnlyRoots ro_roots, int index);
- inline void set_null(ReadOnlyRoots ro_roots, int index);
- inline void set_the_hole(ReadOnlyRoots ro_roots, int index);
-
TQ_OBJECT_CONSTRUCTORS(FixedArray)
};
@@ -396,7 +383,7 @@ class WeakArrayList
inline void CopyElements(Isolate* isolate, int dst_index, WeakArrayList src,
int src_index, int len, WriteBarrierMode mode);
- V8_EXPORT_PRIVATE bool IsFull();
+ V8_EXPORT_PRIVATE bool IsFull() const;
int AllocatedSize();
@@ -498,6 +485,8 @@ class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
static const int kFirstIndex = 1;
STATIC_ASSERT(kHeaderFields == kFirstIndex);
+ DECL_VERIFIER(ArrayList)
+
private:
static Handle<ArrayList> EnsureSpace(Isolate* isolate,
Handle<ArrayList> array, int length);
@@ -607,6 +596,13 @@ class PodArray : public ByteArray {
return memcmp(GetDataStartAddress(), buffer, length * sizeof(T)) == 0;
}
+ bool matches(int offset, const T* buffer, int length) {
+ DCHECK_LE(offset, this->length());
+ DCHECK_LE(offset + length, this->length());
+ return memcmp(GetDataStartAddress() + sizeof(T) * offset, buffer,
+ length * sizeof(T)) == 0;
+ }
+
T get(int index) {
T result;
copy_out(index, &result, 1);
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index 8947653861..cb5121d6fa 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -9,7 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/objects-inl.h"
-#include "src/security/external-pointer-inl.h"
+#include "src/sandbox/external-pointer-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,13 +28,14 @@ bool Foreign::IsNormalized(Object value) {
}
DEF_GETTER(Foreign, foreign_address, Address) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Isolate* isolate = GetIsolateForSandbox(*this);
return ReadExternalPointerField(kForeignAddressOffset, isolate,
kForeignForeignAddressTag);
}
void Foreign::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kForeignAddressOffset, isolate);
+ InitExternalPointerField(kForeignAddressOffset, isolate,
+ kForeignForeignAddressTag);
}
void Foreign::set_foreign_address(Isolate* isolate, Address value) {
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 680ec9d3cd..f78a02ac69 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -35,6 +35,11 @@ ObjectHashTable::ObjectHashTable(Address ptr)
SLOW_DCHECK(IsObjectHashTable());
}
+RegisteredSymbolTable::RegisteredSymbolTable(Address ptr)
+ : HashTable<RegisteredSymbolTable, RegisteredSymbolTableShape>(ptr) {
+ SLOW_DCHECK(IsRegisteredSymbolTable());
+}
+
EphemeronHashTable::EphemeronHashTable(Address ptr)
: ObjectHashTableBase<EphemeronHashTable, ObjectHashTableShape>(ptr) {
SLOW_DCHECK(IsEphemeronHashTable());
@@ -45,9 +50,16 @@ ObjectHashSet::ObjectHashSet(Address ptr)
SLOW_DCHECK(IsObjectHashSet());
}
+NameToIndexHashTable::NameToIndexHashTable(Address ptr)
+ : HashTable<NameToIndexHashTable, NameToIndexShape>(ptr) {
+ SLOW_DCHECK(IsNameToIndexHashTable());
+}
+
CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(RegisteredSymbolTable)
CAST_ACCESSOR(EphemeronHashTable)
CAST_ACCESSOR(ObjectHashSet)
+CAST_ACCESSOR(NameToIndexHashTable)
void EphemeronHashTable::set_key(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -125,6 +137,16 @@ Handle<Map> HashTable<Derived, Shape>::GetMap(ReadOnlyRoots roots) {
}
// static
+Handle<Map> NameToIndexHashTable::GetMap(ReadOnlyRoots roots) {
+ return roots.name_to_index_hash_table_map_handle();
+}
+
+// static
+Handle<Map> RegisteredSymbolTable::GetMap(ReadOnlyRoots roots) {
+ return roots.registered_symbol_table_map_handle();
+}
+
+// static
Handle<Map> EphemeronHashTable::GetMap(ReadOnlyRoots roots) {
return roots.ephemeron_hash_table_map_handle();
}
@@ -158,6 +180,13 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(PtrComprCageBase cage_base,
}
}
+template <typename Derived, typename Shape>
+template <typename IsolateT>
+InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(IsolateT* isolate,
+ uint32_t hash) {
+ return FindInsertionEntry(isolate, ReadOnlyRoots(isolate), hash);
+}
+
// static
template <typename Derived, typename Shape>
bool HashTable<Derived, Shape>::IsKey(ReadOnlyRoots roots, Object k) {
@@ -247,6 +276,33 @@ bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object other) {
return key->SameValue(other);
}
+bool RegisteredSymbolTableShape::IsMatch(Handle<String> key, Object value) {
+ DCHECK(value.IsString());
+ return key->Equals(String::cast(value));
+}
+
+uint32_t RegisteredSymbolTableShape::Hash(ReadOnlyRoots roots,
+ Handle<String> key) {
+ return key->EnsureHash();
+}
+
+uint32_t RegisteredSymbolTableShape::HashForObject(ReadOnlyRoots roots,
+ Object object) {
+ return String::cast(object).EnsureHash();
+}
+
+bool NameToIndexShape::IsMatch(Handle<Name> key, Object other) {
+ return *key == other;
+}
+
+uint32_t NameToIndexShape::HashForObject(ReadOnlyRoots roots, Object other) {
+ return Name::cast(other).hash();
+}
+
+uint32_t NameToIndexShape::Hash(ReadOnlyRoots roots, Handle<Name> key) {
+ return key->hash();
+}
+
uint32_t ObjectHashTableShape::Hash(ReadOnlyRoots roots, Handle<Object> key) {
return Smi::ToInt(key->GetHash());
}
@@ -256,6 +312,24 @@ uint32_t ObjectHashTableShape::HashForObject(ReadOnlyRoots roots,
return Smi::ToInt(other.GetHash());
}
+template <typename IsolateT>
+Handle<NameToIndexHashTable> NameToIndexHashTable::Add(
+ IsolateT* isolate, Handle<NameToIndexHashTable> table, Handle<Name> key,
+ int32_t index) {
+ DCHECK_GE(index, 0);
+ // Validate that the key is absent.
+ SLOW_DCHECK(table->FindEntry(isolate, key).is_not_found());
+ // Check whether the dictionary should be extended.
+ table = EnsureCapacity(isolate, table);
+
+ // Compute the key object.
+ InternalIndex entry = table->FindInsertionEntry(isolate, key->hash());
+ table->set(EntryToIndex(entry), *key);
+ table->set(EntryToValueIndex(entry), Smi::FromInt(index));
+ table->ElementAdded();
+ return table;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 9fad02897f..d22d8dc58e 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -227,7 +227,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// has the given hash value.
InternalIndex FindInsertionEntry(PtrComprCageBase cage_base,
ReadOnlyRoots roots, uint32_t hash);
- InternalIndex FindInsertionEntry(Isolate* isolate, uint32_t hash);
+ template <typename IsolateT>
+ InternalIndex FindInsertionEntry(IsolateT* isolate, uint32_t hash);
// Computes the capacity a table with the given capacity would need to have
// room for the given number of elements, also allowing it to shrink.
@@ -437,6 +438,84 @@ class V8_EXPORT_PRIVATE ObjectHashSet
HashTable<ObjectHashSet, ObjectHashSetShape>);
};
+class NameToIndexShape : public BaseShape<Handle<Name>> {
+ public:
+ static inline bool IsMatch(Handle<Name> key, Object other);
+ static inline uint32_t Hash(ReadOnlyRoots roots, Handle<Name> key);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
+ static inline Handle<Object> AsHandle(Handle<Name> key);
+ static const int kPrefixSize = 0;
+ static const int kEntryValueIndex = 1;
+ static const int kEntrySize = 2;
+ static const bool kMatchNeedsHoleCheck = false;
+};
+
+class V8_EXPORT_PRIVATE NameToIndexHashTable
+ : public HashTable<NameToIndexHashTable, NameToIndexShape> {
+ public:
+ static const int kEntryValueIndex = NameToIndexShape::kEntryValueIndex;
+
+ inline static Handle<Map> GetMap(ReadOnlyRoots roots);
+ int Lookup(Handle<Name> key);
+
+ // Returns the value at entry.
+ Object ValueAt(InternalIndex entry);
+ int IndexAt(InternalIndex entry);
+
+ template <typename IsolateT>
+ static Handle<NameToIndexHashTable> Add(IsolateT* isolate,
+ Handle<NameToIndexHashTable> table,
+ Handle<Name> key, int32_t value);
+
+ DECL_CAST(NameToIndexHashTable)
+ DECL_PRINTER(NameToIndexHashTable)
+
+ OBJECT_CONSTRUCTORS(NameToIndexHashTable,
+ HashTable<NameToIndexHashTable, NameToIndexShape>);
+
+ private:
+ static inline int EntryToValueIndex(InternalIndex entry) {
+ return EntryToIndex(entry) + NameToIndexShape::kEntryValueIndex;
+ }
+};
+
+class RegisteredSymbolTableShape : public BaseShape<Handle<String>> {
+ public:
+ static inline bool IsMatch(Handle<String> key, Object other);
+ static inline uint32_t Hash(ReadOnlyRoots roots, Handle<String> key);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
+ static const int kPrefixSize = 0;
+ static const int kEntryValueIndex = 1;
+ static const int kEntrySize = 2;
+ static const bool kMatchNeedsHoleCheck = false;
+};
+
+class RegisteredSymbolTable
+ : public HashTable<RegisteredSymbolTable, RegisteredSymbolTableShape> {
+ public:
+ Object SlowReverseLookup(Object value);
+
+ // Returns the value at entry.
+ Object ValueAt(InternalIndex entry);
+
+ inline static Handle<Map> GetMap(ReadOnlyRoots roots);
+
+ static Handle<RegisteredSymbolTable> Add(Isolate* isolate,
+ Handle<RegisteredSymbolTable> table,
+ Handle<String> key, Handle<Symbol>);
+
+ DECL_CAST(RegisteredSymbolTable)
+ DECL_PRINTER(RegisteredSymbolTable)
+ OBJECT_CONSTRUCTORS(
+ RegisteredSymbolTable,
+ HashTable<RegisteredSymbolTable, RegisteredSymbolTableShape>);
+
+ private:
+ static inline int EntryToValueIndex(InternalIndex entry) {
+ return EntryToIndex(entry) + RegisteredSymbolTableShape::kEntryValueIndex;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index b9eca4cc5b..87951da0a0 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -35,6 +35,11 @@ class HeapObject : public Object {
DECL_GETTER(map, Map)
inline void set_map(Map value);
+ // This method behaves the same as `set_map` but marks the map transition as
+ // safe for the concurrent marker (object layout doesn't change) during
+ // verification.
+ inline void set_map_safe_transition(Map value);
+
inline ObjectSlot map_slot() const;
// The no-write-barrier version. This is OK if the object is white and in
@@ -47,6 +52,7 @@ class HeapObject : public Object {
// Access the map using acquire load and release store.
DECL_ACQUIRE_GETTER(map, Map)
inline void set_map(Map value, ReleaseStoreTag);
+ inline void set_map_safe_transition(Map value, ReleaseStoreTag);
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
@@ -90,8 +96,6 @@ class HeapObject : public Object {
IS_TYPE_FUNCTION_DECL(CodeT)
#undef IS_TYPE_FUNCTION_DECL
- bool IsExternal(Isolate* isolate) const;
-
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
#define IS_TYPE_FUNCTION_DECL(Type, Value) \
@@ -126,6 +130,9 @@ class HeapObject : public Object {
template <typename ObjectVisitor>
inline void IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ inline void IterateFast(Map map, int object_size, ObjectVisitor* v);
+
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
// parameter. This function does not access the map pointer in the
@@ -161,6 +168,7 @@ class HeapObject : public Object {
inline ObjectSlot RawField(int byte_offset) const;
inline MaybeObjectSlot RawMaybeWeakField(int byte_offset) const;
inline CodeObjectSlot RawCodeField(int byte_offset) const;
+ inline ExternalPointer_t RawExternalPointerField(int byte_offset) const;
DECL_CAST(HeapObject)
@@ -229,6 +237,20 @@ class HeapObject : public Object {
inline HeapObject(Address ptr, AllowInlineSmiStorage allow_smi);
OBJECT_CONSTRUCTORS(HeapObject, Object);
+
+ private:
+ enum class VerificationMode {
+ kSafeMapTransition,
+ kPotentialLayoutChange,
+ };
+
+ enum class EmitWriteBarrier {
+ kYes,
+ kNo,
+ };
+
+ template <EmitWriteBarrier emit_write_barrier, typename MemoryOrder>
+ V8_INLINE void set_map(Map value, MemoryOrder order, VerificationMode mode);
};
OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
diff --git a/deps/v8/src/objects/internal-index.h b/deps/v8/src/objects/internal-index.h
index dec83cefe2..959f9ad57b 100644
--- a/deps/v8/src/objects/internal-index.h
+++ b/deps/v8/src/objects/internal-index.h
@@ -58,6 +58,10 @@ class InternalIndex {
return *this;
}
+ bool operator<(const InternalIndex& other) const {
+ return entry_ < other.entry_;
+ }
+
class Range {
public:
explicit Range(size_t max) : min_(0), max_(max) {}
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index cd735d34e9..3ac016aaa8 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -15,6 +15,7 @@
#include "src/api/api-inl.h"
#include "src/base/strings.h"
+#include "src/date/date.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/handles/global-handles.h"
@@ -225,14 +226,14 @@ icu::StringPiece ToICUStringPiece(Isolate* isolate, Handle<String> string,
DisallowGarbageCollection no_gc;
const String::FlatContent& flat = string->GetFlatContent(no_gc);
- if (!flat.IsOneByte()) return icu::StringPiece(nullptr, 0);
+ if (!flat.IsOneByte()) return icu::StringPiece();
int32_t length = string->length();
DCHECK_LT(offset, length);
const char* char_buffer =
reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
if (!String::IsAscii(char_buffer, length)) {
- return icu::StringPiece(nullptr, 0);
+ return icu::StringPiece();
}
return icu::StringPiece(char_buffer + offset, length - offset);
@@ -1521,151 +1522,195 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
return Nothing<NumberFormatDigitOptions>();
}
- int mnfd = 0;
- int mxfd = 0;
- Handle<Object> mnfd_obj;
- Handle<Object> mxfd_obj;
-
// 6. Let mnfd be ? Get(options, "minimumFractionDigits").
- Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ Handle<Object> mnfd_obj;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str),
+ isolate, mnfd_obj,
+ JSReceiver::GetProperty(isolate, options,
+ factory->minimumFractionDigits_string()),
Nothing<NumberFormatDigitOptions>());
- // 8. Let mxfd be ? Get(options, "maximumFractionDigits").
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ // 7. Let mxfd be ? Get(options, "maximumFractionDigits").
+ Handle<Object> mxfd_obj;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str),
+ isolate, mxfd_obj,
+ JSReceiver::GetProperty(isolate, options,
+ factory->maximumFractionDigits_string()),
Nothing<NumberFormatDigitOptions>());
- // 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
+ // 8. Let mnsd be ? Get(options, "minimumSignificantDigits").
Handle<Object> mnsd_obj;
- Handle<String> mnsd_str = factory->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
+ isolate, mnsd_obj,
+ JSReceiver::GetProperty(isolate, options,
+ factory->minimumSignificantDigits_string()),
Nothing<NumberFormatDigitOptions>());
- // 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
+ // 9. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
- Handle<String> mxsd_str = factory->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
+ isolate, mxsd_obj,
+ JSReceiver::GetProperty(isolate, options,
+ factory->maximumSignificantDigits_string()),
Nothing<NumberFormatDigitOptions>());
- // 11. Set intlObj.[[MinimumIntegerDigits]] to mnid.
- digit_options.minimum_integer_digits = mnid;
-
- // 12. Set intlObj.[[MinimumFractionDigits]] to mnfd.
- digit_options.minimum_fraction_digits = mnfd;
-
- // 13. Set intlObj.[[MaximumFractionDigits]] to mxfd.
- digit_options.maximum_fraction_digits = mxfd;
-
- // 14. If mnsd is not undefined or mxsd is not undefined, then
- if (!mnsd_obj->IsUndefined(isolate) || !mxsd_obj->IsUndefined(isolate)) {
- // 14. a. Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1).
- int mnsd;
- if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1, mnsd_str).To(&mnsd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
-
- // 14. b. Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21).
- int mxsd;
- if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21, mxsd_str)
- .To(&mxsd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
-
- // 14. c. Set intlObj.[[MinimumSignificantDigits]] to mnsd.
- digit_options.minimum_significant_digits = mnsd;
-
- // 14. d. Set intlObj.[[MaximumSignificantDigits]] to mxsd.
- digit_options.maximum_significant_digits = mxsd;
- } else {
- digit_options.minimum_significant_digits = 0;
- digit_options.maximum_significant_digits = 0;
+ digit_options.rounding_priority = RoundingPriority::kAuto;
+ digit_options.minimum_significant_digits = 0;
+ digit_options.maximum_significant_digits = 0;
- // 15. Else If mnfd is not undefined or mxfd is not undefined, then
- if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
- int specified_mnfd;
- int specified_mxfd;
+ // 10. Set intlObj.[[MinimumIntegerDigits]] to mnid.
+ digit_options.minimum_integer_digits = mnid;
- // a. Let _specifiedMnfd_ be ? DefaultNumberOption(_mnfd_, 0, 20,
- // *undefined*).
- if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, -1, mnfd_str)
- .To(&specified_mnfd)) {
+ if (FLAG_harmony_intl_number_format_v3) {
+ // 11. Let roundingPriority be ? GetOption(options, "roundingPriority",
+ // "string", « "auto", "morePrecision", "lessPrecision" », "auto").
+
+ Maybe<RoundingPriority> maybe_rounding_priority =
+ GetStringOption<RoundingPriority>(
+ isolate, options, "roundingPriority", "SetNumberFormatDigitOptions",
+ {"auto", "morePrecision", "lessPrecision"},
+ {RoundingPriority::kAuto, RoundingPriority::kMorePrecision,
+ RoundingPriority::kLessPrecision},
+ RoundingPriority::kAuto);
+ MAYBE_RETURN(maybe_rounding_priority, Nothing<NumberFormatDigitOptions>());
+ digit_options.rounding_priority = maybe_rounding_priority.FromJust();
+ }
+
+ // 12. If mnsd is not undefined or mxsd is not undefined, then
+ // a. Set hasSd to true.
+ // 13. Else,
+ // a. Set hasSd to false.
+ bool has_sd =
+ (!mnsd_obj->IsUndefined(isolate)) || (!mxsd_obj->IsUndefined(isolate));
+
+ // 14. If mnfd is not undefined or mxfd is not undefined, then
+ // a. Set hasFd to true.
+ // 15. Else,
+ // a. Set hasFd to false.
+ bool has_fd =
+ (!mnfd_obj->IsUndefined(isolate)) || (!mxfd_obj->IsUndefined(isolate));
+
+ // 17. If hasSd or roundingPriority is not "auto", set needSd to true; else,
+ // set needSd to false.
+ bool need_sd =
+ has_sd || (RoundingPriority::kAuto != digit_options.rounding_priority);
+
+ // 18. If ( not hasSd and (hasFd or notation is not "compact") ) or
+ // roundingPriority is not "auto", then a. Set needFd to true.
+ // 19. Else,
+ // a. Set needFd to false.
+ bool need_fd = ((!has_sd) && (has_fd || !notation_is_compact)) ||
+ (RoundingPriority::kAuto != digit_options.rounding_priority);
+
+ // 20. If needSd, then
+ if (need_sd) {
+ // 20.b If hasSd, then
+ if (has_sd) {
+ // 20.b.i Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1).
+ int mnsd;
+ if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1,
+ factory->minimumSignificantDigits_string())
+ .To(&mnsd)) {
return Nothing<NumberFormatDigitOptions>();
}
- Handle<Object> specifiedMnfd_obj;
- if (specified_mnfd < 0) {
- specifiedMnfd_obj = factory->undefined_value();
- } else {
- specifiedMnfd_obj = handle(Smi::FromInt(specified_mnfd), isolate);
- }
-
- // b. Let _specifiedMxfd_ be ? DefaultNumberOption(_mxfd_, 0, 20,
- // *undefined*).
- if (!DefaultNumberOption(isolate, mxfd_obj, 0, 20, -1, mxfd_str)
- .To(&specified_mxfd)) {
+ // 20.b.ii Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21).
+ int mxsd;
+ if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21,
+ factory->maximumSignificantDigits_string())
+ .To(&mxsd)) {
return Nothing<NumberFormatDigitOptions>();
}
- Handle<Object> specifiedMxfd_obj;
- if (specified_mxfd < 0) {
- specifiedMxfd_obj = factory->undefined_value();
- } else {
- specifiedMxfd_obj = handle(Smi::FromInt(specified_mxfd), isolate);
- }
-
- // c. If _specifiedMxfd_ is not *undefined*, set _mnfdDefault_ to
- // min(_mnfdDefault_, _specifiedMxfd_).
- if (specified_mxfd >= 0) {
- mnfd_default = std::min(mnfd_default, specified_mxfd);
- }
+ // 20.b.iii Set intlObj.[[MinimumSignificantDigits]] to mnsd.
+ digit_options.minimum_significant_digits = mnsd;
+ // 20.b.iv Set intlObj.[[MaximumSignificantDigits]] to mxsd.
+ digit_options.maximum_significant_digits = mxsd;
+ } else {
+ // 20.c Else
+ // 20.c.i Set intlObj.[[MinimumSignificantDigits]] to 1.
+ digit_options.minimum_significant_digits = 1;
+ // 20.c.ii Set intlObj.[[MaximumSignificantDigits]] to 21.
+ digit_options.maximum_significant_digits = 21;
+ }
+ }
- // d. Set _mnfd_ to ! DefaultNumberOption(_specifiedMnfd_, 0, 20,
- // _mnfdDefault_).
- if (!DefaultNumberOption(isolate, specifiedMnfd_obj, 0, 20, mnfd_default,
- mnfd_str)
+ // 21. If needFd, then
+ if (need_fd) {
+ // 21.a If hasFd, then
+ if (has_fd) {
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ // 21.a.i Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, undefined).
+ int mnfd;
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, -1, mnfd_str)
.To(&mnfd)) {
return Nothing<NumberFormatDigitOptions>();
}
-
- // e. Set _mxfd_ to ! DefaultNumberOption(_specifiedMxfd_, 0, 20,
- // max(_mxfdDefault_, _mnfd_)).
- if (!DefaultNumberOption(isolate, specifiedMxfd_obj, 0, 20,
- std::max(mxfd_default, mnfd), mxfd_str)
+ // 21.a.ii Let mxfd be ? DefaultNumberOption(mxfd, 0, 20, undefined).
+ int mxfd;
+ if (!DefaultNumberOption(isolate, mxfd_obj, 0, 20, -1, mxfd_str)
.To(&mxfd)) {
return Nothing<NumberFormatDigitOptions>();
}
-
- // f. If _mnfd_ is greater than _mxfd_, throw a *RangeError* exception.
- if (mnfd > mxfd) {
+ // 21.a.iii If mnfd is undefined, set mnfd to min(mnfdDefault, mxfd).
+ if (mnfd_obj->IsUndefined(isolate)) {
+ mnfd = std::min(mnfd_default, mxfd);
+ } else if (mxfd_obj->IsUndefined(isolate)) {
+ // 21.a.iv Else if mxfd is undefined, set mxfd to max(mxfdDefault,
+ // mnfd).
+ mxfd = std::max(mxfd_default, mnfd);
+ } else if (mnfd > mxfd) {
+ // 21.a.v Else if mnfd is greater than mxfd, throw a RangeError
+ // exception.
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(MessageTemplate::kPropertyValueOutOfRange, mxfd_str),
Nothing<NumberFormatDigitOptions>());
}
-
- // g. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+ // 21.a.vi Set intlObj.[[MinimumFractionDigits]] to mnfd.
digit_options.minimum_fraction_digits = mnfd;
-
- // h. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ // 21.a.vii Set intlObj.[[MaximumFractionDigits]] to mxfd.
digit_options.maximum_fraction_digits = mxfd;
- // Else If intlObj.[[Notation]] is "compact", then
- } else if (notation_is_compact) {
- // a. Set intlObj.[[RoundingType]] to "compact-rounding".
- // Set minimum_significant_digits to -1 to represent roundingtype is
- // "compact-rounding".
- digit_options.minimum_significant_digits = -1;
- // 17. Else,
- } else {
- // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
+ } else { // 17.b Else
+ // 21.b.i Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
digit_options.minimum_fraction_digits = mnfd_default;
-
- // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
+ // 21.b.ii Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
digit_options.maximum_fraction_digits = mxfd_default;
}
}
+
+ // 22. If needSd or needFd, then
+ if (need_sd || need_fd) {
+ // a. If roundingPriority is "morePrecision", then
+ if (digit_options.rounding_priority == RoundingPriority::kMorePrecision) {
+ // i. Set intlObj.[[RoundingType]] to morePrecision.
+ digit_options.rounding_type = RoundingType::kMorePrecision;
+ // b. Else if roundingPriority is "lessPrecision", then
+ } else if (digit_options.rounding_priority ==
+ RoundingPriority::kLessPrecision) {
+ // i. Set intlObj.[[RoundingType]] to lessPrecision.
+ digit_options.rounding_type = RoundingType::kLessPrecision;
+ // c. Else if hasSd, then
+ } else if (has_sd) {
+ // i. Set intlObj.[[RoundingType]] to significantDigits.
+ digit_options.rounding_type = RoundingType::kSignificantDigits;
+ // d. Else,
+ } else {
+ // i.Set intlObj.[[RoundingType]] to fractionDigits.
+ digit_options.rounding_type = RoundingType::kFractionDigits;
+ }
+ // 23. Else
+ } else {
+ // a. Set intlObj.[[RoundingType]] to morePrecision.
+ digit_options.rounding_type = RoundingType::kMorePrecision;
+ // b. Set intlObj.[[MinimumFractionDigits]] to 0.
+ digit_options.minimum_fraction_digits = 0;
+ // c. Set intlObj.[[MaximumFractionDigits]] to 0.
+ digit_options.maximum_fraction_digits = 0;
+ // d. Set intlObj.[[MinimumSignificantDigits]] to 1.
+ digit_options.minimum_significant_digits = 1;
+ // e. Set intlObj.[[MaximumSignificantDigits]] to 2.
+ digit_options.maximum_significant_digits = 2;
+ }
return Just(digit_options);
}
@@ -1967,8 +2012,8 @@ MaybeHandle<JSObject> SupportedLocales(
}
// 5. Return CreateArrayFromList(supportedLocales).
- PropertyAttributes attr = static_cast<PropertyAttributes>(NONE);
- return CreateArrayFromList(isolate, supported_locales, attr);
+ return CreateArrayFromList(isolate, supported_locales,
+ PropertyAttributes::NONE);
}
} // namespace
@@ -1982,8 +2027,8 @@ MaybeHandle<JSArray> Intl::GetCanonicalLocales(Isolate* isolate,
MAYBE_RETURN(maybe_ll, MaybeHandle<JSArray>());
// 2. Return CreateArrayFromList(ll).
- PropertyAttributes attr = static_cast<PropertyAttributes>(NONE);
- return CreateArrayFromList(isolate, maybe_ll.FromJust(), attr);
+ return CreateArrayFromList(isolate, maybe_ll.FromJust(),
+ PropertyAttributes::NONE);
}
namespace {
@@ -2643,22 +2688,22 @@ const std::set<std::string>& Intl::GetAvailableLocalesForDateFormat() {
return available_locales.Pointer()->Get();
}
+constexpr uint16_t kInfinityChar = 0x221e;
+
Handle<String> Intl::NumberFieldToType(Isolate* isolate,
- Handle<Object> numeric_obj,
- int32_t field_id) {
- DCHECK(numeric_obj->IsNumeric());
- switch (static_cast<UNumberFormatFields>(field_id)) {
+ const NumberFormatSpan& part,
+ const icu::UnicodeString& text,
+ bool is_nan) {
+ switch (static_cast<UNumberFormatFields>(part.field_id)) {
case UNUM_INTEGER_FIELD:
- if (numeric_obj->IsBigInt()) {
- // Neither NaN nor Infinite could be stored into BigInt
- // so just return integer.
- return isolate->factory()->integer_string();
- } else {
- double number = numeric_obj->Number();
- if (std::isfinite(number)) return isolate->factory()->integer_string();
- if (std::isnan(number)) return isolate->factory()->nan_string();
+ if (is_nan) return isolate->factory()->nan_string();
+ if (text.charAt(part.begin_pos) == kInfinityChar ||
+ // en-US-POSIX output "INF" for Infinity
+ (part.end_pos - part.begin_pos == 3 &&
+ text.tempSubString(part.begin_pos, 3) == "INF")) {
return isolate->factory()->infinity_string();
}
+ return isolate->factory()->integer_string();
case UNUM_FRACTION_FIELD:
return isolate->factory()->fraction_string();
case UNUM_DECIMAL_SEPARATOR_FIELD:
@@ -2670,15 +2715,9 @@ Handle<String> Intl::NumberFieldToType(Isolate* isolate,
case UNUM_PERCENT_FIELD:
return isolate->factory()->percentSign_string();
case UNUM_SIGN_FIELD:
- if (numeric_obj->IsBigInt()) {
- Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
- return big_int->IsNegative() ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
- } else {
- double number = numeric_obj->Number();
- return std::signbit(number) ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
- }
+ return (text.charAt(part.begin_pos) == '+')
+ ? isolate->factory()->plusSign_string()
+ : isolate->factory()->minusSign_string();
case UNUM_EXPONENT_SYMBOL_FIELD:
return isolate->factory()->exponentSeparator_string();
@@ -2760,5 +2799,162 @@ std::set<std::string> Intl::SanctionedSimpleUnits() {
"year"});
}
+// ecma-402/#sec-isvalidtimezonename
+
+namespace {
+bool IsUnicodeStringValidTimeZoneName(const icu::UnicodeString& id) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString canonical;
+ icu::TimeZone::getCanonicalID(id, canonical, status);
+ return U_SUCCESS(status) &&
+ canonical != icu::UnicodeString("Etc/Unknown", -1, US_INV);
+}
+} // namespace
+
+MaybeHandle<String> Intl::CanonicalizeTimeZoneName(Isolate* isolate,
+ Handle<String> identifier) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::string time_zone =
+ JSDateTimeFormat::CanonicalizeTimeZoneID(identifier->ToCString().get());
+ icu::UnicodeString time_zone_ustring =
+ icu::UnicodeString(time_zone.c_str(), -1, US_INV);
+ icu::UnicodeString canonical;
+ icu::TimeZone::getCanonicalID(time_zone_ustring, canonical, status);
+ CHECK(U_SUCCESS(status));
+ if (canonical == UNICODE_STRING_SIMPLE("Etc/UTC") ||
+ canonical == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ return isolate->factory()->UTC_string();
+ }
+ return Intl::ToString(isolate, canonical);
+}
+
+bool Intl::IsValidTimeZoneName(Isolate* isolate, Handle<String> id) {
+ std::string time_zone =
+ JSDateTimeFormat::CanonicalizeTimeZoneID(id->ToCString().get());
+ icu::UnicodeString time_zone_ustring =
+ icu::UnicodeString(time_zone.c_str(), -1, US_INV);
+ return IsUnicodeStringValidTimeZoneName(time_zone_ustring);
+}
+
+bool Intl::IsValidTimeZoneName(const icu::TimeZone& tz) {
+ icu::UnicodeString id;
+ tz.getID(id);
+ return IsUnicodeStringValidTimeZoneName(id);
+}
+
+// Function to support Temporal
+std::string Intl::TimeZoneIdFromIndex(int32_t index) {
+ if (index == 0) return "UTC";
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createEnumeration());
+ int32_t curr = 0;
+ const char* id;
+
+ UErrorCode status = U_ZERO_ERROR;
+ while (U_SUCCESS(status) && curr < index &&
+ ((id = enumeration->next(nullptr, status)) != nullptr)) {
+ CHECK(U_SUCCESS(status));
+ curr++;
+ }
+ CHECK(U_SUCCESS(status));
+ CHECK(id != nullptr);
+ return id;
+}
+
+Maybe<bool> Intl::GetTimeZoneIndex(Isolate* isolate, Handle<String> identifier,
+ int32_t* index) {
+ if (identifier->Equals(*isolate->factory()->UTC_string())) {
+ *index = 0;
+ return Just(true);
+ }
+
+ std::string identifier_str(identifier->ToCString().get());
+ std::unique_ptr<icu::TimeZone> tz(
+ icu::TimeZone::createTimeZone(identifier_str.c_str()));
+ if (!IsValidTimeZoneName(*tz)) {
+ return Just(false);
+ }
+
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createEnumeration());
+ int32_t curr = 0;
+ const char* id;
+
+ UErrorCode status = U_ZERO_ERROR;
+ while (U_SUCCESS(status) &&
+ (id = enumeration->next(nullptr, status)) != nullptr) {
+ if (identifier_str == id) {
+ *index = curr + 1;
+ return Just(true);
+ }
+ curr++;
+ }
+ CHECK(U_SUCCESS(status));
+ // We should not reach here, the !IsValidTimeZoneName should return earlier
+ UNREACHABLE();
+}
+
+// #sec-tointlmathematicalvalue
+MaybeHandle<Object> Intl::ToIntlMathematicalValueAsNumberBigIntOrString(
+ Isolate* isolate, Handle<Object> input) {
+ if (input->IsNumber() || input->IsBigInt()) return input; // Shortcut.
+ // TODO(ftang) revisit the following after the resolution of
+ // https://github.com/tc39/proposal-intl-numberformat-v3/pull/82
+ if (input->IsOddball()) {
+ return Oddball::ToNumber(isolate, Handle<Oddball>::cast(input));
+ }
+ if (input->IsSymbol()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kNumber),
+ Object);
+ if (input->IsString()) UNIMPLEMENTED();
+ return input;
+}
+
+Intl::FormatRangeSourceTracker::FormatRangeSourceTracker() {
+ start_[0] = start_[1] = limit_[0] = limit_[1] = 0;
+}
+
+void Intl::FormatRangeSourceTracker::Add(int32_t field, int32_t start,
+ int32_t limit) {
+ DCHECK_LT(field, 2);
+ start_[field] = start;
+ limit_[field] = limit;
+}
+
+Intl::FormatRangeSource Intl::FormatRangeSourceTracker::GetSource(
+ int32_t start, int32_t limit) const {
+ FormatRangeSource source = FormatRangeSource::kShared;
+ if (FieldContains(0, start, limit)) {
+ source = FormatRangeSource::kStartRange;
+ } else if (FieldContains(1, start, limit)) {
+ source = FormatRangeSource::kEndRange;
+ }
+ return source;
+}
+
+bool Intl::FormatRangeSourceTracker::FieldContains(int32_t field, int32_t start,
+ int32_t limit) const {
+ DCHECK_LT(field, 2);
+ return (start_[field] <= start) && (start <= limit_[field]) &&
+ (start_[field] <= limit) && (limit <= limit_[field]);
+}
+
+Handle<String> Intl::SourceString(Isolate* isolate, FormatRangeSource source) {
+ switch (source) {
+ case FormatRangeSource::kShared:
+ return ReadOnlyRoots(isolate).shared_string_handle();
+ case FormatRangeSource::kStartRange:
+ return ReadOnlyRoots(isolate).startRange_string_handle();
+ case FormatRangeSource::kEndRange:
+ return ReadOnlyRoots(isolate).endRange_string_handle();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 7ac37894ad..0541cd0ba5 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -28,12 +28,26 @@ class BreakIterator;
class Collator;
class FormattedValue;
class StringEnumeration;
+class TimeZone;
class UnicodeString;
} // namespace U_ICU_NAMESPACE
namespace v8 {
namespace internal {
+struct NumberFormatSpan {
+ int32_t field_id;
+ int32_t begin_pos;
+ int32_t end_pos;
+
+ NumberFormatSpan() = default;
+ NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
+ : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
+};
+
+V8_EXPORT_PRIVATE std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions);
+
template <typename T>
class Handle;
class JSCollator;
@@ -45,6 +59,24 @@ class Intl {
kLength
};
+ enum class FormatRangeSource { kShared, kStartRange, kEndRange };
+
+ class FormatRangeSourceTracker {
+ public:
+ FormatRangeSourceTracker();
+ void Add(int32_t field, int32_t start, int32_t limit);
+ FormatRangeSource GetSource(int32_t start, int32_t limit) const;
+
+ private:
+ int32_t start_[2];
+ int32_t limit_[2];
+
+ bool FieldContains(int32_t field, int32_t start, int32_t limit) const;
+ };
+
+ static Handle<String> SourceString(Isolate* isolate,
+ FormatRangeSource source);
+
// Build a set of ICU locales from a list of Locales. If there is a locale
// with a script tag then the locales also include a locale without the
// script; eg, pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India)
@@ -114,6 +146,21 @@ class Intl {
Isolate* isolate, Handle<Object> num, Handle<Object> locales,
Handle<Object> options, const char* method_name);
+ // [[RoundingPriority]] is one of the String values "auto", "morePrecision",
+ // or "lessPrecision", specifying the rounding priority for the number.
+ enum class RoundingPriority {
+ kAuto,
+ kMorePrecision,
+ kLessPrecision,
+ };
+
+ enum class RoundingType {
+ kFractionDigits,
+ kSignificantDigits,
+ kMorePrecision,
+ kLessPrecision,
+ };
+
// ecma402/#sec-setnfdigitoptions
struct NumberFormatDigitOptions {
int minimum_integer_digits;
@@ -121,6 +168,8 @@ class Intl {
int maximum_fraction_digits;
int minimum_significant_digits;
int maximum_significant_digits;
+ RoundingPriority rounding_priority;
+ RoundingType rounding_type;
};
V8_WARN_UNUSED_RESULT static Maybe<NumberFormatDigitOptions>
SetNumberFormatDigitOptions(Isolate* isolate, Handle<JSReceiver> options,
@@ -142,8 +191,9 @@ class Intl {
// Helper function to convert number field id to type string.
static Handle<String> NumberFieldToType(Isolate* isolate,
- Handle<Object> numeric_obj,
- int32_t field_id);
+ const NumberFormatSpan& part,
+ const icu::UnicodeString& text,
+ bool is_nan);
// A helper function to implement formatToParts which add element to array as
// $array[$index] = { type: $field_type_string, value: $value }
@@ -295,6 +345,32 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> AvailableCalendars(
Isolate* isolate);
+
+ V8_WARN_UNUSED_RESULT static bool IsValidTimeZoneName(
+ const icu::TimeZone& tz);
+ V8_WARN_UNUSED_RESULT static bool IsValidTimeZoneName(Isolate* isolate,
+ const std::string& id);
+ V8_WARN_UNUSED_RESULT static bool IsValidTimeZoneName(Isolate* isolate,
+ Handle<String> id);
+
+ // Function to support Temporal
+ V8_WARN_UNUSED_RESULT static std::string TimeZoneIdFromIndex(int32_t index);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetTimeZoneIndex(
+ Isolate* isolate, Handle<String> identifier, int32_t* index);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> CanonicalizeTimeZoneName(
+ Isolate* isolate, Handle<String> identifier);
+
+ // ecma402/#sec-coerceoptionstoobject
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
+ Isolate* isolate, Handle<Object> options, const char* service);
+
+ // #sec-tointlmathematicalvalue
+ // The implementation preserve the Object in String, BigInt or Number
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ToIntlMathematicalValueAsNumberBigIntOrString(Isolate* isolate,
+ Handle<Object> input);
};
} // namespace internal
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 672f419657..a5f18b9bb3 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -102,9 +102,9 @@ macro LocaleCompareFastPath<T1: type, T2: type>(
transitioning builtin StringFastLocaleCompare(implicit context: Context)(
localeCompareFn: JSFunction, left: JSAny, right: JSAny,
locales: JSAny): JSAny {
- if (TaggedEqual(left, right)) return SmiConstant(0);
try {
const left = Cast<String>(left) otherwise Bailout;
+ if (TaggedEqual(left, right)) return SmiConstant(0);
StringToSlice(left) otherwise LeftOneByte, LeftTwoByte;
} label LeftOneByte(leftSlice: ConstSlice<char8>) {
try {
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 0fd66630ca..9049394cde 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -36,14 +36,13 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Address value = ReadCagedPointerField(kBackingStoreOffset, cage_base);
+ Address value = ReadSandboxedPointerField(kBackingStoreOffset, cage_base);
return reinterpret_cast<void*>(value);
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- DCHECK(IsValidBackingStorePointer(value));
Address addr = reinterpret_cast<Address>(value);
- WriteCagedPointerField(kBackingStoreOffset, isolate, addr);
+ WriteSandboxedPointerField(kBackingStoreOffset, isolate, addr);
}
std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() const {
@@ -236,6 +235,16 @@ size_t JSTypedArray::GetLength() const {
return GetLengthOrOutOfBounds(out_of_bounds);
}
+size_t JSTypedArray::GetByteLength() const {
+ return GetLength() * element_size();
+}
+
+bool JSTypedArray::IsOutOfBounds() const {
+ bool out_of_bounds = false;
+ GetLengthOrOutOfBounds(out_of_bounds);
+ return out_of_bounds;
+}
+
size_t JSTypedArray::length() const {
DCHECK(!is_length_tracking());
DCHECK(!is_backed_by_rab());
@@ -251,12 +260,11 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- return ReadCagedPointerField(kExternalPointerOffset, cage_base);
+ return ReadSandboxedPointerField(kExternalPointerOffset, cage_base);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
- WriteCagedPointerField(kExternalPointerOffset, isolate, value);
+ WriteSandboxedPointerField(kExternalPointerOffset, isolate, value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -349,15 +357,11 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
}
- if (V8_UNLIKELY(array->IsVariableLength())) {
- bool out_of_bounds = false;
- array->GetLengthOrOutOfBounds(out_of_bounds);
- if (out_of_bounds) {
- const MessageTemplate message = MessageTemplate::kDetachedOperation;
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method_name);
- THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
- }
+ if (V8_UNLIKELY(array->IsVariableLength() && array->IsOutOfBounds())) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
}
// spec describes to return `buffer`, but it may disrupt current
@@ -366,14 +370,13 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- Address value = ReadCagedPointerField(kDataPointerOffset, cage_base);
+ Address value = ReadSandboxedPointerField(kDataPointerOffset, cage_base);
return reinterpret_cast<void*>(value);
}
void JSDataView::set_data_pointer(Isolate* isolate, void* ptr) {
- DCHECK(IsValidBackingStorePointer(ptr));
Address value = reinterpret_cast<Address>(ptr);
- WriteCagedPointerField(kDataPointerOffset, isolate, value);
+ WriteSandboxedPointerField(kDataPointerOffset, isolate, value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index dac3c8b563..cd760b9e67 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -76,7 +76,6 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
!backing_store->is_wasm_memory() && !backing_store->is_resizable(),
backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached());
- DCHECK(IsValidBackingStorePointer(backing_store->buffer_start()));
Isolate* isolate = GetIsolate();
if (backing_store->IsEmpty()) {
@@ -91,6 +90,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
// invariant that their byte_length field is always 0.
set_byte_length(0);
} else {
+ CHECK_LE(backing_store->byte_length(), kMaxByteLength);
set_byte_length(backing_store->byte_length());
}
set_max_byte_length(backing_store->max_byte_length());
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 8e5446c687..9a9fe2c813 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -21,7 +21,8 @@ class ArrayBufferExtension;
#include "torque-generated/src/objects/js-array-buffer-tq.inc"
class JSArrayBuffer
- : public TorqueGeneratedJSArrayBuffer<JSArrayBuffer, JSObject> {
+ : public TorqueGeneratedJSArrayBuffer<JSArrayBuffer,
+ JSObjectWithEmbedderSlots> {
public:
// The maximum length for JSArrayBuffer's supported by V8.
// On 32-bit architectures we limit this to 2GiB, so that
@@ -231,7 +232,8 @@ class ArrayBufferExtension final : public Malloced {
};
class JSArrayBufferView
- : public TorqueGeneratedJSArrayBufferView<JSArrayBufferView, JSObject> {
+ : public TorqueGeneratedJSArrayBufferView<JSArrayBufferView,
+ JSObjectWithEmbedderSlots> {
public:
// [byte_offset]: offset of typed array in bytes.
DECL_PRIMITIVE_ACCESSORS(byte_offset, size_t)
@@ -296,6 +298,8 @@ class JSTypedArray
inline size_t GetLengthOrOutOfBounds(bool& out_of_bounds) const;
inline size_t GetLength() const;
+ inline size_t GetByteLength() const;
+ inline bool IsOutOfBounds() const;
static size_t LengthTrackingGsabBackedTypedArrayLength(Isolate* isolate,
Address raw_array);
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 7fb62b0bb2..d00febb179 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -11,10 +11,11 @@ bitfield struct JSArrayBufferFlags extends uint32 {
is_resizable: bool: 1 bit;
}
-extern class JSArrayBuffer extends JSObject {
+extern class JSArrayBuffer extends JSObjectWithEmbedderSlots {
byte_length: uintptr;
max_byte_length: uintptr;
- backing_store: ExternalPointer;
+ // A SandboxedPtr if the sandbox is enabled
+ backing_store: RawPtr;
extension: RawPtr;
bit_field: JSArrayBufferFlags;
// Pads header size to be a multiple of kTaggedSize.
@@ -52,7 +53,7 @@ bitfield struct JSArrayBufferViewFlags extends uint32 {
}
@abstract
-extern class JSArrayBufferView extends JSObject {
+extern class JSArrayBufferView extends JSObjectWithEmbedderSlots {
buffer: JSArrayBuffer;
byte_offset: uintptr;
byte_length: uintptr;
@@ -72,9 +73,24 @@ macro IsLengthTrackingJSArrayBufferView(array: JSArrayBufferView): bool {
return array.bit_field.is_length_tracking;
}
+extern macro LoadVariableLengthJSArrayBufferViewByteLength(
+ JSArrayBufferView, JSArrayBuffer): uintptr labels DetachedOrOutOfBounds;
+
+macro LoadJSArrayBufferViewByteLength(
+ view: JSArrayBufferView,
+ buffer: JSArrayBuffer): uintptr labels DetachedOrOutOfBounds {
+ if (IsVariableLengthJSArrayBufferView(view)) {
+ return LoadVariableLengthJSArrayBufferViewByteLength(view, buffer)
+ otherwise DetachedOrOutOfBounds;
+ }
+ if (IsDetachedBuffer(buffer)) goto DetachedOrOutOfBounds;
+ return view.byte_length;
+}
+
extern class JSTypedArray extends JSArrayBufferView {
length: uintptr;
- external_pointer: ExternalPointer;
+ // A SandboxedPtr if the sandbox is enabled
+ external_pointer: RawPtr;
base_pointer: ByteArray|Smi;
}
@@ -85,7 +101,8 @@ macro IsOnHeapTypedArray(array: JSTypedArray): bool {
}
extern class JSDataView extends JSArrayBufferView {
- data_pointer: ExternalPointer;
+ // A SandboxedPtr if the sandbox is enabled
+ data_pointer: RawPtr;
}
@abstract
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 923580e6e6..955370b7ba 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -414,9 +414,11 @@ class SpecialTimeZoneMap {
std::map<std::string, std::string> map_;
};
+} // namespace
+
// Return the time zone id which match ICU's expectation of title casing
// return empty string when error.
-std::string CanonicalizeTimeZoneID(const std::string& input) {
+std::string JSDateTimeFormat::CanonicalizeTimeZoneID(const std::string& input) {
std::string upper = input;
transform(upper.begin(), upper.end(), upper.begin(),
LocaleIndependentAsciiToUpper);
@@ -463,6 +465,7 @@ std::string CanonicalizeTimeZoneID(const std::string& input) {
return ToTitleCaseTimezoneLocation(input);
}
+namespace {
Handle<String> DateTimeStyleAsString(Isolate* isolate,
JSDateTimeFormat::DateTimeStyle style) {
switch (style) {
@@ -491,48 +494,9 @@ int FractionalSecondDigitsFromPattern(const std::string& pattern) {
} // namespace
-// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
-MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+Handle<Object> JSDateTimeFormat::TimeZoneId(Isolate* isolate,
+ const icu::TimeZone& tz) {
Factory* factory = isolate->factory();
- // 4. Let options be ! ObjectCreate(%ObjectPrototype%).
- Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
-
- Handle<Object> resolved_obj;
-
- Handle<String> locale = Handle<String>(date_time_format->locale(), isolate);
- DCHECK(!date_time_format->icu_locale().is_null());
- DCHECK_NOT_NULL(date_time_format->icu_locale().raw());
- icu::Locale* icu_locale = date_time_format->icu_locale().raw();
-
- icu::SimpleDateFormat* icu_simple_date_format =
- date_time_format->icu_simple_date_format().raw();
- // calendar
- const icu::Calendar* calendar = icu_simple_date_format->getCalendar();
- // getType() returns legacy calendar type name instead of LDML/BCP47 calendar
- // key values. intl.js maps them to BCP47 values for key "ca".
- // TODO(jshin): Consider doing it here, instead.
- std::string calendar_str = calendar->getType();
-
- // Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
- // See typeMap section in third_party/icu/source/data/misc/keyTypeData.txt
- // and
- // http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
- if (calendar_str == "gregorian") {
- if (date_time_format->alt_calendar()) {
- calendar_str = "iso8601";
- } else {
- calendar_str = "gregory";
- }
- } else if (calendar_str == "ethiopic-amete-alem") {
- calendar_str = "ethioaa";
- } else if (calendar_str == "islamic") {
- if (date_time_format->alt_calendar()) {
- calendar_str = "islamic-rgsa";
- }
- }
-
- const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
tz.getID(time_zone);
UErrorCode status = U_ZERO_ERROR;
@@ -550,14 +514,84 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
timezone_value = factory->UTC_string();
} else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, timezone_value,
- Intl::ToString(isolate, canonical_time_zone),
- JSObject);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, timezone_value, Intl::ToString(isolate, canonical_time_zone),
+ Handle<Object>());
}
} else {
// Somehow on Windows we will reach here.
timezone_value = factory->undefined_value();
}
+ return timezone_value;
+}
+
+namespace {
+Handle<String> GetCalendar(Isolate* isolate,
+ const icu::SimpleDateFormat& simple_date_format,
+ bool is_alt_calendar = false) {
+ // getType() returns legacy calendar type name instead of LDML/BCP47 calendar
+ // key values. intl.js maps them to BCP47 values for key "ca".
+ // TODO(jshin): Consider doing it here, instead.
+ std::string calendar_str = simple_date_format.getCalendar()->getType();
+
+ // Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
+ // See typeMap section in third_party/icu/source/data/misc/keyTypeData.txt
+ // and
+ // http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+ if (calendar_str == "gregorian") {
+ if (is_alt_calendar) {
+ calendar_str = "iso8601";
+ } else {
+ calendar_str = "gregory";
+ }
+ } else if (calendar_str == "ethiopic-amete-alem") {
+ calendar_str = "ethioaa";
+ } else if (calendar_str == "islamic") {
+ if (is_alt_calendar) {
+ calendar_str = "islamic-rgsa";
+ }
+ }
+ return isolate->factory()->NewStringFromAsciiChecked(calendar_str.c_str());
+}
+
+Handle<Object> GetTimeZone(Isolate* isolate,
+ const icu::SimpleDateFormat& simple_date_format) {
+ return JSDateTimeFormat::TimeZoneId(
+ isolate, simple_date_format.getCalendar()->getTimeZone());
+}
+} // namespace
+
+Handle<String> JSDateTimeFormat::Calendar(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ return GetCalendar(isolate,
+ *(date_time_format->icu_simple_date_format().raw()),
+ date_time_format->alt_calendar());
+}
+
+Handle<Object> JSDateTimeFormat::TimeZone(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ return GetTimeZone(isolate,
+ *(date_time_format->icu_simple_date_format().raw()));
+}
+
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
+MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ Factory* factory = isolate->factory();
+ // 4. Let options be ! ObjectCreate(%ObjectPrototype%).
+ Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
+
+ Handle<Object> resolved_obj;
+
+ Handle<String> locale = Handle<String>(date_time_format->locale(), isolate);
+ DCHECK(!date_time_format->icu_locale().is_null());
+ DCHECK_NOT_NULL(date_time_format->icu_locale().raw());
+ icu::Locale* icu_locale = date_time_format->icu_locale().raw();
+
+ icu::SimpleDateFormat* icu_simple_date_format =
+ date_time_format->icu_simple_date_format().raw();
+ Handle<Object> timezone =
+ JSDateTimeFormat::TimeZone(isolate, date_time_format);
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
// to assume that for given locale NumberingSystem constructor produces the
@@ -594,10 +628,10 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
DCHECK(maybe_create_locale.FromJust());
USE(maybe_create_locale);
+ Handle<String> calendar =
+ JSDateTimeFormat::Calendar(isolate, date_time_format);
Maybe<bool> maybe_create_calendar = JSReceiver::CreateDataProperty(
- isolate, options, factory->calendar_string(),
- factory->NewStringFromAsciiChecked(calendar_str.c_str()),
- Just(kDontThrow));
+ isolate, options, factory->calendar_string(), calendar, Just(kDontThrow));
DCHECK(maybe_create_calendar.FromJust());
USE(maybe_create_calendar);
@@ -610,8 +644,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
USE(maybe_create_numbering_system);
}
Maybe<bool> maybe_create_time_zone = JSReceiver::CreateDataProperty(
- isolate, options, factory->timeZone_string(), timezone_value,
- Just(kDontThrow));
+ isolate, options, factory->timeZone_string(), timezone, Just(kDontThrow));
DCHECK(maybe_create_time_zone.FromJust());
USE(maybe_create_time_zone);
@@ -1015,20 +1048,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::UnwrapDateTimeFormat(
return Handle<JSDateTimeFormat>::cast(dtf);
}
-namespace {
-
-// ecma-402/#sec-isvalidtimezonename
-bool IsValidTimeZoneName(const icu::TimeZone& tz) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString id;
- tz.getID(id);
- icu::UnicodeString canonical;
- icu::TimeZone::getCanonicalID(id, canonical, status);
- return U_SUCCESS(status) &&
- canonical != icu::UnicodeString("Etc/Unknown", -1, US_INV);
-}
-
-std::unique_ptr<icu::TimeZone> CreateTimeZone(const char* timezone) {
+std::unique_ptr<icu::TimeZone> JSDateTimeFormat::CreateTimeZone(
+ const char* timezone) {
// Create time zone as specified by the user. We have to re-create time zone
// since calendar takes ownership.
if (timezone == nullptr) {
@@ -1041,10 +1062,12 @@ std::unique_ptr<icu::TimeZone> CreateTimeZone(const char* timezone) {
icu::TimeZone::createTimeZone(canonicalized.c_str()));
// 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
// i. Throw a RangeError exception.
- if (!IsValidTimeZoneName(*tz)) return std::unique_ptr<icu::TimeZone>();
+ if (!Intl::IsValidTimeZoneName(*tz)) return std::unique_ptr<icu::TimeZone>();
return tz;
}
+namespace {
+
class CalendarCache {
public:
icu::Calendar* CreateCalendar(const icu::Locale& locale, icu::TimeZone* tz) {
@@ -1435,24 +1458,38 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
class DateTimePatternGeneratorCache {
public:
// Return a clone copy that the caller have to free.
- icu::DateTimePatternGenerator* CreateGenerator(const icu::Locale& locale) {
+ icu::DateTimePatternGenerator* CreateGenerator(Isolate* isolate,
+ const icu::Locale& locale) {
std::string key(locale.getName());
base::MutexGuard guard(&mutex_);
auto it = map_.find(key);
+ icu::DateTimePatternGenerator* orig;
if (it != map_.end()) {
- return it->second->clone();
+ DCHECK(it->second != nullptr);
+ orig = it->second.get();
+ } else {
+ UErrorCode status = U_ZERO_ERROR;
+ orig = icu::DateTimePatternGenerator::createInstance(locale, status);
+ // It may not be an U_MEMORY_ALLOCATION_ERROR.
+ // Fallback to use "root".
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ orig = icu::DateTimePatternGenerator::createInstance("root", status);
+ }
+ if (U_SUCCESS(status) && orig != nullptr) {
+ map_[key].reset(orig);
+ } else {
+ DCHECK(status == U_MEMORY_ALLOCATION_ERROR);
+ V8::FatalProcessOutOfMemory(
+ isolate, "DateTimePatternGeneratorCache::CreateGenerator");
+ }
}
- UErrorCode status = U_ZERO_ERROR;
- map_[key].reset(
- icu::DateTimePatternGenerator::createInstance(locale, status));
- // Fallback to use "root".
- if (U_FAILURE(status)) {
- status = U_ZERO_ERROR;
- map_[key].reset(
- icu::DateTimePatternGenerator::createInstance("root", status));
+ icu::DateTimePatternGenerator* clone = orig ? orig->clone() : nullptr;
+ if (clone == nullptr) {
+ V8::FatalProcessOutOfMemory(
+ isolate, "DateTimePatternGeneratorCache::CreateGenerator");
}
- DCHECK(U_SUCCESS(status));
- return map_[key]->clone();
+ return clone;
}
private:
@@ -1599,7 +1636,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
generator_cache = LAZY_INSTANCE_INITIALIZER;
std::unique_ptr<icu::DateTimePatternGenerator> generator(
- generator_cache.Pointer()->CreateGenerator(icu_locale));
+ generator_cache.Pointer()->CreateGenerator(isolate, icu_locale));
// 15.Let hcDefault be dataLocaleData.[[hourCycle]].
HourCycle hc_default = ToHourCycle(generator->getDefaultHourCycle(status));
@@ -1654,7 +1691,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
isolate, options, "timeZone", empty_values, service, &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
- std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(timezone.get());
+ std::unique_ptr<icu::TimeZone> tz =
+ JSDateTimeFormat::CreateTimeZone(timezone.get());
if (tz.get() == nullptr) {
THROW_NEW_ERROR(
isolate,
@@ -2063,56 +2101,12 @@ Handle<String> JSDateTimeFormat::HourCycleAsString() const {
}
}
-enum Source { kShared, kStartRange, kEndRange };
-
namespace {
-class SourceTracker {
- public:
- SourceTracker() { start_[0] = start_[1] = limit_[0] = limit_[1] = 0; }
- void Add(int32_t field, int32_t start, int32_t limit) {
- DCHECK_LT(field, 2);
- start_[field] = start;
- limit_[field] = limit;
- }
-
- Source GetSource(int32_t start, int32_t limit) const {
- Source source = Source::kShared;
- if (FieldContains(0, start, limit)) {
- source = Source::kStartRange;
- } else if (FieldContains(1, start, limit)) {
- source = Source::kEndRange;
- }
- return source;
- }
-
- private:
- int32_t start_[2];
- int32_t limit_[2];
-
- bool FieldContains(int32_t field, int32_t start, int32_t limit) const {
- DCHECK_LT(field, 2);
- return (start_[field] <= start) && (start <= limit_[field]) &&
- (start_[field] <= limit) && (limit <= limit_[field]);
- }
-};
-
-Handle<String> SourceString(Isolate* isolate, Source source) {
- switch (source) {
- case Source::kShared:
- return ReadOnlyRoots(isolate).shared_string_handle();
- case Source::kStartRange:
- return ReadOnlyRoots(isolate).startRange_string_handle();
- case Source::kEndRange:
- return ReadOnlyRoots(isolate).endRange_string_handle();
- UNREACHABLE();
- }
-}
-
-Maybe<bool> AddPartForFormatRange(Isolate* isolate, Handle<JSArray> array,
- const icu::UnicodeString& string,
- int32_t index, int32_t field, int32_t start,
- int32_t end, const SourceTracker& tracker) {
+Maybe<bool> AddPartForFormatRange(
+ Isolate* isolate, Handle<JSArray> array, const icu::UnicodeString& string,
+ int32_t index, int32_t field, int32_t start, int32_t end,
+ const Intl::FormatRangeSourceTracker& tracker) {
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, substring,
Intl::ToString(isolate, string, start, end),
@@ -2120,7 +2114,7 @@ Maybe<bool> AddPartForFormatRange(Isolate* isolate, Handle<JSArray> array,
Intl::AddElement(isolate, array, index,
IcuDateFieldIdToDateType(field, isolate), substring,
isolate->factory()->source_string(),
- SourceString(isolate, tracker.GetSource(start, end)));
+ Intl::SourceString(isolate, tracker.GetSource(start, end)));
return Just(true);
}
@@ -2155,7 +2149,7 @@ MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
icu::ConstrainedFieldPosition cfpos;
int index = 0;
int32_t previous_end_pos = 0;
- SourceTracker tracker;
+ Intl::FormatRangeSourceTracker tracker;
*outputRange = false;
while (formatted.nextPosition(cfpos, status)) {
int32_t category = cfpos.getCategory();
@@ -2203,13 +2197,11 @@ MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
}
// The shared code between formatRange and formatRangeToParts
-template <typename T>
-MaybeHandle<T> FormatRangeCommon(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
- double y,
- const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&,
- bool*)>& formatToResult,
- bool* outputRange) {
+template <typename T,
+ MaybeHandle<T> (*F)(Isolate*, const icu::FormattedValue&, bool*)>
+MaybeHandle<T> FormatRangeCommon(Isolate* isolate,
+ Handle<JSDateTimeFormat> date_time_format,
+ double x, double y, bool* outputRange) {
// Track newer feature formateRange and formatRangeToParts
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
@@ -2252,7 +2244,7 @@ MaybeHandle<T> FormatRangeCommon(
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
}
- return formatToResult(isolate, formatted, outputRange);
+ return F(isolate, formatted, outputRange);
}
} // namespace
@@ -2261,8 +2253,8 @@ MaybeHandle<String> JSDateTimeFormat::FormatRange(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y) {
bool outputRange = true;
- MaybeHandle<String> ret = FormatRangeCommon<String>(
- isolate, date_time_format, x, y, FormattedToString, &outputRange);
+ MaybeHandle<String> ret = FormatRangeCommon<String, FormattedToString>(
+ isolate, date_time_format, x, y, &outputRange);
if (outputRange) {
return ret;
}
@@ -2275,8 +2267,8 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
double y) {
bool outputRange = true;
MaybeHandle<JSArray> ret =
- FormatRangeCommon<JSArray>(isolate, date_time_format, x, y,
- FormattedDateIntervalToJSArray, &outputRange);
+ FormatRangeCommon<JSArray, FormattedDateIntervalToJSArray>(
+ isolate, date_time_format, x, y, &outputRange);
if (outputRange) {
return ret;
}
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 9c5b2f9dc8..a5465d37cf 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
+#define V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
+
#ifndef V8_INTL_SUPPORT
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
-#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
-#define V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
-
#include <set>
#include <string>
@@ -25,6 +25,7 @@ namespace U_ICU_NAMESPACE {
class DateIntervalFormat;
class Locale;
class SimpleDateFormat;
+class TimeZone;
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -42,6 +43,12 @@ class JSDateTimeFormat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
+ V8_WARN_UNUSED_RESULT static Handle<String> Calendar(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
+
+ V8_WARN_UNUSED_RESULT static Handle<Object> TimeZone(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
+
// ecma402/#sec-unwrapdatetimeformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat>
UnwrapDateTimeFormat(Isolate* isolate, Handle<JSReceiver> format_holder);
@@ -85,6 +92,12 @@ class JSDateTimeFormat
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
+ Handle<Object> static TimeZoneId(Isolate* isolate, const icu::TimeZone& tz);
+ std::unique_ptr<icu::TimeZone> static CreateTimeZone(const char* timezone);
+
+ V8_EXPORT_PRIVATE static std::string CanonicalizeTimeZoneID(
+ const std::string& input);
+
Handle<String> HourCycleAsString() const;
// ecma-402/#sec-properties-of-intl-datetimeformat-instances
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 27916a17ce..99a1d4b64c 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -25,8 +25,9 @@ namespace internal {
#include "torque-generated/src/objects/js-function-tq-inl.inc"
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunctionOrWrappedFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSWrappedFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunction)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
@@ -57,68 +58,21 @@ bool JSFunction::ChecksOptimizationMarker() {
}
bool JSFunction::IsMarkedForOptimization() {
- return has_feedback_vector() && feedback_vector().optimization_marker() ==
- OptimizationMarker::kCompileOptimized;
+ return has_feedback_vector() &&
+ feedback_vector().optimization_marker() ==
+ OptimizationMarker::kCompileTurbofan_NotConcurrent;
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
return has_feedback_vector() &&
feedback_vector().optimization_marker() ==
- OptimizationMarker::kCompileOptimizedConcurrent;
-}
-
-void JSFunction::SetInterruptBudget() {
- if (!has_feedback_vector()) {
- DCHECK(shared().is_compiled());
- int budget = FLAG_budget_for_feedback_vector_allocation;
- if (FLAG_feedback_allocation_on_bytecode_size) {
- budget = shared().GetBytecodeArray(GetIsolate()).length() *
- FLAG_scale_factor_for_feedback_allocation;
- }
- raw_feedback_cell().set_interrupt_budget(budget);
- return;
- }
- FeedbackVector::SetInterruptBudget(raw_feedback_cell());
-}
-
-void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
- Isolate* isolate = GetIsolate();
- if (!isolate->concurrent_recompilation_enabled() ||
- isolate->bootstrapper()->IsActive()) {
- mode = ConcurrencyMode::kNotConcurrent;
- }
-
- DCHECK(!is_compiled() || ActiveTierIsIgnition() ||
- ActiveTierIsMidtierTurboprop() || ActiveTierIsBaseline());
- DCHECK(!ActiveTierIsTurbofan());
- DCHECK(shared().IsInterpreted());
- DCHECK(shared().allows_lazy_compilation() ||
- !shared().optimization_disabled());
-
- if (mode == ConcurrencyMode::kConcurrent) {
- if (IsInOptimizationQueue()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Not marking ");
- ShortPrint();
- PrintF(" -- already in optimization queue.\n");
- }
- return;
- }
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Marking ");
- ShortPrint();
- PrintF(" for concurrent recompilation.\n");
- }
- }
-
- SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
- ? OptimizationMarker::kCompileOptimizedConcurrent
- : OptimizationMarker::kCompileOptimized);
+ OptimizationMarker::kCompileTurbofan_Concurrent;
}
bool JSFunction::IsInOptimizationQueue() {
if (!has_feedback_vector()) return false;
- return IsInOptimizationQueueMarker(feedback_vector().optimization_marker());
+ return feedback_vector().optimization_marker() ==
+ OptimizationMarker::kInOptimizationQueue;
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
@@ -133,41 +87,24 @@ AbstractCode JSFunction::abstract_code(IsolateT* isolate) {
if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else {
- return AbstractCode::cast(code(kAcquireLoad));
+ return AbstractCode::cast(FromCodeT(code(kAcquireLoad)));
}
}
int JSFunction::length() { return shared().length(); }
-ACCESSORS_RELAXED(JSFunction, raw_code, CodeT, kCodeOffset)
-RELEASE_ACQUIRE_ACCESSORS(JSFunction, raw_code, CodeT, kCodeOffset)
-
-DEF_GETTER(JSFunction, code, Code) { return FromCodeT(raw_code(cage_base)); }
-
-void JSFunction::set_code(Code code, WriteBarrierMode mode) {
- set_raw_code(ToCodeT(code), mode);
-}
-
-DEF_ACQUIRE_GETTER(JSFunction, code, Code) {
- return FromCodeT(raw_code(cage_base, kAcquireLoad));
-}
-
-void JSFunction::set_code(Code code, ReleaseStoreTag, WriteBarrierMode mode) {
- set_raw_code(ToCodeT(code), kReleaseStore, mode);
-}
+ACCESSORS_RELAXED(JSFunction, code, CodeT, kCodeOffset)
+RELEASE_ACQUIRE_ACCESSORS(JSFunction, code, CodeT, kCodeOffset)
#ifdef V8_EXTERNAL_CODE_SPACE
-void JSFunction::set_code(CodeT code, WriteBarrierMode mode) {
- set_raw_code(code, mode);
-}
-void JSFunction::set_code(CodeT code, ReleaseStoreTag, WriteBarrierMode mode) {
- set_raw_code(code, kReleaseStore, mode);
+void JSFunction::set_code(Code code, ReleaseStoreTag, WriteBarrierMode mode) {
+ set_code(ToCodeT(code), kReleaseStore, mode);
}
#endif
Address JSFunction::code_entry_point() const {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- return CodeDataContainer::cast(raw_code()).code_entry_point();
+ return CodeDataContainer::cast(code()).code_entry_point();
} else {
return code().InstructionStart();
}
@@ -309,7 +246,7 @@ bool JSFunction::ShouldFlushBaselineCode(
// SFI / FV to JSFunction but it is safe in practice.
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
if (!maybe_code.IsCodeT()) return false;
- Code code = FromCodeT(CodeT::cast(maybe_code));
+ CodeT code = CodeT::cast(maybe_code);
if (code.kind() != CodeKind::BASELINE) return false;
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
@@ -326,7 +263,7 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
if (!maybe_code.IsCodeT()) return false;
- Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);
+ CodeT code = CodeT::cast(maybe_code);
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index bfb3f7ba96..b96aa696e5 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -6,6 +6,7 @@
#include "src/codegen/compiler.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/execution/tiering-manager.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
@@ -48,7 +49,7 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
// Check the optimized code cache.
if (has_feedback_vector() && feedback_vector().has_optimized_code() &&
!feedback_vector().optimized_code().marked_for_deoptimization()) {
- Code code = feedback_vector().optimized_code();
+ CodeT code = feedback_vector().optimized_code();
DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
result |= CodeKindToCodeKindFlag(code.kind());
}
@@ -84,21 +85,14 @@ namespace {
V8_WARN_UNUSED_RESULT bool HighestTierOf(CodeKinds kinds,
CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
- if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
- *highest_tier = CodeKind::TURBOFAN;
- return true;
- } else if ((kinds & CodeKindFlag::TURBOPROP) != 0) {
- *highest_tier = CodeKind::TURBOPROP;
- return true;
- } else if ((kinds & CodeKindFlag::BASELINE) != 0) {
- *highest_tier = CodeKind::BASELINE;
- return true;
- } else if ((kinds & CodeKindFlag::INTERPRETED_FUNCTION) != 0) {
- *highest_tier = CodeKind::INTERPRETED_FUNCTION;
- return true;
- }
- DCHECK_EQ(kinds, 0);
- return false;
+ // Higher tiers > lower tiers.
+ STATIC_ASSERT(CodeKind::TURBOFAN > CodeKind::INTERPRETED_FUNCTION);
+ if (kinds == 0) return false;
+ const int highest_tier_log2 =
+ 31 - base::bits::CountLeadingZeros(static_cast<uint32_t>(kinds));
+ DCHECK(CodeKindIsJSFunction(static_cast<CodeKind>(highest_tier_log2)));
+ *highest_tier = static_cast<CodeKind>(highest_tier_log2);
+ return true;
}
} // namespace
@@ -120,7 +114,7 @@ base::Optional<CodeKind> JSFunction::GetActiveTier() const {
#ifdef DEBUG
CHECK(highest_tier == CodeKind::TURBOFAN ||
highest_tier == CodeKind::BASELINE ||
- highest_tier == CodeKind::TURBOPROP ||
+ highest_tier == CodeKind::MAGLEV ||
highest_tier == CodeKind::INTERPRETED_FUNCTION);
if (highest_tier == CodeKind::INTERPRETED_FUNCTION) {
@@ -128,7 +122,7 @@ base::Optional<CodeKind> JSFunction::GetActiveTier() const {
(CodeKindIsOptimizedJSFunction(code().kind()) &&
code().marked_for_deoptimization()) ||
(code().builtin_id() == Builtin::kCompileLazy &&
- shared().IsInterpreted()));
+ shared().HasBytecodeArray() && !shared().HasBaselineCode()));
}
#endif // DEBUG
@@ -139,31 +133,16 @@ bool JSFunction::ActiveTierIsIgnition() const {
return GetActiveTier() == CodeKind::INTERPRETED_FUNCTION;
}
-bool JSFunction::ActiveTierIsTurbofan() const {
- return GetActiveTier() == CodeKind::TURBOFAN;
-}
-
bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
-bool JSFunction::ActiveTierIsToptierTurboprop() const {
- return FLAG_turboprop_as_toptier && GetActiveTier() == CodeKind::TURBOPROP;
-}
-
-bool JSFunction::ActiveTierIsMidtierTurboprop() const {
- return FLAG_turboprop && !FLAG_turboprop_as_toptier &&
- GetActiveTier() == CodeKind::TURBOPROP;
+bool JSFunction::ActiveTierIsMaglev() const {
+ return GetActiveTier() == CodeKind::MAGLEV;
}
-CodeKind JSFunction::NextTier() const {
- if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
- return CodeKind::TURBOFAN;
- } else if (V8_UNLIKELY(FLAG_turboprop)) {
- DCHECK(ActiveTierIsIgnition() || ActiveTierIsBaseline());
- return CodeKind::TURBOPROP;
- }
- return CodeKind::TURBOFAN;
+bool JSFunction::ActiveTierIsTurbofan() const {
+ return GetActiveTier() == CodeKind::TURBOFAN;
}
bool JSFunction::CanDiscardCompiled() const {
@@ -181,6 +160,62 @@ bool JSFunction::CanDiscardCompiled() const {
return (result & kJSFunctionCodeKindsMask) != 0;
}
+namespace {
+
+constexpr OptimizationMarker OptimizationMarkerFor(CodeKind target_kind,
+ ConcurrencyMode mode) {
+ DCHECK(target_kind == CodeKind::MAGLEV || target_kind == CodeKind::TURBOFAN);
+ return target_kind == CodeKind::MAGLEV
+ ? (mode == ConcurrencyMode::kConcurrent
+ ? OptimizationMarker::kCompileMaglev_Concurrent
+ : OptimizationMarker::kCompileMaglev_NotConcurrent)
+ : (mode == ConcurrencyMode::kConcurrent
+ ? OptimizationMarker::kCompileTurbofan_Concurrent
+ : OptimizationMarker::kCompileTurbofan_NotConcurrent);
+}
+
+} // namespace
+
+void JSFunction::MarkForOptimization(Isolate* isolate, CodeKind target_kind,
+ ConcurrencyMode mode) {
+ if (!isolate->concurrent_recompilation_enabled() ||
+ isolate->bootstrapper()->IsActive()) {
+ mode = ConcurrencyMode::kNotConcurrent;
+ }
+
+ DCHECK(CodeKindIsOptimizedJSFunction(target_kind));
+ DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsBaseline() ||
+ ActiveTierIsMaglev());
+ DCHECK(!ActiveTierIsTurbofan());
+ DCHECK(shared().HasBytecodeArray());
+ DCHECK(shared().allows_lazy_compilation() ||
+ !shared().optimization_disabled());
+
+ if (mode == ConcurrencyMode::kConcurrent) {
+ if (IsInOptimizationQueue()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Not marking ");
+ ShortPrint();
+ PrintF(" -- already in optimization queue.\n");
+ }
+ return;
+ }
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Marking ");
+ ShortPrint();
+ PrintF(" for concurrent %s recompilation.\n",
+ CodeKindToString(target_kind));
+ }
+ }
+
+ SetOptimizationMarker(OptimizationMarkerFor(target_kind, mode));
+}
+
+void JSFunction::SetInterruptBudget(Isolate* isolate) {
+ raw_feedback_cell().set_interrupt_budget(
+ TieringManager::InterruptBudgetFor(isolate, *this));
+}
+
// static
MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<JSBoundFunction> function) {
@@ -240,6 +275,12 @@ Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
}
// static
+Handle<String> JSWrappedFunction::ToString(Handle<JSWrappedFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ return isolate->factory()->function_native_code_string();
+}
+
+// static
Handle<Object> JSFunction::GetName(Isolate* isolate,
Handle<JSFunction> function) {
if (function->shared().name_should_print_as_anonymous()) {
@@ -261,19 +302,17 @@ void JSFunction::EnsureClosureFeedbackCellArray(
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
- bool has_closure_feedback_cell_array =
+ const bool has_closure_feedback_cell_array =
(function->has_closure_feedback_cell_array() ||
function->has_feedback_vector());
// Initialize the interrupt budget to the feedback vector allocation budget
// when initializing the feedback cell for the first time or after a bytecode
// flush. We retain the closure feedback cell array on bytecode flush, so
// reset_budget_for_feedback_allocation is used to reset the budget in these
- // cases. When using a fixed allocation budget, we reset it on a bytecode
- // flush so no additional initialization is required here.
- if (V8_UNLIKELY(FLAG_feedback_allocation_on_bytecode_size) &&
- (reset_budget_for_feedback_allocation ||
- !has_closure_feedback_cell_array)) {
- function->SetInterruptBudget();
+ // cases.
+ if (reset_budget_for_feedback_allocation ||
+ !has_closure_feedback_cell_array) {
+ function->SetInterruptBudget(isolate);
}
if (has_closure_feedback_cell_array) {
@@ -292,7 +331,7 @@ void JSFunction::EnsureClosureFeedbackCellArray(
Handle<FeedbackCell> feedback_cell =
isolate->factory()->NewOneClosureCell(feedback_cell_array);
function->set_raw_feedback_cell(*feedback_cell, kReleaseStore);
- function->SetInterruptBudget();
+ function->SetInterruptBudget(isolate);
} else {
function->raw_feedback_cell().set_value(*feedback_cell_array,
kReleaseStore);
@@ -300,16 +339,30 @@ void JSFunction::EnsureClosureFeedbackCellArray(
}
// static
-void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
- IsCompiledScope* is_compiled_scope) {
- Isolate* const isolate = function->GetIsolate();
- DCHECK(is_compiled_scope->is_compiled());
+void JSFunction::EnsureFeedbackVector(Isolate* isolate,
+ Handle<JSFunction> function,
+ IsCompiledScope* compiled_scope) {
+ DCHECK(compiled_scope->is_compiled());
DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_feedback_vector()) return;
#if V8_ENABLE_WEBASSEMBLY
if (function->shared().HasAsmWasmData()) return;
#endif // V8_ENABLE_WEBASSEMBLY
+ CreateAndAttachFeedbackVector(isolate, function, compiled_scope);
+}
+
+// static
+void JSFunction::CreateAndAttachFeedbackVector(
+ Isolate* isolate, Handle<JSFunction> function,
+ IsCompiledScope* compiled_scope) {
+ DCHECK(compiled_scope->is_compiled());
+ DCHECK(function->shared().HasFeedbackMetadata());
+ DCHECK(!function->has_feedback_vector());
+#if V8_ENABLE_WEBASSEMBLY
+ DCHECK(!function->shared().HasAsmWasmData());
+#endif // V8_ENABLE_WEBASSEMBLY
+
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
@@ -317,14 +370,14 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
handle(function->closure_feedback_cell_array(), isolate);
Handle<HeapObject> feedback_vector = FeedbackVector::New(
- isolate, shared, closure_feedback_cell_array, is_compiled_scope);
+ isolate, shared, closure_feedback_cell_array, compiled_scope);
// EnsureClosureFeedbackCellArray should handle the special case where we need
// to allocate a new feedback cell. Please look at comment in that function
// for more details.
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
function->raw_feedback_cell().set_value(*feedback_vector, kReleaseStore);
- function->SetInterruptBudget();
+ function->SetInterruptBudget(isolate);
}
// static
@@ -361,7 +414,7 @@ void JSFunction::InitializeFeedbackCell(
isolate->is_collecting_type_profile();
if (needs_feedback_vector) {
- EnsureFeedbackVector(function, is_compiled_scope);
+ CreateAndAttachFeedbackVector(isolate, function, is_compiled_scope);
} else {
EnsureClosureFeedbackCellArray(function,
reset_budget_for_feedback_allocation);
@@ -591,6 +644,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_PROMISE_TYPE:
case JS_REG_EXP_TYPE:
case JS_SET_TYPE:
+ case JS_SHADOW_REALM_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_PRIMITIVE_WRAPPER_TYPE:
@@ -643,6 +697,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_PROXY_TYPE:
+ case JS_WRAPPED_FUNCTION_TYPE:
case MAP_TYPE:
case ODDBALL_TYPE:
case PROPERTY_CELL_TYPE:
@@ -701,8 +756,9 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
static_cast<int>(constructor->shared().expected_nof_properties()),
JSFunction::CalculateExpectedNofProperties(isolate, new_target));
JSFunction::CalculateInstanceSizeHelper(
- instance_type, true, embedder_fields, expected_nof_properties,
- &instance_size, &in_object_properties);
+ instance_type, constructor_initial_map->has_prototype_slot(),
+ embedder_fields, expected_nof_properties, &instance_size,
+ &in_object_properties);
int pre_allocated = constructor_initial_map->GetInObjectProperties() -
constructor_initial_map->UnusedPropertyFields();
@@ -778,7 +834,8 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
JSReceiver::GetFunctionRealm(new_target), Map);
DCHECK(context->IsNativeContext());
Handle<Object> maybe_index = JSReceiver::GetDataProperty(
- constructor, isolate->factory()->native_context_index_symbol());
+ isolate, constructor,
+ isolate->factory()->native_context_index_symbol());
int index = maybe_index->IsSmi() ? Smi::ToInt(*maybe_index)
: Context::OBJECT_FUNCTION_INDEX;
Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)),
@@ -902,7 +959,7 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
// that exact behavior and go with SharedFunctionInfo::DebugName()
// in case of the fast-path.
Handle<Object> name =
- GetDataProperty(function, isolate->factory()->name_string());
+ GetDataProperty(isolate, function, isolate->factory()->name_string());
if (name->IsString()) return Handle<String>::cast(name);
}
return SharedFunctionInfo::DebugName(handle(function->shared(), isolate));
@@ -957,7 +1014,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
// Check if we should print {function} as a class.
Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
- function, isolate->factory()->class_positions_symbol());
+ isolate, function, isolate->factory()->class_positions_symbol());
if (maybe_class_positions->IsClassPositions()) {
ClassPositions class_positions =
ClassPositions::cast(*maybe_class_positions);
@@ -1059,13 +1116,8 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
DCHECK_LE(static_cast<unsigned>(requested_embedder_fields),
JSObject::kMaxEmbedderFields);
int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
- if (requested_embedder_fields) {
- // If there are embedder fields, then the embedder fields start offset must
- // be properly aligned (embedder fields are located between object header
- // and inobject fields).
- header_size = RoundUp<kSystemPointerSize>(header_size);
- requested_embedder_fields *= kEmbedderDataSlotSizeInTaggedSlots;
- }
+ requested_embedder_fields *= kEmbedderDataSlotSizeInTaggedSlots;
+
int max_nof_fields =
(JSObject::kMaxInstanceSize - header_size) >> kTaggedSizeLog2;
CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 866871628f..6d430aec30 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -19,21 +19,21 @@ namespace internal {
// An abstract superclass for classes representing JavaScript function values.
// It doesn't carry any functionality but allows function classes to be
// identified in the type system.
-class JSFunctionOrBoundFunction
- : public TorqueGeneratedJSFunctionOrBoundFunction<JSFunctionOrBoundFunction,
- JSObject> {
+class JSFunctionOrBoundFunctionOrWrappedFunction
+ : public TorqueGeneratedJSFunctionOrBoundFunctionOrWrappedFunction<
+ JSFunctionOrBoundFunctionOrWrappedFunction, JSObject> {
public:
static const int kLengthDescriptorIndex = 0;
static const int kNameDescriptorIndex = 1;
STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
- TQ_OBJECT_CONSTRUCTORS(JSFunctionOrBoundFunction)
+ TQ_OBJECT_CONSTRUCTORS(JSFunctionOrBoundFunctionOrWrappedFunction)
};
// JSBoundFunction describes a bound function exotic object.
class JSBoundFunction
- : public TorqueGeneratedJSBoundFunction<JSBoundFunction,
- JSFunctionOrBoundFunction> {
+ : public TorqueGeneratedJSBoundFunction<
+ JSBoundFunction, JSFunctionOrBoundFunctionOrWrappedFunction> {
public:
static MaybeHandle<String> GetName(Isolate* isolate,
Handle<JSBoundFunction> function);
@@ -51,9 +51,25 @@ class JSBoundFunction
TQ_OBJECT_CONSTRUCTORS(JSBoundFunction)
};
+// JSWrappedFunction describes a wrapped function exotic object.
+class JSWrappedFunction
+ : public TorqueGeneratedJSWrappedFunction<
+ JSWrappedFunction, JSFunctionOrBoundFunctionOrWrappedFunction> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSWrappedFunction)
+ DECL_VERIFIER(JSWrappedFunction)
+
+ // The wrapped function's string representation implemented according
+ // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSWrappedFunction> function);
+
+ TQ_OBJECT_CONSTRUCTORS(JSWrappedFunction)
+};
+
// JSFunction describes JavaScript functions.
-class JSFunction
- : public TorqueGeneratedJSFunction<JSFunction, JSFunctionOrBoundFunction> {
+class JSFunction : public TorqueGeneratedJSFunction<
+ JSFunction, JSFunctionOrBoundFunctionOrWrappedFunction> {
public:
// [prototype_or_initial_map]:
DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
@@ -84,14 +100,12 @@ class JSFunction
// optimized code object, or when reading from the background thread.
// Storing a builtin doesn't require release semantics because these objects
// are fully initialized.
- DECL_ACCESSORS(code, Code)
- DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
+ DECL_ACCESSORS(code, CodeT)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code, CodeT)
#ifdef V8_EXTERNAL_CODE_SPACE
// Convenient overloads to avoid unnecessary Code <-> CodeT conversions.
// TODO(v8:11880): remove once |code| accessors are migrated to CodeT.
- inline void set_code(CodeT code,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void set_code(CodeT code, ReleaseStoreTag,
+ inline void set_code(Code code, ReleaseStoreTag,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
#endif
@@ -129,12 +143,9 @@ class JSFunction
base::Optional<CodeKind> GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
- bool ActiveTierIsTurbofan() const;
bool ActiveTierIsBaseline() const;
- bool ActiveTierIsMidtierTurboprop() const;
- bool ActiveTierIsToptierTurboprop() const;
-
- CodeKind NextTier() const;
+ bool ActiveTierIsMaglev() const;
+ bool ActiveTierIsTurbofan() const;
// Similar to SharedFunctionInfo::CanDiscardCompiled. Returns true, if the
// attached code can be recreated at a later point by replacing it with
@@ -150,7 +161,8 @@ class JSFunction
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
- inline void MarkForOptimization(ConcurrencyMode mode);
+ void MarkForOptimization(Isolate* isolate, CodeKind target_kind,
+ ConcurrencyMode mode);
// Tells whether or not the function is already marked for lazy recompilation.
inline bool IsMarkedForOptimization();
@@ -167,7 +179,7 @@ class JSFunction
// Sets the interrupt budget based on whether the function has a feedback
// vector and any optimized code.
- inline void SetInterruptBudget();
+ void SetInterruptBudget(Isolate* isolate);
// If slack tracking is active, it computes instance size of the initial map
// with minimum permissible object slack. If it is not active, it simply
@@ -194,7 +206,11 @@ class JSFunction
inline FeedbackVector feedback_vector() const;
inline bool has_feedback_vector() const;
V8_EXPORT_PRIVATE static void EnsureFeedbackVector(
- Handle<JSFunction> function, IsCompiledScope* compiled_scope);
+ Isolate* isolate, Handle<JSFunction> function,
+ IsCompiledScope* compiled_scope);
+ static void CreateAndAttachFeedbackVector(Isolate* isolate,
+ Handle<JSFunction> function,
+ IsCompiledScope* compiled_scope);
// Functions related to closure feedback cell array that holds feedback cells
// used to create closures from this function. We allocate closure feedback
@@ -317,9 +333,6 @@ class JSFunction
class BodyDescriptor;
private:
- DECL_ACCESSORS(raw_code, CodeT)
- DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
-
// JSFunction doesn't have a fixed header size:
// Hide TorqueGeneratedClass::kHeaderSize to avoid confusion.
static const int kHeaderSize;
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index 59dd2d5dc2..5ffed87356 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -3,10 +3,11 @@
// found in the LICENSE file.
@abstract
-extern class JSFunctionOrBoundFunction extends JSObject {
+extern class JSFunctionOrBoundFunctionOrWrappedFunction extends JSObject {
}
-extern class JSBoundFunction extends JSFunctionOrBoundFunction {
+extern class JSBoundFunction extends
+ JSFunctionOrBoundFunctionOrWrappedFunction {
// The wrapped function object.
bound_target_function: Callable;
// The value that is always passed as the this value when calling the wrapped
@@ -17,10 +18,18 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
bound_arguments: FixedArray;
}
+extern class JSWrappedFunction extends
+ JSFunctionOrBoundFunctionOrWrappedFunction {
+ // The wrapped function object.
+ wrapped_target_function: Callable;
+ // The creation context.
+ context: NativeContext;
+}
+
// This class does not use the generated verifier, so if you change anything
// here, please also update JSFunctionVerify in objects-debug.cc.
@highestInstanceTypeWithinParentClassRange
-extern class JSFunction extends JSFunctionOrBoundFunction {
+extern class JSFunction extends JSFunctionOrBoundFunctionOrWrappedFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: FeedbackCell;
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 6830d4f992..5ce722128a 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -197,19 +197,13 @@ Handle<String> JSListFormat::TypeAsString() const {
namespace {
-// Extract String from JSArray into array of UnicodeString
+// Extract String from FixedArray into array of UnicodeString
Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
- Isolate* isolate, Handle<JSArray> array) {
- // Thanks to iterable-to-list preprocessing, we never see dictionary-mode
- // arrays here, so the loop below can construct an entry from the index.
- DCHECK(array->HasFastElements(isolate));
- auto* accessor = array->GetElementsAccessor();
- size_t length = accessor->NumberOfElements(*array);
-
+ Isolate* isolate, Handle<FixedArray> array) {
+ int length = array->length();
std::vector<icu::UnicodeString> result;
- for (InternalIndex entry : InternalIndex::Range(length)) {
- DCHECK(accessor->HasEntry(*array, entry));
- Handle<Object> item = accessor->Get(array, entry);
+ for (int i = 0; i < length; i++) {
+ Handle<Object> item = FixedArray::get(*array, i, isolate);
DCHECK(item->IsString());
Handle<String> item_str = Handle<String>::cast(item);
if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str);
@@ -220,7 +214,7 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
- Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
+ Isolate* isolate, Handle<JSListFormat> format, Handle<FixedArray> list,
const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&)>&
formatToResult) {
DCHECK(!list->IsUndefined());
@@ -283,14 +277,14 @@ MaybeHandle<JSArray> FormattedListToJSArray(
// ecma402 #sec-formatlist
MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
Handle<JSListFormat> format,
- Handle<JSArray> list) {
+ Handle<FixedArray> list) {
return FormatListCommon<String>(isolate, format, list,
Intl::FormattedToString);
}
// ecma42 #sec-formatlisttoparts
MaybeHandle<JSArray> JSListFormat::FormatListToParts(
- Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list) {
+ Isolate* isolate, Handle<JSListFormat> format, Handle<FixedArray> list) {
return FormatListCommon<JSArray>(isolate, format, list,
FormattedListToJSArray);
}
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 123f9e459e..0f6de1e2bf 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -46,12 +46,12 @@ class JSListFormat
// ecma402 #sec-formatlist
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatList(
Isolate* isolate, Handle<JSListFormat> format_holder,
- Handle<JSArray> list);
+ Handle<FixedArray> list);
// ecma42 #sec-formatlisttoparts
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatListToParts(
Isolate* isolate, Handle<JSListFormat> format_holder,
- Handle<JSArray> list);
+ Handle<FixedArray> list);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 27324f0d42..e93eb8a804 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -689,7 +689,7 @@ MaybeHandle<JSObject> JSLocale::TextInfo(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSObject);
}
- if (orientation == ULOC_LAYOUT_LTR) {
+ if (orientation == ULOC_LAYOUT_RTL) {
// Let dir be "rtl".
dir = factory->rtl_string();
}
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index cddc93afd2..570a00da8e 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -25,6 +25,9 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat)
ACCESSORS(JSNumberFormat, icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>,
kIcuNumberFormatterOffset)
+ACCESSORS(JSNumberFormat, icu_number_range_formatter,
+ Managed<icu::number::LocalizedNumberRangeFormatter>,
+ kIcuNumberRangeFormatterOffset)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index cc337a0df2..ebfa5528a3 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -18,10 +18,9 @@
#include "src/objects/objects-inl.h"
#include "src/objects/option-utils.h"
#include "unicode/currunit.h"
-#include "unicode/decimfmt.h"
#include "unicode/locid.h"
#include "unicode/numberformatter.h"
-#include "unicode/numfmt.h"
+#include "unicode/numberrangeformatter.h"
#include "unicode/numsys.h"
#include "unicode/ucurr.h"
#include "unicode/uloc.h"
@@ -98,6 +97,40 @@ enum class SignDisplay {
ALWAYS,
NEVER,
EXCEPT_ZERO,
+ NEGATIVE,
+};
+
+// [[RoundingMode]] is one of the String values "ceil", "floor", "expand",
+// "trunc", "halfCeil", "halfFloor", "halfExpand", "halfTrunc", or "halfEven",
+// specifying the rounding strategy for the number.
+// Note: To avoid name conflict with RoundingMode defined in other places,
+// prefix with Intl as IntlRoundingMode
+enum class IntlRoundingMode {
+ CEIL,
+ FLOOR,
+ EXPAND,
+ TRUNC,
+ HALF_CEIL,
+ HALF_FLOOR,
+ HALF_EXPAND,
+ HALF_TRUNC,
+ HALF_EVEN,
+};
+
+// [[TrailingZeroDisplay]] is one of the String values "auto" or
+// "stripIfInteger", specifying the strategy for displaying trailing zeros on
+// whole number.
+enum class TrailingZeroDisplay {
+ AUTO,
+ STRIP_IF_INTEGER,
+};
+
+// [[UseGrouping]] is ....
+enum class UseGrouping {
+ OFF,
+ MIN2,
+ AUTO,
+ ALWAYS,
};
UNumberUnitWidth ToUNumberUnitWidth(CurrencyDisplay currency_display) {
@@ -147,6 +180,12 @@ UNumberSignDisplay ToUNumberSignDisplay(SignDisplay sign_display,
}
DCHECK(currency_sign == CurrencySign::STANDARD);
return UNumberSignDisplay::UNUM_SIGN_EXCEPT_ZERO;
+ case SignDisplay::NEGATIVE:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_NEGATIVE;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_NEGATIVE;
}
}
@@ -170,6 +209,43 @@ icu::number::Notation ToICUNotation(Notation notation,
}
}
+UNumberFormatRoundingMode ToUNumberFormatRoundingMode(
+ IntlRoundingMode rounding_mode) {
+ switch (rounding_mode) {
+ case IntlRoundingMode::CEIL:
+ return UNumberFormatRoundingMode::UNUM_ROUND_CEILING;
+ case IntlRoundingMode::FLOOR:
+ return UNumberFormatRoundingMode::UNUM_ROUND_FLOOR;
+ case IntlRoundingMode::EXPAND:
+ return UNumberFormatRoundingMode::UNUM_ROUND_UP;
+ case IntlRoundingMode::TRUNC:
+ return UNumberFormatRoundingMode::UNUM_ROUND_DOWN;
+ case IntlRoundingMode::HALF_CEIL:
+ return UNumberFormatRoundingMode::UNUM_ROUND_HALF_CEILING;
+ case IntlRoundingMode::HALF_FLOOR:
+ return UNumberFormatRoundingMode::UNUM_ROUND_HALF_FLOOR;
+ case IntlRoundingMode::HALF_EXPAND:
+ return UNumberFormatRoundingMode::UNUM_ROUND_HALFUP;
+ case IntlRoundingMode::HALF_TRUNC:
+ return UNumberFormatRoundingMode::UNUM_ROUND_HALFDOWN;
+ case IntlRoundingMode::HALF_EVEN:
+ return UNumberFormatRoundingMode::UNUM_ROUND_HALFEVEN;
+ }
+}
+
+UNumberGroupingStrategy ToUNumberGroupingStrategy(UseGrouping use_grouping) {
+ switch (use_grouping) {
+ case UseGrouping::OFF:
+ return UNumberGroupingStrategy::UNUM_GROUPING_OFF;
+ case UseGrouping::MIN2:
+ return UNumberGroupingStrategy::UNUM_GROUPING_MIN2;
+ case UseGrouping::AUTO:
+ return UNumberGroupingStrategy::UNUM_GROUPING_AUTO;
+ case UseGrouping::ALWAYS:
+ return UNumberGroupingStrategy::UNUM_GROUPING_ON_ALIGNED;
+ }
+}
+
std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
UErrorCode status = U_ZERO_ERROR;
int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
@@ -336,6 +412,35 @@ bool UseGroupingFromSkeleton(const icu::UnicodeString& skeleton) {
return skeleton.indexOf("group-off") == -1;
}
+Handle<Object> UseGroupingFromSkeleton(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ Factory* factory = isolate->factory();
+ static const char* group = "group-";
+ int32_t start = skeleton.indexOf(group);
+ if (start >= 0) {
+ DCHECK_EQ(6, strlen(group));
+ icu::UnicodeString check = skeleton.tempSubString(start + 6);
+ // Ex: skeleton as
+ // .### rounding-mode-half-up group-off
+ if (check.startsWith("off")) {
+ return factory->false_value();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-up group-min2
+ if (check.startsWith("min2")) {
+ return ReadOnlyRoots(isolate).min2_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-up group-on-aligned
+ if (check.startsWith("on-aligned")) {
+ return ReadOnlyRoots(isolate).always_string_handle();
+ }
+ }
+ // Ex: skeleton as
+ // .###
+ return ReadOnlyRoots(isolate).auto_string_handle();
+}
+
// Parse currency code from skeleton. For example, skeleton as
// "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
const icu::UnicodeString CurrencyFromSkeleton(
@@ -462,6 +567,116 @@ Handle<String> SignDisplayString(Isolate* isolate,
skeleton.indexOf("sign-except-zero") >= 0) {
return ReadOnlyRoots(isolate).exceptZero_string_handle();
}
+ // Ex: skeleton as
+ // ".### rounding-mode-half-up sign-negative" or
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-negative"
+ if (skeleton.indexOf("sign-accounting-negative") >= 0 ||
+ skeleton.indexOf("sign-negative") >= 0) {
+ return ReadOnlyRoots(isolate).negative_string_handle();
+ }
+ return ReadOnlyRoots(isolate).auto_string_handle();
+}
+
+// Return RoundingMode as string based on skeleton.
+Handle<String> RoundingModeString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ static const char* rounding_mode = "rounding-mode-";
+ int32_t start = skeleton.indexOf(rounding_mode);
+ if (start >= 0) {
+ DCHECK_EQ(14, strlen(rounding_mode));
+ icu::UnicodeString check = skeleton.tempSubString(start + 14);
+
+ // Ex: skeleton as
+ // .### rounding-mode-ceiling
+ if (check.startsWith("ceiling")) {
+ return ReadOnlyRoots(isolate).ceil_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-down
+ if (check.startsWith("down")) {
+ return ReadOnlyRoots(isolate).trunc_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-floor
+ if (check.startsWith("floor")) {
+ return ReadOnlyRoots(isolate).floor_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-ceiling
+ if (check.startsWith("half-ceiling")) {
+ return ReadOnlyRoots(isolate).halfCeil_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-down
+ if (check.startsWith("half-down")) {
+ return ReadOnlyRoots(isolate).halfTrunc_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-floor
+ if (check.startsWith("half-floor")) {
+ return ReadOnlyRoots(isolate).halfFloor_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-half-up
+ if (check.startsWith("half-up")) {
+ return ReadOnlyRoots(isolate).halfExpand_string_handle();
+ }
+ // Ex: skeleton as
+ // .### rounding-mode-up
+ if (check.startsWith("up")) {
+ return ReadOnlyRoots(isolate).expand_string_handle();
+ }
+ }
+ // Ex: skeleton as
+ // .###
+ return ReadOnlyRoots(isolate).halfEven_string_handle();
+}
+
+Handle<Object> RoundingIncrement(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ int32_t cur = skeleton.indexOf(u"precision-increment/");
+ if (cur < 0) return isolate->factory()->NewNumberFromInt(1);
+ cur += 20; // length of "precision-increment/"
+ int32_t increment = 0;
+ while (cur < skeleton.length()) {
+ char16_t c = skeleton[cur++];
+ if (c == u'.') continue;
+ if (!IsDecimalDigit(c)) break;
+ increment = increment * 10 + (c - '0');
+ }
+ return isolate->factory()->NewNumberFromInt(increment);
+}
+
+// Return RoundingPriority as string based on skeleton.
+Handle<String> RoundingPriorityString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ int32_t found;
+ // If #r or @r is followed by a SPACE or in the end of line.
+ if ((found = skeleton.indexOf("#r")) >= 0 ||
+ (found = skeleton.indexOf("@r")) >= 0) {
+ if (found + 2 == skeleton.length() || skeleton[found + 2] == ' ') {
+ return ReadOnlyRoots(isolate).morePrecision_string_handle();
+ }
+ }
+ // If #s or @s is followed by a SPACE or in the end of line.
+ if ((found = skeleton.indexOf("#s")) >= 0 ||
+ (found = skeleton.indexOf("@s")) >= 0) {
+ if (found + 2 == skeleton.length() || skeleton[found + 2] == ' ') {
+ return ReadOnlyRoots(isolate).lessPrecision_string_handle();
+ }
+ }
+ return ReadOnlyRoots(isolate).auto_string_handle();
+}
+
+// Return trailingZeroDisplay as string based on skeleton.
+Handle<String> TrailingZeroDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ int32_t found;
+ if ((found = skeleton.indexOf("/w")) >= 0) {
+ if (found + 2 == skeleton.length() || skeleton[found + 2] == ' ') {
+ return ReadOnlyRoots(isolate).stripIfInteger_string_handle();
+ }
+ }
return ReadOnlyRoots(isolate).auto_string_handle();
}
@@ -505,7 +720,7 @@ bool JSNumberFormat::FractionDigitsFromSkeleton(
if (index < 0) return false;
*minimum = 0;
index++; // skip the '.'
- while (index < skeleton.length() && skeleton[index] == '0') {
+ while (index < skeleton.length() && IsDecimalDigit(skeleton[index])) {
(*minimum)++;
index++;
}
@@ -607,21 +822,16 @@ Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
return Style::DECIMAL;
}
-} // anonymous namespace
-
-icu::number::LocalizedNumberFormatter
-JSNumberFormat::SetDigitOptionsToFormatter(
- const icu::number::LocalizedNumberFormatter& icu_number_formatter,
+icu::number::UnlocalizedNumberFormatter SetDigitOptionsToFormatterV2(
+ const icu::number::UnlocalizedNumberFormatter& settings,
const Intl::NumberFormatDigitOptions& digit_options) {
- icu::number::LocalizedNumberFormatter result = icu_number_formatter;
+ icu::number::UnlocalizedNumberFormatter result = settings;
if (digit_options.minimum_integer_digits > 1) {
result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo(
digit_options.minimum_integer_digits));
}
- // Value -1 of minimum_significant_digits represent the roundingtype is
- // "compact-rounding".
- if (digit_options.minimum_significant_digits < 0) {
+ if (digit_options.rounding_type == Intl::RoundingType::kMorePrecision) {
return result;
}
icu::number::Precision precision =
@@ -636,6 +846,70 @@ JSNumberFormat::SetDigitOptionsToFormatter(
return result.precision(precision);
}
+icu::number::UnlocalizedNumberFormatter SetDigitOptionsToFormatterV3(
+ const icu::number::UnlocalizedNumberFormatter& settings,
+ const Intl::NumberFormatDigitOptions& digit_options, int rounding_increment,
+ JSNumberFormat::ShowTrailingZeros trailing_zeros) {
+ icu::number::UnlocalizedNumberFormatter result = settings;
+ if (digit_options.minimum_integer_digits > 1) {
+ result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo(
+ digit_options.minimum_integer_digits));
+ }
+
+ icu::number::Precision precision = icu::number::Precision::unlimited();
+ bool relaxed = false;
+ switch (digit_options.rounding_type) {
+ case Intl::RoundingType::kSignificantDigits:
+ precision = icu::number::Precision::minMaxSignificantDigits(
+ digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits);
+ break;
+ case Intl::RoundingType::kFractionDigits:
+ precision = icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits);
+ break;
+ case Intl::RoundingType::kMorePrecision:
+ relaxed = true;
+ V8_FALLTHROUGH;
+ case Intl::RoundingType::kLessPrecision:
+ precision =
+ icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits)
+ .withSignificantDigits(digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits,
+ relaxed ? UNUM_ROUNDING_PRIORITY_RELAXED
+ : UNUM_ROUNDING_PRIORITY_STRICT);
+ break;
+ }
+ if (rounding_increment != 1) {
+ double icu_increment = rounding_increment *
+ std::pow(10, -digit_options.maximum_fraction_digits);
+ precision = ::icu::number::Precision::increment(icu_increment)
+ .withMinFraction(digit_options.minimum_fraction_digits);
+ }
+ if (trailing_zeros == JSNumberFormat::ShowTrailingZeros::kHide) {
+ precision = precision.trailingZeroDisplay(UNUM_TRAILING_ZERO_HIDE_IF_WHOLE);
+ }
+ return result.precision(precision);
+}
+
+} // anonymous namespace
+
+icu::number::UnlocalizedNumberFormatter
+JSNumberFormat::SetDigitOptionsToFormatter(
+ const icu::number::UnlocalizedNumberFormatter& settings,
+ const Intl::NumberFormatDigitOptions& digit_options, int rounding_increment,
+ JSNumberFormat::ShowTrailingZeros trailing_zeros) {
+ if (FLAG_harmony_intl_number_format_v3) {
+ return SetDigitOptionsToFormatterV3(settings, digit_options,
+ rounding_increment, trailing_zeros);
+ } else {
+ return SetDigitOptionsToFormatterV2(settings, digit_options);
+ }
+}
+
// static
// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
@@ -662,12 +936,24 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
// [[Style]] "style"
// [[Currency]] "currency"
// [[CurrencyDisplay]] "currencyDisplay"
+ // [[CurrencySign]] "currencySign"
+ // [[Unit]] "unit"
+ // [[UnitDisplay]] "unitDisplay"
// [[MinimumIntegerDigits]] "minimumIntegerDigits"
// [[MinimumFractionDigits]] "minimumFractionDigits"
// [[MaximumFractionDigits]] "maximumFractionDigits"
// [[MinimumSignificantDigits]] "minimumSignificantDigits"
// [[MaximumSignificantDigits]] "maximumSignificantDigits"
// [[UseGrouping]] "useGrouping"
+ // [[Notation]] "notation"
+ // [[CompactDisplay]] "compactDisplay"
+ // [[SignDisplay]] "signDisplay"
+ //
+ // For v3
+ // [[RoundingMode]] "roundingMode"
+ // [[RoundingIncrement]] "roundingIncrement"
+ // [[TrailingZeroDisplay]] "trailingZeroDisplay"
+
CHECK(JSReceiver::CreateDataProperty(isolate, options,
factory->locale_string(), locale,
Just(kDontThrow))
@@ -747,11 +1033,19 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
.FromJust());
}
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->useGrouping_string(),
- factory->ToBoolean(UseGroupingFromSkeleton(skeleton)),
- Just(kDontThrow))
- .FromJust());
+ if (FLAG_harmony_intl_number_format_v3) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->useGrouping_string(),
+ UseGroupingFromSkeleton(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ } else {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->useGrouping_string(),
+ factory->ToBoolean(UseGroupingFromSkeleton(skeleton)),
+ Just(kDontThrow))
+ .FromJust());
+ }
+
Notation notation = NotationFromSkeleton(skeleton);
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->notation_string(),
@@ -768,6 +1062,24 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
isolate, options, factory->signDisplay_string(),
SignDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
+ if (FLAG_harmony_intl_number_format_v3) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->roundingMode_string(),
+ RoundingModeString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->roundingIncrement_string(),
+ RoundingIncrement(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->trailingZeroDisplay_string(),
+ TrailingZeroDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->roundingPriority_string(),
+ RoundingPriorityString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
return options;
}
@@ -800,6 +1112,14 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
return Handle<JSNumberFormat>::cast(object);
}
+// 22. is in « 1, 2, 5, 10, 20, 25, 50, 100, 200, 250, 500, 1000, 2000, 2500,
+// 5000 »
+bool IsValidRoundingIncrement(int value) {
+ return value == 1 || value == 2 || value == 5 || value == 10 || value == 20 ||
+ value == 25 || value == 50 || value == 100 || value == 200 ||
+ value == 250 || value == 500 || value == 1000 || value == 2000 ||
+ value == 2500 || value == 5000;
+}
// static
MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Handle<Map> map,
@@ -821,28 +1141,28 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
JSNumberFormat);
- // 4. Let opt be a new Record.
- // 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
+ // 3. Let opt be a new Record.
+ // 4. Let matcher be ? GetOption(options, "localeMatcher", "string", «
// "lookup", "best fit" », "best fit").
- // 6. Set opt.[[localeMatcher]] to matcher.
+ // 5. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
std::unique_ptr<char[]> numbering_system_str = nullptr;
- // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // 6. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
// `"string"`, *undefined*, *undefined*).
Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
isolate, options, service, &numbering_system_str);
- // 8. If _numberingSystem_ is not *undefined*, then
- // a. If _numberingSystem_ does not match the
+ // 7. If _numberingSystem_ is not *undefined*, then
+ // 8. If _numberingSystem_ does not match the
// `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
// exception.
MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSNumberFormat>());
- // 7. Let localeData be %NumberFormat%.[[LocaleData]].
- // 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
+ // 9. Let localeData be %NumberFormat%.[[LocaleData]].
+ // 10. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
// requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
// localeData).
std::set<std::string> relevant_extension_keys{"nu"};
@@ -882,21 +1202,20 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 11. Let dataLocale be r.[[dataLocale]].
- icu::number::LocalizedNumberFormatter icu_number_formatter =
- icu::number::NumberFormatter::withLocale(icu_locale)
- .roundingMode(UNUM_ROUND_HALFUP);
+ icu::number::UnlocalizedNumberFormatter settings =
+ icu::number::UnlocalizedNumberFormatter().roundingMode(UNUM_ROUND_HALFUP);
// For 'latn' numbering system, skip the adoptSymbols which would cause
// 10.1%-13.7% of regression of JSTests/Intl-NewIntlNumberFormat
// See crbug/1052751 so we skip calling adoptSymbols and depending on the
// default instead.
if (!numbering_system.empty() && numbering_system != "latn") {
- icu_number_formatter = icu_number_formatter.adoptSymbols(
- icu::NumberingSystem::createInstanceByName(numbering_system.c_str(),
- status));
+ settings = settings.adoptSymbols(icu::NumberingSystem::createInstanceByName(
+ numbering_system.c_str(), status));
CHECK(U_SUCCESS(status));
}
+ // ==== Start SetNumberFormatUnitOptions ====
// 3. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency", "unit" », "decimal").
@@ -1029,15 +1348,14 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Intl::ToString(isolate, currency_ustr),
JSNumberFormat);
- icu_number_formatter = icu_number_formatter.unit(
- icu::CurrencyUnit(currency_ustr.getBuffer(), status));
+ settings =
+ settings.unit(icu::CurrencyUnit(currency_ustr.getBuffer(), status));
CHECK(U_SUCCESS(status));
// 14.c Set intlObj.[[CurrencyDisplay]] to currencyDisplay.
// The default unitWidth is SHORT in ICU and that mapped from
// Symbol so we can skip the setting for optimization.
if (currency_display != CurrencyDisplay::SYMBOL) {
- icu_number_formatter = icu_number_formatter.unitWidth(
- ToUNumberUnitWidth(currency_display));
+ settings = settings.unitWidth(ToUNumberUnitWidth(currency_display));
}
CHECK(U_SUCCESS(status));
}
@@ -1051,27 +1369,27 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
icu::MeasureUnit none = icu::MeasureUnit();
// 13.b Set intlObj.[[Unit]] to unit.
if (unit_pair.first != none) {
- icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
+ settings = settings.unit(unit_pair.first);
}
if (unit_pair.second != none) {
- icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
+ settings = settings.perUnit(unit_pair.second);
}
// The default unitWidth is SHORT in ICU and that mapped from
// Symbol so we can skip the setting for optimization.
if (unit_display != UnitDisplay::SHORT) {
- icu_number_formatter =
- icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display));
+ settings = settings.unitWidth(ToUNumberUnitWidth(unit_display));
}
}
+ // === End of SetNumberFormatUnitOptions
+
if (style == Style::PERCENT) {
- icu_number_formatter =
- icu_number_formatter.unit(icu::MeasureUnit::getPercent())
- .scale(icu::number::Scale::powerOfTen(2));
+ settings = settings.unit(icu::MeasureUnit::getPercent())
+ .scale(icu::number::Scale::powerOfTen(2));
}
- // 23. If style is "currency", then
+ // 16. If style is "currency", then
int mnfd_default, mxfd_default;
if (style == Style::CURRENCY) {
// b. Let cDigits be CurrencyDigits(currency).
@@ -1080,7 +1398,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// d. Let mxfdDefault be cDigits.
mnfd_default = c_digits;
mxfd_default = c_digits;
- // 24. Else,
+ // 17. Else,
} else {
// a. Let mnfdDefault be 0.
mnfd_default = 0;
@@ -1096,7 +1414,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
Notation notation = Notation::STANDARD;
- // 25. Let notation be ? GetOption(options, "notation", "string", «
+ // 18. Let notation be ? GetOption(options, "notation", "string", «
// "standard", "scientific", "engineering", "compact" », "standard").
Maybe<Notation> maybe_notation = GetStringOption<Notation>(
isolate, options, "notation", service,
@@ -1105,9 +1423,10 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Notation::COMPACT},
Notation::STANDARD);
MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
+ // 19. Set numberFormat.[[Notation]] to notation.
notation = maybe_notation.FromJust();
- // 27. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
+ // 20. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
// mnfdDefault, mxfdDefault).
Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
@@ -1115,10 +1434,58 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
notation == Notation::COMPACT);
MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
- icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
- icu_number_formatter, digit_options);
- // 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
+ if (FLAG_harmony_intl_number_format_v3) {
+ // 21. Let roundingIncrement be ? GetNumberOption(options,
+ // "roundingIncrement,", 1, 5000, 1).
+ int rounding_increment = 1;
+ Maybe<int> maybe_rounding_increment = GetNumberOption(
+ isolate, options, factory->roundingIncrement_string(), 1, 5000, 1);
+ MAYBE_RETURN(maybe_rounding_increment, MaybeHandle<JSNumberFormat>());
+ CHECK(maybe_rounding_increment.To(&rounding_increment));
+
+ // 22. If roundingIncrement is not in « 1, 2, 5, 10, 20, 25, 50, 100, 200,
+ // 250, 500, 1000, 2000, 2500, 5000 », throw a RangeError exception.
+ if (!IsValidRoundingIncrement(rounding_increment)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->roundingIncrement_string()),
+ JSNumberFormat);
+ }
+ // 23. If roundingIncrement is not 1 and numberFormat.[[RoundingType]] is
+ // not fractionDigits, throw a RangeError exception.
+ if (rounding_increment != 1 &&
+ digit_options.rounding_type != Intl::RoundingType::kFractionDigits) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->roundingIncrement_string()),
+ JSNumberFormat);
+ }
+ // 24. Set _numberFormat.[[RoundingIncrement]] to roundingIncrement.
+
+ // 25. Let trailingZeroDisplay be ? GetOption(options,
+ // "trailingZeroDisplay", "string", « "auto", "stripIfInteger" », "auto").
+ Maybe<TrailingZeroDisplay> maybe_trailing_zero_display =
+ GetStringOption<TrailingZeroDisplay>(
+ isolate, options, "trailingZeroDisplay", service,
+ {"auto", "stripIfInteger"},
+ {TrailingZeroDisplay::AUTO, TrailingZeroDisplay::STRIP_IF_INTEGER},
+ TrailingZeroDisplay::AUTO);
+ MAYBE_RETURN(maybe_trailing_zero_display, MaybeHandle<JSNumberFormat>());
+ TrailingZeroDisplay trailing_zero_display =
+ maybe_trailing_zero_display.FromJust();
+
+ // 26. Set numberFormat.[[TrailingZeroDisplay]] to trailingZeroDisplay.
+ settings = SetDigitOptionsToFormatterV3(
+ settings, digit_options, rounding_increment,
+ trailing_zero_display == TrailingZeroDisplay::STRIP_IF_INTEGER
+ ? ShowTrailingZeros::kHide
+ : ShowTrailingZeros::kShow);
+ } else {
+ settings = SetDigitOptionsToFormatterV2(settings, digit_options);
+ }
+
+ // 27. Let compactDisplay be ? GetOption(options, "compactDisplay",
// "string", « "short", "long" », "short").
Maybe<CompactDisplay> maybe_compact_display = GetStringOption<CompactDisplay>(
isolate, options, "compactDisplay", service, {"short", "long"},
@@ -1126,33 +1493,73 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
CompactDisplay compact_display = maybe_compact_display.FromJust();
- // 26. Set numberFormat.[[Notation]] to notation.
// The default notation in ICU is Simple, which mapped from STANDARD
// so we can skip setting it.
if (notation != Notation::STANDARD) {
- icu_number_formatter =
- icu_number_formatter.notation(ToICUNotation(notation, compact_display));
+ settings = settings.notation(ToICUNotation(notation, compact_display));
}
- // 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
- // undefined, true).
- bool use_grouping = true;
- Maybe<bool> found_use_grouping =
- GetBoolOption(isolate, options, "useGrouping", service, &use_grouping);
- MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
- // 31. Set numberFormat.[[UseGrouping]] to useGrouping.
- if (!use_grouping) {
- icu_number_formatter = icu_number_formatter.grouping(
- UNumberGroupingStrategy::UNUM_GROUPING_OFF);
+
+ if (!FLAG_harmony_intl_number_format_v3) {
+ // 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
+ // undefined, true).
+ bool use_grouping = true;
+ Maybe<bool> found_use_grouping =
+ GetBoolOption(isolate, options, "useGrouping", service, &use_grouping);
+ MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
+ // 31. Set numberFormat.[[UseGrouping]] to useGrouping.
+ if (!use_grouping) {
+ settings = settings.grouping(UNumberGroupingStrategy::UNUM_GROUPING_OFF);
+ }
+ settings = JSNumberFormat::SetDigitOptionsToFormatter(
+ settings, digit_options, 1, ShowTrailingZeros::kShow);
+ } else {
+ // 28. Let defaultUseGrouping be "auto".
+ UseGrouping default_use_grouping = UseGrouping::AUTO;
+
+ // 29. If notation is "compact", then
+ if (notation == Notation::COMPACT) {
+ // a. Set numberFormat.[[CompactDisplay]] to compactDisplay.
+ // Done in above together
+ // b. Set defaultUseGrouping to "min2".
+ default_use_grouping = UseGrouping::MIN2;
+ }
+
+ // 30. Let useGrouping be ? GetStringOrBooleanOption(options, "useGrouping",
+ // « "min2", "auto", "always" », "always", false, defaultUseGrouping).
+ Maybe<UseGrouping> maybe_use_grouping =
+ GetStringOrBooleanOption<UseGrouping>(
+ isolate, options, "useGrouping", service,
+ {"min2", "auto", "always"},
+ {UseGrouping::MIN2, UseGrouping::AUTO, UseGrouping::ALWAYS},
+ UseGrouping::ALWAYS, // trueValue
+ UseGrouping::OFF, // falseValue
+ default_use_grouping); // fallbackValue
+ MAYBE_RETURN(maybe_use_grouping, MaybeHandle<JSNumberFormat>());
+ UseGrouping use_grouping = maybe_use_grouping.FromJust();
+ // 31. Set numberFormat.[[UseGrouping]] to useGrouping.
+ if (use_grouping != UseGrouping::AUTO) {
+ settings = settings.grouping(ToUNumberGroupingStrategy(use_grouping));
+ }
}
// 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
- // "auto", "never", "always", "exceptZero" », "auto").
- Maybe<SignDisplay> maybe_sign_display = GetStringOption<SignDisplay>(
- isolate, options, "signDisplay", service,
- {"auto", "never", "always", "exceptZero"},
- {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
- SignDisplay::EXCEPT_ZERO},
- SignDisplay::AUTO);
+ // "auto", "never", "always", "exceptZero", "negative" », "auto").
+ Maybe<SignDisplay> maybe_sign_display = Nothing<SignDisplay>();
+ if (FLAG_harmony_intl_number_format_v3) {
+ maybe_sign_display = GetStringOption<SignDisplay>(
+ isolate, options, "signDisplay", service,
+ {"auto", "never", "always", "exceptZero", "negative"},
+ {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
+ SignDisplay::EXCEPT_ZERO, SignDisplay::NEGATIVE},
+ SignDisplay::AUTO);
+ } else {
+ maybe_sign_display = GetStringOption<SignDisplay>(
+ isolate, options, "signDisplay", service,
+ {"auto", "never", "always", "exceptZero"},
+ {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
+ SignDisplay::EXCEPT_ZERO},
+ SignDisplay::AUTO);
+ }
MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
SignDisplay sign_display = maybe_sign_display.FromJust();
@@ -1162,8 +1569,29 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// under that values for optimization.
if (sign_display != SignDisplay::AUTO ||
currency_sign != CurrencySign::STANDARD) {
- icu_number_formatter = icu_number_formatter.sign(
- ToUNumberSignDisplay(sign_display, currency_sign));
+ settings = settings.sign(ToUNumberSignDisplay(sign_display, currency_sign));
+ }
+
+ if (FLAG_harmony_intl_number_format_v3) {
+ // X. Let roundingMode be ? GetOption(options, "roundingMode", "string",
+ // « "ceil", "floor", "expand", "trunc", "halfCeil", "halfFloor",
+ // "halfExpand", "halfTrunc", "halfEven" »,
+ // "halfExpand").
+ Maybe<IntlRoundingMode> maybe_rounding_mode =
+ GetStringOption<IntlRoundingMode>(
+ isolate, options, "roundingMode", service,
+ {"ceil", "floor", "expand", "trunc", "halfCeil", "halfFloor",
+ "halfExpand", "halfTrunc", "halfEven"},
+ {IntlRoundingMode::CEIL, IntlRoundingMode::FLOOR,
+ IntlRoundingMode::EXPAND, IntlRoundingMode::TRUNC,
+ IntlRoundingMode::HALF_CEIL, IntlRoundingMode::HALF_FLOOR,
+ IntlRoundingMode::HALF_EXPAND, IntlRoundingMode::HALF_TRUNC,
+ IntlRoundingMode::HALF_EVEN},
+ IntlRoundingMode::HALF_EXPAND);
+ MAYBE_RETURN(maybe_rounding_mode, MaybeHandle<JSNumberFormat>());
+ IntlRoundingMode rounding_mode = maybe_rounding_mode.FromJust();
+ settings =
+ settings.roundingMode(ToUNumberFormatRoundingMode(rounding_mode));
}
// 25. Let dataLocaleData be localeData.[[<dataLocale>]].
@@ -1180,12 +1608,27 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 30. Set numberFormat.[[NegativePattern]] to
// stylePatterns.[[negativePattern]].
//
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ settings.locale(icu_locale);
+
+ icu::number::LocalizedNumberRangeFormatter icu_number_range_formatter =
+ icu::number::UnlocalizedNumberRangeFormatter()
+ .numberFormatterBoth(settings)
+ .locale(icu_locale);
+
Handle<Managed<icu::number::LocalizedNumberFormatter>>
managed_number_formatter =
Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
isolate, 0,
new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+ Handle<Managed<icu::number::LocalizedNumberRangeFormatter>>
+ managed_number_range_formatter =
+ Managed<icu::number::LocalizedNumberRangeFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberRangeFormatter(
+ icu_number_range_formatter));
+
// Now all properties are ready, so we can allocate the result object.
Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>::cast(
isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
@@ -1193,6 +1636,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
number_format->set_locale(*locale_str);
number_format->set_icu_number_formatter(*managed_number_formatter);
+ number_format->set_icu_number_range_formatter(
+ *managed_number_range_formatter);
number_format->set_bound_format(*factory->undefined_value());
// 31. Return numberFormat.
@@ -1200,6 +1645,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
namespace {
+
Maybe<bool> IcuFormatNumber(
Isolate* isolate,
const icu::number::LocalizedNumberFormatter& number_format,
@@ -1212,13 +1658,42 @@ Maybe<bool> IcuFormatNumber(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
BigInt::ToString(isolate, big_int),
Nothing<bool>());
- *formatted = number_format.formatDecimal(
- {big_int_string->ToCString().get(), big_int_string->length()}, status);
+ big_int_string = String::Flatten(isolate, big_int_string);
+ DisallowGarbageCollection no_gc;
+ const String::FlatContent& flat = big_int_string->GetFlatContent(no_gc);
+ int32_t length = big_int_string->length();
+ DCHECK(flat.IsOneByte());
+ const char* char_buffer =
+ reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
+ *formatted = number_format.formatDecimal({char_buffer, length}, status);
} else {
- double number = numeric_obj->IsNaN()
- ? std::numeric_limits<double>::quiet_NaN()
- : numeric_obj->Number();
- *formatted = number_format.formatDouble(number, status);
+ if (FLAG_harmony_intl_number_format_v3 && numeric_obj->IsString()) {
+ // TODO(ftang) Correct the handling of string after the resolution of
+ // https://github.com/tc39/proposal-intl-numberformat-v3/pull/82
+ Handle<String> string =
+ String::Flatten(isolate, Handle<String>::cast(numeric_obj));
+ DisallowGarbageCollection no_gc;
+ const String::FlatContent& flat = string->GetFlatContent(no_gc);
+ int32_t length = string->length();
+ if (flat.IsOneByte()) {
+ const char* char_buffer =
+ reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
+ *formatted = number_format.formatDecimal({char_buffer, length}, status);
+ } else {
+ // We may have two bytes string such as "漢 123456789".substring(2)
+ // The value will be "123456789" only in ASCII range, but encoded
+ // in two bytes string.
+ // ICU accepts UTF8 string, so if the source is two-byte encoded,
+ // copy into a UTF8 string via ToCString.
+ *formatted = number_format.formatDecimal(
+ {string->ToCString().get(), string->length()}, status);
+ }
+ } else {
+ double number = numeric_obj->IsNaN()
+ ? std::numeric_limits<double>::quiet_NaN()
+ : numeric_obj->Number();
+ *formatted = number_format.formatDouble(number, status);
+ }
}
if (U_FAILURE(status)) {
// This happen because of icu data trimming trim out "unit".
@@ -1229,27 +1704,37 @@ Maybe<bool> IcuFormatNumber(
return Just(true);
}
-} // namespace
-
-MaybeHandle<String> JSNumberFormat::FormatNumeric(
- Isolate* isolate,
- const icu::number::LocalizedNumberFormatter& number_format,
- Handle<Object> numeric_obj) {
- DCHECK(numeric_obj->IsNumeric());
-
- icu::number::FormattedNumber formatted;
- Maybe<bool> maybe_format =
- IcuFormatNumber(isolate, number_format, numeric_obj, &formatted);
- MAYBE_RETURN(maybe_format, Handle<String>());
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString result = formatted.toString(status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+Maybe<icu::Formattable> ToFormattable(Isolate* isolate, Handle<Object> obj,
+ const char* field) {
+ if (obj->IsBigInt()) {
+ Handle<BigInt> big_int = Handle<BigInt>::cast(obj);
+ Handle<String> big_int_string;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
+ BigInt::ToString(isolate, big_int),
+ Nothing<icu::Formattable>());
+ big_int_string = String::Flatten(isolate, big_int_string);
+ {
+ DisallowGarbageCollection no_gc;
+ const String::FlatContent& flat = big_int_string->GetFlatContent(no_gc);
+ int32_t length = big_int_string->length();
+ DCHECK(flat.IsOneByte());
+ const char* char_buffer =
+ reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Formattable result({char_buffer, length}, status);
+ if (U_SUCCESS(status)) return Just(result);
+ }
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::Formattable>());
}
- return Intl::ToString(isolate, result);
-}
+ // TODO(ftang) Handle the case of IsString after the resolution of
+ // https://github.com/tc39/proposal-intl-numberformat-v3/pull/82
-namespace {
+ // FormatRange(|ToParts) does not allow NaN
+ DCHECK(!obj->IsNaN());
+ return Just(icu::Formattable(obj->Number()));
+}
bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
const NumberFormatSpan& b) {
@@ -1359,17 +1844,15 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
}
namespace {
-Maybe<int> ConstructParts(Isolate* isolate,
- icu::number::FormattedNumber* formatted,
+Maybe<int> ConstructParts(Isolate* isolate, icu::FormattedValue* formatted,
Handle<JSArray> result, int start_index,
- Handle<Object> numeric_obj, bool style_is_unit) {
+ bool style_is_unit, bool is_nan, bool output_source) {
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString formatted_text = formatted->toString(status);
if (U_FAILURE(status)) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewTypeError(MessageTemplate::kIcuError), Nothing<int>());
}
- DCHECK(numeric_obj->IsNumeric());
int32_t length = formatted_text.length();
int index = start_index;
if (length == 0) return Just(index);
@@ -1380,13 +1863,21 @@ Maybe<int> ConstructParts(Isolate* isolate,
// there's another field with exactly the same begin and end as this backdrop,
// in which case the backdrop's field_id of -1 will give it lower priority.
regions.push_back(NumberFormatSpan(-1, 0, formatted_text.length()));
-
+ Intl::FormatRangeSourceTracker tracker;
{
- icu::ConstrainedFieldPosition cfp;
- cfp.constrainCategory(UFIELD_CATEGORY_NUMBER);
- while (formatted->nextPosition(cfp, status)) {
- regions.push_back(
- NumberFormatSpan(cfp.getField(), cfp.getStart(), cfp.getLimit()));
+ icu::ConstrainedFieldPosition cfpos;
+ while (formatted->nextPosition(cfpos, status)) {
+ int32_t category = cfpos.getCategory();
+ int32_t field = cfpos.getField();
+ int32_t start = cfpos.getStart();
+ int32_t limit = cfpos.getLimit();
+ if (category == UFIELD_CATEGORY_NUMBER_RANGE_SPAN) {
+ DCHECK_LE(field, 2);
+ DCHECK(FLAG_harmony_intl_number_format_v3);
+ tracker.Add(field, start, limit);
+ } else {
+ regions.push_back(NumberFormatSpan(field, start, limit));
+ }
}
}
@@ -1402,7 +1893,7 @@ Maybe<int> ConstructParts(Isolate* isolate,
field_type_string = isolate->factory()->unit_string();
} else {
field_type_string =
- Intl::NumberFieldToType(isolate, numeric_obj, part.field_id);
+ Intl::NumberFieldToType(isolate, part, formatted_text, is_nan);
}
}
Handle<String> substring;
@@ -1410,20 +1901,224 @@ Maybe<int> ConstructParts(Isolate* isolate,
isolate, substring,
Intl::ToString(isolate, formatted_text, part.begin_pos, part.end_pos),
Nothing<int>());
- Intl::AddElement(isolate, result, index, field_type_string, substring);
+
+ if (output_source) {
+ Intl::AddElement(
+ isolate, result, index, field_type_string, substring,
+ isolate->factory()->source_string(),
+ Intl::SourceString(isolate,
+ tracker.GetSource(part.begin_pos, part.end_pos)));
+ } else {
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
+ }
++index;
}
JSObject::ValidateElements(*result);
return Just(index);
}
+bool IsPositiveInfinity(Isolate* isolate, Handle<Object> v) {
+ if (v->IsBigInt()) return false;
+ if (v->IsString()) {
+ return isolate->factory()->Infinity_string()->Equals(String::cast(*v));
+ }
+ CHECK(v->IsNumber());
+ double const value_number = v->Number();
+ return std::isinf(value_number) && (value_number > 0.0);
+}
+
+bool IsNegativeInfinity(Isolate* isolate, Handle<Object> v) {
+ if (v->IsBigInt()) return false;
+ if (v->IsString()) {
+ return isolate->factory()->minus_Infinity_string()->Equals(
+ String::cast(*v));
+ }
+ CHECK(v->IsNumber());
+ double const value_number = v->Number();
+ return std::isinf(value_number) && (value_number < 0.0);
+}
+
+bool IsNegativeZero(Isolate* isolate, Handle<Object> v) {
+ if (v->IsBigInt()) return false;
+ if (v->IsString()) {
+ return isolate->factory()->minus_0()->Equals(String::cast(*v));
+ }
+ CHECK(v->IsNumber());
+ return IsMinusZero(v->Number());
+}
+
+bool LessThan(Isolate* isolate, Handle<Object> a, Handle<Object> b) {
+ Maybe<ComparisonResult> comparison = Object::Compare(isolate, a, b);
+ return comparison.IsJust() &&
+ comparison.FromJust() == ComparisonResult::kLessThan;
+}
+
+bool IsFiniteNonMinusZeroNumberOrBigInt(Isolate* isolate, Handle<Object> v) {
+ return !(IsPositiveInfinity(isolate, v) || IsNegativeInfinity(isolate, v) ||
+ v->IsMinusZero());
+}
+
+// #sec-partitionnumberrangepattern
+template <typename T, MaybeHandle<T> (*F)(
+ Isolate*, icu::FormattedValue*,
+ const icu::number::LocalizedNumberFormatter*, bool)>
+MaybeHandle<T> PartitionNumberRangePattern(Isolate* isolate,
+ Handle<JSNumberFormat> number_format,
+ Handle<Object> x, Handle<Object> y,
+ const char* func_name) {
+ Factory* factory = isolate->factory();
+
+ // 1. If x is NaN or y is NaN, throw a RangeError exception.
+ if (x->IsNaN()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("start"), x),
+ MaybeHandle<T>());
+ }
+ if (y->IsNaN()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("end"), y),
+ MaybeHandle<T>());
+ }
+
+ // 2. If x is a mathematical value, then
+ if (IsFiniteNonMinusZeroNumberOrBigInt(isolate, x)) {
+ // a. If y is a mathematical value and y < x, throw a RangeError exception.
+ if (IsFiniteNonMinusZeroNumberOrBigInt(isolate, y) &&
+ LessThan(isolate, y, x)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // b. Else if y is -∞, throw a RangeError exception.
+ if (IsNegativeInfinity(isolate, y)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // c. Else if y is -0 and x ≥ 0, throw a RangeError exception.
+ if (y->IsMinusZero() &&
+ !LessThan(isolate, x, Handle<Object>(Smi::zero(), isolate))) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // 3. Else if x is +∞, then
+ } else if (IsPositiveInfinity(isolate, x)) {
+ // a. If y is a mathematical value, throw a RangeError exception.
+ if (IsFiniteNonMinusZeroNumberOrBigInt(isolate, y)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // b. Else if y is -∞, throw a RangeError exception.
+ if (IsNegativeInfinity(isolate, y)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // c. Else if y is -0, throw a RangeError exception.
+ if (IsNegativeZero(isolate, y)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // 4. Else if x is -0, then
+ } else if (IsNegativeZero(isolate, x)) {
+ // a. If y is a mathematical value and y < 0, throw a RangeError exception.
+ if (IsFiniteNonMinusZeroNumberOrBigInt(isolate, y) &&
+ LessThan(isolate, y, Handle<Object>(Smi::zero(), isolate))) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ // b. Else if y is -∞, throw a RangeError exception.
+ if (IsNegativeInfinity(isolate, y)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalid, x, y),
+ MaybeHandle<T>());
+ }
+ }
+
+ Maybe<icu::Formattable> maybe_x = ToFormattable(isolate, x, "start");
+ MAYBE_RETURN(maybe_x, MaybeHandle<T>());
+
+ Maybe<icu::Formattable> maybe_y = ToFormattable(isolate, y, "end");
+ MAYBE_RETURN(maybe_y, MaybeHandle<T>());
+
+ icu::number::LocalizedNumberRangeFormatter* nrfmt =
+ number_format->icu_number_range_formatter().raw();
+ CHECK_NOT_NULL(nrfmt);
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::FormattedNumberRange formatted = nrfmt->formatFormattableRange(
+ maybe_x.FromJust(), maybe_y.FromJust(), status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewTypeError(MessageTemplate::kIcuError), MaybeHandle<T>());
+ }
+
+ return F(isolate, &formatted, number_format->icu_number_formatter().raw(),
+ false /* is_nan */);
+}
+
+MaybeHandle<String> FormatToString(Isolate* isolate,
+ icu::FormattedValue* formatted,
+ const icu::number::LocalizedNumberFormatter*,
+ bool) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result = formatted->toString(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+ }
+ return Intl::ToString(isolate, result);
+}
+
+MaybeHandle<JSArray> FormatToJSArray(
+ Isolate* isolate, icu::FormattedValue* formatted,
+ const icu::number::LocalizedNumberFormatter* nfmt, bool is_nan,
+ bool output_source) {
+ UErrorCode status = U_ZERO_ERROR;
+ bool is_unit = Style::UNIT == StyleFromSkeleton(nfmt->toSkeleton(status));
+ CHECK(U_SUCCESS(status));
+
+ Factory* factory = isolate->factory();
+ Handle<JSArray> result = factory->NewJSArray(0);
+ Maybe<int> maybe_format_to_parts = ConstructParts(
+ isolate, formatted, result, 0, is_unit, is_nan, output_source);
+ MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
+ return result;
+}
+
+MaybeHandle<JSArray> FormatRangeToJSArray(
+ Isolate* isolate, icu::FormattedValue* formatted,
+ const icu::number::LocalizedNumberFormatter* nfmt, bool is_nan) {
+ return FormatToJSArray(isolate, formatted, nfmt, is_nan, true);
+}
+
} // namespace
+MaybeHandle<String> JSNumberFormat::FormatNumeric(
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
+ Handle<Object> numeric_obj) {
+ DCHECK(numeric_obj->IsNumeric() || FLAG_harmony_intl_number_format_v3);
+
+ icu::number::FormattedNumber formatted;
+ Maybe<bool> maybe_format =
+ IcuFormatNumber(isolate, number_format, numeric_obj, &formatted);
+ MAYBE_RETURN(maybe_format, Handle<String>());
+
+ return FormatToString(isolate, &formatted, &number_format,
+ numeric_obj->IsNaN());
+}
+
MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj) {
- CHECK(numeric_obj->IsNumeric());
- Factory* factory = isolate->factory();
+ CHECK(numeric_obj->IsNumeric() || FLAG_harmony_intl_number_format_v3);
icu::number::LocalizedNumberFormatter* fmt =
number_format->icu_number_formatter().raw();
CHECK_NOT_NULL(fmt);
@@ -1432,18 +2127,24 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
Maybe<bool> maybe_format =
IcuFormatNumber(isolate, *fmt, numeric_obj, &formatted);
MAYBE_RETURN(maybe_format, Handle<JSArray>());
- UErrorCode status = U_ZERO_ERROR;
- bool style_is_unit =
- Style::UNIT == StyleFromSkeleton(fmt->toSkeleton(status));
- CHECK(U_SUCCESS(status));
+ return FormatToJSArray(isolate, &formatted, fmt, numeric_obj->IsNaN(), false);
+}
- Handle<JSArray> result = factory->NewJSArray(0);
- Maybe<int> maybe_format_to_parts = ConstructParts(
- isolate, &formatted, result, 0, numeric_obj, style_is_unit);
- MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
+MaybeHandle<String> JSNumberFormat::FormatNumericRange(
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> x_obj, Handle<Object> y_obj) {
+ return PartitionNumberRangePattern<String, FormatToString>(
+ isolate, number_format, x_obj, y_obj,
+ "Intl.NumberFormat.prototype.formatRange");
+}
- return result;
+MaybeHandle<JSArray> JSNumberFormat::FormatNumericRangeToParts(
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> x_obj, Handle<Object> y_obj) {
+ return PartitionNumberRangePattern<JSArray, FormatRangeToJSArray>(
+ isolate, number_format, x_obj, y_obj,
+ "Intl.NumberFormat.prototype.formatRangeToParts");
}
namespace {
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 38710131d6..b71b9ddef8 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -26,8 +26,10 @@ namespace U_ICU_NAMESPACE {
class UnicodeString;
namespace number {
class LocalizedNumberFormatter;
-} // namespace number
-} // namespace U_ICU_NAMESPACE
+class UnlocalizedNumberFormatter;
+class LocalizedNumberRangeFormatter;
+} // namespace number
+} // namespace U_ICU_NAMESPACE
namespace v8 {
namespace internal {
@@ -54,6 +56,16 @@ class JSNumberFormat
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj);
+ // ecma402/#sec-formatnumericrange
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumericRange(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, Handle<Object> x,
+ Handle<Object> y);
+
+ // ecma402/#sec-formatnumericrangetoparts
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatNumericRangeToParts(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, Handle<Object> x,
+ Handle<Object> y);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumeric(
Isolate* isolate,
const icu::number::LocalizedNumberFormatter& number_format,
@@ -68,31 +80,24 @@ class JSNumberFormat
int32_t* minimum, int32_t* maximum);
static bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
int32_t* minimum, int32_t* maximum);
- static icu::number::LocalizedNumberFormatter SetDigitOptionsToFormatter(
- const icu::number::LocalizedNumberFormatter& icu_number_formatter,
- const Intl::NumberFormatDigitOptions& digit_options);
+
+ enum class ShowTrailingZeros { kShow, kHide };
+
+ static icu::number::UnlocalizedNumberFormatter SetDigitOptionsToFormatter(
+ const icu::number::UnlocalizedNumberFormatter& settings,
+ const Intl::NumberFormatDigitOptions& digit_options,
+ int rounding_increment, ShowTrailingZeros show);
DECL_PRINTER(JSNumberFormat)
DECL_ACCESSORS(icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>)
+ DECL_ACCESSORS(icu_number_range_formatter,
+ Managed<icu::number::LocalizedNumberRangeFormatter>)
TQ_OBJECT_CONSTRUCTORS(JSNumberFormat)
};
-struct NumberFormatSpan {
- int32_t field_id;
- int32_t begin_pos;
- int32_t end_pos;
-
- NumberFormatSpan() = default;
- NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
- : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
-};
-
-V8_EXPORT_PRIVATE std::vector<NumberFormatSpan> FlattenRegionsToParts(
- std::vector<NumberFormatSpan>* regions);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.tq b/deps/v8/src/objects/js-number-format.tq
index fe618daa3b..23daa45b45 100644
--- a/deps/v8/src/objects/js-number-format.tq
+++ b/deps/v8/src/objects/js-number-format.tq
@@ -8,5 +8,7 @@ extern class JSNumberFormat extends JSObject {
locale: String;
icu_number_formatter:
Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ icu_number_range_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberRangeFormatter>
bound_format: JSFunction|Undefined;
}
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 24e86512de..0fe9938ac9 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -33,6 +33,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSReceiver)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSObjectWithEmbedderSlots)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
@@ -85,9 +86,10 @@ MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
return Object::GetProperty(&it);
}
-Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
+Handle<Object> JSReceiver::GetDataProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
Handle<Name> name) {
- LookupIterator it(object->GetIsolate(), object, name, object,
+ LookupIterator it(isolate, object, name, object,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
if (!it.IsFound()) return it.factory()->undefined_value();
return GetDataProperty(&it);
@@ -283,6 +285,20 @@ int JSObject::GetEmbedderFieldsStartOffset() {
}
// static
+bool JSObject::MayHaveEmbedderFields(Map map) {
+ InstanceType instance_type = map.instance_type();
+ // TODO(v8) It'd be nice if all objects with embedder data slots inherited
+ // from JSObjectWithEmbedderSlots, but this is currently not possible due to
+ // instance_type constraints.
+ return InstanceTypeChecker::IsJSObjectWithEmbedderSlots(instance_type) ||
+ InstanceTypeChecker::IsJSSpecialObject(instance_type);
+}
+
+bool JSObject::MayHaveEmbedderFields() const {
+ return MayHaveEmbedderFields(map());
+}
+
+// static
int JSObject::GetEmbedderFieldCount(Map map) {
int instance_size = map.instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
@@ -307,10 +323,6 @@ int JSObject::GetEmbedderFieldOffset(int index) {
return GetEmbedderFieldsStartOffset() + (kEmbedderDataSlotSize * index);
}
-void JSObject::InitializeEmbedderField(Isolate* isolate, int index) {
- EmbedderDataSlot(*this, index).AllocateExternalPointerEntry(isolate);
-}
-
Object JSObject::GetEmbedderField(int index) {
return EmbedderDataSlot(*this, index).load_tagged();
}
@@ -323,6 +335,12 @@ void JSObject::SetEmbedderField(int index, Smi value) {
EmbedderDataSlot(*this, index).store_smi(value);
}
+bool JSObject::IsDroppableApiObject() const {
+ auto instance_type = map().instance_type();
+ return InstanceTypeChecker::IsJSApiObject(instance_type) ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+}
+
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
@@ -334,13 +352,31 @@ Object JSObject::RawFastPropertyAt(FieldIndex index) const {
Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base,
FieldIndex index) const {
if (index.is_inobject()) {
- return TaggedField<Object>::load(cage_base, *this, index.offset());
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this, index.offset());
} else {
return property_array(cage_base).get(cage_base,
index.outobject_array_index());
}
}
+// The SeqCst versions of RawFastPropertyAt are used for atomically accessing
+// shared struct fields.
+Object JSObject::RawFastPropertyAt(FieldIndex index,
+ SeqCstAccessTag tag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return RawFastPropertyAt(cage_base, index, tag);
+}
+
+Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base, FieldIndex index,
+ SeqCstAccessTag tag) const {
+ if (index.is_inobject()) {
+ return TaggedField<Object>::SeqCst_Load(cage_base, *this, index.offset());
+ } else {
+ return property_array(cage_base).get(cage_base,
+ index.outobject_array_index(), tag);
+ }
+}
+
base::Optional<Object> JSObject::RawInobjectPropertyAt(
PtrComprCageBase cage_base, Map original_map, FieldIndex index) const {
CHECK(index.is_inobject());
@@ -384,6 +420,17 @@ void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
+void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
+ SeqCstAccessTag tag) {
+ DCHECK(index.is_inobject());
+ DCHECK(value.IsShared());
+ SEQ_CST_WRITE_FIELD(*this, index.offset(), value);
+ // JSSharedStructs are allocated in the shared old space, which is currently
+ // collected by stopping the world, so the incremental write barrier is not
+ // needed. They can only store Smis and other HeapObjects in the shared old
+ // space, so the generational write barrier is also not needed.
+}
+
void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
WriteBarrierMode mode) {
if (index.is_inobject()) {
@@ -394,6 +441,15 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
}
}
+void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
+ SeqCstAccessTag tag) {
+ if (index.is_inobject()) {
+ RawFastInobjectPropertyAtPut(index, value, tag);
+ } else {
+ property_array().set(index.outobject_array_index(), value, tag);
+ }
+}
+
void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
DCHECK_EQ(PropertyLocation::kField, details.location());
@@ -444,11 +500,37 @@ void JSObject::InitializeBody(Map map, int start_offset,
MapWord filler_map, Object undefined_filler) {
int size = map.instance_size();
int offset = start_offset;
+
+ // embedder data slots need to be initialized separately
+ if (MayHaveEmbedderFields(map)) {
+ int embedder_field_start = GetEmbedderFieldsStartOffset(map);
+ int embedder_field_count = GetEmbedderFieldCount(map);
+
+ // fill start with references to the undefined value object
+ DCHECK_LE(offset, embedder_field_start);
+ while (offset < embedder_field_start) {
+ WRITE_FIELD(*this, offset, undefined_filler);
+ offset += kTaggedSize;
+ }
+
+ // initialize embedder data slots
+ DCHECK_EQ(offset, embedder_field_start);
+ for (int i = 0; i < embedder_field_count; i++) {
+ // TODO(v8): consider initializing embedded data slots with Smi::zero().
+ EmbedderDataSlot(*this, i).Initialize(undefined_filler);
+ offset += kEmbedderDataSlotSize;
+ }
+ } else {
+ DCHECK_EQ(0, GetEmbedderFieldCount(map));
+ }
+
+ DCHECK_LE(offset, size);
if (is_slack_tracking_in_progress) {
int end_of_pre_allocated_offset =
size - (map.UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
- // fill start with references to the undefined value object
+ DCHECK_LE(offset, end_of_pre_allocated_offset);
+ // fill pre allocated slots with references to the undefined value object
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(*this, offset, undefined_filler);
offset += kTaggedSize;
@@ -461,13 +543,31 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
} else {
while (offset < size) {
- // fill with references to the undefined value object
+ // fill everything with references to the undefined value object
WRITE_FIELD(*this, offset, undefined_filler);
offset += kTaggedSize;
}
}
}
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSExternalObject)
+
+DEF_GETTER(JSExternalObject, value, void*) {
+ Isolate* isolate = GetIsolateForSandbox(*this);
+ return reinterpret_cast<void*>(
+ ReadExternalPointerField(kValueOffset, isolate, kExternalObjectValueTag));
+}
+
+void JSExternalObject::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kValueOffset, isolate, kExternalObjectValueTag);
+}
+
+void JSExternalObject::set_value(Isolate* isolate, void* value) {
+ WriteExternalPointerField(kValueOffset, isolate,
+ reinterpret_cast<Address>(value),
+ kExternalObjectValueTag);
+}
+
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
return TaggedField<Object, kNativeContextOffset>::Relaxed_Load(cage_base,
*this);
@@ -707,19 +807,18 @@ DEF_GETTER(JSReceiver, property_array, PropertyArray) {
return PropertyArray::cast(prop);
}
-Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
+Maybe<bool> JSReceiver::HasProperty(Isolate* isolate, Handle<JSReceiver> object,
Handle<Name> name) {
- Isolate* isolate = object->GetIsolate();
PropertyKey key(isolate, name);
LookupIterator it(isolate, object, key, object);
return HasProperty(&it);
}
-Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+Maybe<bool> JSReceiver::HasOwnProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
uint32_t index) {
if (object->IsJSObject()) { // Shortcut.
- LookupIterator it(object->GetIsolate(), object, index, object,
- LookupIterator::OWN);
+ LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
return HasProperty(&it);
}
@@ -752,8 +851,9 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
return GetPropertyAttributes(&it);
}
-Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- LookupIterator it(object->GetIsolate(), object, index, object);
+Maybe<bool> JSReceiver::HasElement(Isolate* isolate, Handle<JSReceiver> object,
+ uint32_t index) {
+ LookupIterator it(isolate, object, index, object);
return HasProperty(&it);
}
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 1df13df72c..090a56c334 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -52,12 +52,14 @@
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
+#include "src/objects/js-shadow-realms.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format.h"
#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter.h"
#include "src/objects/js-segments.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-struct-inl.h"
#include "src/objects/js-temporal-objects-inl.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/map-inl.h"
@@ -119,16 +121,15 @@ Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
}
// static
-Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+Maybe<bool> JSReceiver::HasOwnProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
Handle<Name> name) {
if (object->IsJSModuleNamespace()) {
PropertyDescriptor desc;
- return JSReceiver::GetOwnPropertyDescriptor(object->GetIsolate(), object,
- name, &desc);
+ return JSReceiver::GetOwnPropertyDescriptor(isolate, object, name, &desc);
}
if (object->IsJSObject()) { // Shortcut.
- Isolate* isolate = object->GetIsolate();
PropertyKey key(isolate, name);
LookupIterator it(isolate, object, key, LookupIterator::OWN);
return HasProperty(&it);
@@ -291,7 +292,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
Representation representation = details.representation();
FieldIndex index = FieldIndex::ForPropertyIndex(
*map, details.field_index(), representation);
- prop_value = JSObject::FastPropertyAt(from, representation, index);
+ prop_value =
+ JSObject::FastPropertyAt(isolate, from, representation, index);
}
} else {
LookupIterator it(isolate, from, next_key,
@@ -499,9 +501,7 @@ String JSReceiver::class_name() {
namespace {
std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
- Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
-
+ Isolate* isolate, Handle<JSReceiver> receiver) {
// If the object was instantiated simply with base == new.target, the
// constructor on the map provides the most accurate name.
// Don't provide the info for prototypes, since their constructors are
@@ -571,19 +571,20 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
// static
MaybeHandle<JSFunction> JSReceiver::GetConstructor(
- Handle<JSReceiver> receiver) {
- return GetConstructorHelper(receiver).first;
+ Isolate* isolate, Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(isolate, receiver).first;
}
// static
-Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
- return GetConstructorHelper(receiver).second;
+Handle<String> JSReceiver::GetConstructorName(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(isolate, receiver).second;
}
MaybeHandle<NativeContext> JSReceiver::GetCreationContext() {
JSReceiver receiver = *this;
// Externals are JSObjects with null as a constructor.
- DCHECK(!receiver.IsExternal(GetIsolate()));
+ DCHECK(!receiver.IsJSExternalObject());
Object constructor = receiver.map().GetConstructor();
JSFunction function;
if (constructor.IsJSFunction()) {
@@ -635,6 +636,11 @@ MaybeHandle<NativeContext> JSReceiver::GetFunctionRealm(
current = function.bound_target_function();
continue;
}
+ if (current.IsJSWrappedFunction()) {
+ JSWrappedFunction function = JSWrappedFunction::cast(current);
+ current = function.wrapped_target_function();
+ continue;
+ }
JSObject object = JSObject::cast(current);
DCHECK(!object.IsJSFunction());
return object.GetCreationContext();
@@ -1197,6 +1203,8 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
if (!result.is_null()) {
int32_t value;
CHECK(result->ToInt32(&value));
+ DCHECK_IMPLIES((value & ~PropertyAttributes::ALL_ATTRIBUTES_MASK) != 0,
+ value == PropertyAttributes::ABSENT);
return Just(static_cast<PropertyAttributes>(value));
}
} else if (!interceptor->getter().IsUndefined(isolate)) {
@@ -1921,9 +1929,9 @@ Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
}
// static
-MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
+MaybeHandle<Object> JSReceiver::ToPrimitive(Isolate* isolate,
+ Handle<JSReceiver> receiver,
ToPrimitiveHint hint) {
- Isolate* const isolate = receiver->GetIsolate();
Handle<Object> exotic_to_prim;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, exotic_to_prim,
@@ -1942,15 +1950,16 @@ MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
Object);
}
- return OrdinaryToPrimitive(receiver, (hint == ToPrimitiveHint::kString)
- ? OrdinaryToPrimitiveHint::kString
- : OrdinaryToPrimitiveHint::kNumber);
+ return OrdinaryToPrimitive(isolate, receiver,
+ (hint == ToPrimitiveHint::kString)
+ ? OrdinaryToPrimitiveHint::kString
+ : OrdinaryToPrimitiveHint::kNumber);
}
// static
MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
- Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint) {
- Isolate* const isolate = receiver->GetIsolate();
+ Isolate* isolate, Handle<JSReceiver> receiver,
+ OrdinaryToPrimitiveHint hint) {
Handle<String> method_names[2];
switch (hint) {
case OrdinaryToPrimitiveHint::kNumber:
@@ -2042,8 +2051,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Representation representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*map, details.field_index(), representation);
- prop_value =
- JSObject::FastPropertyAt(object, representation, field_index);
+ prop_value = JSObject::FastPropertyAt(isolate, object, representation,
+ field_index);
}
} else {
LookupIterator it(isolate, object, next_key,
@@ -2152,14 +2161,15 @@ MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
try_fast_path, true);
}
-Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
+Maybe<bool> JSReceiver::SetPrototype(Isolate* isolate,
+ Handle<JSReceiver> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
if (object->IsJSProxy()) {
- return JSProxy::SetPrototype(Handle<JSProxy>::cast(object), value,
+ return JSProxy::SetPrototype(isolate, Handle<JSProxy>::cast(object), value,
from_javascript, should_throw);
}
- return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
+ return JSObject::SetPrototype(isolate, Handle<JSObject>::cast(object), value,
from_javascript, should_throw);
}
@@ -2334,10 +2344,16 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSObject::kHeaderSize;
case JS_ERROR_TYPE:
return JSObject::kHeaderSize;
+ case JS_EXTERNAL_OBJECT_TYPE:
+ return JSExternalObject::kHeaderSize;
+ case JS_SHADOW_REALM_TYPE:
+ return JSShadowRealm::kHeaderSize;
case JS_STRING_ITERATOR_TYPE:
return JSStringIterator::kHeaderSize;
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
+ case JS_SHARED_STRUCT_TYPE:
+ return JSSharedStruct::kHeaderSize;
case JS_TEMPORAL_CALENDAR_TYPE:
return JSTemporalCalendar::kHeaderSize;
case JS_TEMPORAL_DURATION_TYPE:
@@ -2358,6 +2374,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSTemporalTimeZone::kHeaderSize;
case JS_TEMPORAL_ZONED_DATE_TIME_TYPE:
return JSTemporalZonedDateTime::kHeaderSize;
+ case JS_WRAPPED_FUNCTION_TYPE:
+ return JSWrappedFunction::kHeaderSize;
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
return JSV8BreakIterator::kHeaderSize;
@@ -2812,7 +2830,7 @@ bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
HeapObject heap_object = HeapObject::cast(object);
if (!object.IsJSObject()) return false;
JSObject js_object = JSObject::cast(object);
- if (!js_object.IsDroppableApiWrapper()) return false;
+ if (!js_object.IsDroppableApiObject()) return false;
Object maybe_constructor = js_object.map().GetConstructor();
if (!maybe_constructor.IsJSFunction()) return false;
JSFunction constructor = JSFunction::cast(maybe_constructor);
@@ -3226,7 +3244,8 @@ void JSObject::MigrateToMap(Isolate* isolate, Handle<JSObject> object,
old_map->set_owns_descriptors(false);
DCHECK(old_map->is_abandoned_prototype_map());
// Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionsAccessor(isolate, old_map).NumberOfTransitions());
+ DCHECK_EQ(0,
+ TransitionsAccessor(isolate, *old_map).NumberOfTransitions());
DCHECK(new_map->GetBackPointer(isolate).IsUndefined(isolate));
DCHECK(object->map(isolate) != *old_map);
}
@@ -3451,9 +3470,18 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
if (can_define.IsNothing() || !can_define.FromJust()) {
return can_define;
}
- it->Restart();
}
- break;
+
+ // The interceptor declined to handle the operation, so proceed defining
+ // own property without the interceptor.
+ Isolate* isolate = it->isolate();
+ Handle<Object> receiver = it->GetReceiver();
+ LookupIterator::Configuration c = LookupIterator::OWN_SKIP_INTERCEPTOR;
+ LookupIterator own_lookup =
+ it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
+ : LookupIterator(isolate, receiver, it->name(), c);
+ return JSObject::DefineOwnPropertyIgnoreAttributes(
+ &own_lookup, value, attributes, should_throw, handling, semantics);
}
case LookupIterator::ACCESSOR: {
@@ -4084,7 +4112,7 @@ void JSObject::ApplyAttributesToDictionary(
Object v = dictionary->ValueAt(i);
if (v.IsAccessorPair()) attrs &= ~READ_ONLY;
}
- details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
+ details = details.CopyAddAttributes(PropertyAttributesFromInt(attrs));
dictionary->DetailsAtPut(i, details);
}
}
@@ -4198,10 +4226,10 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<Map> old_map(object->map(), isolate);
old_map = Map::Update(isolate, old_map);
- TransitionsAccessor transitions(isolate, old_map);
- Map transition = transitions.SearchSpecial(*transition_marker);
- if (!transition.is_null()) {
- Handle<Map> transition_map(transition, isolate);
+ Handle<Map> transition_map;
+ MaybeHandle<Map> maybe_transition_map =
+ TransitionsAccessor::SearchSpecial(isolate, old_map, *transition_marker);
+ if (maybe_transition_map.ToHandle(&transition_map)) {
DCHECK(transition_map->has_dictionary_elements() ||
transition_map->has_typed_array_elements() ||
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
@@ -4211,7 +4239,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
new_element_dictionary = CreateElementDictionary(isolate, object);
}
JSObject::MigrateToMap(isolate, object, transition_map);
- } else if (transitions.CanHaveMoreTransitions()) {
+ } else if (TransitionsAccessor::CanHaveMoreTransitions(isolate, old_map)) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
@@ -4298,14 +4326,22 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
return Just(true);
}
-Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+Handle<Object> JSObject::FastPropertyAt(Isolate* isolate,
+ Handle<JSObject> object,
Representation representation,
FieldIndex index) {
- Isolate* isolate = object->GetIsolate();
Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
return Object::WrapForRead(isolate, raw_value, representation);
}
+Handle<Object> JSObject::FastPropertyAt(Isolate* isolate,
+ Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index, SeqCstAccessTag tag) {
+ Handle<Object> raw_value(object->RawFastPropertyAt(index, tag), isolate);
+ return Object::WrapForRead(isolate, raw_value, representation);
+}
+
// static
Handle<Object> JSObject::DictionaryPropertyAt(Isolate* isolate,
Handle<JSObject> object,
@@ -4843,11 +4879,9 @@ void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
InvalidateOnePrototypeValidityCellInternal(global.map());
}
-Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
+Maybe<bool> JSObject::SetPrototype(Isolate* isolate, Handle<JSObject> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
- Isolate* isolate = object->GetIsolate();
-
#ifdef DEBUG
int size = object->Size();
#endif
@@ -5218,25 +5252,25 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
return GetPropertyWithInterceptorInternal(it, it->GetInterceptor(), done);
}
-Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
+Maybe<bool> JSObject::HasRealNamedProperty(Isolate* isolate,
+ Handle<JSObject> object,
Handle<Name> name) {
- Isolate* isolate = object->GetIsolate();
PropertyKey key(isolate, name);
LookupIterator it(isolate, object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
return HasProperty(&it);
}
-Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
+Maybe<bool> JSObject::HasRealElementProperty(Isolate* isolate,
+ Handle<JSObject> object,
uint32_t index) {
- Isolate* isolate = object->GetIsolate();
LookupIterator it(isolate, object, index, object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
return HasProperty(&it);
}
-Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+Maybe<bool> JSObject::HasRealNamedCallbackProperty(Isolate* isolate,
+ Handle<JSObject> object,
Handle<Name> name) {
- Isolate* isolate = object->GetIsolate();
PropertyKey key(isolate, name);
LookupIterator it(isolate, object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
@@ -5244,26 +5278,6 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
: Nothing<bool>();
}
-bool JSObject::IsApiWrapper() {
- // These object types can carry information relevant for embedders. The
- // *_API_* types are generated through templates which can have embedder
- // fields. The other types have their embedder fields added at compile time.
- auto instance_type = map().instance_type();
- return instance_type == JS_ARRAY_BUFFER_TYPE ||
- instance_type == JS_DATA_VIEW_TYPE ||
- instance_type == JS_GLOBAL_OBJECT_TYPE ||
- instance_type == JS_GLOBAL_PROXY_TYPE ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
- instance_type == JS_TYPED_ARRAY_TYPE ||
- InstanceTypeChecker::IsJSApiObject(instance_type);
-}
-
-bool JSObject::IsDroppableApiWrapper() {
- auto instance_type = map().instance_type();
- return InstanceTypeChecker::IsJSApiObject(instance_type) ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE;
-}
-
bool JSGlobalProxy::IsDetached() const {
return native_context().IsNull(GetIsolate());
}
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index ff7a268470..898a3d044f 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -86,12 +86,13 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
- Handle<JSReceiver> receiver,
+ Isolate* isolate, Handle<JSReceiver> receiver,
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
// ES6 section 7.1.1.1 OrdinaryToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
- Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
+ Isolate* isolate, Handle<JSReceiver> receiver,
+ OrdinaryToPrimitiveHint hint);
static MaybeHandle<NativeContext> GetFunctionRealm(
Handle<JSReceiver> receiver);
@@ -119,14 +120,14 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(
LookupIterator* it);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasProperty(
- Handle<JSReceiver> object, Handle<Name> name);
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasElement(
- Handle<JSReceiver> object, uint32_t index);
+ Isolate* isolate, Handle<JSReceiver> object, uint32_t index);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> HasOwnProperty(
- Handle<JSReceiver> object, Handle<Name> name);
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasOwnProperty(
- Handle<JSReceiver> object, uint32_t index);
+ Isolate* isolate, Handle<JSReceiver> object, uint32_t index);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
@@ -227,13 +228,15 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
// Returns the constructor (the function that was used to instantiate the
// object).
- static MaybeHandle<JSFunction> GetConstructor(Handle<JSReceiver> receiver);
+ static MaybeHandle<JSFunction> GetConstructor(Isolate* isolate,
+ Handle<JSReceiver> receiver);
// Returns the constructor name (the (possibly inferred) name of the function
// that was used to instantiate the object), if any. If a FunctionTemplate is
// used to instantiate the object, the class_name of the FunctionTemplate is
// returned instead.
- static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
+ static Handle<String> GetConstructorName(Isolate* isolate,
+ Handle<JSReceiver> receiver);
V8_EXPORT_PRIVATE MaybeHandle<NativeContext> GetCreationContext();
@@ -254,10 +257,11 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
// Set the object's prototype (only JSReceiver and null are allowed values).
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSReceiver> object, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> value,
+ bool from_javascript, ShouldThrow should_throw);
- inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
+ inline static Handle<Object> GetDataProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
Handle<Name> name);
V8_EXPORT_PRIVATE static Handle<Object> GetDataProperty(
LookupIterator* it, AllocationPolicy allocation_policy =
@@ -587,39 +591,36 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// Support functions for v8 api (needed for correct interceptor behavior).
V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
- Handle<JSObject> object, Handle<Name> name);
+ Isolate* isolate, Handle<JSObject> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealElementProperty(
- Handle<JSObject> object, uint32_t index);
+ Isolate* isolate, Handle<JSObject> object, uint32_t index);
V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
- Handle<JSObject> object, Handle<Name> name);
+ Isolate* isolate, Handle<JSObject> object, Handle<Name> name);
// Get the header size for a JSObject. Used to compute the index of
// embedder fields as well as the number of embedder fields.
// The |function_has_prototype_slot| parameter is needed only for
// JSFunction objects.
- static int GetHeaderSize(InstanceType instance_type,
- bool function_has_prototype_slot = false);
+ static V8_EXPORT_PRIVATE int GetHeaderSize(
+ InstanceType instance_type, bool function_has_prototype_slot = false);
static inline int GetHeaderSize(Map map);
+ static inline bool MayHaveEmbedderFields(Map map);
+ inline bool MayHaveEmbedderFields() const;
+
static inline int GetEmbedderFieldsStartOffset(Map map);
inline int GetEmbedderFieldsStartOffset();
static inline int GetEmbedderFieldCount(Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
- inline void InitializeEmbedderField(Isolate* isolate, int index);
inline Object GetEmbedderField(int index);
inline void SetEmbedderField(int index, Object value);
inline void SetEmbedderField(int index, Smi value);
- // Returns true when the object is potentially a wrapper that gets special
- // garbage collection treatment.
- // TODO(mlippautz): Make check exact and replace the pattern match in
- // Heap::TracePossibleWrapper.
- bool IsApiWrapper();
-
- // Same as IsApiWrapper() but also allow dropping the wrapper on minor GCs.
- bool IsDroppableApiWrapper();
+ // Returns true if this object is an Api object which can, if unmodified, be
+ // dropped during minor GC because the embedder can recreate it again later.
+ inline bool IsDroppableApiObject() const;
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
@@ -672,12 +673,20 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
Heap* heap);
// Access fast-case object properties at index.
- static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+ static Handle<Object> FastPropertyAt(Isolate* isolate,
+ Handle<JSObject> object,
Representation representation,
FieldIndex index);
+ static Handle<Object> FastPropertyAt(Isolate* isolate,
+ Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index, SeqCstAccessTag tag);
inline Object RawFastPropertyAt(FieldIndex index) const;
inline Object RawFastPropertyAt(PtrComprCageBase cage_base,
FieldIndex index) const;
+ inline Object RawFastPropertyAt(FieldIndex index, SeqCstAccessTag tag) const;
+ inline Object RawFastPropertyAt(PtrComprCageBase cage_base, FieldIndex index,
+ SeqCstAccessTag tag) const;
// See comment in the body of the method to understand the conditions
// in which this method is meant to be used, and what guarantees it
@@ -688,9 +697,13 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
inline void FastPropertyAtPut(FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void FastPropertyAtPut(FieldIndex index, Object value,
+ SeqCstAccessTag tag);
inline void RawFastInobjectPropertyAtPut(
FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
+ SeqCstAccessTag tag);
inline void WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value);
@@ -702,8 +715,8 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// Set the object's prototype (only JSReceiver and null are allowed values).
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSObject> object, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> value,
+ bool from_javascript, ShouldThrow should_throw);
// Makes the object prototype immutable
// Never called from JavaScript
@@ -875,6 +888,35 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
TQ_OBJECT_CONSTRUCTORS(JSObject)
};
+// A JSObject created through the public api which wraps an external pointer.
+// See v8::External.
+class JSExternalObject
+ : public TorqueGeneratedJSExternalObject<JSExternalObject, JSObject> {
+ public:
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
+ // [value]: field containing the pointer value.
+ DECL_GETTER(value, void*)
+
+ inline void set_value(Isolate* isolate, void* value);
+
+ static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
+
+ class BodyDescriptor;
+
+ private:
+ TQ_OBJECT_CONSTRUCTORS(JSExternalObject)
+};
+
+// An abstract superclass for JSObjects that may contain EmbedderDataSlots.
+class JSObjectWithEmbedderSlots
+ : public TorqueGeneratedJSObjectWithEmbedderSlots<JSObjectWithEmbedderSlots,
+ JSObject> {
+ public:
+ STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(JSObjectWithEmbedderSlots)
+};
+
// An abstract superclass for JSObjects that may have elements while having an
// empty fixed array as elements backing store. It doesn't carry any
// functionality but allows function classes to be identified in the type
@@ -890,6 +932,8 @@ class JSCustomElementsObject
// An abstract superclass for JSObjects that require non-standard element
// access. It doesn't carry any functionality but allows function classes to be
// identified in the type system.
+// These may also contain EmbedderDataSlots, but can't currently inherit from
+// JSObjectWithEmbedderSlots due to instance_type constraints.
class JSSpecialObject
: public TorqueGeneratedJSSpecialObject<JSSpecialObject,
JSCustomElementsObject> {
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 927bca18de..f80602a147 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -40,11 +40,20 @@ macro NewJSObject(implicit context: Context)(): JSObject {
return AllocateJSObjectFromMap(map);
}
+extern class JSExternalObject extends JSObject { value: ExternalPointer; }
+
+// A JSObject that may contain EmbedderDataSlots.
+@abstract
+extern class JSObjectWithEmbedderSlots extends JSObject {
+}
+
@abstract
@lowestInstanceTypeWithinParentClassRange
extern class JSCustomElementsObject extends JSObject {
}
+// These may also contain EmbedderDataSlots but can't be a child class of
+// JSObjectWithEmbedderSlots due to type id constraints.
@abstract
@lowestInstanceTypeWithinParentClassRange
extern class JSSpecialObject extends JSCustomElementsObject {
@@ -78,10 +87,11 @@ macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
let properties: EmptyFixedArray|NameDictionary|SwissNameDictionary =
kEmptyFixedArray;
if (IsDictionaryMap(map)) {
- if (kDictModePrototypes) {
+ @if(V8_ENABLE_SWISS_NAME_DICTIONARY) {
properties =
AllocateSwissNameDictionary(kSwissNameDictionaryInitialCapacity);
- } else {
+ }
+ @ifnot(V8_ENABLE_SWISS_NAME_DICTIONARY) {
properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
}
}
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index fb4a97e476..728e8212e9 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -28,6 +28,9 @@ ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
ACCESSORS(JSPluralRules, icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>,
kIcuNumberFormatterOffset)
+ACCESSORS(JSPluralRules, icu_number_range_formatter,
+ Managed<icu::number::LocalizedNumberRangeFormatter>,
+ kIcuNumberRangeFormatterOffset)
inline void JSPluralRules::set_type(Type type) {
DCHECK_LE(type, TypeBit::kMax);
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index ec15bd17cd..d4f2353b55 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -16,6 +16,7 @@
#include "src/objects/option-utils.h"
#include "unicode/locid.h"
#include "unicode/numberformatter.h"
+#include "unicode/numberrangeformatter.h"
#include "unicode/plurrule.h"
#include "unicode/unumberformatter.h"
@@ -115,21 +116,19 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- icu::number::LocalizedNumberFormatter icu_number_formatter =
- icu::number::NumberFormatter::withLocale(r.icu_locale)
- .roundingMode(UNUM_ROUND_HALFUP);
+ icu::Locale icu_locale = r.icu_locale;
+ icu::number::UnlocalizedNumberFormatter settings =
+ icu::number::UnlocalizedNumberFormatter().roundingMode(UNUM_ROUND_HALFUP);
std::unique_ptr<icu::PluralRules> icu_plural_rules;
bool success =
CreateICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules);
if (!success || icu_plural_rules.get() == nullptr) {
// Remove extensions and try again.
- icu::Locale no_extension_locale(r.icu_locale.getBaseName());
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
success = CreateICUPluralRules(isolate, no_extension_locale, type,
&icu_plural_rules);
- icu_number_formatter =
- icu::number::NumberFormatter::withLocale(no_extension_locale)
- .roundingMode(UNUM_ROUND_HALFUP);
+ icu_locale = no_extension_locale;
if (!success || icu_plural_rules.get() == nullptr) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
@@ -142,8 +141,15 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3, false);
MAYBE_RETURN(maybe_digit_options, MaybeHandle<JSPluralRules>());
Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
- icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
- icu_number_formatter, digit_options);
+ settings = JSNumberFormat::SetDigitOptionsToFormatter(
+ settings, digit_options, 1, JSNumberFormat::ShowTrailingZeros::kShow);
+
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ settings.locale(icu_locale);
+ icu::number::LocalizedNumberRangeFormatter icu_number_range_formatter =
+ icu::number::UnlocalizedNumberRangeFormatter()
+ .numberFormatterBoth(settings)
+ .locale(icu_locale);
Handle<Managed<icu::PluralRules>> managed_plural_rules =
Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
@@ -154,6 +160,12 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
isolate, 0,
new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+ Handle<Managed<icu::number::LocalizedNumberRangeFormatter>>
+ managed_number_range_formatter =
+ Managed<icu::number::LocalizedNumberRangeFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberRangeFormatter(
+ icu_number_range_formatter));
// Now all properties are ready, so we can allocate the result object.
Handle<JSPluralRules> plural_rules = Handle<JSPluralRules>::cast(
@@ -169,6 +181,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
plural_rules->set_icu_plural_rules(*managed_plural_rules);
plural_rules->set_icu_number_formatter(*managed_number_formatter);
+ plural_rules->set_icu_number_range_formatter(*managed_number_range_formatter);
// 13. Return pluralRules.
return plural_rules;
@@ -195,6 +208,26 @@ MaybeHandle<String> JSPluralRules::ResolvePlural(
return Intl::ToString(isolate, result);
}
+MaybeHandle<String> JSPluralRules::ResolvePluralRange(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules, double x, double y) {
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
+ DCHECK_NOT_NULL(icu_plural_rules);
+
+ icu::number::LocalizedNumberRangeFormatter* fmt =
+ plural_rules->icu_number_range_formatter().raw();
+ DCHECK_NOT_NULL(fmt);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::FormattedNumberRange formatted = fmt->formatFormattableRange(
+ icu::Formattable(x), icu::Formattable(y), status);
+
+ DCHECK(U_SUCCESS(status));
+ icu::UnicodeString result = icu_plural_rules->select(formatted, status);
+ DCHECK(U_SUCCESS(status));
+
+ return Intl::ToString(isolate, result);
+}
+
namespace {
void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index bd0bfe65f7..0a2b42462b 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -26,6 +26,7 @@ namespace U_ICU_NAMESPACE {
class PluralRules;
namespace number {
class LocalizedNumberFormatter;
+class LocalizedNumberRangeFormatter;
} // namespace number
} // namespace U_ICU_NAMESPACE
@@ -47,6 +48,9 @@ class JSPluralRules
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePlural(
Isolate* isolate, Handle<JSPluralRules> plural_rules, double number);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePluralRange(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules, double x, double y);
+
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
// [[Type]] is one of the values "cardinal" or "ordinal",
@@ -68,6 +72,8 @@ class JSPluralRules
DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
DECL_ACCESSORS(icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>)
+ DECL_ACCESSORS(icu_number_range_formatter,
+ Managed<icu::number::LocalizedNumberRangeFormatter>)
TQ_OBJECT_CONSTRUCTORS(JSPluralRules)
};
diff --git a/deps/v8/src/objects/js-plural-rules.tq b/deps/v8/src/objects/js-plural-rules.tq
index 697108609b..c322f9da49 100644
--- a/deps/v8/src/objects/js-plural-rules.tq
+++ b/deps/v8/src/objects/js-plural-rules.tq
@@ -15,4 +15,6 @@ extern class JSPluralRules extends JSObject {
icu_plural_rules: Foreign; // Managed<icu::PluralRules>
icu_number_formatter:
Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ icu_number_range_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberRangeFormatter>
}
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index 5afb66a0b2..1d3a94f035 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -28,7 +28,8 @@ namespace internal {
// We also overlay the result and reactions fields on the JSPromise, since
// the reactions are only necessary for pending promises, whereas the result
// is only meaningful for settled promises.
-class JSPromise : public TorqueGeneratedJSPromise<JSPromise, JSObject> {
+class JSPromise
+ : public TorqueGeneratedJSPromise<JSPromise, JSObjectWithEmbedderSlots> {
public:
// [result]: Checks that the promise is settled and returns the result.
inline Object result() const;
diff --git a/deps/v8/src/objects/js-promise.tq b/deps/v8/src/objects/js-promise.tq
index 01426fd6d2..25c7e1f76c 100644
--- a/deps/v8/src/objects/js-promise.tq
+++ b/deps/v8/src/objects/js-promise.tq
@@ -10,7 +10,7 @@ bitfield struct JSPromiseFlags extends uint31 {
async_task_id: int32: 22 bit;
}
-extern class JSPromise extends JSObject {
+extern class JSPromise extends JSObjectWithEmbedderSlots {
macro Status(): PromiseState {
return this.flags.status;
}
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index df89b4d17a..12abe9623f 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -31,8 +31,8 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
// ES6 9.5.2
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSProxy> proxy, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Object> value,
+ bool from_javascript, ShouldThrow should_throw);
// ES6 9.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index ce9a9a908c..1d8d5fe668 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -179,9 +179,10 @@ void JSRegExp::set_bytecode_and_trampoline(Isolate* isolate,
SetDataAt(kIrregexpLatin1BytecodeIndex, *bytecode);
SetDataAt(kIrregexpUC16BytecodeIndex, *bytecode);
- Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
- SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, ToCodeT(*trampoline));
- SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, ToCodeT(*trampoline));
+ Handle<CodeT> trampoline =
+ BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
+ SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, *trampoline);
+ SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, *trampoline);
}
bool JSRegExp::ShouldProduceBytecode() {
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index d6a65d95ca..23dfa9587d 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -343,9 +343,9 @@ template <typename T>
MaybeHandle<T> FormatCommon(
Isolate* isolate, Handle<JSRelativeTimeFormat> format,
Handle<Object> value_obj, Handle<Object> unit_obj, const char* func_name,
- const std::function<
- MaybeHandle<T>(Isolate*, const icu::FormattedRelativeDateTime&,
- Handle<Object>, Handle<String>)>& formatToResult) {
+ MaybeHandle<T> (*formatToResult)(Isolate*,
+ const icu::FormattedRelativeDateTime&,
+ Handle<String>, bool)) {
// 3. Let value be ? ToNumber(value).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
@@ -382,13 +382,13 @@ MaybeHandle<T> FormatCommon(
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
}
- return formatToResult(isolate, formatted, value,
- UnitAsString(isolate, unit_enum));
+ return formatToResult(isolate, formatted, UnitAsString(isolate, unit_enum),
+ value->IsNaN());
}
MaybeHandle<String> FormatToString(
Isolate* isolate, const icu::FormattedRelativeDateTime& formatted,
- Handle<Object> value, Handle<String> unit) {
+ Handle<String> unit, bool is_nan) {
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString result = formatted.toString(status);
if (U_FAILURE(status)) {
@@ -411,21 +411,22 @@ Maybe<bool> AddLiteral(Isolate* isolate, Handle<JSArray> array,
Maybe<bool> AddUnit(Isolate* isolate, Handle<JSArray> array,
const icu::UnicodeString& string, int32_t index,
- int32_t start, int32_t limit, int32_t field_id,
- Handle<Object> value, Handle<String> unit) {
+ const NumberFormatSpan& part, Handle<String> unit,
+ bool is_nan) {
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, substring, Intl::ToString(isolate, string, start, limit),
+ isolate, substring,
+ Intl::ToString(isolate, string, part.begin_pos, part.end_pos),
Nothing<bool>());
Intl::AddElement(isolate, array, index,
- Intl::NumberFieldToType(isolate, value, field_id), substring,
- isolate->factory()->unit_string(), unit);
+ Intl::NumberFieldToType(isolate, part, string, is_nan),
+ substring, isolate->factory()->unit_string(), unit);
return Just(true);
}
MaybeHandle<JSArray> FormatToJSArray(
Isolate* isolate, const icu::FormattedRelativeDateTime& formatted,
- Handle<Object> value, Handle<String> unit) {
+ Handle<String> unit, bool is_nan) {
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString string = formatted.toString(status);
@@ -457,19 +458,23 @@ MaybeHandle<JSArray> FormatToJSArray(
for (auto start_limit : groups) {
if (start_limit.first > start) {
Maybe<bool> maybe_added =
- AddUnit(isolate, array, string, index++, start,
- start_limit.first, field, value, unit);
+ AddUnit(isolate, array, string, index++,
+ NumberFormatSpan(field, start, start_limit.first), unit,
+ is_nan);
MAYBE_RETURN(maybe_added, Handle<JSArray>());
- maybe_added = AddUnit(isolate, array, string, index++,
- start_limit.first, start_limit.second,
- UNUM_GROUPING_SEPARATOR_FIELD, value, unit);
+ maybe_added =
+ AddUnit(isolate, array, string, index++,
+ NumberFormatSpan(UNUM_GROUPING_SEPARATOR_FIELD,
+ start_limit.first, start_limit.second),
+ unit, is_nan);
MAYBE_RETURN(maybe_added, Handle<JSArray>());
start = start_limit.second;
}
}
}
- Maybe<bool> maybe_added = AddUnit(isolate, array, string, index++, start,
- limit, field, value, unit);
+ Maybe<bool> maybe_added =
+ AddUnit(isolate, array, string, index++,
+ NumberFormatSpan(field, start, limit), unit, is_nan);
MAYBE_RETURN(maybe_added, Handle<JSArray>());
previous_end = limit;
}
diff --git a/deps/v8/src/objects/js-shadow-realms-inl.h b/deps/v8/src/objects/js-shadow-realms-inl.h
new file mode 100644
index 0000000000..80106e87dd
--- /dev/null
+++ b/deps/v8/src/objects/js-shadow-realms-inl.h
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_SHADOW_REALMS_INL_H_
+#define V8_OBJECTS_JS_SHADOW_REALMS_INL_H_
+
+#include "src/api/api-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/js-shadow-realms.h"
+#include "src/objects/smi-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-shadow-realms-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSShadowRealm)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SHADOW_REALMS_INL_H_
diff --git a/deps/v8/src/objects/js-shadow-realms.h b/deps/v8/src/objects/js-shadow-realms.h
new file mode 100644
index 0000000000..131b7d84f8
--- /dev/null
+++ b/deps/v8/src/objects/js-shadow-realms.h
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_SHADOW_REALMS_H_
+#define V8_OBJECTS_JS_SHADOW_REALMS_H_
+
+#include "src/objects/js-objects.h"
+#include "torque-generated/bit-fields.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class NativeContext;
+
+#include "torque-generated/src/objects/js-shadow-realms-tq.inc"
+
+// ShadowRealm object from the JS ShadowRealm spec proposal:
+// https://github.com/tc39/proposal-shadowrealm
+class JSShadowRealm
+ : public TorqueGeneratedJSShadowRealm<JSShadowRealm, JSObject> {
+ public:
+ DECL_PRINTER(JSShadowRealm)
+ EXPORT_DECL_VERIFIER(JSShadowRealm)
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(JSShadowRealm)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SHADOW_REALMS_H_
diff --git a/deps/v8/src/objects/js-shadow-realms.tq b/deps/v8/src/objects/js-shadow-realms.tq
new file mode 100644
index 0000000000..8fb9008ab1
--- /dev/null
+++ b/deps/v8/src/objects/js-shadow-realms.tq
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern class JSShadowRealm extends JSObject { native_context: NativeContext; }
diff --git a/deps/v8/src/objects/js-struct-inl.h b/deps/v8/src/objects/js-struct-inl.h
new file mode 100644
index 0000000000..6c3601b71a
--- /dev/null
+++ b/deps/v8/src/objects/js-struct-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_STRUCT_INL_H_
+#define V8_OBJECTS_JS_STRUCT_INL_H_
+
+#include "src/api/api-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/js-struct.h"
+#include "src/objects/smi-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-struct-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSSharedStruct)
+
+CAST_ACCESSOR(JSSharedStruct)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_STRUCT_INL_H_
diff --git a/deps/v8/src/objects/js-struct.h b/deps/v8/src/objects/js-struct.h
new file mode 100644
index 0000000000..c65f60a3a1
--- /dev/null
+++ b/deps/v8/src/objects/js-struct.h
@@ -0,0 +1,35 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_STRUCT_H_
+#define V8_OBJECTS_JS_STRUCT_H_
+
+#include "src/objects/js-objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-struct-tq.inc"
+
+class JSSharedStruct
+ : public TorqueGeneratedJSSharedStruct<JSSharedStruct, JSObject> {
+ public:
+ DECL_CAST(JSSharedStruct)
+ DECL_PRINTER(JSSharedStruct)
+ EXPORT_DECL_VERIFIER(JSSharedStruct)
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(JSSharedStruct)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_STRUCT_H_
diff --git a/deps/v8/src/objects/js-struct.tq b/deps/v8/src/objects/js-struct.tq
new file mode 100644
index 0000000000..d2304ead2f
--- /dev/null
+++ b/deps/v8/src/objects/js-struct.tq
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern class JSSharedStruct extends JSObject {
+ // escaped_local_thread: Smi;
+}
diff --git a/deps/v8/src/objects/js-temporal-objects-inl.h b/deps/v8/src/objects/js-temporal-objects-inl.h
index d3c8dfeb7b..fdf92ec347 100644
--- a/deps/v8/src/objects/js-temporal-objects-inl.h
+++ b/deps/v8/src/objects/js-temporal-objects-inl.h
@@ -37,23 +37,27 @@ namespace internal {
DCHECK_GE(upper, field); \
DCHECK_LE(lower, field); \
int hints = data(); \
+ /* Mask out unrelated bits */ \
+ field &= (static_cast<uint32_t>(int32_t{-1})) ^ \
+ (static_cast<uint32_t>(int32_t{-1}) << B##Bits::kSize); \
hints = B##Bits::update(hints, field); \
set_##data(hints); \
} \
inline int32_t T::field() const { \
int32_t v = B##Bits::decode(data()); \
+ /* Restore bits for negative values based on the MSB in that field */ \
v |= ((int32_t{1} << (B##Bits::kSize - 1) & v) \
? (static_cast<uint32_t>(int32_t{-1}) << B##Bits::kSize) \
: 0); \
- CHECK_GE(upper, v); \
- CHECK_LE(lower, v); \
+ DCHECK_GE(upper, v); \
+ DCHECK_LE(lower, v); \
return v; \
}
-#define TEMPORAL_DATE_INLINE_GETTER_SETTER(T, data) \
- TEMPORAL_INLINE_SIGNED_GETTER_SETTER(T, data, iso_year, -32767, 32768, \
- IsoYear) \
- TEMPORAL_INLINE_GETTER_SETTER(T, data, iso_month, 1, 12, IsoMonth) \
+#define TEMPORAL_DATE_INLINE_GETTER_SETTER(T, data) \
+ TEMPORAL_INLINE_SIGNED_GETTER_SETTER(T, data, iso_year, -271821, 275760, \
+ IsoYear) \
+ TEMPORAL_INLINE_GETTER_SETTER(T, data, iso_month, 1, 12, IsoMonth) \
TEMPORAL_INLINE_GETTER_SETTER(T, data, iso_day, 1, 31, IsoDay)
#define TEMPORAL_TIME_INLINE_GETTER_SETTER(T, data1, data2) \
@@ -91,6 +95,16 @@ BIT_FIELD_ACCESSORS(JSTemporalCalendar, flags, calendar_index,
BOOL_ACCESSORS(JSTemporalTimeZone, flags, is_offset, IsOffsetBit::kShift)
+// Special handling of sign
+TEMPORAL_INLINE_SIGNED_GETTER_SETTER(JSTemporalTimeZone, flags,
+ offset_milliseconds, -24 * 60 * 60 * 1000,
+ 24 * 60 * 60 * 1000,
+ OffsetMillisecondsOrTimeZoneIndex)
+
+TEMPORAL_INLINE_SIGNED_GETTER_SETTER(JSTemporalTimeZone, details,
+ offset_sub_milliseconds, -1000000, 1000000,
+ OffsetSubMilliseconds)
+
BIT_FIELD_ACCESSORS(JSTemporalTimeZone, flags,
offset_milliseconds_or_time_zone_index,
JSTemporalTimeZone::OffsetMillisecondsOrTimeZoneIndexBits)
diff --git a/deps/v8/src/objects/js-temporal-objects.cc b/deps/v8/src/objects/js-temporal-objects.cc
new file mode 100644
index 0000000000..1bd7bd1cf9
--- /dev/null
+++ b/deps/v8/src/objects/js-temporal-objects.cc
@@ -0,0 +1,5196 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/js-temporal-objects.h"
+
+#include <set>
+
+#include "src/common/globals.h"
+#include "src/date/date.h"
+#include "src/execution/isolate.h"
+#include "src/heap/factory.h"
+#include "src/numbers/conversions-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-date-time-format.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/js-temporal-objects-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/managed-inl.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
+#include "src/objects/property-descriptor.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/temporal/temporal-parser.h"
+#ifdef V8_INTL_SUPPORT
+#include "unicode/calendar.h"
+#include "unicode/unistr.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+enum class Unit {
+ kNotPresent,
+ kAuto,
+ kYear,
+ kMonth,
+ kWeek,
+ kDay,
+ kHour,
+ kMinute,
+ kSecond,
+ kMillisecond,
+ kMicrosecond,
+ kNanosecond
+};
+
+/**
+ * This header declare the Abstract Operations defined in the
+ * Temporal spec with the enum and struct for them.
+ */
+
+// Struct
+struct DateTimeRecordCommon {
+ int32_t year;
+ int32_t month;
+ int32_t day;
+ int32_t hour;
+ int32_t minute;
+ int32_t second;
+ int32_t millisecond;
+ int32_t microsecond;
+ int32_t nanosecond;
+};
+
+struct DateRecord {
+ int32_t year;
+ int32_t month;
+ int32_t day;
+ Handle<String> calendar;
+};
+
+struct DateTimeRecord : public DateTimeRecordCommon {
+ Handle<String> calendar;
+};
+
+struct DurationRecord {
+ int64_t years;
+ int64_t months;
+ int64_t weeks;
+ int64_t days;
+ int64_t hours;
+ int64_t minutes;
+ int64_t seconds;
+ int64_t milliseconds;
+ int64_t microseconds;
+ int64_t nanoseconds;
+};
+
+struct TimeZoneRecord {
+ bool z;
+ Handle<String> offset_string;
+ Handle<String> name;
+};
+
+// Options
+
+V8_WARN_UNUSED_RESULT Handle<String> UnitToString(Isolate* isolate, Unit unit);
+
+// #sec-temporal-totemporaldisambiguation
+enum class Disambiguation { kCompatible, kEarlier, kLater, kReject };
+
+// #sec-temporal-totemporaloverflow
+enum class ShowOverflow { kConstrain, kReject };
+
+// ISO8601 String Parsing
+
+// #sec-temporal-parsetemporalcalendarstring
+V8_WARN_UNUSED_RESULT MaybeHandle<String> ParseTemporalCalendarString(
+ Isolate* isolate, Handle<String> iso_string);
+
+// #sec-temporal-parsetemporaldatestring
+V8_WARN_UNUSED_RESULT Maybe<DateRecord> ParseTemporalDateString(
+ Isolate* isolate, Handle<String> iso_string);
+
+// #sec-temporal-parsetemporaltimezone
+V8_WARN_UNUSED_RESULT MaybeHandle<String> ParseTemporalTimeZone(
+ Isolate* isolate, Handle<String> string);
+
+V8_WARN_UNUSED_RESULT Maybe<int64_t> ParseTimeZoneOffsetString(
+ Isolate* isolate, Handle<String> offset_string,
+ bool throwIfNotSatisfy = true);
+
+void BalanceISODate(Isolate* isolate, int32_t* year, int32_t* month,
+ int32_t* day);
+
+// Math and Misc
+
+V8_WARN_UNUSED_RESULT MaybeHandle<BigInt> AddInstant(
+ Isolate* isolate, Handle<BigInt> epoch_nanoseconds, int64_t hours,
+ int64_t minutes, int64_t seconds, int64_t milliseconds,
+ int64_t microseconds, int64_t nanoseconds);
+
+// #sec-temporal-balanceduration
+V8_WARN_UNUSED_RESULT Maybe<bool> BalanceDuration(
+ Isolate* isolate, int64_t* days, int64_t* hours, int64_t* minutes,
+ int64_t* seconds, int64_t* milliseconds, int64_t* microseconds,
+ int64_t* nanoseconds, Unit largest_unit, Handle<Object> relative_to,
+ const char* method_name);
+
+V8_WARN_UNUSED_RESULT Maybe<DurationRecord> DifferenceISODateTime(
+ Isolate* isolate, int32_t y1, int32_t mon1, int32_t d1, int32_t h1,
+ int32_t min1, int32_t s1, int32_t ms1, int32_t mus1, int32_t ns1,
+ int32_t y2, int32_t mon2, int32_t d2, int32_t h2, int32_t min2, int32_t s2,
+ int32_t ms2, int32_t mus2, int32_t ns2, Handle<JSReceiver> calendar,
+ Unit largest_unit, Handle<Object> relative_to, const char* method_name);
+
+// #sec-temporal-adddatetime
+V8_WARN_UNUSED_RESULT Maybe<DateTimeRecordCommon> AddDateTime(
+ Isolate* isolate, int32_t year, int32_t month, int32_t day, int32_t hour,
+ int32_t minute, int32_t second, int32_t millisecond, int32_t microsecond,
+ int32_t nanosecond, Handle<JSReceiver> calendar, const DurationRecord& dur,
+ Handle<Object> options);
+
+// #sec-temporal-addzoneddatetime
+V8_WARN_UNUSED_RESULT MaybeHandle<BigInt> AddZonedDateTime(
+ Isolate* isolate, Handle<BigInt> eopch_nanoseconds,
+ Handle<JSReceiver> time_zone, Handle<JSReceiver> calendar,
+ const DurationRecord& duration, const char* method_name);
+
+V8_WARN_UNUSED_RESULT MaybeHandle<BigInt> AddZonedDateTime(
+ Isolate* isolate, Handle<BigInt> eopch_nanoseconds,
+ Handle<JSReceiver> time_zone, Handle<JSReceiver> calendar,
+ const DurationRecord& duration, Handle<JSReceiver> options,
+ const char* method_name);
+
+// #sec-temporal-isvalidepochnanoseconds
+bool IsValidEpochNanoseconds(Isolate* isolate,
+ Handle<BigInt> epoch_nanoseconds);
+
+// #sec-temporal-isvalidduration
+bool IsValidDuration(Isolate* isolate, const DurationRecord& dur);
+
+// #sec-temporal-nanosecondstodays
+V8_WARN_UNUSED_RESULT Maybe<bool> NanosecondsToDays(
+ Isolate* isolate, Handle<BigInt> nanoseconds,
+ Handle<Object> relative_to_obj, int64_t* result_days,
+ int64_t* result_nanoseconds, int64_t* result_day_length,
+ const char* method_name);
+
+V8_WARN_UNUSED_RESULT Maybe<bool> NanosecondsToDays(
+ Isolate* isolate, int64_t nanoseconds, Handle<Object> relative_to_obj,
+ int64_t* result_days, int64_t* resultj_nanoseconds,
+ int64_t* result_day_length, const char* method_name);
+
+// #sec-temporal-interpretisodatetimeoffset
+enum class OffsetBehaviour { kOption, kExact, kWall };
+
+V8_WARN_UNUSED_RESULT
+MaybeHandle<BigInt> GetEpochFromISOParts(Isolate* isolate, int32_t year,
+ int32_t month, int32_t day,
+ int32_t hour, int32_t minute,
+ int32_t second, int32_t millisecond,
+ int32_t microsecond,
+ int32_t nanosecond);
+
+int32_t DurationSign(Isolate* isolaet, const DurationRecord& dur);
+
+// #sec-temporal-isodaysinmonth
+int32_t ISODaysInMonth(Isolate* isolate, int32_t year, int32_t month);
+
+// #sec-temporal-isodaysinyear
+int32_t ISODaysInYear(Isolate* isolate, int32_t year);
+
+bool IsValidTime(Isolate* isolate, int32_t hour, int32_t minute, int32_t second,
+ int32_t millisecond, int32_t microsecond, int32_t nanosecond);
+
+// #sec-temporal-isvalidisodate
+bool IsValidISODate(Isolate* isolate, int32_t year, int32_t month, int32_t day);
+
+// #sec-temporal-compareisodate
+int32_t CompareISODate(Isolate* isolate, int32_t y1, int32_t m1, int32_t d1,
+ int32_t y2, int32_t m2, int32_t d2);
+
+// #sec-temporal-balanceisoyearmonth
+void BalanceISOYearMonth(Isolate* isolate, int32_t* year, int32_t* month);
+
+// #sec-temporal-balancetime
+V8_WARN_UNUSED_RESULT DateTimeRecordCommon
+BalanceTime(Isolate* isolate, int64_t hour, int64_t minute, int64_t second,
+ int64_t millisecond, int64_t microsecond, int64_t nanosecond);
+
+// #sec-temporal-differencetime
+V8_WARN_UNUSED_RESULT DurationRecord
+DifferenceTime(Isolate* isolate, int32_t h1, int32_t min1, int32_t s1,
+ int32_t ms1, int32_t mus1, int32_t ns1, int32_t h2, int32_t min2,
+ int32_t s2, int32_t ms2, int32_t mus2, int32_t ns2);
+
+// #sec-temporal-addtime
+V8_WARN_UNUSED_RESULT DateTimeRecordCommon
+AddTime(Isolate* isolate, int64_t hour, int64_t minute, int64_t second,
+ int64_t millisecond, int64_t microsecond, int64_t nanosecond,
+ int64_t hours, int64_t minutes, int64_t seconds, int64_t milliseconds,
+ int64_t microseconds, int64_t nanoseconds);
+
+// #sec-temporal-totaldurationnanoseconds
+int64_t TotalDurationNanoseconds(Isolate* isolate, int64_t days, int64_t hours,
+ int64_t minutes, int64_t seconds,
+ int64_t milliseconds, int64_t microseconds,
+ int64_t nanoseconds, int64_t offset_shift);
+
+// Calendar Operations
+
+// #sec-temporal-calendardateadd
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalPlainDate> CalendarDateAdd(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<Object> date,
+ Handle<Object> durations, Handle<Object> options, Handle<Object> date_add);
+
+// #sec-temporal-calendardateuntil
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalDuration> CalendarDateUntil(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<Object> one,
+ Handle<Object> two, Handle<Object> options, Handle<Object> date_until);
+
+// #sec-temporal-calendarfields
+MaybeHandle<FixedArray> CalendarFields(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<FixedArray> field_names);
+
+// #sec-temporal-getoffsetnanosecondsfor
+V8_WARN_UNUSED_RESULT Maybe<int64_t> GetOffsetNanosecondsFor(
+ Isolate* isolate, Handle<JSReceiver> time_zone, Handle<Object> instant,
+ const char* method_name);
+
+// #sec-temporal-totemporalcalendarwithisodefault
+MaybeHandle<JSReceiver> ToTemporalCalendarWithISODefault(
+ Isolate* isolate, Handle<Object> temporal_calendar_like,
+ const char* method_name);
+
+// #sec-temporal-isbuiltincalendar
+bool IsBuiltinCalendar(Isolate* isolate, Handle<String> id);
+
+// Internal Helper Function
+int32_t CalendarIndex(Isolate* isolate, Handle<String> id);
+
+// #sec-isvalidtimezonename
+bool IsValidTimeZoneName(Isolate* isolate, Handle<String> time_zone);
+
+// #sec-canonicalizetimezonename
+V8_WARN_UNUSED_RESULT MaybeHandle<String> CanonicalizeTimeZoneName(
+ Isolate* isolate, Handle<String> identifier);
+
+// #sec-temporal-tointegerthrowoninfinity
+MaybeHandle<Object> ToIntegerThrowOnInfinity(Isolate* isolate,
+ Handle<Object> argument);
+
+// #sec-temporal-topositiveinteger
+MaybeHandle<Object> ToPositiveInteger(Isolate* isolate,
+ Handle<Object> argument);
+
+inline int64_t floor_divide(int64_t a, int64_t b) {
+ return (((a) / (b)) + ((((a) < 0) && (((a) % (b)) != 0)) ? -1 : 0));
+}
+inline int64_t modulo(int64_t a, int64_t b) {
+ return ((((a) % (b)) + (b)) % (b));
+}
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define AT __FILE__ ":" TOSTRING(__LINE__)
+
+#ifdef DEBUG
+#define TEMPORAL_DEBUG_INFO AT
+#define TEMPORAL_ENTER_FUNC()
+// #define TEMPORAL_ENTER_FUNC() do { PrintF("Start: %s\n", __func__); } while
+// (false)
+#else
+// #define TEMPORAL_DEBUG_INFO ""
+#define TEMPORAL_DEBUG_INFO AT
+#define TEMPORAL_ENTER_FUNC()
+// #define TEMPORAL_ENTER_FUNC() do { PrintF("Start: %s\n", __func__); } while
+// (false)
+#endif // DEBUG
+
+#define NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR() \
+ NewTypeError( \
+ MessageTemplate::kInvalidArgumentForTemporal, \
+ isolate->factory()->NewStringFromStaticChars(TEMPORAL_DEBUG_INFO))
+
+#define NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR() \
+ NewRangeError( \
+ MessageTemplate::kInvalidTimeValueForTemporal, \
+ isolate->factory()->NewStringFromStaticChars(TEMPORAL_DEBUG_INFO))
+
+// #sec-defaulttimezone
+MaybeHandle<String> DefaultTimeZone(Isolate* isolate) {
+ TEMPORAL_ENTER_FUNC();
+ // For now, always return "UTC"
+ // TODO(ftang) implement behavior specified in #sup-defaulttimezone
+ return isolate->factory()->UTC_string();
+}
+
+// #sec-temporal-isodatetimewithinlimits
+bool ISODateTimeWithinLimits(Isolate* isolate, int32_t year, int32_t month,
+ int32_t day, int32_t hour, int32_t minute,
+ int32_t second, int32_t millisecond,
+ int32_t microsecond, int32_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+ /**
+ * Note: It is really overkill to decide within the limit by following the
+ * specified algorithm literally, which require the conversion to BigInt.
+ * Take a short cut and use pre-calculated year/month/day boundary instead.
+ *
+ * Math:
+ * (-8.64 x 10^21- 8.64 x 10^16, 8.64 x 10^21 + 8.64 x 10^16) ns
+ * = (-8.64 x 9999 x 10^16, 8.64 x 9999 x 10^16) ns
+ * = (-8.64 x 9999 x 10^10, 8.64 x 9999 x 10^10) millisecond
+ * = (-8.64 x 9999 x 10^7, 8.64 x 9999 x 10^7) second
+ * = (-86400 x 9999 x 10^3, 86400 x 9999 x 10^3) second
+ * = (-9999 x 10^3, 9999 x 10^3) days => Because 60*60*24 = 86400
+ * 9999000 days is about 27376 years, 4 months and 7 days.
+ * Therefore 9999000 days before Jan 1 1970 is around Auguest 23, -25407 and
+ * 9999000 days after Jan 1 1970 is around April 9, 29346.
+ */
+ if (year > -25407 && year < 29346) return true;
+ if (year < -25407 || year > 29346) return false;
+ if (year == -25407) {
+ if (month > 8) return true;
+ if (month < 8) return false;
+ return (day > 23);
+ } else {
+ DCHECK_EQ(year, 29346);
+ if (month > 4) return false;
+ if (month < 4) return true;
+ return (day > 23);
+ }
+ // 1. Assert: year, month, day, hour, minute, second, millisecond,
+ // microsecond, and nanosecond are integers.
+ // 2. Let ns be ! GetEpochFromISOParts(year, month, day, hour, minute,
+ // second, millisecond, microsecond, nanosecond).
+ // 3. If ns ≤ -8.64 × 10^21 - 8.64 × 10^16, then
+ // 4. If ns ≥ 8.64 × 10^21 + 8.64 × 10^16, then
+ // 5. Return true.
+}
+
+// #sec-temporal-isoyearmonthwithinlimits
+bool ISOYearMonthWithinLimits(int32_t year, int32_t month) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: year and month are integers.
+ // 2. If year < −271821 or year > 275760, then
+ // a. Return false.
+ if (year < -271821 || year > 275760) return false;
+ // 3. If year is −271821 and month < 4, then
+ // a. Return false.
+ if (year == -271821 && month < 4) return false;
+ // 4. If year is 275760 and month > 9, then
+ // a. Return false.
+ if (year == 275760 && month > 9) return false;
+ // 5. Return true.
+ return true;
+}
+
+#define ORDINARY_CREATE_FROM_CONSTRUCTOR(obj, target, new_target, T) \
+ Handle<JSReceiver> new_target_receiver = \
+ Handle<JSReceiver>::cast(new_target); \
+ Handle<Map> map; \
+ ASSIGN_RETURN_ON_EXCEPTION( \
+ isolate, map, \
+ JSFunction::GetDerivedMap(isolate, target, new_target_receiver), T); \
+ Handle<T> object = \
+ Handle<T>::cast(isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+
+#define THROW_INVALID_RANGE(T) \
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), T);
+
+#define CONSTRUCTOR(name) \
+ Handle<JSFunction>( \
+ JSFunction::cast( \
+ isolate->context().native_context().temporal_##name##_function()), \
+ isolate)
+
+// #sec-temporal-systemutcepochnanoseconds
+MaybeHandle<BigInt> SystemUTCEpochNanoseconds(Isolate* isolate) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let ns be the approximate current UTC date and time, in nanoseconds
+ // since the epoch.
+ double ms = V8::GetCurrentPlatform()->CurrentClockTimeMillis();
+ // 2. Set ns to the result of clamping ns between −8.64 × 10^21 and 8.64 ×
+ // 10^21.
+
+ // 3. Return ℤ(ns).
+ double ns = ms * 1000000.0;
+ ns = std::floor(std::max(-8.64e21, std::min(ns, 8.64e21)));
+ return BigInt::FromNumber(isolate, isolate->factory()->NewNumber(ns));
+}
+
+// #sec-temporal-createtemporalcalendar
+MaybeHandle<JSTemporalCalendar> CreateTemporalCalendar(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<String> identifier) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: ! IsBuiltinCalendar(identifier) is true.
+ // 2. If newTarget is not provided, set newTarget to %Temporal.Calendar%.
+ // 3. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.Calendar.prototype%", « [[InitializedTemporalCalendar]],
+ // [[Identifier]] »).
+ int32_t index = CalendarIndex(isolate, identifier);
+
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalCalendar)
+
+ object->set_flags(0);
+ // 4. Set object.[[Identifier]] to identifier.
+ object->set_calendar_index(index);
+ // 5. Return object.
+ return object;
+}
+
+MaybeHandle<JSTemporalCalendar> CreateTemporalCalendar(
+ Isolate* isolate, Handle<String> identifier) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalCalendar(isolate, CONSTRUCTOR(calendar),
+ CONSTRUCTOR(calendar), identifier);
+}
+
+// #sec-temporal-createtemporaldate
+MaybeHandle<JSTemporalPlainDate> CreateTemporalDate(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t iso_year, int32_t iso_month, int32_t iso_day,
+ Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: isoYear is an integer.
+ // 2. Assert: isoMonth is an integer.
+ // 3. Assert: isoDay is an integer.
+ // 4. Assert: Type(calendar) is Object.
+ // 5. If ! IsValidISODate(isoYear, isoMonth, isoDay) is false, throw a
+ // RangeError exception.
+ if (!IsValidISODate(isolate, iso_year, iso_month, iso_day)) {
+ THROW_INVALID_RANGE(JSTemporalPlainDate);
+ }
+ // 6. If ! ISODateTimeWithinLimits(isoYear, isoMonth, isoDay, 12, 0, 0, 0, 0,
+ // 0) is false, throw a RangeError exception.
+ if (!ISODateTimeWithinLimits(isolate, iso_year, iso_month, iso_day, 12, 0, 0,
+ 0, 0, 0)) {
+ THROW_INVALID_RANGE(JSTemporalPlainDate);
+ }
+ // 7. If newTarget is not present, set it to %Temporal.PlainDate%.
+
+ // 8. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.PlainDate.prototype%", « [[InitializedTemporalDate]],
+ // [[ISOYear]], [[ISOMonth]], [[ISODay]], [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalPlainDate)
+ object->set_year_month_day(0);
+ // 9. Set object.[[ISOYear]] to isoYear.
+ object->set_iso_year(iso_year);
+ // 10. Set object.[[ISOMonth]] to isoMonth.
+ object->set_iso_month(iso_month);
+ // 11. Set object.[[ISODay]] to isoDay.
+ object->set_iso_day(iso_day);
+ // 12. Set object.[[Calendar]] to calendar.
+ object->set_calendar(*calendar);
+ // 13. Return object.
+ return object;
+}
+
+MaybeHandle<JSTemporalPlainDate> CreateTemporalDate(
+ Isolate* isolate, int32_t iso_year, int32_t iso_month, int32_t iso_day,
+ Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalDate(isolate, CONSTRUCTOR(plain_date),
+ CONSTRUCTOR(plain_date), iso_year, iso_month,
+ iso_day, calendar);
+}
+
+// #sec-temporal-createtemporaldatetime
+MaybeHandle<JSTemporalPlainDateTime> CreateTemporalDateTime(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t iso_year, int32_t iso_month, int32_t iso_day, int32_t hour,
+ int32_t minute, int32_t second, int32_t millisecond, int32_t microsecond,
+ int32_t nanosecond, Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: isoYear, isoMonth, isoDay, hour, minute, second, millisecond,
+ // microsecond, and nanosecond are integers.
+ // 2. Assert: Type(calendar) is Object.
+ // 3. If ! IsValidISODate(isoYear, isoMonth, isoDay) is false, throw a
+ // RangeError exception.
+ if (!IsValidISODate(isolate, iso_year, iso_month, iso_day)) {
+ THROW_INVALID_RANGE(JSTemporalPlainDateTime);
+ }
+ // 4. If ! IsValidTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond) is false, throw a RangeError exception.
+ if (!IsValidTime(isolate, hour, minute, second, millisecond, microsecond,
+ nanosecond)) {
+ THROW_INVALID_RANGE(JSTemporalPlainDateTime);
+ }
+ // 5. If ! ISODateTimeWithinLimits(isoYear, isoMonth, isoDay, hour, minute,
+ // second, millisecond, microsecond, nanosecond) is false, then
+ if (!ISODateTimeWithinLimits(isolate, iso_year, iso_month, iso_day, hour,
+ minute, second, millisecond, microsecond,
+ nanosecond)) {
+ // a. Throw a RangeError exception.
+ THROW_INVALID_RANGE(JSTemporalPlainDateTime);
+ }
+ // 6. If newTarget is not present, set it to %Temporal.PlainDateTime%.
+ // 7. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.PlainDateTime.prototype%", « [[InitializedTemporalDateTime]],
+ // [[ISOYear]], [[ISOMonth]], [[ISODay]], [[ISOHour]], [[ISOMinute]],
+ // [[ISOSecond]], [[ISOMillisecond]], [[ISOMicrosecond]], [[ISONanosecond]],
+ // [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalPlainDateTime)
+
+ object->set_year_month_day(0);
+ object->set_hour_minute_second(0);
+ object->set_second_parts(0);
+ // 8. Set object.[[ISOYear]] to isoYear.
+ object->set_iso_year(iso_year);
+ // 9. Set object.[[ISOMonth]] to isoMonth.
+ object->set_iso_month(iso_month);
+ // 10. Set object.[[ISODay]] to isoDay.
+ object->set_iso_day(iso_day);
+ // 11. Set object.[[ISOHour]] to hour.
+ object->set_iso_hour(hour);
+ // 12. Set object.[[ISOMinute]] to minute.
+ object->set_iso_minute(minute);
+ // 13. Set object.[[ISOSecond]] to second.
+ object->set_iso_second(second);
+ // 14. Set object.[[ISOMillisecond]] to millisecond.
+ object->set_iso_millisecond(millisecond);
+ // 15. Set object.[[ISOMicrosecond]] to microsecond.
+ object->set_iso_microsecond(microsecond);
+ // 16. Set object.[[ISONanosecond]] to nanosecond.
+ object->set_iso_nanosecond(nanosecond);
+ // 17. Set object.[[Calendar]] to calendar.
+ object->set_calendar(*calendar);
+ // 18. Return object.
+ return object;
+}
+
+MaybeHandle<JSTemporalPlainDateTime> CreateTemporalDateTimeDefaultTarget(
+ Isolate* isolate, int32_t iso_year, int32_t iso_month, int32_t iso_day,
+ int32_t hour, int32_t minute, int32_t second, int32_t millisecond,
+ int32_t microsecond, int32_t nanosecond, Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalDateTime(isolate, CONSTRUCTOR(plain_date_time),
+ CONSTRUCTOR(plain_date_time), iso_year,
+ iso_month, iso_day, hour, minute, second,
+ millisecond, microsecond, nanosecond, calendar);
+}
+
+} // namespace
+
+namespace temporal {
+
+MaybeHandle<JSTemporalPlainDateTime> CreateTemporalDateTime(
+ Isolate* isolate, int32_t iso_year, int32_t iso_month, int32_t iso_day,
+ int32_t hour, int32_t minute, int32_t second, int32_t millisecond,
+ int32_t microsecond, int32_t nanosecond, Handle<JSReceiver> calendar) {
+ return CreateTemporalDateTimeDefaultTarget(
+ isolate, iso_year, iso_month, iso_day, hour, minute, second, millisecond,
+ microsecond, nanosecond, calendar);
+}
+
+} // namespace temporal
+
+namespace {
+// #sec-temporal-createtemporaltime
+MaybeHandle<JSTemporalPlainTime> CreateTemporalTime(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t hour, int32_t minute, int32_t second, int32_t millisecond,
+ int32_t microsecond, int32_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+ // 2. If ! IsValidTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond) is false, throw a RangeError exception.
+ if (!IsValidTime(isolate, hour, minute, second, millisecond, microsecond,
+ nanosecond)) {
+ THROW_INVALID_RANGE(JSTemporalPlainTime);
+ }
+
+ // 4. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.PlainTime.prototype%", « [[InitializedTemporalTime]],
+ // [[ISOHour]], [[ISOMinute]], [[ISOSecond]], [[ISOMillisecond]],
+ // [[ISOMicrosecond]], [[ISONanosecond]], [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalPlainTime)
+ Handle<JSTemporalCalendar> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalPlainTime);
+ object->set_hour_minute_second(0);
+ object->set_second_parts(0);
+ // 5. Set object.[[ISOHour]] to hour.
+ object->set_iso_hour(hour);
+ // 6. Set object.[[ISOMinute]] to minute.
+ object->set_iso_minute(minute);
+ // 7. Set object.[[ISOSecond]] to second.
+ object->set_iso_second(second);
+ // 8. Set object.[[ISOMillisecond]] to millisecond.
+ object->set_iso_millisecond(millisecond);
+ // 9. Set object.[[ISOMicrosecond]] to microsecond.
+ object->set_iso_microsecond(microsecond);
+ // 10. Set object.[[ISONanosecond]] to nanosecond.
+ object->set_iso_nanosecond(nanosecond);
+ // 11. Set object.[[Calendar]] to ? GetISO8601Calendar().
+ object->set_calendar(*calendar);
+
+ // 12. Return object.
+ return object;
+}
+MaybeHandle<JSTemporalPlainTime> CreateTemporalTime(
+ Isolate* isolate, int32_t hour, int32_t minute, int32_t second,
+ int32_t millisecond, int32_t microsecond, int32_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalTime(isolate, CONSTRUCTOR(plain_time),
+ CONSTRUCTOR(plain_time), hour, minute, second,
+ millisecond, microsecond, nanosecond);
+}
+
+// #sec-temporal-createtemporalmonthday
+MaybeHandle<JSTemporalPlainMonthDay> CreateTemporalMonthDay(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t iso_month, int32_t iso_day, Handle<JSReceiver> calendar,
+ int32_t reference_iso_year) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: isoMonth, isoDay, and referenceISOYear are integers.
+ // 2. Assert: Type(calendar) is Object.
+ // 3. If ! IsValidISODate(referenceISOYear, isoMonth, isoDay) is false, throw
+ // a RangeError exception.
+ if (!IsValidISODate(isolate, reference_iso_year, iso_month, iso_day)) {
+ THROW_INVALID_RANGE(JSTemporalPlainMonthDay);
+ }
+ // 4. If newTarget is not present, set it to %Temporal.PlainMonthDay%.
+ // 5. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.PlainMonthDay.prototype%", « [[InitializedTemporalMonthDay]],
+ // [[ISOMonth]], [[ISODay]], [[ISOYear]], [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalPlainMonthDay)
+ object->set_year_month_day(0);
+ // 6. Set object.[[ISOMonth]] to isoMonth.
+ object->set_iso_month(iso_month);
+ // 7. Set object.[[ISODay]] to isoDay.
+ object->set_iso_day(iso_day);
+ // 8. Set object.[[Calendar]] to calendar.
+ object->set_calendar(*calendar);
+ // 9. Set object.[[ISOYear]] to referenceISOYear.
+ object->set_iso_year(reference_iso_year);
+ // 10. Return object.
+ return object;
+}
+
+// #sec-temporal-createtemporalyearmonth
+MaybeHandle<JSTemporalPlainYearMonth> CreateTemporalYearMonth(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t iso_year, int32_t iso_month, Handle<JSReceiver> calendar,
+ int32_t reference_iso_day) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: isoYear, isoMonth, and referenceISODay are integers.
+ // 2. Assert: Type(calendar) is Object.
+ // 3. If ! IsValidISODate(isoYear, isoMonth, referenceISODay) is false, throw
+ // a RangeError exception.
+ if (!IsValidISODate(isolate, iso_year, iso_month, reference_iso_day)) {
+ THROW_INVALID_RANGE(JSTemporalPlainYearMonth);
+ }
+ // 4. If ! ISOYearMonthWithinLimits(isoYear, isoMonth) is false, throw a
+ // RangeError exception.
+ if (!ISOYearMonthWithinLimits(iso_year, iso_month)) {
+ THROW_INVALID_RANGE(JSTemporalPlainYearMonth);
+ }
+ // 5. If newTarget is not present, set it to %Temporal.PlainYearMonth%.
+ // 6. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.PlainYearMonth.prototype%", « [[InitializedTemporalYearMonth]],
+ // [[ISOYear]], [[ISOMonth]], [[ISODay]], [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalPlainYearMonth)
+ object->set_year_month_day(0);
+ // 7. Set object.[[ISOYear]] to isoYear.
+ object->set_iso_year(iso_year);
+ // 8. Set object.[[ISOMonth]] to isoMonth.
+ object->set_iso_month(iso_month);
+ // 9. Set object.[[Calendar]] to calendar.
+ object->set_calendar(*calendar);
+ // 10. Set object.[[ISODay]] to referenceISODay.
+ object->set_iso_day(reference_iso_day);
+ // 11. Return object.
+ return object;
+}
+
+// #sec-temporal-createtemporalzoneddatetime
+MaybeHandle<JSTemporalZonedDateTime> CreateTemporalZonedDateTime(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<BigInt> epoch_nanoseconds, Handle<JSReceiver> time_zone,
+ Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: Type(epochNanoseconds) is BigInt.
+ // 2. Assert: ! IsValidEpochNanoseconds(epochNanoseconds) is true.
+ DCHECK(IsValidEpochNanoseconds(isolate, epoch_nanoseconds));
+ // 3. Assert: Type(timeZone) is Object.
+ // 4. Assert: Type(calendar) is Object.
+ // 5. If newTarget is not present, set it to %Temporal.ZonedDateTime%.
+ // 6. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.ZonedDateTime.prototype%", «
+ // [[InitializedTemporalZonedDateTime]], [[Nanoseconds]], [[TimeZone]],
+ // [[Calendar]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalZonedDateTime)
+ // 7. Set object.[[Nanoseconds]] to epochNanoseconds.
+ object->set_nanoseconds(*epoch_nanoseconds);
+ // 8. Set object.[[TimeZone]] to timeZone.
+ object->set_time_zone(*time_zone);
+ // 9. Set object.[[Calendar]] to calendar.
+ object->set_calendar(*calendar);
+ // 10. Return object.
+ return object;
+}
+MaybeHandle<JSTemporalZonedDateTime> CreateTemporalZonedDateTime(
+ Isolate* isolate, Handle<BigInt> epoch_nanoseconds,
+ Handle<JSReceiver> time_zone, Handle<JSReceiver> calendar) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalZonedDateTime(isolate, CONSTRUCTOR(zoned_date_time),
+ CONSTRUCTOR(zoned_date_time),
+ epoch_nanoseconds, time_zone, calendar);
+}
+
+// #sec-temporal-createtemporalduration
+MaybeHandle<JSTemporalDuration> CreateTemporalDuration(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int64_t years, int64_t months, int64_t weeks, int64_t days, int64_t hours,
+ int64_t minutes, int64_t seconds, int64_t milliseconds,
+ int64_t microseconds, int64_t nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ Factory* factory = isolate->factory();
+ // 1. If ! IsValidDuration(years, months, weeks, days, hours, minutes,
+ // seconds, milliseconds, microseconds, nanoseconds) is false, throw a
+ // RangeError exception.
+ if (!IsValidDuration(isolate,
+ {years, months, weeks, days, hours, minutes, seconds,
+ milliseconds, microseconds, nanoseconds})) {
+ THROW_INVALID_RANGE(JSTemporalDuration);
+ }
+
+ // 2. If newTarget is not present, set it to %Temporal.Duration%.
+ // 3. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.Duration.prototype%", « [[InitializedTemporalDuration]],
+ // [[Years]], [[Months]], [[Weeks]], [[Days]], [[Hours]], [[Minutes]],
+ // [[Seconds]], [[Milliseconds]], [[Microseconds]], [[Nanoseconds]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalDuration)
+#define SET_FROM_INT64(obj, p) \
+ do { \
+ Handle<Object> item = factory->NewNumberFromInt64(p); \
+ object->set_##p(*item); \
+ } while (false)
+ // 4. Set object.[[Years]] to years.
+ SET_FROM_INT64(object, years);
+ // 5. Set object.[[Months]] to months.
+ SET_FROM_INT64(object, months);
+ // 6. Set object.[[Weeks]] to weeks.
+ SET_FROM_INT64(object, weeks);
+ // 7. Set object.[[Days]] to days.
+ SET_FROM_INT64(object, days);
+ // 8. Set object.[[Hours]] to hours.
+ SET_FROM_INT64(object, hours);
+ // 9. Set object.[[Minutes]] to minutes.
+ SET_FROM_INT64(object, minutes);
+ // 10. Set object.[[Seconds]] to seconds.
+ SET_FROM_INT64(object, seconds);
+ // 11. Set object.[[Milliseconds]] to milliseconds.
+ SET_FROM_INT64(object, milliseconds);
+ // 12. Set object.[[Microseconds]] to microseconds.
+ SET_FROM_INT64(object, microseconds);
+ // 13. Set object.[[Nanoseconds]] to nanoseconds.
+ SET_FROM_INT64(object, nanoseconds);
+#undef SET_FROM_INT64
+ // 14. Return object.
+ return object;
+}
+
+MaybeHandle<JSTemporalDuration> CreateTemporalDuration(
+ Isolate* isolate, int64_t years, int64_t months, int64_t weeks,
+ int64_t days, int64_t hours, int64_t minutes, int64_t seconds,
+ int64_t milliseconds, int64_t microseconds, int64_t nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalDuration(isolate, CONSTRUCTOR(duration),
+ CONSTRUCTOR(duration), years, months, weeks,
+ days, hours, minutes, seconds, milliseconds,
+ microseconds, nanoseconds);
+}
+
+} // namespace
+
+namespace temporal {
+
+// #sec-temporal-createtemporalinstant
+MaybeHandle<JSTemporalInstant> CreateTemporalInstant(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<BigInt> epoch_nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: Type(epochNanoseconds) is BigInt.
+ // 2. Assert: ! IsValidEpochNanoseconds(epochNanoseconds) is true.
+ DCHECK(IsValidEpochNanoseconds(isolate, epoch_nanoseconds));
+
+ // 4. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.Instant.prototype%", « [[InitializedTemporalInstant]],
+ // [[Nanoseconds]] »).
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalInstant)
+ // 5. Set object.[[Nanoseconds]] to ns.
+ object->set_nanoseconds(*epoch_nanoseconds);
+ return object;
+}
+
+MaybeHandle<JSTemporalInstant> CreateTemporalInstant(
+ Isolate* isolate, Handle<BigInt> epoch_nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalInstant(isolate, CONSTRUCTOR(instant),
+ CONSTRUCTOR(instant), epoch_nanoseconds);
+}
+
+} // namespace temporal
+
+namespace {
+
+MaybeHandle<JSTemporalTimeZone> CreateTemporalTimeZoneFromIndex(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ int32_t index) {
+ TEMPORAL_ENTER_FUNC();
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalTimeZone)
+ object->set_flags(0);
+ object->set_details(0);
+
+ object->set_is_offset(false);
+ object->set_offset_milliseconds_or_time_zone_index(index);
+ return object;
+}
+
+MaybeHandle<JSTemporalTimeZone> CreateTemporalTimeZoneUTC(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalTimeZoneFromIndex(isolate, target, new_target, 0);
+}
+
+bool IsUTC(Isolate* isolate, Handle<String> time_zone);
+
+// #sec-temporal-createtemporaltimezone
+MaybeHandle<JSTemporalTimeZone> CreateTemporalTimeZone(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<String> identifier) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. If newTarget is not present, set it to %Temporal.TimeZone%.
+ // 2. Let object be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%Temporal.TimeZone.prototype%", « [[InitializedTemporalTimeZone]],
+ // [[Identifier]], [[OffsetNanoseconds]] »).
+ // 3. Set object.[[Identifier]] to identifier.
+ if (IsUTC(isolate, identifier)) {
+ return CreateTemporalTimeZoneUTC(isolate, target, new_target);
+ }
+#ifdef V8_INTL_SUPPORT
+ int32_t time_zone_index;
+ Maybe<bool> maybe_time_zone_index =
+ Intl::GetTimeZoneIndex(isolate, identifier, &time_zone_index);
+ MAYBE_RETURN(maybe_time_zone_index, Handle<JSTemporalTimeZone>());
+ if (maybe_time_zone_index.FromJust()) {
+ return CreateTemporalTimeZoneFromIndex(isolate, target, new_target,
+ time_zone_index);
+ }
+#endif // V8_INTL_SUPPORT
+
+ // 4. If identifier satisfies the syntax of a TimeZoneNumericUTCOffset
+ // (see 13.33), then a. Set object.[[OffsetNanoseconds]] to !
+ // ParseTimeZoneOffsetString(identifier).
+ // 5. Else,
+ // a. Assert: ! CanonicalizeTimeZoneName(identifier) is identifier.
+ // b. Set object.[[OffsetNanoseconds]] to undefined.
+ // 6. Return object.
+ Maybe<int64_t> maybe_offset_nanoseconds =
+ ParseTimeZoneOffsetString(isolate, identifier, false);
+ MAYBE_RETURN(maybe_offset_nanoseconds, Handle<JSTemporalTimeZone>());
+ int64_t offset_nanoseconds = maybe_offset_nanoseconds.FromJust();
+
+ ORDINARY_CREATE_FROM_CONSTRUCTOR(object, target, new_target,
+ JSTemporalTimeZone)
+ object->set_flags(0);
+ object->set_details(0);
+
+ object->set_is_offset(true);
+ object->set_offset_nanoseconds(offset_nanoseconds);
+ return object;
+}
+
+MaybeHandle<JSTemporalTimeZone> CreateTemporalTimeZoneDefaultTarget(
+ Isolate* isolate, Handle<String> identifier) {
+ TEMPORAL_ENTER_FUNC();
+ return CreateTemporalTimeZone(isolate, CONSTRUCTOR(time_zone),
+ CONSTRUCTOR(time_zone), identifier);
+}
+
+} // namespace
+
+namespace temporal {
+MaybeHandle<JSTemporalTimeZone> CreateTemporalTimeZone(
+ Isolate* isolate, Handle<String> identifier) {
+ return CreateTemporalTimeZoneDefaultTarget(isolate, identifier);
+}
+} // namespace temporal
+
+namespace {
+
+// #sec-temporal-systeminstant
+MaybeHandle<JSTemporalInstant> SystemInstant(Isolate* isolate) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let ns be ! SystemUTCEpochNanoseconds().
+ Handle<BigInt> ns;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, ns, SystemUTCEpochNanoseconds(isolate),
+ JSTemporalInstant);
+ // 2. Return ? CreateTemporalInstant(ns).
+ return temporal::CreateTemporalInstant(isolate, ns);
+}
+
+// #sec-temporal-systemtimezone
+MaybeHandle<JSTemporalTimeZone> SystemTimeZone(Isolate* isolate) {
+ TEMPORAL_ENTER_FUNC();
+ Handle<String> default_time_zone;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, default_time_zone,
+ DefaultTimeZone(isolate), JSTemporalTimeZone);
+ return temporal::CreateTemporalTimeZone(isolate, default_time_zone);
+}
+
+Maybe<DateTimeRecordCommon> GetISOPartsFromEpoch(
+ Isolate* isolate, Handle<BigInt> epoch_nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ DateTimeRecordCommon result;
+ // 1. Let remainderNs be epochNanoseconds modulo 10^6.
+ Handle<BigInt> million = BigInt::FromInt64(isolate, 1000000);
+ Handle<BigInt> remainder_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, remainder_ns,
+ BigInt::Remainder(isolate, epoch_nanoseconds, million),
+ Nothing<DateTimeRecordCommon>());
+ // Need to do some remainder magic to negative remainder.
+ if (remainder_ns->IsNegative()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, remainder_ns, BigInt::Add(isolate, remainder_ns, million),
+ Nothing<DateTimeRecordCommon>());
+ }
+
+ // 2. Let epochMilliseconds be (epochNanoseconds − remainderNs) / 10^6.
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, bigint,
+ BigInt::Subtract(isolate, epoch_nanoseconds, remainder_ns),
+ Nothing<DateTimeRecordCommon>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, bigint,
+ BigInt::Divide(isolate, bigint, million),
+ Nothing<DateTimeRecordCommon>());
+ int64_t epoch_milliseconds = bigint->AsInt64();
+ int year = 0;
+ int month = 0;
+ int day = 0;
+ int wday = 0;
+ int hour = 0;
+ int min = 0;
+ int sec = 0;
+ int ms = 0;
+ isolate->date_cache()->BreakDownTime(epoch_milliseconds, &year, &month, &day,
+ &wday, &hour, &min, &sec, &ms);
+
+ // 3. Let year be ! YearFromTime(epochMilliseconds).
+ result.year = year;
+ // 4. Let month be ! MonthFromTime(epochMilliseconds) + 1.
+ result.month = month + 1;
+ DCHECK_GE(result.month, 1);
+ DCHECK_LE(result.month, 12);
+ // 5. Let day be ! DateFromTime(epochMilliseconds).
+ result.day = day;
+ DCHECK_GE(result.day, 1);
+ DCHECK_LE(result.day, 31);
+ // 6. Let hour be ! HourFromTime(epochMilliseconds).
+ result.hour = hour;
+ DCHECK_GE(result.hour, 0);
+ DCHECK_LE(result.hour, 23);
+ // 7. Let minute be ! MinFromTime(epochMilliseconds).
+ result.minute = min;
+ DCHECK_GE(result.minute, 0);
+ DCHECK_LE(result.minute, 59);
+ // 8. Let second be ! SecFromTime(epochMilliseconds).
+ result.second = sec;
+ DCHECK_GE(result.second, 0);
+ DCHECK_LE(result.second, 59);
+ // 9. Let millisecond be ! msFromTime(epochMilliseconds).
+ result.millisecond = ms;
+ DCHECK_GE(result.millisecond, 0);
+ DCHECK_LE(result.millisecond, 999);
+ // 10. Let microsecond be floor(remainderNs / 1000) modulo 1000.
+ int64_t remainder = remainder_ns->AsInt64();
+ result.microsecond = (remainder / 1000) % 1000;
+ DCHECK_GE(result.microsecond, 0);
+ DCHECK_LE(result.microsecond, 999);
+ // 11. Let nanosecond be remainderNs modulo 1000.
+ result.nanosecond = remainder % 1000;
+ DCHECK_GE(result.nanosecond, 0);
+ DCHECK_LE(result.nanosecond, 999);
+ return Just(result);
+}
+
+// #sec-temporal-balanceisodatetime
+DateTimeRecordCommon BalanceISODateTime(Isolate* isolate, int32_t year,
+ int32_t month, int32_t day,
+ int32_t hour, int32_t minute,
+ int32_t second, int32_t millisecond,
+ int32_t microsecond,
+ int64_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: year, month, day, hour, minute, second, millisecond,
+ // microsecond, and nanosecond are integers.
+ // 2. Let balancedTime be ! BalanceTime(hour, minute, second, millisecond,
+ // microsecond, nanosecond).
+ DateTimeRecordCommon balanced_time = BalanceTime(
+ isolate, hour, minute, second, millisecond, microsecond, nanosecond);
+ // 3. Let balancedDate be ! BalanceISODate(year, month, day +
+ // balancedTime.[[Days]]).
+ day += balanced_time.day;
+ BalanceISODate(isolate, &year, &month, &day);
+ // 4. Return the Record { [[Year]]: balancedDate.[[Year]], [[Month]]:
+ // balancedDate.[[Month]], [[Day]]: balancedDate.[[Day]], [[Hour]]:
+ // balancedTime.[[Hour]], [[Minute]]: balancedTime.[[Minute]], [[Second]]:
+ // balancedTime.[[Second]], [[Millisecond]]: balancedTime.[[Millisecond]],
+ // [[Microsecond]]: balancedTime.[[Microsecond]], [[Nanosecond]]:
+ // balancedTime.[[Nanosecond]] }.
+ return {year,
+ month,
+ day,
+ balanced_time.hour,
+ balanced_time.minute,
+ balanced_time.second,
+ balanced_time.millisecond,
+ balanced_time.microsecond,
+ balanced_time.nanosecond};
+}
+
+} // namespace
+
+namespace temporal {
+MaybeHandle<JSTemporalPlainDateTime> BuiltinTimeZoneGetPlainDateTimeFor(
+ Isolate* isolate, Handle<JSReceiver> time_zone,
+ Handle<JSTemporalInstant> instant, Handle<JSReceiver> calendar,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let offsetNanoseconds be ? GetOffsetNanosecondsFor(timeZone, instant).
+ Maybe<int64_t> maybe_offset_nanoseconds =
+ GetOffsetNanosecondsFor(isolate, time_zone, instant, method_name);
+ MAYBE_RETURN(maybe_offset_nanoseconds, Handle<JSTemporalPlainDateTime>());
+ // 2. Let result be ! GetISOPartsFromEpoch(instant.[[Nanoseconds]]).
+ Maybe<DateTimeRecordCommon> maybe_result = GetISOPartsFromEpoch(
+ isolate, Handle<BigInt>(instant->nanoseconds(), isolate));
+ MAYBE_RETURN(maybe_result, Handle<JSTemporalPlainDateTime>());
+ int64_t offset_nanoseconds = maybe_offset_nanoseconds.FromJust();
+
+ // 3. Set result to ! BalanceISODateTime(result.[[Year]], result.[[Month]],
+ // result.[[Day]], result.[[Hour]], result.[[Minute]], result.[[Second]],
+ // result.[[Millisecond]], result.[[Microsecond]], result.[[Nanosecond]] +
+ // offsetNanoseconds).
+ DateTimeRecordCommon result = maybe_result.FromJust();
+ result = BalanceISODateTime(isolate, result.year, result.month, result.day,
+ result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond,
+ offset_nanoseconds + result.nanosecond);
+ // 4. Return ? CreateTemporalDateTime(result.[[Year]], result.[[Month]],
+ // result.[[Day]], result.[[Hour]], result.[[Minute]], result.[[Second]],
+ // result.[[Millisecond]], result.[[Microsecond]], result.[[Nanosecond]],
+ // calendar).
+ return temporal::CreateTemporalDateTime(
+ isolate, result.year, result.month, result.day, result.hour,
+ result.minute, result.second, result.millisecond, result.microsecond,
+ result.nanosecond, calendar);
+}
+
+} // namespace temporal
+
+namespace {
+// #sec-temporal-getpossibleinstantsfor
+MaybeHandle<FixedArray> GetPossibleInstantsFor(Isolate* isolate,
+ Handle<JSReceiver> time_zone,
+ Handle<Object> date_time) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let possibleInstants be ? Invoke(timeZone, "getPossibleInstantsFor", «
+ // dateTime »).
+ Handle<Object> function;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function,
+ Object::GetProperty(isolate, time_zone,
+ isolate->factory()->getPossibleInstantsFor_string()),
+ FixedArray);
+ if (!function->IsCallable()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kCalledNonCallable,
+ isolate->factory()->getPossibleInstantsFor_string()),
+ FixedArray);
+ }
+ Handle<Object> possible_instants;
+ {
+ Handle<Object> argv[] = {date_time};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, possible_instants,
+ Execution::Call(isolate, function, time_zone, arraysize(argv), argv),
+ FixedArray);
+ }
+
+ // Step 4-6 of GetPossibleInstantsFor is implemented inside
+ // temporal_instant_fixed_array_from_iterable.
+ {
+ Handle<Object> argv[] = {possible_instants};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, possible_instants,
+ Execution::CallBuiltin(
+ isolate, isolate->temporal_instant_fixed_array_from_iterable(),
+ possible_instants, arraysize(argv), argv),
+ FixedArray);
+ }
+ DCHECK(possible_instants->IsFixedArray());
+ // 7. Return list.
+ return Handle<FixedArray>::cast(possible_instants);
+}
+
+// #sec-temporal-disambiguatepossibleinstants
+MaybeHandle<JSTemporalInstant> DisambiguatePossibleInstants(
+ Isolate* isolate, Handle<FixedArray> possible_instants,
+ Handle<JSReceiver> time_zone, Handle<Object> date_time_obj,
+ Disambiguation disambiguation, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: dateTime has an [[InitializedTemporalDateTime]] internal slot.
+ DCHECK(date_time_obj->IsJSTemporalPlainDateTime());
+ Handle<JSTemporalPlainDateTime> date_time =
+ Handle<JSTemporalPlainDateTime>::cast(date_time_obj);
+
+ // 2. Let n be possibleInstants's length.
+ int32_t n = possible_instants->length();
+
+ // 3. If n = 1, then
+ if (n == 1) {
+ // a. Return possibleInstants[0].
+ Handle<Object> ret_obj = FixedArray::get(*possible_instants, 0, isolate);
+ DCHECK(ret_obj->IsJSTemporalInstant());
+ return Handle<JSTemporalInstant>::cast(ret_obj);
+ }
+ // 4. If n ≠ 0, then
+ if (n != 0) {
+ // a. If disambiguation is "earlier" or "compatible", then
+ if (disambiguation == Disambiguation::kEarlier ||
+ disambiguation == Disambiguation::kCompatible) {
+ // i. Return possibleInstants[0].
+ Handle<Object> ret_obj = FixedArray::get(*possible_instants, 0, isolate);
+ DCHECK(ret_obj->IsJSTemporalInstant());
+ return Handle<JSTemporalInstant>::cast(ret_obj);
+ }
+ // b. If disambiguation is "later", then
+ if (disambiguation == Disambiguation::kLater) {
+ // i. Return possibleInstants[n − 1].
+ Handle<Object> ret_obj =
+ FixedArray::get(*possible_instants, n - 1, isolate);
+ DCHECK(ret_obj->IsJSTemporalInstant());
+ return Handle<JSTemporalInstant>::cast(ret_obj);
+ }
+ // c. Assert: disambiguation is "reject".
+ DCHECK_EQ(disambiguation, Disambiguation::kReject);
+ // d. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ // 5. Assert: n = 0.
+ DCHECK_EQ(n, 0);
+ // 6. If disambiguation is "reject", then
+ if (disambiguation == Disambiguation::kReject) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ // 7. Let epochNanoseconds be ! GetEpochFromISOParts(dateTime.[[ISOYear]],
+ // dateTime.[[ISOMonth]], dateTime.[[ISODay]], dateTime.[[ISOHour]],
+ // dateTime.[[ISOMinute]], dateTime.[[ISOSecond]],
+ // dateTime.[[ISOMillisecond]], dateTime.[[ISOMicrosecond]],
+ // dateTime.[[ISONanosecond]]).
+ Handle<BigInt> epoch_nanoseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, epoch_nanoseconds,
+ GetEpochFromISOParts(
+ isolate, date_time->iso_year(), date_time->iso_month(),
+ date_time->iso_day(), date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond()),
+ JSTemporalInstant);
+
+ // 8. Let dayBefore be ! CreateTemporalInstant(epochNanoseconds − 8.64 ×
+ // 10^13).
+ Handle<BigInt> one_day_in_ns = BigInt::FromUint64(isolate, 86400000000000ULL);
+ Handle<BigInt> day_before_ns;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, day_before_ns,
+ BigInt::Subtract(isolate, epoch_nanoseconds, one_day_in_ns),
+ JSTemporalInstant);
+ Handle<JSTemporalInstant> day_before;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, day_before,
+ temporal::CreateTemporalInstant(isolate, day_before_ns),
+ JSTemporalInstant);
+ // 9. Let dayAfter be ! CreateTemporalInstant(epochNanoseconds + 8.64 ×
+ // 10^13).
+ Handle<BigInt> day_after_ns;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, day_after_ns,
+ BigInt::Add(isolate, epoch_nanoseconds, one_day_in_ns),
+ JSTemporalInstant);
+ Handle<JSTemporalInstant> day_after;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, day_after,
+ temporal::CreateTemporalInstant(isolate, day_after_ns),
+ JSTemporalInstant);
+ // 10. Let offsetBefore be ? GetOffsetNanosecondsFor(timeZone, dayBefore).
+ Maybe<int64_t> maybe_offset_before =
+ GetOffsetNanosecondsFor(isolate, time_zone, day_before, method_name);
+ MAYBE_RETURN(maybe_offset_before, Handle<JSTemporalInstant>());
+ // 11. Let offsetAfter be ? GetOffsetNanosecondsFor(timeZone, dayAfter).
+ Maybe<int64_t> maybe_offset_after =
+ GetOffsetNanosecondsFor(isolate, time_zone, day_after, method_name);
+ MAYBE_RETURN(maybe_offset_after, Handle<JSTemporalInstant>());
+
+ // 12. Let nanoseconds be offsetAfter − offsetBefore.
+ int64_t nanoseconds =
+ maybe_offset_after.FromJust() - maybe_offset_before.FromJust();
+
+ // 13. If disambiguation is "earlier", then
+ if (disambiguation == Disambiguation::kEarlier) {
+ // a. Let earlier be ? AddDateTime(dateTime.[[ISOYear]],
+ // dateTime.[[ISOMonth]], dateTime.[[ISODay]], dateTime.[[ISOHour]],
+ // dateTime.[[ISOMinute]], dateTime.[[ISOSecond]],
+ // dateTime.[[ISOMillisecond]],
+ // dateTime.[[ISOMicrosecond]], dateTime.[[ISONanosecond]],
+ // dateTime.[[Calendar]], 0, 0, 0, 0, 0, 0, 0, 0, 0, −nanoseconds,
+ // undefined).
+ Maybe<DateTimeRecordCommon> maybe_earlier = AddDateTime(
+ isolate, date_time->iso_year(), date_time->iso_month(),
+ date_time->iso_day(), date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond(),
+ handle(date_time->calendar(), isolate),
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, -nanoseconds},
+ isolate->factory()->undefined_value());
+ MAYBE_RETURN(maybe_earlier, Handle<JSTemporalInstant>());
+ DateTimeRecordCommon earlier = maybe_earlier.FromJust();
+
+ // See https://github.com/tc39/proposal-temporal/issues/1816
+ // b. Let earlierDateTime be ? CreateTemporalDateTime(earlier.[[Year]],
+ // earlier.[[Month]], earlier.[[Day]], earlier.[[Hour]], earlier.[[Minute]],
+ // earlier.[[Second]], earlier.[[Millisecond]], earlier.[[Microsecond]],
+ // earlier.[[Nanosecond]], dateTime.[[Calendar]]).
+ Handle<JSTemporalPlainDateTime> earlier_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, earlier_date_time,
+ temporal::CreateTemporalDateTime(
+ isolate, earlier.year, earlier.month, earlier.day, earlier.hour,
+ earlier.minute, earlier.second, earlier.millisecond,
+ earlier.microsecond, earlier.nanosecond,
+ handle(date_time->calendar(), isolate)),
+ JSTemporalInstant);
+
+ // c. Set possibleInstants to ? GetPossibleInstantsFor(timeZone,
+ // earlierDateTime).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, possible_instants,
+ GetPossibleInstantsFor(isolate, time_zone, earlier_date_time),
+ JSTemporalInstant);
+
+ // d. If possibleInstants is empty, throw a RangeError exception.
+ if (possible_instants->length() == 0) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ // e. Return possibleInstants[0].
+ Handle<Object> ret_obj = FixedArray::get(*possible_instants, 0, isolate);
+ DCHECK(ret_obj->IsJSTemporalInstant());
+ return Handle<JSTemporalInstant>::cast(ret_obj);
+ }
+ // 14. Assert: disambiguation is "compatible" or "later".
+ DCHECK(disambiguation == Disambiguation::kCompatible ||
+ disambiguation == Disambiguation::kLater);
+ // 15. Let later be ? AddDateTime(dateTime.[[ISOYear]], dateTime.[[ISOMonth]],
+ // dateTime.[[ISODay]], dateTime.[[ISOHour]], dateTime.[[ISOMinute]],
+ // dateTime.[[ISOSecond]], dateTime.[[ISOMillisecond]],
+ // dateTime.[[ISOMicrosecond]], dateTime.[[ISONanosecond]],
+ // dateTime.[[Calendar]], 0, 0, 0, 0, 0, 0, 0, 0, 0, nanoseconds, undefined).
+ Maybe<DateTimeRecordCommon> maybe_later = AddDateTime(
+ isolate, date_time->iso_year(), date_time->iso_month(),
+ date_time->iso_day(), date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond(),
+ handle(date_time->calendar(), isolate),
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, nanoseconds},
+ isolate->factory()->undefined_value());
+ MAYBE_RETURN(maybe_later, Handle<JSTemporalInstant>());
+ DateTimeRecordCommon later = maybe_later.FromJust();
+
+ // See https://github.com/tc39/proposal-temporal/issues/1816
+ // 16. Let laterDateTime be ? CreateTemporalDateTime(later.[[Year]],
+ // later.[[Month]], later.[[Day]], later.[[Hour]], later.[[Minute]],
+ // later.[[Second]], later.[[Millisecond]], later.[[Microsecond]],
+ // later.[[Nanosecond]], dateTime.[[Calendar]]).
+
+ Handle<JSTemporalPlainDateTime> later_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, later_date_time,
+ temporal::CreateTemporalDateTime(
+ isolate, later.year, later.month, later.day, later.hour, later.minute,
+ later.second, later.millisecond, later.microsecond, later.nanosecond,
+ handle(date_time->calendar(), isolate)),
+ JSTemporalInstant);
+ // 17. Set possibleInstants to ? GetPossibleInstantsFor(timeZone,
+ // laterDateTime).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, possible_instants,
+ GetPossibleInstantsFor(isolate, time_zone, later_date_time),
+ JSTemporalInstant);
+ // 18. Set n to possibleInstants's length.
+ n = possible_instants->length();
+ // 19. If n = 0, throw a RangeError exception.
+ if (n == 0) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ // 20. Return possibleInstants[n − 1].
+ Handle<Object> ret_obj = FixedArray::get(*possible_instants, n - 1, isolate);
+ DCHECK(ret_obj->IsJSTemporalInstant());
+ return Handle<JSTemporalInstant>::cast(ret_obj);
+}
+
+// #sec-temporal-gettemporalcalendarwithisodefault
+MaybeHandle<JSReceiver> GetTemporalCalendarWithISODefault(
+ Isolate* isolate, Handle<JSReceiver> item, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ // 1. If item has an [[InitializedTemporalDate]],
+ // [[InitializedTemporalDateTime]], [[InitializedTemporalMonthDay]],
+ // [[InitializedTemporalTime]], [[InitializedTemporalYearMonth]], or
+ // [[InitializedTemporalZonedDateTime]] internal slot, then a. Return
+ // item.[[Calendar]].
+ if (item->IsJSTemporalPlainDate()) {
+ return handle(Handle<JSTemporalPlainDate>::cast(item)->calendar(), isolate);
+ }
+ if (item->IsJSTemporalPlainDateTime()) {
+ return handle(Handle<JSTemporalPlainDateTime>::cast(item)->calendar(),
+ isolate);
+ }
+ if (item->IsJSTemporalPlainMonthDay()) {
+ return handle(Handle<JSTemporalPlainMonthDay>::cast(item)->calendar(),
+ isolate);
+ }
+ if (item->IsJSTemporalPlainTime()) {
+ return handle(Handle<JSTemporalPlainTime>::cast(item)->calendar(), isolate);
+ }
+ if (item->IsJSTemporalPlainYearMonth()) {
+ return handle(Handle<JSTemporalPlainYearMonth>::cast(item)->calendar(),
+ isolate);
+ }
+ if (item->IsJSTemporalZonedDateTime()) {
+ return handle(Handle<JSTemporalZonedDateTime>::cast(item)->calendar(),
+ isolate);
+ }
+
+ // 2. Let calendar be ? Get(item, "calendar").
+ Handle<Object> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ JSReceiver::GetProperty(isolate, item, factory->calendar_string()),
+ JSReceiver);
+ // 3. Return ? ToTemporalCalendarWithISODefault(calendar).
+ return ToTemporalCalendarWithISODefault(isolate, calendar, method_name);
+}
+
+enum class RequiredFields { kNone, kTimeZone, kTimeZoneAndOffset, kDay };
+
+// The common part of PrepareTemporalFields and PreparePartialTemporalFields
+// #sec-temporal-preparetemporalfields
+// #sec-temporal-preparepartialtemporalfields
+V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> PrepareTemporalFieldsOrPartial(
+ Isolate* isolate, Handle<JSReceiver> fields, Handle<FixedArray> field_names,
+ RequiredFields required, bool partial) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ // 1. Assert: Type(fields) is Object.
+ // 2. Let result be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 3. For each value property of fieldNames, do
+ int length = field_names->length();
+ bool any = false;
+ for (int i = 0; i < length; i++) {
+ Handle<Object> property_obj = Handle<Object>(field_names->get(i), isolate);
+ Handle<String> property = Handle<String>::cast(property_obj);
+ // a. Let value be ? Get(fields, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, JSReceiver::GetProperty(isolate, fields, property),
+ JSObject);
+
+ // b. If value is undefined, then
+ if (value->IsUndefined()) {
+ // This part is only for PrepareTemporalFields
+ // Skip for the case of PreparePartialTemporalFields.
+ if (partial) continue;
+
+ // i. If requiredFields contains property, then
+ if ((required == RequiredFields::kDay &&
+ String::Equals(isolate, property, factory->day_string())) ||
+ ((required == RequiredFields::kTimeZone ||
+ required == RequiredFields::kTimeZoneAndOffset) &&
+ String::Equals(isolate, property, factory->timeZone_string())) ||
+ (required == RequiredFields::kTimeZoneAndOffset &&
+ String::Equals(isolate, property, factory->offset_string()))) {
+ // 1. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ JSObject);
+ }
+ // ii. Else,
+ // 1. If property is in the Property column of Table 13, then
+ // a. Set value to the corresponding Default value of the same row.
+ if (String::Equals(isolate, property, factory->hour_string()) ||
+ String::Equals(isolate, property, factory->minute_string()) ||
+ String::Equals(isolate, property, factory->second_string()) ||
+ String::Equals(isolate, property, factory->millisecond_string()) ||
+ String::Equals(isolate, property, factory->microsecond_string()) ||
+ String::Equals(isolate, property, factory->nanosecond_string())) {
+ value = Handle<Object>(Smi::zero(), isolate);
+ }
+ } else {
+ // For both PrepareTemporalFields and PreparePartialTemporalFields
+ any = partial;
+ // c. Else,
+ // i. If property is in the Property column of Table 13 and there is a
+ // Conversion value in the same row, then
+ // 1. Let Conversion represent the abstract operation named by the
+ // Conversion value of the same row.
+ // 2. Set value to ? Conversion(value).
+ if (String::Equals(isolate, property, factory->month_string()) ||
+ String::Equals(isolate, property, factory->day_string())) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ ToPositiveInteger(isolate, value), JSObject);
+ } else if (String::Equals(isolate, property, factory->year_string()) ||
+ String::Equals(isolate, property, factory->hour_string()) ||
+ String::Equals(isolate, property, factory->minute_string()) ||
+ String::Equals(isolate, property, factory->second_string()) ||
+ String::Equals(isolate, property,
+ factory->millisecond_string()) ||
+ String::Equals(isolate, property,
+ factory->microsecond_string()) ||
+ String::Equals(isolate, property,
+ factory->nanosecond_string()) ||
+ String::Equals(isolate, property, factory->eraYear_string())) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, ToIntegerThrowOnInfinity(isolate, value), JSObject);
+ } else if (String::Equals(isolate, property,
+ factory->monthCode_string()) ||
+ String::Equals(isolate, property, factory->offset_string()) ||
+ String::Equals(isolate, property, factory->era_string())) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Object::ToString(isolate, value), JSObject);
+ }
+ }
+
+ // d. Perform ! CreateDataPropertyOrThrow(result, property, value).
+ CHECK(JSReceiver::CreateDataProperty(isolate, result, property, value,
+ Just(kThrowOnError))
+ .FromJust());
+ }
+
+ // Only for PreparePartialTemporalFields
+ if (partial) {
+ // 5. If any is false, then
+ if (!any) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(), JSObject);
+ }
+ }
+ // 4. Return result.
+ return result;
+}
+
+// #sec-temporal-preparetemporalfields
+V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> PrepareTemporalFields(
+ Isolate* isolate, Handle<JSReceiver> fields, Handle<FixedArray> field_names,
+ RequiredFields required) {
+ TEMPORAL_ENTER_FUNC();
+
+ return PrepareTemporalFieldsOrPartial(isolate, fields, field_names, required,
+ false);
+}
+
+// Template for DateFromFields, YearMonthFromFields, and MonthDayFromFields
+template <typename T>
+MaybeHandle<T> FromFields(Isolate* isolate, Handle<JSReceiver> calendar,
+ Handle<JSReceiver> fields, Handle<Object> options,
+ Handle<String> property, InstanceType type) {
+ Handle<Object> function;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function, Object::GetProperty(isolate, calendar, property), T);
+ if (!function->IsCallable()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledNonCallable, property),
+ T);
+ }
+ Handle<Object> argv[] = {fields, options};
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, function, calendar, 2, argv),
+ T);
+ if ((!result->IsHeapObject()) ||
+ HeapObject::cast(*result).map().instance_type() != type) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(), T);
+ }
+ return Handle<T>::cast(result);
+}
+
+MaybeHandle<JSTemporalPlainDate> DateFromFields(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<JSReceiver> fields,
+ Handle<Object> options) {
+ return FromFields<JSTemporalPlainDate>(
+ isolate, calendar, fields, options,
+ isolate->factory()->dateFromFields_string(), JS_TEMPORAL_PLAIN_DATE_TYPE);
+}
+
+// IMPL_FROM_FIELDS_ABSTRACT_OPERATION(Date, date, JS_TEMPORAL_PLAIN_DATE_TYPE)
+#undef IMPL_FROM_FIELDS_ABSTRACT_OPERATION
+// #sec-temporal-totemporaloverflow
+Maybe<ShowOverflow> ToTemporalOverflow(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method_name) {
+ return GetStringOption<ShowOverflow>(
+ isolate, options, "overflow", method_name, {"constrain", "reject"},
+ {ShowOverflow::kConstrain, ShowOverflow::kReject},
+ ShowOverflow::kConstrain);
+}
+
+// #sec-temporal-builtintimezonegetinstantfor
+MaybeHandle<JSTemporalInstant> BuiltinTimeZoneGetInstantFor(
+ Isolate* isolate, Handle<JSReceiver> time_zone,
+ Handle<JSTemporalPlainDateTime> date_time, Disambiguation disambiguation,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: dateTime has an [[InitializedTemporalDateTime]] internal slot.
+ // 2. Let possibleInstants be ? GetPossibleInstantsFor(timeZone, dateTime).
+ Handle<FixedArray> possible_instants;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, possible_instants,
+ GetPossibleInstantsFor(isolate, time_zone, date_time), JSTemporalInstant);
+ // 3. Return ? DisambiguatePossibleInstants(possibleInstants, timeZone,
+ // dateTime, disambiguation).
+ return DisambiguatePossibleInstants(isolate, possible_instants, time_zone,
+ date_time, disambiguation, method_name);
+}
+
+} // namespace
+
+namespace temporal {
+// #sec-temporal-totemporalcalendar
+MaybeHandle<JSReceiver> ToTemporalCalendar(
+ Isolate* isolate, Handle<Object> temporal_calendar_like,
+ const char* method_name) {
+ Factory* factory = isolate->factory();
+ // 1.If Type(temporalCalendarLike) is Object, then
+ if (temporal_calendar_like->IsJSReceiver()) {
+ // a. If temporalCalendarLike has an [[InitializedTemporalDate]],
+ // [[InitializedTemporalDateTime]], [[InitializedTemporalMonthDay]],
+ // [[InitializedTemporalTime]], [[InitializedTemporalYearMonth]], or
+ // [[InitializedTemporalZonedDateTime]] internal slot, then i. Return
+ // temporalCalendarLike.[[Calendar]].
+
+#define EXTRACT_CALENDAR(T, obj) \
+ if (obj->IsJSTemporal##T()) { \
+ return handle(Handle<JSTemporal##T>::cast(obj)->calendar(), isolate); \
+ }
+
+ EXTRACT_CALENDAR(PlainDate, temporal_calendar_like)
+ EXTRACT_CALENDAR(PlainDateTime, temporal_calendar_like)
+ EXTRACT_CALENDAR(PlainMonthDay, temporal_calendar_like)
+ EXTRACT_CALENDAR(PlainTime, temporal_calendar_like)
+ EXTRACT_CALENDAR(PlainYearMonth, temporal_calendar_like)
+ EXTRACT_CALENDAR(ZonedDateTime, temporal_calendar_like)
+
+#undef EXTRACT_CALENDAR
+ Handle<JSReceiver> obj = Handle<JSReceiver>::cast(temporal_calendar_like);
+
+ // b. If ? HasProperty(temporalCalendarLike, "calendar") is false, return
+ // temporalCalendarLike.
+ Maybe<bool> maybe_has =
+ JSReceiver::HasProperty(isolate, obj, factory->calendar_string());
+
+ MAYBE_RETURN(maybe_has, Handle<JSReceiver>());
+ if (!maybe_has.FromJust()) {
+ return obj;
+ }
+ // c. Set temporalCalendarLike to ? Get(temporalCalendarLike, "calendar").
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_calendar_like,
+ JSReceiver::GetProperty(isolate, obj, factory->calendar_string()),
+ JSReceiver);
+ // d. If Type(temporalCalendarLike) is Object
+ if (temporal_calendar_like->IsJSReceiver()) {
+ obj = Handle<JSReceiver>::cast(temporal_calendar_like);
+ // and ? HasProperty(temporalCalendarLike, "calendar") is false,
+ maybe_has =
+ JSReceiver::HasProperty(isolate, obj, factory->calendar_string());
+ MAYBE_RETURN(maybe_has, Handle<JSReceiver>());
+ if (!maybe_has.FromJust()) {
+ // return temporalCalendarLike.
+ return obj;
+ }
+ }
+ }
+
+ // 2. Let identifier be ? ToString(temporalCalendarLike).
+ Handle<String> identifier;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ Object::ToString(isolate, temporal_calendar_like),
+ JSReceiver);
+ // 3. If ! IsBuiltinCalendar(identifier) is false, then
+ if (!IsBuiltinCalendar(isolate, identifier)) {
+ // a. Let identifier be ? ParseTemporalCalendarString(identifier).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ ParseTemporalCalendarString(isolate, identifier),
+ JSReceiver);
+ }
+ // 4. Return ? CreateTemporalCalendar(identifier).
+ return CreateTemporalCalendar(isolate, identifier);
+}
+
+} // namespace temporal
+
+namespace {
+// #sec-temporal-totemporalcalendarwithisodefault
+MaybeHandle<JSReceiver> ToTemporalCalendarWithISODefault(
+ Isolate* isolate, Handle<Object> temporal_calendar_like,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. If temporalCalendarLike is undefined, then
+ if (temporal_calendar_like->IsUndefined()) {
+ // a. Return ? GetISO8601Calendar().
+ return temporal::GetISO8601Calendar(isolate);
+ }
+ // 2. Return ? ToTemporalCalendar(temporalCalendarLike).
+ return temporal::ToTemporalCalendar(isolate, temporal_calendar_like,
+ method_name);
+}
+
+// #sec-temporal-totemporaldate
+MaybeHandle<JSTemporalPlainDate> ToTemporalDate(Isolate* isolate,
+ Handle<Object> item_obj,
+ Handle<JSReceiver> options,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ // 2. Assert: Type(options) is Object.
+ // 3. If Type(item) is Object, then
+ if (item_obj->IsJSReceiver()) {
+ Handle<JSReceiver> item = Handle<JSReceiver>::cast(item_obj);
+ // a. If item has an [[InitializedTemporalDate]] internal slot, then
+ // i. Return item.
+ if (item->IsJSTemporalPlainDate()) {
+ return Handle<JSTemporalPlainDate>::cast(item);
+ }
+ // b. If item has an [[InitializedTemporalZonedDateTime]] internal slot,
+ // then
+ if (item->IsJSTemporalZonedDateTime()) {
+ // i. Let instant be ! CreateTemporalInstant(item.[[Nanoseconds]]).
+ Handle<JSTemporalZonedDateTime> zoned_date_time =
+ Handle<JSTemporalZonedDateTime>::cast(item);
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instant,
+ temporal::CreateTemporalInstant(
+ isolate, Handle<BigInt>(zoned_date_time->nanoseconds(), isolate)),
+ JSTemporalPlainDate);
+ // ii. Let plainDateTime be ?
+ // BuiltinTimeZoneGetPlainDateTimeFor(item.[[TimeZone]],
+ // instant, item.[[Calendar]]).
+ Handle<JSTemporalPlainDateTime> plain_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, plain_date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(
+ isolate,
+ Handle<JSReceiver>(zoned_date_time->time_zone(), isolate),
+ instant, Handle<JSReceiver>(zoned_date_time->calendar(), isolate),
+ method_name),
+ JSTemporalPlainDate);
+ // iii. Return ! CreateTemporalDate(plainDateTime.[[ISOYear]],
+ // plainDateTime.[[ISOMonth]], plainDateTime.[[ISODay]],
+ // plainDateTime.[[Calendar]]).
+ return CreateTemporalDate(
+ isolate, plain_date_time->iso_year(), plain_date_time->iso_month(),
+ plain_date_time->iso_day(),
+ Handle<JSReceiver>(plain_date_time->calendar(), isolate));
+ }
+
+ // c. If item has an [[InitializedTemporalDateTime]] internal slot, then
+ // i. Return ! CreateTemporalDate(item.[[ISOYear]], item.[[ISOMonth]],
+ // item.[[ISODay]], item.[[Calendar]]).
+ if (item->IsJSTemporalPlainDateTime()) {
+ Handle<JSTemporalPlainDateTime> date_time =
+ Handle<JSTemporalPlainDateTime>::cast(item);
+ return CreateTemporalDate(isolate, date_time->iso_year(),
+ date_time->iso_month(), date_time->iso_day(),
+ handle(date_time->calendar(), isolate));
+ }
+
+ // d. Let calendar be ? GetTemporalCalendarWithISODefault(item).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ GetTemporalCalendarWithISODefault(isolate, item, method_name),
+ JSTemporalPlainDate);
+ // e. Let fieldNames be ? CalendarFields(calendar, « "day", "month",
+ // "monthCode", "year" »).
+ Handle<FixedArray> field_names = factory->NewFixedArray(4);
+ field_names->set(0, *(factory->day_string()));
+ field_names->set(1, *(factory->month_string()));
+ field_names->set(2, *(factory->monthCode_string()));
+ field_names->set(3, *(factory->year_string()));
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, field_names,
+ CalendarFields(isolate, calendar, field_names),
+ JSTemporalPlainDate);
+ // f. Let fields be ? PrepareTemporalFields(item,
+ // fieldNames, «»).
+ Handle<JSReceiver> fields;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, fields,
+ PrepareTemporalFields(isolate, item, field_names,
+ RequiredFields::kNone),
+ JSTemporalPlainDate);
+ // g. Return ? DateFromFields(calendar, fields, options).
+ return DateFromFields(isolate, calendar, fields, options);
+ }
+ // 4. Perform ? ToTemporalOverflow(options).
+ Maybe<ShowOverflow> maybe_overflow =
+ ToTemporalOverflow(isolate, options, method_name);
+ MAYBE_RETURN(maybe_overflow, Handle<JSTemporalPlainDate>());
+
+ // 5. Let string be ? ToString(item).
+ Handle<String> string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, string,
+ Object::ToString(isolate, item_obj),
+ JSTemporalPlainDate);
+ // 6. Let result be ? ParseTemporalDateString(string).
+ Maybe<DateRecord> maybe_result = ParseTemporalDateString(isolate, string);
+ MAYBE_RETURN(maybe_result, MaybeHandle<JSTemporalPlainDate>());
+ DateRecord result = maybe_result.FromJust();
+
+ // 7. Assert: ! IsValidISODate(result.[[Year]], result.[[Month]],
+ // result.[[Day]]) is true.
+ DCHECK(IsValidISODate(isolate, result.year, result.month, result.day));
+ // 8. Let calendar be ? ToTemporalCalendarWithISODefault(result.[[Calendar]]).
+ Handle<Object> calendar_string;
+ if (result.calendar->length() == 0) {
+ calendar_string = factory->undefined_value();
+ } else {
+ calendar_string = result.calendar;
+ }
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_string, method_name),
+ JSTemporalPlainDate);
+ // 9. Return ? CreateTemporalDate(result.[[Year]], result.[[Month]],
+ // result.[[Day]], calendar).
+ return CreateTemporalDate(isolate, result.year, result.month, result.day,
+ calendar);
+}
+
+} // namespace
+
+namespace temporal {
+
+// #sec-temporal-totemporaltimezone
+MaybeHandle<JSReceiver> ToTemporalTimeZone(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ // 1. If Type(temporalTimeZoneLike) is Object, then
+ if (temporal_time_zone_like->IsJSReceiver()) {
+ // a. If temporalTimeZoneLike has an [[InitializedTemporalZonedDateTime]]
+ // internal slot, then
+ if (temporal_time_zone_like->IsJSTemporalZonedDateTime()) {
+ // i. Return temporalTimeZoneLike.[[TimeZone]].
+ Handle<JSTemporalZonedDateTime> zoned_date_time =
+ Handle<JSTemporalZonedDateTime>::cast(temporal_time_zone_like);
+ return handle(zoned_date_time->time_zone(), isolate);
+ }
+ Handle<JSReceiver> obj = Handle<JSReceiver>::cast(temporal_time_zone_like);
+ // b. If ? HasProperty(temporalTimeZoneLike, "timeZone") is false,
+ Maybe<bool> maybe_has =
+ JSReceiver::HasProperty(isolate, obj, factory->timeZone_string());
+ MAYBE_RETURN(maybe_has, Handle<JSReceiver>());
+ if (!maybe_has.FromJust()) {
+ // return temporalTimeZoneLike.
+ return obj;
+ }
+ // c. Set temporalTimeZoneLike to ?
+ // Get(temporalTimeZoneLike, "timeZone").
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_time_zone_like,
+ JSReceiver::GetProperty(isolate, obj, factory->timeZone_string()),
+ JSReceiver);
+ // d. If Type(temporalTimeZoneLike)
+ if (temporal_time_zone_like->IsJSReceiver()) {
+ // is Object and ? HasProperty(temporalTimeZoneLike, "timeZone") is false,
+ obj = Handle<JSReceiver>::cast(temporal_time_zone_like);
+ maybe_has =
+ JSReceiver::HasProperty(isolate, obj, factory->timeZone_string());
+ MAYBE_RETURN(maybe_has, Handle<JSReceiver>());
+ if (!maybe_has.FromJust()) {
+ // return temporalTimeZoneLike.
+ return obj;
+ }
+ }
+ }
+ Handle<String> identifier;
+ // 2. Let identifier be ? ToString(temporalTimeZoneLike).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ Object::ToString(isolate, temporal_time_zone_like),
+ JSReceiver);
+ // 3. Let result be ? ParseTemporalTimeZone(identifier).
+ Handle<String> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, ParseTemporalTimeZone(isolate, identifier), JSReceiver);
+
+ // 4. Return ? CreateTemporalTimeZone(result).
+ return temporal::CreateTemporalTimeZone(isolate, result);
+}
+
+} // namespace temporal
+
+namespace {
+// #sec-temporal-systemdatetime
+MaybeHandle<JSTemporalPlainDateTime> SystemDateTime(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like,
+ Handle<Object> calendar_like, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Handle<JSReceiver> time_zone;
+ // 1. 1. If temporalTimeZoneLike is undefined, then
+ if (temporal_time_zone_like->IsUndefined()) {
+ // a. Let timeZone be ! SystemTimeZone().
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, time_zone, SystemTimeZone(isolate),
+ JSTemporalPlainDateTime);
+ } else {
+ // 2. Else,
+ // a. Let timeZone be ? ToTemporalTimeZone(temporalTimeZoneLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, time_zone,
+ temporal::ToTemporalTimeZone(isolate, temporal_time_zone_like,
+ method_name),
+ JSTemporalPlainDateTime);
+ }
+ Handle<JSReceiver> calendar;
+ // 3. Let calendar be ? ToTemporalCalendar(calendarLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ temporal::ToTemporalCalendar(isolate, calendar_like, method_name),
+ JSTemporalPlainDateTime);
+ // 4. Let instant be ! SystemInstant().
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, instant, SystemInstant(isolate),
+ JSTemporalPlainDateTime);
+ // 5. Return ? BuiltinTimeZoneGetPlainDateTimeFor(timeZone, instant,
+ // calendar).
+ return temporal::BuiltinTimeZoneGetPlainDateTimeFor(
+ isolate, time_zone, instant, calendar, method_name);
+}
+
+MaybeHandle<JSTemporalZonedDateTime> SystemZonedDateTime(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like,
+ Handle<Object> calendar_like, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Handle<JSReceiver> time_zone;
+ // 1. 1. If temporalTimeZoneLike is undefined, then
+ if (temporal_time_zone_like->IsUndefined()) {
+ // a. Let timeZone be ! SystemTimeZone().
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, time_zone, SystemTimeZone(isolate),
+ JSTemporalZonedDateTime);
+ } else {
+ // 2. Else,
+ // a. Let timeZone be ? ToTemporalTimeZone(temporalTimeZoneLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, time_zone,
+ temporal::ToTemporalTimeZone(isolate, temporal_time_zone_like,
+ method_name),
+ JSTemporalZonedDateTime);
+ }
+ Handle<JSReceiver> calendar;
+ // 3. Let calendar be ? ToTemporalCalendar(calendarLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ temporal::ToTemporalCalendar(isolate, calendar_like, method_name),
+ JSTemporalZonedDateTime);
+ // 4. Let ns be ! SystemUTCEpochNanoseconds().
+ Handle<BigInt> ns;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, ns, SystemUTCEpochNanoseconds(isolate),
+ JSTemporalZonedDateTime);
+ // Return ? CreateTemporalZonedDateTime(ns, timeZone, calendar).
+ return CreateTemporalZonedDateTime(isolate, ns, time_zone, calendar);
+}
+
+#define COMPARE_RESULT_TO_SIGN(r) \
+ ((r) == ComparisonResult::kEqual \
+ ? 0 \
+ : ((r) == ComparisonResult::kLessThan ? -1 : 1))
+
+// #sec-temporal-formattimezoneoffsetstring
+MaybeHandle<String> FormatTimeZoneOffsetString(Isolate* isolate,
+ int64_t offset_nanoseconds) {
+ IncrementalStringBuilder builder(isolate);
+ // 1. Assert: offsetNanoseconds is an integer.
+ // 2. If offsetNanoseconds ≥ 0, let sign be "+"; otherwise, let sign be "-".
+ builder.AppendCString((offset_nanoseconds >= 0) ? "+" : "-");
+ // 3. Let offsetNanoseconds be abs(offsetNanoseconds).
+ offset_nanoseconds = std::abs(offset_nanoseconds);
+ // 3. Let nanoseconds be offsetNanoseconds modulo 10^9.
+ int64_t nanoseconds = offset_nanoseconds % 1000000000;
+ // 4. Let seconds be floor(offsetNanoseconds / 10^9) modulo 60.
+ int64_t seconds = (offset_nanoseconds / 1000000000) % 60;
+ // 5. Let minutes be floor(offsetNanoseconds / (6 × 10^10)) modulo 60.
+ int64_t minutes = (offset_nanoseconds / 60000000000) % 60;
+ // 6. Let hours be floor(offsetNanoseconds / (3.6 × 10^12)).
+ int64_t hours = offset_nanoseconds / 3600000000000;
+ // 7. Let h be hours, formatted as a two-digit decimal number, padded to the
+ // left with a zero if necessary.
+ if (hours < 10) {
+ builder.AppendCStringLiteral("0");
+ }
+ builder.AppendInt(static_cast<int32_t>(hours));
+ // 8. Let m be minutes, formatted as a two-digit decimal number, padded to the
+ // left with a zero if necessary.
+ builder.AppendCString((minutes < 10) ? ":0" : ":");
+ builder.AppendInt(static_cast<int>(minutes));
+ // 9. Let s be seconds, formatted as a two-digit decimal number, padded to the
+ // left with a zero if necessary.
+ // 10. If nanoseconds ≠ 0, then
+ if (nanoseconds != 0) {
+ builder.AppendCString((seconds < 10) ? ":0" : ":");
+ builder.AppendInt(static_cast<int>(seconds));
+ builder.AppendCStringLiteral(".");
+ // a. Let fraction be nanoseconds, formatted as a nine-digit decimal number,
+ // padded to the left with zeroes if necessary.
+ // b. Set fraction to the longest possible substring of fraction starting at
+ // position 0 and not ending with the code unit 0x0030 (DIGIT ZERO).
+ int64_t divisor = 100000000;
+ do {
+ builder.AppendInt(static_cast<int>(nanoseconds / divisor));
+ nanoseconds %= divisor;
+ divisor /= 10;
+ } while (nanoseconds > 0);
+ // c. Let post be the string-concatenation of the code unit 0x003A (COLON),
+ // s, the code unit 0x002E (FULL STOP), and fraction.
+ // 11. Else if seconds ≠ 0, then
+ } else if (seconds != 0) {
+ // a. Let post be the string-concatenation of the code unit 0x003A (COLON)
+ // and s.
+ builder.AppendCString((seconds < 10) ? ":0" : ":");
+ builder.AppendInt(static_cast<int>(seconds));
+ }
+ // 12. Return the string-concatenation of sign, h, the code unit 0x003A
+ // (COLON), m, and post.
+ return builder.Finish();
+}
+
+// #sec-temporal-builtintimezonegetoffsetstringfor
+MaybeHandle<String> BuiltinTimeZoneGetOffsetStringFor(
+ Isolate* isolate, Handle<JSReceiver> time_zone,
+ Handle<JSTemporalInstant> instant, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let offsetNanoseconds be ? GetOffsetNanosecondsFor(timeZone, instant).
+ Maybe<int64_t> maybe_offset_nanoseconds =
+ GetOffsetNanosecondsFor(isolate, time_zone, instant, method_name);
+ MAYBE_RETURN(maybe_offset_nanoseconds, Handle<String>());
+ int64_t offset_nanoseconds = maybe_offset_nanoseconds.FromJust();
+
+ // 2. Return ! FormatTimeZoneOffsetString(offsetNanoseconds).
+ return FormatTimeZoneOffsetString(isolate, offset_nanoseconds);
+}
+
+// #sec-temporal-parseisodatetime
+Maybe<DateTimeRecord> ParseISODateTime(Isolate* isolate,
+ Handle<String> iso_string,
+ const ParsedISO8601Result& parsed) {
+ TEMPORAL_ENTER_FUNC();
+
+ DateTimeRecord result;
+ // 5. Set year to ! ToIntegerOrInfinity(year).
+ result.year = parsed.date_year;
+ // 6. If month is undefined, then
+ if (parsed.date_month_is_undefined()) {
+ // a. Set month to 1.
+ result.month = 1;
+ // 7. Else,
+ } else {
+ // a. Set month to ! ToIntegerOrInfinity(month).
+ result.month = parsed.date_month;
+ }
+
+ // 8. If day is undefined, then
+ if (parsed.date_day_is_undefined()) {
+ // a. Set day to 1.
+ result.day = 1;
+ // 9. Else,
+ } else {
+ // a. Set day to ! ToIntegerOrInfinity(day).
+ result.day = parsed.date_day;
+ }
+ // 10. Set hour to ! ToIntegerOrInfinity(hour).
+ result.hour = parsed.time_hour_is_undefined() ? 0 : parsed.time_hour;
+ // 11. Set minute to ! ToIntegerOrInfinity(minute).
+ result.minute = parsed.time_minute_is_undefined() ? 0 : parsed.time_minute;
+ // 12. Set second to ! ToIntegerOrInfinity(second).
+ result.second = parsed.time_second_is_undefined() ? 0 : parsed.time_second;
+ // 13. If second is 60, then
+ if (result.second == 60) {
+ // a. Set second to 59.
+ result.second = 59;
+ }
+ // 14. If fraction is not undefined, then
+ if (!parsed.time_nanosecond_is_undefined()) {
+ // a. Set fraction to the string-concatenation of the previous value of
+ // fraction and the string "000000000".
+ // b. Let millisecond be the String value equal to the substring of fraction
+ // from 0 to 3. c. Set millisecond to ! ToIntegerOrInfinity(millisecond).
+ result.millisecond = parsed.time_nanosecond / 1000000;
+ // d. Let microsecond be the String value equal to the substring of fraction
+ // from 3 to 6. e. Set microsecond to ! ToIntegerOrInfinity(microsecond).
+ result.microsecond = (parsed.time_nanosecond / 1000) % 1000;
+ // f. Let nanosecond be the String value equal to the substring of fraction
+ // from 6 to 9. g. Set nanosecond to ! ToIntegerOrInfinity(nanosecond).
+ result.nanosecond = (parsed.time_nanosecond % 1000);
+ // 15. Else,
+ } else {
+ // a. Let millisecond be 0.
+ result.millisecond = 0;
+ // b. Let microsecond be 0.
+ result.microsecond = 0;
+ // c. Let nanosecond be 0.
+ result.nanosecond = 0;
+ }
+ // 16. If ! IsValidISODate(year, month, day) is false, throw a RangeError
+ // exception.
+ if (!IsValidISODate(isolate, result.year, result.month, result.day)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<DateTimeRecord>());
+ }
+ // 17. If ! IsValidTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond) is false, throw a RangeError exception.
+ if (!IsValidTime(isolate, result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond, result.nanosecond)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<DateTimeRecord>());
+ }
+ // 18. Return the Record { [[Year]]: year, [[Month]]: month, [[Day]]: day,
+ // [[Hour]]: hour, [[Minute]]: minute, [[Second]]: second, [[Millisecond]]:
+ // millisecond, [[Microsecond]]: microsecond, [[Nanosecond]]: nanosecond,
+ // [[Calendar]]: calendar }.
+ if (parsed.calendar_name_length == 0) {
+ result.calendar = isolate->factory()->empty_string();
+ } else {
+ result.calendar = isolate->factory()->NewSubString(
+ iso_string, parsed.calendar_name_start,
+ parsed.calendar_name_start + parsed.calendar_name_length);
+ }
+ return Just(result);
+}
+
+// #sec-temporal-parsetemporaldatestring
+Maybe<DateRecord> ParseTemporalDateString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: Type(isoString) is String.
+ // 2. If isoString does not satisfy the syntax of a TemporalDateString
+ // (see 13.33), then
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTemporalDateString(isolate, iso_string);
+ if (maybe_parsed.IsNothing()) {
+ // a. Throw a *RangeError* exception.
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<DateRecord>());
+ }
+ MAYBE_RETURN(maybe_parsed, Nothing<DateRecord>());
+
+ ParsedISO8601Result parsed = maybe_parsed.FromJust();
+ // 3. If _isoString_ contains a |UTCDesignator|, then
+ if (parsed.utc_designator) {
+ // a. Throw a *RangeError* exception.
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<DateRecord>());
+ }
+ // 3. Let result be ? ParseISODateTime(isoString).
+ Maybe<DateTimeRecord> maybe_result =
+ ParseISODateTime(isolate, iso_string, parsed);
+
+ MAYBE_RETURN(maybe_result, Nothing<DateRecord>());
+ DateTimeRecord result = maybe_result.FromJust();
+ // 4. Return the Record { [[Year]]: result.[[Year]], [[Month]]:
+ // result.[[Month]], [[Day]]: result.[[Day]], [[Calendar]]:
+ // result.[[Calendar]] }.
+ DateRecord ret = {result.year, result.month, result.day, result.calendar};
+ return Just(ret);
+}
+
+// #sec-temporal-parsetemporaltimezonestring
+Maybe<TimeZoneRecord> ParseTemporalTimeZoneString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(isoString) is String.
+ // 2. If isoString does not satisfy the syntax of a TemporalTimeZoneString
+ // (see 13.33), then
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTemporalTimeZoneString(isolate, iso_string);
+ MAYBE_RETURN(maybe_parsed, Nothing<TimeZoneRecord>());
+ if (maybe_parsed.IsNothing()) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<TimeZoneRecord>());
+ }
+ ParsedISO8601Result parsed = maybe_parsed.FromJust();
+ // 3. Let z, sign, hours, minutes, seconds, fraction and name be the parts of
+ // isoString produced respectively by the UTCDesignator,
+ // TimeZoneUTCOffsetSign, TimeZoneUTCOffsetHour, TimeZoneUTCOffsetMinute,
+ // TimeZoneUTCOffsetSecond, TimeZoneUTCOffsetFraction, and TimeZoneIANAName
+ // productions, or undefined if not present.
+ // 4. If z is not undefined, then
+ if (parsed.utc_designator) {
+ // a. Return the Record { [[Z]]: true, [[OffsetString]]: undefined,
+ // [[Name]]: name }.
+ if (parsed.tzi_name_length > 0) {
+ Handle<String> name = isolate->factory()->NewSubString(
+ iso_string, parsed.tzi_name_start,
+ parsed.tzi_name_start + parsed.tzi_name_length);
+ TimeZoneRecord ret({true, isolate->factory()->empty_string(), name});
+ return Just(ret);
+ }
+ TimeZoneRecord ret({true, isolate->factory()->empty_string(),
+ isolate->factory()->empty_string()});
+ return Just(ret);
+ }
+
+ // 5. If hours is undefined, then
+ // a. Let offsetString be undefined.
+ // 6. Else,
+ Handle<String> offset_string;
+ bool offset_string_is_defined = false;
+ if (!parsed.tzuo_hour_is_undefined()) {
+ // a. Assert: sign is not undefined.
+ DCHECK(!parsed.tzuo_sign_is_undefined());
+ // b. Set hours to ! ToIntegerOrInfinity(hours).
+ int32_t hours = parsed.tzuo_hour;
+ // c. If sign is the code unit 0x002D (HYPHEN-MINUS) or the code unit 0x2212
+ // (MINUS SIGN), then i. Set sign to −1. d. Else, i. Set sign to 1.
+ int32_t sign = parsed.tzuo_sign;
+ // e. Set minutes to ! ToIntegerOrInfinity(minutes).
+ int32_t minutes =
+ parsed.tzuo_minute_is_undefined() ? 0 : parsed.tzuo_minute;
+ // f. Set seconds to ! ToIntegerOrInfinity(seconds).
+ int32_t seconds =
+ parsed.tzuo_second_is_undefined() ? 0 : parsed.tzuo_second;
+ // g. If fraction is not undefined, then
+ int32_t nanoseconds;
+ if (!parsed.tzuo_nanosecond_is_undefined()) {
+ // i. Set fraction to the string-concatenation of the previous value of
+ // fraction and the string "000000000".
+ // ii. Let nanoseconds be the String value equal to the substring of
+ // fraction from 0 to 9. iii. Set nanoseconds to !
+ // ToIntegerOrInfinity(nanoseconds).
+ nanoseconds = parsed.tzuo_nanosecond;
+ // h. Else,
+ } else {
+ // i. Let nanoseconds be 0.
+ nanoseconds = 0;
+ }
+ // i. Let offsetNanoseconds be sign × (((hours × 60 + minutes) × 60 +
+ // seconds) × 10^9 + nanoseconds).
+ int64_t offset_nanoseconds =
+ sign *
+ (((hours * 60 + minutes) * 60 + seconds) * 1000000000L + nanoseconds);
+ // j. Let offsetString be ! FormatTimeZoneOffsetString(offsetNanoseconds).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, offset_string,
+ FormatTimeZoneOffsetString(isolate, offset_nanoseconds),
+ Nothing<TimeZoneRecord>());
+ offset_string_is_defined = true;
+ }
+ // 7. If name is not undefined, then
+ Handle<String> name;
+ if (parsed.tzi_name_length > 0) {
+ name = isolate->factory()->NewSubString(
+ iso_string, parsed.tzi_name_start,
+ parsed.tzi_name_start + parsed.tzi_name_length);
+
+ // a. If ! IsValidTimeZoneName(name) is false, throw a RangeError exception.
+ if (!IsValidTimeZoneName(isolate, name)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<TimeZoneRecord>());
+ }
+ // b. Set name to ! CanonicalizeTimeZoneName(name).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
+ CanonicalizeTimeZoneName(isolate, name),
+ Nothing<TimeZoneRecord>());
+ // 8. Return the Record { [[Z]]: false, [[OffsetString]]: offsetString,
+ // [[Name]]: name }.
+ TimeZoneRecord ret({false,
+ offset_string_is_defined
+ ? offset_string
+ : isolate->factory()->empty_string(),
+ name});
+ return Just(ret);
+ }
+ // 8. Return the Record { [[Z]]: false, [[OffsetString]]: offsetString,
+ // [[Name]]: name }.
+ TimeZoneRecord ret({false,
+ offset_string_is_defined
+ ? offset_string
+ : isolate->factory()->empty_string(),
+ isolate->factory()->empty_string()});
+ return Just(ret);
+}
+
+// #sec-temporal-parsetemporaltimezone
+MaybeHandle<String> ParseTemporalTimeZone(Isolate* isolate,
+ Handle<String> string) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 2. Let result be ? ParseTemporalTimeZoneString(string).
+ Maybe<TimeZoneRecord> maybe_result =
+ ParseTemporalTimeZoneString(isolate, string);
+ MAYBE_RETURN(maybe_result, Handle<String>());
+ TimeZoneRecord result = maybe_result.FromJust();
+
+ // 3. If result.[[Name]] is not undefined, return result.[[Name]].
+ if (result.name->length() > 0) {
+ return result.name;
+ }
+
+ // 4. If result.[[Z]] is true, return "UTC".
+ if (result.z) {
+ return isolate->factory()->UTC_string();
+ }
+
+ // 5. Return result.[[OffsetString]].
+ return result.offset_string;
+}
+
+Maybe<int64_t> ParseTimeZoneOffsetString(Isolate* isolate,
+ Handle<String> iso_string,
+ bool throwIfNotSatisfy) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(offsetString) is String.
+ // 2. If offsetString does not satisfy the syntax of a
+ // TimeZoneNumericUTCOffset (see 13.33), then
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, iso_string);
+ MAYBE_RETURN(maybe_parsed, Nothing<int64_t>());
+ if (throwIfNotSatisfy && maybe_parsed.IsNothing()) {
+ /* a. Throw a RangeError exception. */
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<int64_t>());
+ }
+ ParsedISO8601Result parsed = maybe_parsed.FromJust();
+ // 3. Let sign, hours, minutes, seconds, and fraction be the parts of
+ // offsetString produced respectively by the TimeZoneUTCOffsetSign,
+ // TimeZoneUTCOffsetHour, TimeZoneUTCOffsetMinute, TimeZoneUTCOffsetSecond,
+ // and TimeZoneUTCOffsetFraction productions, or undefined if not present.
+ // 4. If either hours or sign are undefined, throw a RangeError exception.
+ if (parsed.tzuo_hour_is_undefined() || parsed.tzuo_sign_is_undefined()) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<int64_t>());
+ }
+ // 5. If sign is the code unit 0x002D (HYPHEN-MINUS) or 0x2212 (MINUS SIGN),
+ // then a. Set sign to −1.
+ // 6. Else,
+ // a. Set sign to 1.
+ int64_t sign = parsed.tzuo_sign;
+
+ // 7. Set hours to ! ToIntegerOrInfinity(hours).
+ int64_t hours = parsed.tzuo_hour;
+ // 8. Set minutes to ! ToIntegerOrInfinity(minutes).
+ int64_t minutes = parsed.tzuo_minute_is_undefined() ? 0 : parsed.tzuo_minute;
+ // 9. Set seconds to ! ToIntegerOrInfinity(seconds).
+ int64_t seconds = parsed.tzuo_second_is_undefined() ? 0 : parsed.tzuo_second;
+ // 10. If fraction is not undefined, then
+ int64_t nanoseconds;
+ if (!parsed.tzuo_nanosecond_is_undefined()) {
+ // a. Set fraction to the string-concatenation of the previous value of
+ // fraction and the string "000000000".
+ // b. Let nanoseconds be the String value equal to the substring of fraction
+ // consisting of the code units with indices 0 (inclusive) through 9
+ // (exclusive). c. Set nanoseconds to ! ToIntegerOrInfinity(nanoseconds).
+ nanoseconds = parsed.tzuo_nanosecond;
+ // 11. Else,
+ } else {
+ // a. Let nanoseconds be 0.
+ nanoseconds = 0;
+ }
+ // 12. Return sign × (((hours × 60 + minutes) × 60 + seconds) × 10^9 +
+ // nanoseconds).
+ return Just(sign * (((hours * 60 + minutes) * 60 + seconds) * 1000000000 +
+ nanoseconds));
+}
+
+Maybe<bool> IsValidTimeZoneNumericUTCOffsetString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, iso_string);
+ return Just(maybe_parsed.IsJust());
+}
+
+// #sec-temporal-parsetemporalcalendarstring
+MaybeHandle<String> ParseTemporalCalendarString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(isoString) is String.
+ // 2. If isoString does not satisfy the syntax of a TemporalCalendarString
+ // (see 13.33), then a. Throw a RangeError exception.
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTemporalCalendarString(isolate, iso_string);
+ MAYBE_RETURN(maybe_parsed, Handle<String>());
+ if (maybe_parsed.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), String);
+ }
+ ParsedISO8601Result parsed = maybe_parsed.FromJust();
+ // 3. Let id be the part of isoString produced by the CalendarName production,
+ // or undefined if not present.
+ // 4. If id is undefined, then
+ if (parsed.calendar_name_length == 0) {
+ // a. Return "iso8601".
+ return isolate->factory()->iso8601_string();
+ }
+ Handle<String> id = isolate->factory()->NewSubString(
+ iso_string, parsed.calendar_name_start,
+ parsed.calendar_name_start + parsed.calendar_name_length);
+ // 5. If ! IsBuiltinCalendar(id) is false, then
+ if (!IsBuiltinCalendar(isolate, id)) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidCalendar, id), String);
+ }
+ // 6. Return id.
+ return id;
+}
+
+// #sec-temporal-calendarfields
+MaybeHandle<FixedArray> CalendarFields(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<FixedArray> field_names) {
+ // 1. Let fields be ? GetMethod(calendar, "fields").
+ Handle<Object> fields;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, fields,
+ Object::GetMethod(calendar, isolate->factory()->fields_string()),
+ FixedArray);
+ // 2. Let fieldsArray be ! CreateArrayFromList(fieldNames).
+ Handle<Object> fields_array =
+ isolate->factory()->NewJSArrayWithElements(field_names);
+ // 3. If fields is not undefined, then
+ if (!fields->IsUndefined()) {
+ // a. Set fieldsArray to ? Call(fields, calendar, « fieldsArray »).
+ Handle<Object> argv[] = {fields_array};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, fields_array,
+ Execution::Call(isolate, fields, calendar, 1, argv), FixedArray);
+ }
+ // 4. Return ? IterableToListOfType(fieldsArray, « String »).
+ Handle<Object> argv[] = {fields_array};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, fields_array,
+ Execution::CallBuiltin(isolate,
+ isolate->string_fixed_array_from_iterable(),
+ fields_array, 1, argv),
+ FixedArray);
+ DCHECK(fields_array->IsFixedArray());
+ return Handle<FixedArray>::cast(fields_array);
+}
+
+MaybeHandle<JSTemporalPlainDate> CalendarDateAdd(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<Object> date,
+ Handle<Object> duration,
+ Handle<Object> options) {
+ return CalendarDateAdd(isolate, calendar, date, duration, options,
+ isolate->factory()->undefined_value());
+}
+
+MaybeHandle<JSTemporalPlainDate> CalendarDateAdd(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<Object> date,
+ Handle<Object> duration, Handle<Object> options, Handle<Object> date_add) {
+ // 1. Assert: Type(calendar) is Object.
+ // 2. If dateAdd is not present, set dateAdd to ? GetMethod(calendar,
+ // "dateAdd").
+ if (date_add->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_add,
+ Object::GetMethod(calendar, isolate->factory()->dateAdd_string()),
+ JSTemporalPlainDate);
+ }
+ // 3. Let addedDate be ? Call(dateAdd, calendar, « date, duration, options »).
+ Handle<Object> argv[] = {date, duration, options};
+ Handle<Object> added_date;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, added_date,
+ Execution::Call(isolate, date_add, calendar, arraysize(argv), argv),
+ JSTemporalPlainDate);
+ // 4. Perform ? RequireInternalSlot(addedDate, [[InitializedTemporalDate]]).
+ if (!added_date->IsJSTemporalPlainDate()) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ JSTemporalPlainDate);
+ }
+ // 5. Return addedDate.
+ return Handle<JSTemporalPlainDate>::cast(added_date);
+}
+
+MaybeHandle<JSTemporalDuration> CalendarDateUntil(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<Object> one,
+ Handle<Object> two,
+ Handle<Object> options) {
+ return CalendarDateUntil(isolate, calendar, one, two, options,
+ isolate->factory()->undefined_value());
+}
+
+MaybeHandle<JSTemporalDuration> CalendarDateUntil(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<Object> one,
+ Handle<Object> two, Handle<Object> options, Handle<Object> date_until) {
+ // 1. Assert: Type(calendar) is Object.
+ // 2. If dateUntil is not present, set dateUntil to ? GetMethod(calendar,
+ // "dateUntil").
+ if (date_until->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_until,
+ Object::GetMethod(calendar, isolate->factory()->dateUntil_string()),
+ JSTemporalDuration);
+ }
+ // 3. Let duration be ? Call(dateUntil, calendar, « one, two, options »).
+ Handle<Object> argv[] = {one, two, options};
+ Handle<Object> duration;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, duration,
+ Execution::Call(isolate, date_until, calendar, arraysize(argv), argv),
+ JSTemporalDuration);
+ // 4. Perform ? RequireInternalSlot(duration,
+ // [[InitializedTemporalDuration]]).
+ if (!duration->IsJSTemporalDuration()) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ JSTemporalDuration);
+ }
+ // 5. Return duration.
+ return Handle<JSTemporalDuration>::cast(duration);
+}
+
+Maybe<int64_t> GetOffsetNanosecondsFor(Isolate* isolate,
+ Handle<JSReceiver> time_zone_obj,
+ Handle<Object> instant,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let getOffsetNanosecondsFor be ? GetMethod(timeZone,
+ // "getOffsetNanosecondsFor").
+ Handle<Object> get_offset_nanoseconds_for;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, get_offset_nanoseconds_for,
+ Object::GetMethod(time_zone_obj,
+ isolate->factory()->getOffsetNanosecondsFor_string()),
+ Nothing<int64_t>());
+ if (!get_offset_nanoseconds_for->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewTypeError(MessageTemplate::kCalledNonCallable,
+ isolate->factory()->getOffsetNanosecondsFor_string()),
+ Nothing<int64_t>());
+ }
+ Handle<Object> offset_nanoseconds_obj;
+ // 3. Let offsetNanoseconds be ? Call(getOffsetNanosecondsFor, timeZone, «
+ // instant »).
+ Handle<Object> argv[] = {instant};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, offset_nanoseconds_obj,
+ Execution::Call(isolate, get_offset_nanoseconds_for, time_zone_obj, 1,
+ argv),
+ Nothing<int64_t>());
+
+ // 4. If Type(offsetNanoseconds) is not Number, throw a TypeError exception.
+ if (!offset_nanoseconds_obj->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ Nothing<int64_t>());
+ }
+
+ // 5. If ! IsIntegralNumber(offsetNanoseconds) is false, throw a RangeError
+ // exception.
+ double offset_nanoseconds = offset_nanoseconds_obj->Number();
+ if ((offset_nanoseconds - std::floor(offset_nanoseconds) != 0)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<int64_t>());
+ }
+
+ // 6. Set offsetNanoseconds to ℝ(offsetNanoseconds).
+ int64_t offset_nanoseconds_int = static_cast<int64_t>(offset_nanoseconds);
+ // 7. If abs(offsetNanoseconds) > 86400 × 10^9, throw a RangeError exception.
+ if (std::abs(offset_nanoseconds_int) > 86400e9) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<int64_t>());
+ }
+ // 8. Return offsetNanoseconds.
+ return Just(offset_nanoseconds_int);
+}
+
+// #sec-temporal-topositiveinteger
+MaybeHandle<Object> ToPositiveInteger(Isolate* isolate,
+ Handle<Object> argument) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Let integer be ? ToInteger(argument).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, argument, ToIntegerThrowOnInfinity(isolate, argument), Object);
+ // 2. If integer ≤ 0, then
+ if (NumberToInt32(*argument) <= 0) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Object);
+ }
+ return argument;
+}
+
+} // namespace
+
+namespace temporal {
+MaybeHandle<Object> InvokeCalendarMethod(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<String> name,
+ Handle<JSReceiver> date_like) {
+ Handle<Object> result;
+ /* 1. Assert: Type(calendar) is Object. */
+ DCHECK(calendar->IsObject());
+ /* 2. Let result be ? Invoke(calendar, #name, « dateLike »). */
+ Handle<Object> function;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function, Object::GetProperty(isolate, calendar, name), Object);
+ if (!function->IsCallable()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledNonCallable, name),
+ Object);
+ }
+ Handle<Object> argv[] = {date_like};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, calendar, arraysize(argv), argv),
+ Object);
+ return result;
+}
+
+#define CALENDAR_ABSTRACT_OPERATION_INT_ACTION(Name, name, Action) \
+ MaybeHandle<Object> Calendar##Name(Isolate* isolate, \
+ Handle<JSReceiver> calendar, \
+ Handle<JSReceiver> date_like) { \
+ /* 1. Assert: Type(calendar) is Object. */ \
+ /* 2. Let result be ? Invoke(calendar, property, « dateLike »). */ \
+ Handle<Object> result; \
+ ASSIGN_RETURN_ON_EXCEPTION( \
+ isolate, result, \
+ InvokeCalendarMethod(isolate, calendar, \
+ isolate->factory()->name##_string(), date_like), \
+ Object); \
+ /* 3. If result is undefined, throw a RangeError exception. */ \
+ if (result->IsUndefined()) { \
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Object); \
+ } \
+ /* 4. Return ? Action(result). */ \
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Action(isolate, result), \
+ Object); \
+ return Handle<Smi>(Smi::FromInt(result->Number()), isolate); \
+ }
+
+// #sec-temporal-calendaryear
+CALENDAR_ABSTRACT_OPERATION_INT_ACTION(Year, year, ToIntegerThrowOnInfinity)
+// #sec-temporal-calendarmonth
+CALENDAR_ABSTRACT_OPERATION_INT_ACTION(Month, month, ToPositiveInteger)
+// #sec-temporal-calendarday
+CALENDAR_ABSTRACT_OPERATION_INT_ACTION(Day, day, ToPositiveInteger)
+// #sec-temporal-calendarmonthcode
+MaybeHandle<Object> CalendarMonthCode(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<JSReceiver> date_like) {
+ // 1. Assert: Type(calendar) is Object.
+ // 2. Let result be ? Invoke(calendar, monthCode , « dateLike »).
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ InvokeCalendarMethod(isolate, calendar,
+ isolate->factory()->monthCode_string(), date_like),
+ Object);
+ /* 3. If result is undefined, throw a RangeError exception. */
+ if (result->IsUndefined()) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Object);
+ }
+ // 4. Return ? ToString(result).
+ return Object::ToString(isolate, result);
+}
+
+#ifdef V8_INTL_SUPPORT
+// #sec-temporal-calendarerayear
+MaybeHandle<Object> CalendarEraYear(Isolate* isolate,
+ Handle<JSReceiver> calendar,
+ Handle<JSReceiver> date_like) {
+ // 1. Assert: Type(calendar) is Object.
+ // 2. Let result be ? Invoke(calendar, eraYear , « dateLike »).
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ InvokeCalendarMethod(isolate, calendar,
+ isolate->factory()->eraYear_string(), date_like),
+ Object);
+ // 3. If result is not undefined, set result to ? ToIntegerOrInfinity(result).
+ if (!result->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, ToIntegerThrowOnInfinity(isolate, result), Object);
+ }
+ // 4. Return result.
+ return result;
+}
+
+// #sec-temporal-calendarera
+MaybeHandle<Object> CalendarEra(Isolate* isolate, Handle<JSReceiver> calendar,
+ Handle<JSReceiver> date_like) {
+ // 1. Assert: Type(calendar) is Object.
+ // 2. Let result be ? Invoke(calendar, era , « dateLike »).
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ InvokeCalendarMethod(isolate, calendar, isolate->factory()->era_string(),
+ date_like),
+ Object);
+ // 3. If result is not undefined, set result to ? ToString(result).
+ if (!result->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ Object::ToString(isolate, result), Object);
+ }
+ // 4. Return result.
+ return result;
+}
+
+#endif // V8_INTL_SUPPORT
+
+// #sec-temporal-getiso8601calendar
+MaybeHandle<JSTemporalCalendar> GetISO8601Calendar(Isolate* isolate) {
+ return CreateTemporalCalendar(isolate, isolate->factory()->iso8601_string());
+}
+
+} // namespace temporal
+
+namespace {
+
+bool IsUTC(Isolate* isolate, Handle<String> time_zone) {
+ // 1. Assert: Type(timeZone) is String.
+ // 2. Let tzText be ! StringToCodePoints(timeZone).
+ // 3. Let tzUpperText be the result of toUppercase(tzText), according to the
+ // Unicode Default Case Conversion algorithm.
+ // 4. Let tzUpper be ! CodePointsToString(tzUpperText).
+ // 5. If tzUpper and "UTC" are the same sequence of code points, return true.
+ // 6. Return false.
+ if (time_zone->length() != 3) return false;
+ time_zone = String::Flatten(isolate, time_zone);
+ DisallowGarbageCollection no_gc;
+ const String::FlatContent& flat = time_zone->GetFlatContent(no_gc);
+ return (flat.Get(0) == u'U' || flat.Get(0) == u'u') &&
+ (flat.Get(1) == u'T' || flat.Get(1) == u't') &&
+ (flat.Get(2) == u'C' || flat.Get(2) == u'c');
+}
+
+#ifdef V8_INTL_SUPPORT
+class CalendarMap final {
+ public:
+ CalendarMap() {
+ icu::Locale locale("und");
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::Calendar::getKeywordValuesForLocale("ca", locale, false, status));
+ calendar_ids.push_back("iso8601");
+ calendar_id_indices.insert({"iso8601", 0});
+ int32_t i = 1;
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ if (strcmp(item, "iso8601") != 0) {
+ const char* type = uloc_toUnicodeLocaleType("ca", item);
+ calendar_ids.push_back(type);
+ calendar_id_indices.insert({type, i++});
+ }
+ }
+ }
+ bool Contains(const std::string& id) const {
+ return calendar_id_indices.find(id) != calendar_id_indices.end();
+ }
+
+ std::string Id(int32_t index) const {
+ DCHECK_LT(index, calendar_ids.size());
+ return calendar_ids[index];
+ }
+
+ int32_t Index(const char* id) const {
+ return calendar_id_indices.find(id)->second;
+ }
+
+ private:
+ std::map<std::string, int32_t> calendar_id_indices;
+ std::vector<std::string> calendar_ids;
+};
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CalendarMap, GetCalendarMap)
+
+// #sec-temporal-isbuiltincalendar
+bool IsBuiltinCalendar(Isolate* isolate, const std::string& id) {
+ return GetCalendarMap()->Contains(id);
+}
+
+bool IsBuiltinCalendar(Isolate* isolate, Handle<String> id) {
+ return IsBuiltinCalendar(isolate, id->ToCString().get());
+}
+
+Handle<String> CalendarIdentifier(Isolate* isolate, int32_t index) {
+ return isolate->factory()->NewStringFromAsciiChecked(
+ GetCalendarMap()->Id(index).c_str());
+}
+
+int32_t CalendarIndex(Isolate* isolate, Handle<String> id) {
+ return GetCalendarMap()->Index(id->ToCString().get());
+}
+
+bool IsValidTimeZoneName(Isolate* isolate, Handle<String> time_zone) {
+ return Intl::IsValidTimeZoneName(isolate, time_zone);
+}
+
+MaybeHandle<String> CanonicalizeTimeZoneName(Isolate* isolate,
+ Handle<String> identifier) {
+ return Intl::CanonicalizeTimeZoneName(isolate, identifier);
+}
+
+#else // V8_INTL_SUPPORT
+Handle<String> CalendarIdentifier(Isolate* isolate, int32_t index) {
+ DCHECK_EQ(index, 0);
+ return isolate->factory()->iso8601_string();
+}
+
+// #sec-temporal-isbuiltincalendar
+bool IsBuiltinCalendar(Isolate* isolate, Handle<String> id) {
+ // 1. If id is not "iso8601", return false.
+ // 2. Return true
+ return isolate->factory()->iso8601_string()->Equals(*id);
+}
+
+int32_t CalendarIndex(Isolate* isolate, Handle<String> id) { return 0; }
+// #sec-isvalidtimezonename
+bool IsValidTimeZoneName(Isolate* isolate, Handle<String> time_zone) {
+ return IsUTC(isolate, time_zone);
+}
+// #sec-canonicalizetimezonename
+MaybeHandle<String> CanonicalizeTimeZoneName(Isolate* isolate,
+ Handle<String> identifier) {
+ return isolate->factory()->UTC_string();
+}
+#endif // V8_INTL_SUPPORT
+
+// #sec-temporal-mergelargestunitoption
+MaybeHandle<JSObject> MergeLargestUnitOption(Isolate* isolate,
+ Handle<JSReceiver> options,
+ Unit largest_unit) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Let merged be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> merged =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 2. Let keys be ? EnumerableOwnPropertyNames(options, key).
+ // 3. For each element nextKey of keys, do
+ // a. Let propValue be ? Get(options, nextKey).
+ // b. Perform ! CreateDataPropertyOrThrow(merged, nextKey, propValue).
+ JSReceiver::SetOrCopyDataProperties(
+ isolate, merged, options, PropertiesEnumerationMode::kEnumerationOrder,
+ nullptr, false)
+ .Check();
+
+ // 4. Perform ! CreateDataPropertyOrThrow(merged, "largestUnit", largestUnit).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, merged, isolate->factory()->largestUnit_string(),
+ UnitToString(isolate, largest_unit), Just(kThrowOnError))
+ .FromJust());
+ // 5. Return merged.
+ return merged;
+}
+
+// #sec-temporal-tointegerthrowoninfinity
+MaybeHandle<Object> ToIntegerThrowOnInfinity(Isolate* isolate,
+ Handle<Object> argument) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Let integer be ? ToIntegerOrInfinity(argument).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, argument,
+ Object::ToInteger(isolate, argument), Object);
+ // 2. If integer is +∞ or -∞, throw a RangeError exception.
+ if (!std::isfinite(argument->Number())) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Object);
+ }
+ return argument;
+}
+
+// #sec-temporal-largeroftwotemporalunits
+Unit LargerOfTwoTemporalUnits(Isolate* isolate, Unit u1, Unit u2) {
+ // 1. If either u1 or u2 is "year", return "year".
+ if (u1 == Unit::kYear || u2 == Unit::kYear) return Unit::kYear;
+ // 2. If either u1 or u2 is "month", return "month".
+ if (u1 == Unit::kMonth || u2 == Unit::kMonth) return Unit::kMonth;
+ // 3. If either u1 or u2 is "week", return "week".
+ if (u1 == Unit::kWeek || u2 == Unit::kWeek) return Unit::kWeek;
+ // 4. If either u1 or u2 is "day", return "day".
+ if (u1 == Unit::kDay || u2 == Unit::kDay) return Unit::kDay;
+ // 5. If either u1 or u2 is "hour", return "hour".
+ if (u1 == Unit::kHour || u2 == Unit::kHour) return Unit::kHour;
+ // 6. If either u1 or u2 is "minute", return "minute".
+ if (u1 == Unit::kMinute || u2 == Unit::kMinute) return Unit::kMinute;
+ // 7. If either u1 or u2 is "second", return "second".
+ if (u1 == Unit::kSecond || u2 == Unit::kSecond) return Unit::kSecond;
+ // 8. If either u1 or u2 is "millisecond", return "millisecond".
+ if (u1 == Unit::kMillisecond || u2 == Unit::kMillisecond)
+ return Unit::kMillisecond;
+ // 9. If either u1 or u2 is "microsecond", return "microsecond".
+ if (u1 == Unit::kMicrosecond || u2 == Unit::kMicrosecond)
+ return Unit::kMicrosecond;
+ // 10. Return "nanosecond".
+ return Unit::kNanosecond;
+}
+
+Handle<String> UnitToString(Isolate* isolate, Unit unit) {
+ switch (unit) {
+ case Unit::kYear:
+ return ReadOnlyRoots(isolate).year_string_handle();
+ case Unit::kMonth:
+ return ReadOnlyRoots(isolate).month_string_handle();
+ case Unit::kWeek:
+ return ReadOnlyRoots(isolate).week_string_handle();
+ case Unit::kDay:
+ return ReadOnlyRoots(isolate).day_string_handle();
+ case Unit::kHour:
+ return ReadOnlyRoots(isolate).hour_string_handle();
+ case Unit::kMinute:
+ return ReadOnlyRoots(isolate).minute_string_handle();
+ case Unit::kSecond:
+ return ReadOnlyRoots(isolate).second_string_handle();
+ case Unit::kMillisecond:
+ return ReadOnlyRoots(isolate).millisecond_string_handle();
+ case Unit::kMicrosecond:
+ return ReadOnlyRoots(isolate).microsecond_string_handle();
+ case Unit::kNanosecond:
+ return ReadOnlyRoots(isolate).nanosecond_string_handle();
+ default:
+ UNREACHABLE();
+ }
+}
+
+// #sec-temporal-balanceisodate
+void BalanceISODate(Isolate* isolate, int32_t* year, int32_t* month,
+ int32_t* day) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year, month, and day are integers.
+ // 2. Let balancedYearMonth be ! BalanceISOYearMonth(year, month).
+ // 3. Set month to balancedYearMonth.[[Month]].
+ // 4. Set year to balancedYearMonth.[[Year]].
+ BalanceISOYearMonth(isolate, year, month);
+ // 5. NOTE: To deal with negative numbers of days whose absolute value is
+ // greater than the number of days in a year, the following section subtracts
+ // years and adds days until the number of days is greater than −366 or −365.
+ // 6. If month > 2, then
+ // a. Let testYear be year.
+ // 7. Else,
+ // a. Let testYear be year − 1.
+ int32_t test_year = (*month > 2) ? *year : *year - 1;
+ // 8. Repeat, while day < −1 × ! ISODaysInYear(testYear),
+ int32_t iso_days_in_year;
+ while (*day < -(iso_days_in_year = ISODaysInYear(isolate, test_year))) {
+ // a. Set day to day + ! ISODaysInYear(testYear).
+ *day += iso_days_in_year;
+ // b. Set year to year − 1.
+ (*year)--;
+ // c. Set testYear to testYear − 1.
+ test_year--;
+ }
+ // 9. NOTE: To deal with numbers of days greater than the number of days in a
+ // year, the following section adds years and subtracts days until the number
+ // of days is less than 366 or 365.
+ // 10. Let testYear be year + 1.
+ test_year = (*year) + 1;
+ // 11. Repeat, while day > ! ISODaysInYear(testYear),
+ while (*day > (iso_days_in_year = ISODaysInYear(isolate, test_year))) {
+ // a. Set day to day − ! ISODaysInYear(testYear).
+ *day -= iso_days_in_year;
+ // b. Set year to year + 1.
+ (*year)++;
+ // c. Set testYear to testYear + 1.
+ test_year++;
+ }
+ // 12. NOTE: To deal with negative numbers of days whose absolute value is
+ // greater than the number of days in the current month, the following section
+ // subtracts months and adds days until the number of days is greater than 0.
+ // 13. Repeat, while day < 1,
+ while (*day < 1) {
+ // a. Set balancedYearMonth to ! BalanceISOYearMonth(year, month − 1).
+ // b. Set year to balancedYearMonth.[[Year]].
+ // c. Set month to balancedYearMonth.[[Month]].
+ *month -= 1;
+ BalanceISOYearMonth(isolate, year, month);
+ // d. Set day to day + ! ISODaysInMonth(year, month).
+ *day += ISODaysInMonth(isolate, *year, *month);
+ }
+ // 14. NOTE: To deal with numbers of days greater than the number of days in
+ // the current month, the following section adds months and subtracts days
+ // until the number of days is less than the number of days in the month.
+ // 15. Repeat, while day > ! ISODaysInMonth(year, month),
+ int32_t iso_days_in_month;
+ while (*day > (iso_days_in_month = ISODaysInMonth(isolate, *year, *month))) {
+ // a. Set day to day − ! ISODaysInMonth(year, month).
+ *day -= iso_days_in_month;
+ // b. Set balancedYearMonth to ! BalanceISOYearMonth(year, month + 1).
+ // c. Set year to balancedYearMonth.[[Year]].
+ // d. Set month to balancedYearMonth.[[Month]].
+ *month += 1;
+ BalanceISOYearMonth(isolate, year, month);
+ }
+ // 16. Return the new Record { [[Year]]: year, [[Month]]: month, [[Day]]: day
+ // }.
+ return;
+}
+
+// #sec-temporal-adddatetime
+Maybe<DateTimeRecordCommon> AddDateTime(
+ Isolate* isolate, int32_t year, int32_t month, int32_t day, int32_t hour,
+ int32_t minute, int32_t second, int32_t millisecond, int32_t microsecond,
+ int32_t nanosecond, Handle<JSReceiver> calendar, const DurationRecord& dur,
+ Handle<Object> options) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year, month, day, hour, minute, second, millisecond,
+ // microsecond, and nanosecond are integers.
+ // 2. Let timeResult be ! AddTime(hour, minute, second, millisecond,
+ // microsecond, nanosecond, hours, minutes, seconds, milliseconds,
+ // microseconds, nanoseconds).
+ DateTimeRecordCommon time_result =
+ AddTime(isolate, hour, minute, second, millisecond, microsecond,
+ nanosecond, dur.hours, dur.minutes, dur.seconds, dur.milliseconds,
+ dur.microseconds, dur.nanoseconds);
+
+ // 3. Let datePart be ? CreateTemporalDate(year, month, day, calendar).
+ Handle<JSTemporalPlainDate> date_part;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, date_part,
+ CreateTemporalDate(isolate, year, month, day, calendar),
+ Nothing<DateTimeRecordCommon>());
+ // 4. Let dateDuration be ? CreateTemporalDuration(years, months, weeks, days
+ // + timeResult.[[Days]], 0, 0, 0, 0, 0, 0).
+ Handle<JSTemporalDuration> date_duration;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, date_duration,
+ CreateTemporalDuration(isolate, dur.years, dur.months, dur.weeks,
+ dur.days + time_result.day, 0, 0, 0, 0, 0, 0),
+ Nothing<DateTimeRecordCommon>());
+ // 5. Let addedDate be ? CalendarDateAdd(calendar, datePart, dateDuration,
+ // options).
+ Handle<JSTemporalPlainDate> added_date;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, added_date,
+ CalendarDateAdd(isolate, calendar, date_part, date_duration, options),
+ Nothing<DateTimeRecordCommon>());
+ // 6. Return the new Record { [[Year]]: addedDate.[[ISOYear]], [[Month]]:
+ // addedDate.[[ISOMonth]], [[Day]]: addedDate.[[ISODay]], [[Hour]]:
+ // timeResult.[[Hour]], [[Minute]]: timeResult.[[Minute]], [[Second]]:
+ // timeResult.[[Second]], [[Millisecond]]: timeResult.[[Millisecond]],
+ // [[Microsecond]]: timeResult.[[Microsecond]], [[Nanosecond]]:
+ // timeResult.[[Nanosecond]], }.
+ time_result.year = added_date->iso_year();
+ time_result.month = added_date->iso_month();
+ time_result.day = added_date->iso_day();
+ return Just(time_result);
+}
+
+Maybe<bool> BalanceDuration(Isolate* isolate, int64_t* days, int64_t* hours,
+ int64_t* minutes, int64_t* seconds,
+ int64_t* milliseconds, int64_t* microseconds,
+ int64_t* nanoseconds, Unit largest_unit,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. If relativeTo is not present, set relativeTo to undefined.
+ return BalanceDuration(isolate, days, hours, minutes, seconds, milliseconds,
+ microseconds, nanoseconds, largest_unit,
+ isolate->factory()->undefined_value(), method_name);
+}
+
+Maybe<bool> BalanceDuration(Isolate* isolate, int64_t* days, int64_t* hours,
+ int64_t* minutes, int64_t* seconds,
+ int64_t* milliseconds, int64_t* microseconds,
+ int64_t* nanoseconds, Unit largest_unit,
+ Handle<Object> relative_to_obj,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 2. If Type(relativeTo) is Object and relativeTo has an
+ // [[InitializedTemporalZonedDateTime]] internal slot, then
+ if (relative_to_obj->IsJSTemporalZonedDateTime()) {
+ Handle<JSTemporalZonedDateTime> relative_to =
+ Handle<JSTemporalZonedDateTime>::cast(relative_to_obj);
+ // a. Let endNs be ? AddZonedDateTime(relativeTo.[[Nanoseconds]],
+ // relativeTo.[[TimeZone]], relativeTo.[[Calendar]], 0, 0, 0, days, hours,
+ // minutes, seconds, milliseconds, microseconds, nanoseconds).
+ Handle<BigInt> end_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, end_ns,
+ AddZonedDateTime(isolate,
+ Handle<BigInt>(relative_to->nanoseconds(), isolate),
+ Handle<JSReceiver>(relative_to->time_zone(), isolate),
+ Handle<JSReceiver>(relative_to->calendar(), isolate),
+ {0, 0, 0, *days, *hours, *minutes, *seconds,
+ *milliseconds, *microseconds, *nanoseconds},
+ method_name),
+ Nothing<bool>());
+ // b. Set nanoseconds to endNs − relativeTo.[[Nanoseconds]].
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, end_ns,
+ BigInt::Subtract(isolate, end_ns,
+ Handle<BigInt>(relative_to->nanoseconds(), isolate)),
+ Nothing<bool>());
+ *nanoseconds = end_ns->AsInt64();
+ // 3. Else,
+ } else {
+ // a. Set nanoseconds to ℤ(! TotalDurationNanoseconds(days, hours, minutes,
+ // seconds, milliseconds, microseconds, nanoseconds, 0)).
+ *nanoseconds =
+ TotalDurationNanoseconds(isolate, *days, *hours, *minutes, *seconds,
+ *milliseconds, *microseconds, *nanoseconds, 0);
+ }
+ // 4. If largestUnit is one of "year", "month", "week", or "day", then
+ if (largest_unit == Unit::kYear || largest_unit == Unit::kMonth ||
+ largest_unit == Unit::kWeek || largest_unit == Unit::kDay) {
+ int64_t result_day_length;
+ // a. Let result be ? NanosecondsToDays(nanoseconds, relativeTo).
+ Maybe<bool> maybe_result =
+ NanosecondsToDays(isolate, *nanoseconds, relative_to_obj, days,
+ nanoseconds, &result_day_length, method_name);
+ MAYBE_RETURN(maybe_result, Nothing<bool>());
+ DCHECK(maybe_result.FromJust());
+ // b. Set days to result.[[Days]].
+ // c. Set nanoseconds to result.[[Nanoseconds]].
+ // 5. Else,
+ } else {
+ // a. Set days to 0.
+ *days = 0;
+ }
+ // 6. Set hours, minutes, seconds, milliseconds, and microseconds to 0.
+ *hours = *minutes = *seconds = *milliseconds = *microseconds = 0;
+ // 7. Set nanoseconds to ℝ(nanoseconds).
+
+ // 8. If nanoseconds < 0, let sign be −1; else, let sign be 1.
+ int32_t sign = (*nanoseconds < 0) ? -1 : 1;
+ // 9. Set nanoseconds to abs(nanoseconds).
+ *nanoseconds = std::abs(*nanoseconds);
+ // 10. If largestUnit is "year", "month", "week", "day", or "hour", then
+ switch (largest_unit) {
+ case Unit::kYear:
+ case Unit::kMonth:
+ case Unit::kWeek:
+ case Unit::kDay:
+ case Unit::kHour:
+ // a. Set microseconds to floor(nanoseconds / 1000).
+ *microseconds = floor_divide(*nanoseconds, 1000);
+ // b. Set nanoseconds to nanoseconds modulo 1000.
+ *nanoseconds = modulo(*nanoseconds, 1000);
+ // c. Set milliseconds to floor(microseconds / 1000).
+ *milliseconds = floor_divide(*microseconds, 1000);
+ // d. Set microseconds to microseconds modulo 1000.
+ *microseconds = modulo(*microseconds, 1000);
+ // e. Set seconds to floor(milliseconds / 1000).
+ *seconds = floor_divide(*milliseconds, 1000);
+ // f. Set milliseconds to milliseconds modulo 1000.
+ *milliseconds = modulo(*milliseconds, 1000);
+ // g. Set minutes to floor(seconds, 60).
+ *minutes = floor_divide(*seconds, 60);
+ // h. Set seconds to seconds modulo 60.
+ *seconds = modulo(*seconds, 60);
+ // i. Set hours to floor(minutes / 60).
+ *hours = floor_divide(*minutes, 60);
+ // j. Set minutes to minutes modulo 60.
+ *minutes = modulo(*minutes, 60);
+ break;
+ // 11. Else if largestUnit is "minute", then
+ case Unit::kMinute:
+ // a. Set microseconds to floor(nanoseconds / 1000).
+ *microseconds = floor_divide(*nanoseconds, 1000);
+ // b. Set nanoseconds to nanoseconds modulo 1000.
+ *nanoseconds = modulo(*nanoseconds, 1000);
+ // c. Set milliseconds to floor(microseconds / 1000).
+ *milliseconds = floor_divide(*microseconds, 1000);
+ // d. Set microseconds to microseconds modulo 1000.
+ *microseconds = modulo(*microseconds, 1000);
+ // e. Set seconds to floor(milliseconds / 1000).
+ *seconds = floor_divide(*milliseconds, 1000);
+ // f. Set milliseconds to milliseconds modulo 1000.
+ *milliseconds = modulo(*milliseconds, 1000);
+ // g. Set minutes to floor(seconds / 60).
+ *minutes = floor_divide(*seconds, 60);
+ // h. Set seconds to seconds modulo 60.
+ *seconds = modulo(*seconds, 60);
+ break;
+ // 12. Else if largestUnit is "second", then
+ case Unit::kSecond:
+ // a. Set microseconds to floor(nanoseconds / 1000).
+ *microseconds = floor_divide(*nanoseconds, 1000);
+ // b. Set nanoseconds to nanoseconds modulo 1000.
+ *nanoseconds = modulo(*nanoseconds, 1000);
+ // c. Set milliseconds to floor(microseconds / 1000).
+ *milliseconds = floor_divide(*microseconds, 1000);
+ // d. Set microseconds to microseconds modulo 1000.
+ *microseconds = modulo(*microseconds, 1000);
+ // e. Set seconds to floor(milliseconds / 1000).
+ *seconds = floor_divide(*milliseconds, 1000);
+ // f. Set milliseconds to milliseconds modulo 1000.
+ *milliseconds = modulo(*milliseconds, 1000);
+ break;
+ // 13. Else if largestUnit is "millisecond", then
+ case Unit::kMillisecond:
+ // a. Set microseconds to floor(nanoseconds / 1000).
+ *microseconds = floor_divide(*nanoseconds, 1000);
+ // b. Set nanoseconds to nanoseconds modulo 1000.
+ *nanoseconds = modulo(*nanoseconds, 1000);
+ // c. Set milliseconds to floor(microseconds / 1000).
+ *milliseconds = floor_divide(*microseconds, 1000);
+ // d. Set microseconds to microseconds modulo 1000.
+ *microseconds = modulo(*microseconds, 1000);
+ break;
+ // 14. Else if largestUnit is "microsecond", then
+ case Unit::kMicrosecond:
+ // a. Set microseconds to floor(nanoseconds / 1000).
+ *microseconds = floor_divide(*nanoseconds, 1000);
+ // b. Set nanoseconds to nanoseconds modulo 1000.
+ *nanoseconds = modulo(*nanoseconds, 1000);
+ break;
+ // 15. Else,
+ default:
+ // a. Assert: largestUnit is "nanosecond".
+ DCHECK_EQ(largest_unit, Unit::kNanosecond);
+ break;
+ }
+ // 16. Return the new Record { [[Days]]: 𝔽(days), [[Hours]]: 𝔽(hours × sign),
+ // [[Minutes]]: 𝔽(minutes × sign), [[Seconds]]: 𝔽(seconds × sign),
+ // [[Milliseconds]]: 𝔽(milliseconds × sign), [[Microseconds]]: 𝔽(microseconds
+ // × sign), [[Nanoseconds]]: 𝔽(nanoseconds × sign) }.
+ *hours *= sign;
+ *minutes *= sign;
+ *seconds *= sign;
+ *milliseconds *= sign;
+ *microseconds *= sign;
+ *nanoseconds *= sign;
+ return Just(true);
+}
+
+// #sec-temporal-addinstant
+MaybeHandle<BigInt> AddZonedDateTime(Isolate* isolate,
+ Handle<BigInt> epoch_nanoseconds,
+ Handle<JSReceiver> time_zone,
+ Handle<JSReceiver> calendar,
+ const DurationRecord& duration,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. If options is not present, set options to ! OrdinaryObjectCreate(null).
+ Handle<JSReceiver> options = isolate->factory()->NewJSObjectWithNullProto();
+ return AddZonedDateTime(isolate, epoch_nanoseconds, time_zone, calendar,
+ duration, options, method_name);
+}
+
+// #sec-temporal-addzoneddatetime
+MaybeHandle<BigInt> AddZonedDateTime(Isolate* isolate,
+ Handle<BigInt> epoch_nanoseconds,
+ Handle<JSReceiver> time_zone,
+ Handle<JSReceiver> calendar,
+ const DurationRecord& duration,
+ Handle<JSReceiver> options,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 2. If all of years, months, weeks, and days are 0, then
+ if (duration.years == 0 && duration.months == 0 && duration.weeks == 0 &&
+ duration.days == 0) {
+ // a. Return ! AddInstant(epochNanoseconds, hours, minutes, seconds,
+ // milliseconds, microseconds, nanoseconds).
+ return AddInstant(isolate, epoch_nanoseconds, duration.hours,
+ duration.minutes, duration.seconds, duration.milliseconds,
+ duration.microseconds, duration.nanoseconds);
+ }
+ // 3. Let instant be ! CreateTemporalInstant(epochNanoseconds).
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instant,
+ temporal::CreateTemporalInstant(isolate, epoch_nanoseconds), BigInt);
+
+ // 4. Let temporalDateTime be ?
+ // BuiltinTimeZoneGetPlainDateTimeFor(timeZone, instant, calendar).
+ Handle<JSTemporalPlainDateTime> temporal_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(isolate, time_zone, instant,
+ calendar, method_name),
+ BigInt);
+ // 5. Let datePart be ? CreateTemporalDate(temporalDateTime.[[ISOYear]],
+ // temporalDateTime.[[ISOMonth]], temporalDateTime.[[ISODay]], calendar).
+ Handle<JSTemporalPlainDate> date_part;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_part,
+ CreateTemporalDate(isolate, temporal_date_time->iso_year(),
+ temporal_date_time->iso_month(),
+ temporal_date_time->iso_day(), calendar),
+ BigInt);
+ // 6. Let dateDuration be ? CreateTemporalDuration(years, months, weeks, days,
+ // 0, 0, 0, 0, 0, 0).
+ Handle<JSTemporalDuration> date_duration;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_duration,
+ CreateTemporalDuration(isolate, duration.years, duration.months,
+ duration.weeks, duration.days, 0, 0, 0, 0, 0, 0),
+ BigInt);
+ // 7. Let addedDate be ? CalendarDateAdd(calendar, datePart, dateDuration,
+ // options).
+ Handle<JSTemporalPlainDate> added_date;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, added_date,
+ CalendarDateAdd(isolate, calendar, date_part, date_duration, options),
+ BigInt);
+ // 8. Let intermediateDateTime be ?
+ // CreateTemporalDateTime(addedDate.[[ISOYear]], addedDate.[[ISOMonth]],
+ // addedDate.[[ISODay]], temporalDateTime.[[ISOHour]],
+ // temporalDateTime.[[ISOMinute]], temporalDateTime.[[ISOSecond]],
+ // temporalDateTime.[[ISOMillisecond]], temporalDateTime.[[ISOMicrosecond]],
+ // temporalDateTime.[[ISONanosecond]], calendar).
+ Handle<JSTemporalPlainDateTime> intermediate_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, intermediate_date_time,
+ temporal::CreateTemporalDateTime(
+ isolate, added_date->iso_year(), added_date->iso_month(),
+ added_date->iso_day(), temporal_date_time->iso_hour(),
+ temporal_date_time->iso_minute(), temporal_date_time->iso_second(),
+ temporal_date_time->iso_millisecond(),
+ temporal_date_time->iso_microsecond(),
+ temporal_date_time->iso_nanosecond(), calendar),
+ BigInt);
+ // 9. Let intermediateInstant be ? BuiltinTimeZoneGetInstantFor(timeZone,
+ // intermediateDateTime, "compatible").
+ Handle<JSTemporalInstant> intermediate_instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, intermediate_instant,
+ BuiltinTimeZoneGetInstantFor(isolate, time_zone, intermediate_date_time,
+ Disambiguation::kCompatible, method_name),
+ BigInt);
+ // 10. Return ! AddInstant(intermediateInstant.[[Nanoseconds]], hours,
+ // minutes, seconds, milliseconds, microseconds, nanoseconds).
+ return AddInstant(
+ isolate, Handle<BigInt>(intermediate_instant->nanoseconds(), isolate),
+ duration.hours, duration.minutes, duration.seconds, duration.milliseconds,
+ duration.microseconds, duration.nanoseconds);
+}
+
+// #sec-temporal-nanosecondstodays
+Maybe<bool> NanosecondsToDays(Isolate* isolate, int64_t nanoseconds,
+ Handle<Object> relative_to_obj,
+ int64_t* result_days, int64_t* result_nanoseconds,
+ int64_t* result_day_length,
+ const char* method_name) {
+ return NanosecondsToDays(isolate, BigInt::FromInt64(isolate, nanoseconds),
+ relative_to_obj, result_days, result_nanoseconds,
+ result_day_length, method_name);
+}
+
+Maybe<bool> NanosecondsToDays(Isolate* isolate, Handle<BigInt> nanoseconds,
+ Handle<Object> relative_to_obj,
+ int64_t* result_days, int64_t* result_nanoseconds,
+ int64_t* result_day_length,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(nanoseconds) is BigInt.
+ // 2. Set nanoseconds to ℝ(nanoseconds).
+ // 3. Let sign be ! ℝ(Sign(𝔽(nanoseconds))).
+ ComparisonResult compare_result =
+ BigInt::CompareToBigInt(nanoseconds, BigInt::FromInt64(isolate, 0));
+ int64_t sign = COMPARE_RESULT_TO_SIGN(compare_result);
+ // 4. Let dayLengthNs be 8.64 × 10^13.
+ Handle<BigInt> day_length_ns = BigInt::FromInt64(isolate, 86400000000000LLU);
+ // 5. If sign is 0, then
+ if (sign == 0) {
+ // a. Return the new Record { [[Days]]: 0, [[Nanoseconds]]: 0,
+ // [[DayLength]]: dayLengthNs }.
+ *result_days = 0;
+ *result_nanoseconds = 0;
+ *result_day_length = day_length_ns->AsInt64();
+ return Just(true);
+ }
+ // 6. If Type(relativeTo) is not Object or relativeTo does not have an
+ // [[InitializedTemporalZonedDateTime]] internal slot, then
+ if (!relative_to_obj->IsJSTemporalZonedDateTime()) {
+ // Return the Record {
+ // [[Days]]: the integral part of nanoseconds / dayLengthNs,
+ // [[Nanoseconds]]: (abs(nanoseconds) modulo dayLengthNs) × sign,
+ // [[DayLength]]: dayLengthNs }.
+ Handle<BigInt> days_bigint;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, days_bigint,
+ BigInt::Divide(isolate, nanoseconds, day_length_ns), Nothing<bool>());
+
+ if (sign < 0) {
+ nanoseconds = BigInt::UnaryMinus(isolate, nanoseconds);
+ }
+
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, nanoseconds,
+ BigInt::Remainder(isolate, nanoseconds, day_length_ns),
+ Nothing<bool>());
+ *result_days = days_bigint->AsInt64();
+ *result_nanoseconds = nanoseconds->AsInt64() * sign;
+ *result_day_length = day_length_ns->AsInt64();
+ return Just(true);
+ }
+ Handle<JSTemporalZonedDateTime> relative_to =
+ Handle<JSTemporalZonedDateTime>::cast(relative_to_obj);
+ // 7. Let startNs be ℝ(relativeTo.[[Nanoseconds]]).
+ Handle<BigInt> start_ns = Handle<BigInt>(relative_to->nanoseconds(), isolate);
+ // 8. Let startInstant be ! CreateTemporalInstant(ℤ(sartNs)).
+ Handle<JSTemporalInstant> start_instant;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, start_instant,
+ temporal::CreateTemporalInstant(
+ isolate, Handle<BigInt>(relative_to->nanoseconds(), isolate)),
+ Nothing<bool>());
+
+ // 9. Let startDateTime be ?
+ // BuiltinTimeZoneGetPlainDateTimeFor(relativeTo.[[TimeZone]],
+ // startInstant, relativeTo.[[Calendar]]).
+ Handle<JSReceiver> time_zone =
+ Handle<JSReceiver>(relative_to->time_zone(), isolate);
+ Handle<JSReceiver> calendar =
+ Handle<JSReceiver>(relative_to->calendar(), isolate);
+ Handle<JSTemporalPlainDateTime> start_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, start_date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(
+ isolate, time_zone, start_instant, calendar, method_name),
+ Nothing<bool>());
+
+ // 10. Let endNs be startNs + nanoseconds.
+ Handle<BigInt> end_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, end_ns,
+ BigInt::Add(isolate, start_ns, nanoseconds),
+ Nothing<bool>());
+
+ // 11. Let endInstant be ! CreateTemporalInstant(ℤ(endNs)).
+ Handle<JSTemporalInstant> end_instant;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, end_instant, temporal::CreateTemporalInstant(isolate, end_ns),
+ Nothing<bool>());
+ // 12. Let endDateTime be ?
+ // BuiltinTimeZoneGetPlainDateTimeFor(relativeTo.[[TimeZone]],
+ // endInstant, relativeTo.[[Calendar]]).
+ Handle<JSTemporalPlainDateTime> end_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, end_date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(
+ isolate, time_zone, end_instant, calendar, method_name),
+ Nothing<bool>());
+
+ // 13. Let dateDifference be ?
+ // DifferenceISODateTime(startDateTime.[[ISOYear]],
+ // startDateTime.[[ISOMonth]], startDateTime.[[ISODay]],
+ // startDateTime.[[ISOHour]], startDateTime.[[ISOMinute]],
+ // startDateTime.[[ISOSecond]], startDateTime.[[ISOMillisecond]],
+ // startDateTime.[[ISOMicrosecond]], startDateTime.[[ISONanosecond]],
+ // endDateTime.[[ISOYear]], endDateTime.[[ISOMonth]], endDateTime.[[ISODay]],
+ // endDateTime.[[ISOHour]], endDateTime.[[ISOMinute]],
+ // endDateTime.[[ISOSecond]], endDateTime.[[ISOMillisecond]],
+ // endDateTime.[[ISOMicrosecond]], endDateTime.[[ISONanosecond]],
+ // relativeTo.[[Calendar]], "day").
+ Maybe<DurationRecord> maybe_date_difference = DifferenceISODateTime(
+ isolate, start_date_time->iso_year(), start_date_time->iso_month(),
+ start_date_time->iso_day(), start_date_time->iso_hour(),
+ start_date_time->iso_minute(), start_date_time->iso_second(),
+ start_date_time->iso_millisecond(), start_date_time->iso_microsecond(),
+ start_date_time->iso_nanosecond(), end_date_time->iso_year(),
+ end_date_time->iso_month(), end_date_time->iso_day(),
+ end_date_time->iso_hour(), end_date_time->iso_minute(),
+ end_date_time->iso_second(), end_date_time->iso_millisecond(),
+ end_date_time->iso_microsecond(), end_date_time->iso_nanosecond(),
+ calendar, Unit::kDay, relative_to, method_name);
+ MAYBE_RETURN(maybe_date_difference, Nothing<bool>());
+
+ DurationRecord date_difference = maybe_date_difference.FromJust();
+ // 14. Let days be dateDifference.[[Days]].
+ int64_t days = date_difference.days;
+
+ // 15. Let intermediateNs be ℝ(? AddZonedDateTime(ℤ(startNs),
+ // relativeTo.[[TimeZone]], relativeTo.[[Calendar]], 0, 0, 0, days, 0, 0, 0,
+ // 0, 0, 0)).
+ Handle<BigInt> intermediate_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, intermediate_ns,
+ AddZonedDateTime(isolate, start_ns, time_zone, calendar,
+ {0, 0, 0, days, 0, 0, 0, 0, 0, 0}, method_name),
+ Nothing<bool>());
+
+ // 16. If sign is 1, then
+ if (sign == 1) {
+ // a. Repeat, while days > 0 and intermediateNs > endNs,
+ while (days > 0 && BigInt::CompareToBigInt(intermediate_ns, end_ns) ==
+ ComparisonResult::kGreaterThan) {
+ // i. Set days to days − 1.
+ days -= 1;
+ // ii. Set intermediateNs to ℝ(? AddZonedDateTime(ℤ(startNs),
+ // relativeTo.[[TimeZone]], relativeTo.[[Calendar]], 0, 0, 0, days, 0, 0,
+ // 0, 0, 0, 0)).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, intermediate_ns,
+ AddZonedDateTime(isolate, start_ns, time_zone, calendar,
+ {0, 0, 0, days, 0, 0, 0, 0, 0, 0}, method_name),
+ Nothing<bool>());
+ }
+ }
+
+ // 17. Set nanoseconds to endNs − intermediateNs.
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, nanoseconds, BigInt::Subtract(isolate, end_ns, intermediate_ns),
+ Nothing<bool>());
+
+ // 18. Let done be false.
+ bool done = false;
+
+ // 19. Repeat, while done is false,
+ while (!done) {
+ // a. Let oneDayFartherNs be ℝ(? AddZonedDateTime(ℤ(intermediateNs),
+ // relativeTo.[[TimeZone]], relativeTo.[[Calendar]], 0, 0, 0, sign, 0, 0, 0,
+ // 0, 0, 0)).
+ Handle<BigInt> one_day_farther_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, one_day_farther_ns,
+ AddZonedDateTime(isolate, intermediate_ns, time_zone, calendar,
+ {0, 0, 0, sign, 0, 0, 0, 0, 0, 0}, method_name),
+ Nothing<bool>());
+
+ // b. Set dayLengthNs to oneDayFartherNs − intermediateNs.
+ Handle<BigInt> day_length_ns;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, day_length_ns,
+ BigInt::Subtract(isolate, one_day_farther_ns, intermediate_ns),
+ Nothing<bool>());
+
+ // c. If (nanoseconds − dayLengthNs) × sign ≥ 0, then
+ compare_result = BigInt::CompareToBigInt(nanoseconds, day_length_ns);
+ if (sign * COMPARE_RESULT_TO_SIGN(compare_result) >= 0) {
+ // i. Set nanoseconds to nanoseconds − dayLengthNs.
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, nanoseconds,
+ BigInt::Subtract(isolate, nanoseconds, day_length_ns),
+ Nothing<bool>());
+
+ // ii. Set intermediateNs to oneDayFartherNs.
+ intermediate_ns = one_day_farther_ns;
+
+ // iii. Set days to days + sign.
+ days += sign;
+ // d. Else,
+ } else {
+ // i. Set done to true.
+ done = true;
+ }
+ }
+
+ // 20. Return the new Record { [[Days]]: days, [[Nanoseconds]]: nanoseconds,
+ // [[DayLength]]: abs(dayLengthNs) }.
+ *result_days = days;
+ *result_nanoseconds = nanoseconds->AsInt64();
+ *result_day_length = std::abs(day_length_ns->AsInt64());
+ return Just(true);
+}
+
+Maybe<DurationRecord> DifferenceISODateTime(
+ Isolate* isolate, int32_t y1, int32_t mon1, int32_t d1, int32_t h1,
+ int32_t min1, int32_t s1, int32_t ms1, int32_t mus1, int32_t ns1,
+ int32_t y2, int32_t mon2, int32_t d2, int32_t h2, int32_t min2, int32_t s2,
+ int32_t ms2, int32_t mus2, int32_t ns2, Handle<JSReceiver> calendar,
+ Unit largest_unit, Handle<Object> options_obj, const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ DurationRecord result;
+ // 1. Assert: y1, mon1, d1, h1, min1, s1, ms1, mus1, ns1, y2, mon2, d2, h2,
+ // min2, s2, ms2, mus2, and ns2 are integers.
+ // 2. If options is not present, set options to ! OrdinaryObjectCreate(null).
+ Handle<JSReceiver> options;
+ if (options_obj->IsUndefined()) {
+ options = factory->NewJSObjectWithNullProto();
+ } else {
+ DCHECK(options_obj->IsJSReceiver());
+ options = Handle<JSReceiver>::cast(options_obj);
+ }
+ // 3. Let timeDifference be ! DifferenceTime(h1, min1, s1, ms1, mus1, ns1, h2,
+ // min2, s2, ms2, mus2, ns2).
+ DurationRecord time_difference = DifferenceTime(
+ isolate, h1, min1, s1, ms1, mus1, ns1, h2, min2, s2, ms2, mus2, ns2);
+
+ result.hours = time_difference.hours;
+ result.minutes = time_difference.minutes;
+ result.seconds = time_difference.seconds;
+ result.milliseconds = time_difference.milliseconds;
+ result.microseconds = time_difference.microseconds;
+ result.nanoseconds = time_difference.nanoseconds;
+
+ // 4. Let timeSign be ! DurationSign(0, 0, 0, timeDifference.[[Days]],
+ // timeDifference.[[Hours]], timeDifference.[[Minutes]],
+ // timeDifference.[[Seconds]], timeDifference.[[Milliseconds]],
+ // timeDifference.[[Microseconds]], timeDifference.[[Nanoseconds]]).
+ int32_t time_sign = DurationSign(isolate, time_difference);
+ // 5. Let dateSign be ! CompareISODate(y2, mon2, d2, y1, mon1, d1).
+ int32_t date_sign = CompareISODate(isolate, y2, mon2, d2, y1, mon1, d1);
+ // 6. Let balanceResult be ! BalanceISODate(y1, mon1, d1 +
+ // timeDifference.[[Days]]).
+ int32_t balanced_year = y1;
+ int32_t balanced_month = mon1;
+ int32_t balanced_day = d1 + static_cast<int32_t>(time_difference.days);
+ BalanceISODate(isolate, &balanced_year, &balanced_month, &balanced_day);
+
+ // 7. If timeSign is -dateSign, then
+ if (time_sign == -date_sign) {
+ // a. Set balanceResult be ! BalanceISODate(balanceResult.[[Year]],
+ // balanceResult.[[Month]], balanceResult.[[Day]] - timeSign).
+ balanced_day -= time_sign;
+ BalanceISODate(isolate, &balanced_year, &balanced_month, &balanced_day);
+ // b. Set timeDifference to ? BalanceDuration(-timeSign,
+ // timeDifference.[[Hours]], timeDifference.[[Minutes]],
+ // timeDifference.[[Seconds]], timeDifference.[[Milliseconds]],
+ // timeDifference.[[Microseconds]], timeDifference.[[Nanoseconds]],
+ // largestUnit).
+ result.days = -time_sign;
+ result.hours = time_difference.hours;
+ result.minutes = time_difference.minutes;
+ result.seconds = time_difference.seconds;
+ result.milliseconds = time_difference.milliseconds;
+ result.microseconds = time_difference.microseconds;
+ result.nanoseconds = time_difference.nanoseconds;
+
+ Maybe<bool> maybe_time_difference = BalanceDuration(
+ isolate, &(result.days), &(result.hours), &(result.minutes),
+ &(result.seconds), &(result.milliseconds), &(result.microseconds),
+ &(result.nanoseconds), largest_unit, method_name);
+ MAYBE_RETURN(maybe_time_difference, Nothing<DurationRecord>());
+ DCHECK(maybe_time_difference.FromJust());
+ }
+ // 8. Let date1 be ? CreateTemporalDate(balanceResult.[[Year]],
+ // balanceResult.[[Month]], balanceResult.[[Day]], calendar).
+ Handle<JSTemporalPlainDate> date1;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, date1,
+ CreateTemporalDate(isolate, balanced_year, balanced_month, balanced_day,
+ calendar),
+ Nothing<DurationRecord>());
+ // 9. Let date2 be ? CreateTemporalDate(y2, mon2, d2, calendar).
+ Handle<JSTemporalPlainDate> date2;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, date2, CreateTemporalDate(isolate, y2, mon2, d2, calendar),
+ Nothing<DurationRecord>());
+ // 10. Let dateLargestUnit be ! LargerOfTwoTemporalUnits("day", largestUnit).
+ Unit date_largest_unit =
+ LargerOfTwoTemporalUnits(isolate, Unit::kDay, largest_unit);
+
+ // 11. Let untilOptions be ? MergeLargestUnitOption(options, dateLargestUnit).
+ Handle<JSObject> until_options;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, until_options,
+ MergeLargestUnitOption(isolate, options, date_largest_unit),
+ Nothing<DurationRecord>());
+ // 12. Let dateDifference be ? CalendarDateUntil(calendar, date1, date2,
+ // untilOptions).
+ Handle<JSTemporalDuration> date_difference;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, date_difference,
+ CalendarDateUntil(isolate, calendar, date1, date2, until_options),
+ Nothing<DurationRecord>());
+ // 13. Let balanceResult be ? BalanceDuration(dateDifference.[[Days]],
+ // timeDifference.[[Hours]], timeDifference.[[Minutes]],
+ // timeDifference.[[Seconds]], timeDifference.[[Milliseconds]],
+ // timeDifference.[[Microseconds]], timeDifference.[[Nanoseconds]],
+ // largestUnit).
+ result.days = NumberToInt64(date_difference->days());
+
+ Maybe<bool> maybe_balance_result = BalanceDuration(
+ isolate, &(result.days), &(result.hours), &(result.minutes),
+ &(result.seconds), &(result.milliseconds), &(result.microseconds),
+ &(result.nanoseconds), largest_unit, method_name);
+ MAYBE_RETURN(maybe_balance_result, Nothing<DurationRecord>());
+ DCHECK(maybe_balance_result.FromJust());
+ // 14. Return the Record { [[Years]]: dateDifference.[[Years]], [[Months]]:
+ // dateDifference.[[Months]], [[Weeks]]: dateDifference.[[Weeks]], [[Days]]:
+ // balanceResult.[[Days]], [[Hours]]: balanceResult.[[Hours]], [[Minutes]]:
+ // balanceResult.[[Minutes]], [[Seconds]]: balanceResult.[[Seconds]],
+ // [[Milliseconds]]: balanceResult.[[Milliseconds]], [[Microseconds]]:
+ // balanceResult.[[Microseconds]], [[Nanoseconds]]:
+ // balanceResult.[[Nanoseconds]] }.
+ result.years = NumberToInt64(date_difference->years());
+ result.months = NumberToInt64(date_difference->months());
+ result.weeks = NumberToInt64(date_difference->weeks());
+ return Just(result);
+}
+
+// #sec-temporal-addinstant
+MaybeHandle<BigInt> AddInstant(Isolate* isolate,
+ Handle<BigInt> epoch_nanoseconds, int64_t hours,
+ int64_t minutes, int64_t seconds,
+ int64_t milliseconds, int64_t microseconds,
+ int64_t nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: hours, minutes, seconds, milliseconds, microseconds, and
+ // nanoseconds are integer Number values.
+ // 2. Let result be epochNanoseconds + ℤ(nanoseconds) +
+ // ℤ(microseconds) × 1000ℤ + ℤ(milliseconds) × 10^6ℤ + ℤ(seconds) × 10^9ℤ +
+ // ℤ(minutes) × 60ℤ × 10^9ℤ + ℤ(hours) × 3600ℤ × 10^9ℤ.
+ Handle<BigInt> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ BigInt::Add(isolate, epoch_nanoseconds,
+ BigInt::FromInt64(isolate, nanoseconds)),
+ BigInt);
+ Handle<BigInt> temp;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, microseconds),
+ BigInt::FromInt64(isolate, 1000)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, milliseconds),
+ BigInt::FromInt64(isolate, 1000000)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, seconds),
+ BigInt::FromInt64(isolate, 1000000000)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, minutes),
+ BigInt::FromInt64(isolate, 1000000000)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, temp, BigInt::FromInt64(isolate, 60)), BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, hours),
+ BigInt::FromInt64(isolate, 1000000000)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, temp, BigInt::FromInt64(isolate, 3600)),
+ BigInt);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+
+ // 3. If ! IsValidEpochNanoseconds(result) is false, throw a RangeError
+ // exception.
+ if (!IsValidEpochNanoseconds(isolate, result)) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), BigInt);
+ }
+ // 4. Return result.
+ return result;
+}
+
+// #sec-temporal-isvalidepochnanoseconds
+bool IsValidEpochNanoseconds(Isolate* isolate,
+ Handle<BigInt> epoch_nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(epochNanoseconds) is BigInt.
+ // 2. If epochNanoseconds < −86400ℤ × 10^17ℤ or epochNanoseconds > 86400ℤ ×
+ // 10^17ℤ, then a. Return false.
+ // 3. Return true.
+ int64_t ns = epoch_nanoseconds->AsInt64();
+ return !(ns < -86400 * 1e17 || ns > 86400 * 1e17);
+}
+
+MaybeHandle<BigInt> GetEpochFromISOParts(Isolate* isolate, int32_t year,
+ int32_t month, int32_t day,
+ int32_t hour, int32_t minute,
+ int32_t second, int32_t millisecond,
+ int32_t microsecond,
+ int32_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: year, month, day, hour, minute, second, millisecond,
+ // microsecond, and nanosecond are integers.
+ // 2. Assert: ! IsValidISODate(year, month, day) is true.
+ DCHECK(IsValidISODate(isolate, year, month, day));
+ // 3. Assert: ! IsValidTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond) is true.
+ DCHECK(IsValidTime(isolate, hour, minute, second, millisecond, microsecond,
+ nanosecond));
+ // 4. Let date be ! MakeDay(𝔽(year), 𝔽(month − 1), 𝔽(day)).
+ double date = MakeDay(year, month - 1, day);
+ // 5. Let time be ! MakeTime(𝔽(hour), 𝔽(minute), 𝔽(second), 𝔽(millisecond)).
+ double time = MakeTime(hour, minute, second, millisecond);
+ // 6. Let ms be ! MakeDate(date, time).
+ double ms = MakeDate(date, time);
+ // 7. Assert: ms is finite.
+ // 8. Return ℝ(ms) × 10^6 + microsecond × 10^3 + nanosecond.
+ Handle<BigInt> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ BigInt::FromNumber(isolate, isolate->factory()->NewNumber(ms)), BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ BigInt::Multiply(isolate, result, BigInt::FromInt64(isolate, 1000000)),
+ BigInt);
+
+ Handle<BigInt> temp;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temp,
+ BigInt::Multiply(isolate, BigInt::FromInt64(isolate, microsecond),
+ BigInt::FromInt64(isolate, 1000)),
+ BigInt);
+
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ BigInt::Add(isolate, result, temp), BigInt);
+ return BigInt::Add(isolate, result, BigInt::FromInt64(isolate, nanosecond));
+}
+
+// #sec-temporal-durationsign
+int32_t DurationSign(Isolate* isolaet, const DurationRecord& dur) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. For each value v of « years, months, weeks, days, hours, minutes,
+ // seconds, milliseconds, microseconds, nanoseconds », do a. If v < 0, return
+ // −1. b. If v > 0, return 1.
+ // 2. Return 0.
+ if (dur.years < 0) return -1;
+ if (dur.years > 0) return 1;
+ if (dur.months < 0) return -1;
+ if (dur.months > 0) return 1;
+ if (dur.weeks < 0) return -1;
+ if (dur.weeks > 0) return 1;
+ if (dur.days < 0) return -1;
+ if (dur.days > 0) return 1;
+ if (dur.hours < 0) return -1;
+ if (dur.hours > 0) return 1;
+ if (dur.minutes < 0) return -1;
+ if (dur.minutes > 0) return 1;
+ if (dur.seconds < 0) return -1;
+ if (dur.seconds > 0) return 1;
+ if (dur.milliseconds < 0) return -1;
+ if (dur.milliseconds > 0) return 1;
+ if (dur.microseconds < 0) return -1;
+ if (dur.microseconds > 0) return 1;
+ if (dur.nanoseconds < 0) return -1;
+ if (dur.nanoseconds > 0) return 1;
+ return 0;
+}
+
+// #sec-temporal-isvalidduration
+bool IsValidDuration(Isolate* isolate, const DurationRecord& dur) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Let sign be ! DurationSign(years, months, weeks, days, hours, minutes,
+ // seconds, milliseconds, microseconds, nanoseconds).
+ int32_t sign = DurationSign(isolate, dur);
+ // 2. For each value v of « years, months, weeks, days, hours, minutes,
+ // seconds, milliseconds, microseconds, nanoseconds », do a. If v is not
+ // finite, return false. b. If v < 0 and sign > 0, return false. c. If v > 0
+ // and sign < 0, return false.
+ // 3. Return true.
+ return !((sign > 0 && (dur.years < 0 || dur.months < 0 || dur.weeks < 0 ||
+ dur.days < 0 || dur.hours < 0 || dur.minutes < 0 ||
+ dur.seconds < 0 || dur.milliseconds < 0 ||
+ dur.microseconds < 0 || dur.nanoseconds < 0)) ||
+ (sign < 0 && (dur.years > 0 || dur.months > 0 || dur.weeks > 0 ||
+ dur.days > 0 || dur.hours > 0 || dur.minutes > 0 ||
+ dur.seconds > 0 || dur.milliseconds > 0 ||
+ dur.microseconds > 0 || dur.nanoseconds > 0)));
+}
+
+// #sec-temporal-isisoleapyear
+bool IsISOLeapYear(Isolate* isolate, int32_t year) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year is an integer.
+ // 2. If year modulo 4 ≠ 0, return false.
+ // 3. If year modulo 400 = 0, return true.
+ // 4. If year modulo 100 = 0, return false.
+ // 5. Return true.
+ return isolate->date_cache()->IsLeap(year);
+}
+
+// #sec-temporal-isodaysinmonth
+int32_t ISODaysInMonth(Isolate* isolate, int32_t year, int32_t month) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year is an integer.
+ // 2. Assert: month is an integer, month ≥ 1, and month ≤ 12.
+ DCHECK_GE(month, 1);
+ DCHECK_LE(month, 12);
+ // 3. If month is 1, 3, 5, 7, 8, 10, or 12, return 31.
+ if (month % 2 == ((month < 8) ? 1 : 0)) return 31;
+ // 4. If month is 4, 6, 9, or 11, return 30.
+ DCHECK(month == 2 || month == 4 || month == 6 || month == 9 || month == 11);
+ if (month != 2) return 30;
+ // 5. If ! IsISOLeapYear(year) is true, return 29.
+ return IsISOLeapYear(isolate, year) ? 29 : 28;
+ // 6. Return 28.
+}
+
+// #sec-temporal-isodaysinyear
+int32_t ISODaysInYear(Isolate* isolate, int32_t year) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year is an integer.
+ // 2. If ! IsISOLeapYear(year) is true, then
+ // a. Return 366.
+ // 3. Return 365.
+ return IsISOLeapYear(isolate, year) ? 366 : 365;
+}
+
+bool IsValidTime(Isolate* isolate, int32_t hour, int32_t minute, int32_t second,
+ int32_t millisecond, int32_t microsecond, int32_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 2. If hour < 0 or hour > 23, then
+ // a. Return false.
+ if (hour < 0 || hour > 23) return false;
+ // 3. If minute < 0 or minute > 59, then
+ // a. Return false.
+ if (minute < 0 || minute > 59) return false;
+ // 4. If second < 0 or second > 59, then
+ // a. Return false.
+ if (second < 0 || second > 59) return false;
+ // 5. If millisecond < 0 or millisecond > 999, then
+ // a. Return false.
+ if (millisecond < 0 || millisecond > 999) return false;
+ // 6. If microsecond < 0 or microsecond > 999, then
+ // a. Return false.
+ if (microsecond < 0 || microsecond > 999) return false;
+ // 7. If nanosecond < 0 or nanosecond > 999, then
+ // a. Return false.
+ if (nanosecond < 0 || nanosecond > 999) return false;
+ // 8. Return true.
+ return true;
+}
+
+// #sec-temporal-isvalidisodate
+bool IsValidISODate(Isolate* isolate, int32_t year, int32_t month,
+ int32_t day) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year, month, and day are integers.
+ // 2. If month < 1 or month > 12, then
+ // a. Return false.
+ if (month < 1 || month > 12) return false;
+ // 3. Let daysInMonth be ! ISODaysInMonth(year, month).
+ // 4. If day < 1 or day > daysInMonth, then
+ // a. Return false.
+ if (day < 1 || day > ISODaysInMonth(isolate, year, month)) return false;
+ // 5. Return true.
+ return true;
+}
+
+// #sec-temporal-compareisodate
+int32_t CompareISODate(Isolate* isolate, int32_t y1, int32_t m1, int32_t d1,
+ int32_t y2, int32_t m2, int32_t d2) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: y1, m1, d1, y2, m2, and d2 are integers.
+ // 2. If y1 > y2, return 1.
+ if (y1 > y2) return 1;
+ // 3. If y1 < y2, return -1.
+ if (y1 < y2) return -1;
+ // 4. If m1 > m2, return 1.
+ if (m1 > m2) return 1;
+ // 5. If m1 < m2, return -1.
+ if (m1 < m2) return -1;
+ // 6. If d1 > d2, return 1.
+ if (d1 > d2) return 1;
+ // 7. If d1 < d2, return -1.
+ if (d1 < d2) return -1;
+ // 8. Return 0.
+ return 0;
+}
+
+// #sec-temporal-balanceisoyearmonth
+void BalanceISOYearMonth(Isolate* isolate, int32_t* year, int32_t* month) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year and month are integers.
+ // 2. Set year to year + floor((month - 1) / 12).
+ *year += floor_divide((*month - 1), 12);
+ // 3. Set month to (month − 1) modulo 12 + 1.
+ *month = static_cast<int32_t>(modulo(*month - 1, 12)) + 1;
+
+ // 4. Return the new Record { [[Year]]: year, [[Month]]: month }.
+}
+// #sec-temporal-balancetime
+DateTimeRecordCommon BalanceTime(Isolate* isolate, int64_t hour, int64_t minute,
+ int64_t second, int64_t millisecond,
+ int64_t microsecond, int64_t nanosecond) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: hour, minute, second, millisecond, microsecond, and nanosecond
+ // are integers.
+ // 2. Set microsecond to microsecond + floor(nanosecond / 1000).
+ microsecond += floor_divide(nanosecond, 1000L);
+ // 3. Set nanosecond to nanosecond modulo 1000.
+ nanosecond = modulo(nanosecond, 1000L);
+ // 4. Set millisecond to millisecond + floor(microsecond / 1000).
+ millisecond += floor_divide(microsecond, 1000L);
+ // 5. Set microsecond to microsecond modulo 1000.
+ microsecond = modulo(microsecond, 1000L);
+ // 6. Set second to second + floor(millisecond / 1000).
+ second += floor_divide(millisecond, 1000L);
+ // 7. Set millisecond to millisecond modulo 1000.
+ millisecond = modulo(millisecond, 1000L);
+ // 8. Set minute to minute + floor(second / 60).
+ minute += floor_divide(second, 60L);
+ // 9. Set second to second modulo 60.
+ second = modulo(second, 60L);
+ // 10. Set hour to hour + floor(minute / 60).
+ hour += floor_divide(minute, 60L);
+ // 11. Set minute to minute modulo 60.
+ minute = modulo(minute, 60L);
+ // 12. Let days be floor(hour / 24).
+ int64_t days = floor_divide(hour, 24L);
+ // 13. Set hour to hour modulo 24.
+ hour = modulo(hour, 24L);
+ // 14. Return the new Record { [[Days]]: days, [[Hour]]: hour, [[Minute]]:
+ // minute, [[Second]]: second, [[Millisecond]]: millisecond, [[Microsecond]]:
+ // microsecond, [[Nanosecond]]: nanosecond }.
+ return {0,
+ 0,
+ static_cast<int32_t>(days),
+ static_cast<int32_t>(hour),
+ static_cast<int32_t>(minute),
+ static_cast<int32_t>(second),
+ static_cast<int32_t>(millisecond),
+ static_cast<int32_t>(microsecond),
+ static_cast<int32_t>(nanosecond)};
+}
+
+// #sec-temporal-differencetime
+DurationRecord DifferenceTime(Isolate* isolate, int32_t h1, int32_t min1,
+ int32_t s1, int32_t ms1, int32_t mus1,
+ int32_t ns1, int32_t h2, int32_t min2, int32_t s2,
+ int32_t ms2, int32_t mus2, int32_t ns2) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: h1, min1, s1, ms1, mus1, ns1, h2, min2, s2, ms2, mus2, and ns2
+ // are integers.
+ DurationRecord dur;
+ // 2. Let hours be h2 − h1.
+ dur.hours = h2 - h1;
+ // 3. Let minutes be min2 − min1.
+ dur.minutes = min2 - min1;
+ // 4. Let seconds be s2 − s1.
+ dur.seconds = s2 - s1;
+ // 5. Let milliseconds be ms2 − ms1.
+ dur.milliseconds = ms2 - ms1;
+ // 6. Let microseconds be mus2 − mus1.
+ dur.microseconds = mus2 - mus1;
+ // 7. Let nanoseconds be ns2 − ns1.
+ dur.nanoseconds = ns2 - ns1;
+ // 8. Let sign be ! DurationSign(0, 0, 0, 0, hours, minutes, seconds,
+ // milliseconds, microseconds, nanoseconds).
+ double sign = DurationSign(isolate, dur);
+
+ // See https://github.com/tc39/proposal-temporal/pull/1885
+ // 9. Let bt be ! BalanceTime(hours × sign, minutes × sign, seconds × sign,
+ // milliseconds × sign, microseconds × sign, nanoseconds × sign).
+ DateTimeRecordCommon bt = BalanceTime(
+ isolate, dur.hours * sign, dur.minutes * sign, dur.seconds * sign,
+ dur.milliseconds * sign, dur.microseconds * sign, dur.nanoseconds * sign);
+
+ // 10. Return the new Record { [[Days]]: bt.[[Days]] × sign, [[Hours]]:
+ // bt.[[Hour]] × sign, [[Minutes]]: bt.[[Minute]] × sign, [[Seconds]]:
+ // bt.[[Second]] × sign, [[Milliseconds]]: bt.[[Millisecond]] × sign,
+ // [[Microseconds]]: bt.[[Microsecond]] × sign, [[Nanoseconds]]:
+ // bt.[[Nanosecond]] × sign }.
+ return {0,
+ 0,
+ 0,
+ static_cast<int64_t>(bt.day * sign),
+ static_cast<int64_t>(bt.hour * sign),
+ static_cast<int64_t>(bt.minute * sign),
+ static_cast<int64_t>(bt.second * sign),
+ static_cast<int64_t>(bt.millisecond * sign),
+ static_cast<int64_t>(bt.microsecond * sign),
+ static_cast<int64_t>(bt.nanosecond * sign)};
+}
+
+// #sec-temporal-addtime
+DateTimeRecordCommon AddTime(Isolate* isolate, int64_t hour, int64_t minute,
+ int64_t second, int64_t millisecond,
+ int64_t microsecond, int64_t nanosecond,
+ int64_t hours, int64_t minutes, int64_t seconds,
+ int64_t milliseconds, int64_t microseconds,
+ int64_t nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: hour, minute, second, millisecond, microsecond, nanosecond,
+ // hours, minutes, seconds, milliseconds, microseconds, and nanoseconds are
+ // integers.
+ // 2. Let hour be hour + hours.
+ return BalanceTime(isolate, hour + hours,
+ // 3. Let minute be minute + minutes.
+ minute + minutes,
+ // 4. Let second be second + seconds.
+ second + seconds,
+ // 5. Let millisecond be millisecond + milliseconds.
+ millisecond + milliseconds,
+ // 6. Let microsecond be microsecond + microseconds.
+ microsecond + microseconds,
+ // 7. Let nanosecond be nanosecond + nanoseconds.
+ nanosecond + nanoseconds);
+ // 8. Return ! BalanceTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond).
+}
+
+// #sec-temporal-totaldurationnanoseconds
+int64_t TotalDurationNanoseconds(Isolate* isolate, int64_t days, int64_t hours,
+ int64_t minutes, int64_t seconds,
+ int64_t milliseconds, int64_t microseconds,
+ int64_t nanoseconds, int64_t offset_shift) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: offsetShift is an integer.
+ // 2. Set nanoseconds to ℝ(nanoseconds).
+ // 3. If days ≠ 0, then
+ if (days != 0) {
+ // a. Set nanoseconds to nanoseconds − offsetShift.
+ nanoseconds -= offset_shift;
+ }
+
+ // 4. Set hours to ℝ(hours) + ℝ(days) × 24.
+ hours += days * 24;
+
+ // 5. Set minutes to ℝ(minutes) + hours × 60.
+ minutes += hours * 60;
+
+ // 6. Set seconds to ℝ(seconds) + minutes × 60.
+ seconds += minutes * 60;
+
+ // 7. Set milliseconds to ℝ(milliseconds) + seconds × 1000.
+ milliseconds += seconds * 1000;
+
+ // 8. Set microseconds to ℝ(microseconds) + milliseconds × 1000.
+ microseconds += milliseconds * 1000;
+
+ // 9. Return nanoseconds + microseconds × 1000.
+ return nanoseconds + microseconds * 1000;
+}
+
+} // namespace
+
+// #sec-temporal.duration
+MaybeHandle<JSTemporalDuration> JSTemporalDuration::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> years, Handle<Object> months, Handle<Object> weeks,
+ Handle<Object> days, Handle<Object> hours, Handle<Object> minutes,
+ Handle<Object> seconds, Handle<Object> milliseconds,
+ Handle<Object> microseconds, Handle<Object> nanoseconds) {
+ const char* method_name = "Temporal.Duration";
+ // 1. If NewTarget is undefined, then
+ if (new_target->IsUndefined()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalDuration);
+ }
+ // 2. Let y be ? ToIntegerThrowOnInfinity(years).
+ Handle<Object> number_years;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_years,
+ ToIntegerThrowOnInfinity(isolate, years),
+ JSTemporalDuration);
+ int64_t y = NumberToInt64(*number_years);
+
+ // 3. Let mo be ? ToIntegerThrowOnInfinity(months).
+ Handle<Object> number_months;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_months,
+ ToIntegerThrowOnInfinity(isolate, months),
+ JSTemporalDuration);
+ int64_t mo = NumberToInt64(*number_months);
+
+ // 4. Let w be ? ToIntegerThrowOnInfinity(weeks).
+ Handle<Object> number_weeks;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_weeks,
+ ToIntegerThrowOnInfinity(isolate, weeks),
+ JSTemporalDuration);
+ int64_t w = NumberToInt64(*number_weeks);
+
+ // 5. Let d be ? ToIntegerThrowOnInfinity(days).
+ Handle<Object> number_days;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_days,
+ ToIntegerThrowOnInfinity(isolate, days),
+ JSTemporalDuration);
+ int64_t d = NumberToInt64(*number_days);
+
+ // 6. Let h be ? ToIntegerThrowOnInfinity(hours).
+ Handle<Object> number_hours;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_hours,
+ ToIntegerThrowOnInfinity(isolate, hours),
+ JSTemporalDuration);
+ int64_t h = NumberToInt64(*number_hours);
+
+ // 7. Let m be ? ToIntegerThrowOnInfinity(minutes).
+ Handle<Object> number_minutes;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_minutes,
+ ToIntegerThrowOnInfinity(isolate, minutes),
+ JSTemporalDuration);
+ int64_t m = NumberToInt64(*number_minutes);
+
+ // 8. Let s be ? ToIntegerThrowOnInfinity(seconds).
+ Handle<Object> number_seconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_seconds,
+ ToIntegerThrowOnInfinity(isolate, seconds),
+ JSTemporalDuration);
+ int64_t s = NumberToInt64(*number_seconds);
+
+ // 9. Let ms be ? ToIntegerThrowOnInfinity(milliseconds).
+ Handle<Object> number_milliseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_milliseconds,
+ ToIntegerThrowOnInfinity(isolate, milliseconds),
+ JSTemporalDuration);
+ int64_t ms = NumberToInt64(*number_milliseconds);
+
+ // 10. Let mis be ? ToIntegerThrowOnInfinity(microseconds).
+ Handle<Object> number_microseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_microseconds,
+ ToIntegerThrowOnInfinity(isolate, microseconds),
+ JSTemporalDuration);
+ int64_t mis = NumberToInt64(*number_microseconds);
+
+ // 11. Let ns be ? ToIntegerThrowOnInfinity(nanoseconds).
+ Handle<Object> number_nanoseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_nanoseconds,
+ ToIntegerThrowOnInfinity(isolate, nanoseconds),
+ JSTemporalDuration);
+ int64_t ns = NumberToInt64(*number_nanoseconds);
+
+ // 12. Return ? CreateTemporalDuration(y, mo, w, d, h, m, s, ms, mis, ns,
+ // NewTarget).
+ return CreateTemporalDuration(isolate, target, new_target, y, mo, w, d, h, m,
+ s, ms, mis, ns);
+}
+
+// #sec-get-temporal.duration.prototype.sign
+MaybeHandle<Smi> JSTemporalDuration::Sign(Isolate* isolate,
+ Handle<JSTemporalDuration> duration) {
+ // 1. Let duration be the this value.
+ // 2. Perform ? RequireInternalSlot(duration,
+ // [[InitializedTemporalDuration]]).
+ // 3. Return ! DurationSign(duration.[[Years]], duration.[[Months]],
+ // duration.[[Weeks]], duration.[[Days]], duration.[[Hours]],
+ // duration.[[Minutes]], duration.[[Seconds]], duration.[[Milliseconds]],
+ // duration.[[Microseconds]], duration.[[Nanoseconds]]).
+ return Handle<Smi>(
+ Smi::FromInt(DurationSign(
+ isolate,
+ {NumberToInt64(duration->years()), NumberToInt64(duration->months()),
+ NumberToInt64(duration->weeks()), NumberToInt64(duration->days()),
+ NumberToInt64(duration->hours()), NumberToInt64(duration->minutes()),
+ NumberToInt64(duration->seconds()),
+ NumberToInt64(duration->milliseconds()),
+ NumberToInt64(duration->microseconds()),
+ NumberToInt64(duration->nanoseconds())})),
+ isolate);
+}
+
+// #sec-get-temporal.duration.prototype.blank
+MaybeHandle<Oddball> JSTemporalDuration::Blank(
+ Isolate* isolate, Handle<JSTemporalDuration> duration) {
+ // 1. Let duration be the this value.
+ // 2. Perform ? RequireInternalSlot(duration,
+ // [[InitializedTemporalDuration]]).
+ // 3. Let sign be ! DurationSign(duration.[[Years]], duration.[[Months]],
+ // duration.[[Weeks]], duration.[[Days]], duration.[[Hours]],
+ // duration.[[Minutes]], duration.[[Seconds]], duration.[[Milliseconds]],
+ // duration.[[Microseconds]], duration.[[Nanoseconds]]).
+ // 4. If sign = 0, return true.
+ // 5. Return false.
+ int32_t sign = DurationSign(
+ isolate,
+ {NumberToInt64(duration->years()), NumberToInt64(duration->months()),
+ NumberToInt64(duration->weeks()), NumberToInt64(duration->days()),
+ NumberToInt64(duration->hours()), NumberToInt64(duration->minutes()),
+ NumberToInt64(duration->seconds()),
+ NumberToInt64(duration->milliseconds()),
+ NumberToInt64(duration->microseconds()),
+ NumberToInt64(duration->nanoseconds())});
+ return sign == 0 ? isolate->factory()->true_value()
+ : isolate->factory()->false_value();
+}
+
+// #sec-temporal.calendar
+MaybeHandle<JSTemporalCalendar> JSTemporalCalendar::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> identifier_obj) {
+ // 1. If NewTarget is undefined, then
+ if (new_target->IsUndefined(isolate)) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromStaticChars(
+ "Temporal.Calendar")),
+ JSTemporalCalendar);
+ }
+ // 2. Set identifier to ? ToString(identifier).
+ Handle<String> identifier;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ Object::ToString(isolate, identifier_obj),
+ JSTemporalCalendar);
+ // 3. If ! IsBuiltinCalendar(id) is false, then
+ if (!IsBuiltinCalendar(isolate, identifier)) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidCalendar, identifier),
+ JSTemporalCalendar);
+ }
+ return CreateTemporalCalendar(isolate, target, new_target, identifier);
+}
+
+// #sec-temporal.calendar.prototype.tostring
+MaybeHandle<String> JSTemporalCalendar::ToString(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ const char* method_name) {
+ return CalendarIdentifier(isolate, calendar->calendar_index());
+}
+
+// #sec-temporal.now.timezone
+MaybeHandle<JSTemporalTimeZone> JSTemporalTimeZone::Now(Isolate* isolate) {
+ return SystemTimeZone(isolate);
+}
+
+// #sec-temporal.timezone
+MaybeHandle<JSTemporalTimeZone> JSTemporalTimeZone::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> identifier_obj) {
+ // 1. If NewTarget is undefined, then
+ if (new_target->IsUndefined(isolate)) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Temporal.TimeZone")),
+ JSTemporalTimeZone);
+ }
+ // 2. Set identifier to ? ToString(identifier).
+ Handle<String> identifier;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ Object::ToString(isolate, identifier_obj),
+ JSTemporalTimeZone);
+ Handle<String> canonical;
+ // 3. If identifier satisfies the syntax of a TimeZoneNumericUTCOffset
+ // (see 13.33), then
+ Maybe<bool> maybe_valid =
+ IsValidTimeZoneNumericUTCOffsetString(isolate, identifier);
+ MAYBE_RETURN(maybe_valid, Handle<JSTemporalTimeZone>());
+
+ if (maybe_valid.FromJust()) {
+ // a. Let offsetNanoseconds be ? ParseTimeZoneOffsetString(identifier).
+ Maybe<int64_t> maybe_offset_nanoseconds =
+ ParseTimeZoneOffsetString(isolate, identifier);
+ MAYBE_RETURN(maybe_offset_nanoseconds, Handle<JSTemporalTimeZone>());
+ int64_t offset_nanoseconds = maybe_offset_nanoseconds.FromJust();
+
+ // b. Let canonical be ! FormatTimeZoneOffsetString(offsetNanoseconds).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, canonical,
+ FormatTimeZoneOffsetString(isolate, offset_nanoseconds),
+ JSTemporalTimeZone);
+ } else {
+ // 4. Else,
+ // a. If ! IsValidTimeZoneName(identifier) is false, then
+ if (!IsValidTimeZoneName(isolate, identifier)) {
+ // i. Throw a RangeError exception.
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeZone, identifier),
+ JSTemporalTimeZone);
+ }
+ // b. Let canonical be ! CanonicalizeTimeZoneName(identifier).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, canonical,
+ CanonicalizeTimeZoneName(isolate, identifier),
+ JSTemporalTimeZone);
+ }
+ // 5. Return ? CreateTemporalTimeZone(canonical, NewTarget).
+ return CreateTemporalTimeZone(isolate, target, new_target, canonical);
+}
+
+// #sec-temporal.timezone.prototype.tostring
+MaybeHandle<Object> JSTemporalTimeZone::ToString(
+ Isolate* isolate, Handle<JSTemporalTimeZone> time_zone,
+ const char* method_name) {
+ return time_zone->id(isolate);
+}
+
+int32_t JSTemporalTimeZone::time_zone_index() const {
+ DCHECK(is_offset() == false);
+ return offset_milliseconds_or_time_zone_index();
+}
+
+int64_t JSTemporalTimeZone::offset_nanoseconds() const {
+ TEMPORAL_ENTER_FUNC();
+ DCHECK(is_offset());
+ return 1000000L * offset_milliseconds() + offset_sub_milliseconds();
+}
+
+void JSTemporalTimeZone::set_offset_nanoseconds(int64_t ns) {
+ this->set_offset_milliseconds(static_cast<int32_t>(ns / 1000000L));
+ this->set_offset_sub_milliseconds(static_cast<int32_t>(ns % 1000000L));
+}
+
+MaybeHandle<String> JSTemporalTimeZone::id(Isolate* isolate) const {
+ if (is_offset()) {
+ return FormatTimeZoneOffsetString(isolate, offset_nanoseconds());
+ }
+#ifdef V8_INTL_SUPPORT
+ std::string id =
+ Intl::TimeZoneIdFromIndex(offset_milliseconds_or_time_zone_index());
+ return isolate->factory()->NewStringFromAsciiChecked(id.c_str());
+#else // V8_INTL_SUPPORT
+ DCHECK_EQ(0, offset_milliseconds_or_time_zone_index());
+ return isolate->factory()->UTC_string();
+#endif // V8_INTL_SUPPORT
+}
+
+MaybeHandle<JSTemporalPlainDate> JSTemporalPlainDate::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> iso_year_obj, Handle<Object> iso_month_obj,
+ Handle<Object> iso_day_obj, Handle<Object> calendar_like) {
+ const char* method_name = "Temporal.PlainDate";
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (new_target->IsUndefined()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainDate);
+ }
+#define TO_INT_THROW_ON_INFTY(name, T) \
+ int32_t name; \
+ { \
+ Handle<Object> number_##name; \
+ /* x. Let name be ? ToIntegerThrowOnInfinity(name). */ \
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_##name, \
+ ToIntegerThrowOnInfinity(isolate, name##_obj), \
+ T); \
+ name = NumberToInt32(*number_##name); \
+ }
+
+ TO_INT_THROW_ON_INFTY(iso_year, JSTemporalPlainDate);
+ TO_INT_THROW_ON_INFTY(iso_month, JSTemporalPlainDate);
+ TO_INT_THROW_ON_INFTY(iso_day, JSTemporalPlainDate);
+
+ // 8. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_like, method_name),
+ JSTemporalPlainDate);
+
+ // 9. Return ? CreateTemporalDate(y, m, d, calendar, NewTarget).
+ return CreateTemporalDate(isolate, target, new_target, iso_year, iso_month,
+ iso_day, calendar);
+}
+
+// #sec-temporal.plaindate.prototype.withcalendar
+MaybeHandle<JSTemporalPlainDate> JSTemporalPlainDate::WithCalendar(
+ Isolate* isolate, Handle<JSTemporalPlainDate> temporal_date,
+ Handle<Object> calendar_like) {
+ const char* method_name = "Temporal.PlainDate.prototype.withCalendar";
+ // 1. Let temporalDate be the this value.
+ // 2. Perform ? RequireInternalSlot(temporalDate,
+ // [[InitializedTemporalDate]]).
+ // 3. Let calendar be ? ToTemporalCalendar(calendar).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ temporal::ToTemporalCalendar(isolate, calendar_like, method_name),
+ JSTemporalPlainDate);
+ // 4. Return ? CreateTemporalDate(temporalDate.[[ISOYear]],
+ // temporalDate.[[ISOMonth]], temporalDate.[[ISODay]], calendar).
+ return CreateTemporalDate(isolate, temporal_date->iso_year(),
+ temporal_date->iso_month(),
+ temporal_date->iso_day(), calendar);
+}
+
+// #sec-temporal.now.plaindate
+MaybeHandle<JSTemporalPlainDate> JSTemporalPlainDate::Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.plainDate";
+ // 1. Let dateTime be ? SystemDateTime(temporalTimeZoneLike, calendarLike).
+ Handle<JSTemporalPlainDateTime> date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, date_time,
+ SystemDateTime(isolate, temporal_time_zone_like,
+ calendar_like, method_name),
+ JSTemporalPlainDate);
+ // 2. Return ! CreateTemporalDate(dateTime.[[ISOYear]], dateTime.[[ISOMonth]],
+ // dateTime.[[ISODay]], dateTime.[[Calendar]]).
+ return CreateTemporalDate(isolate, date_time->iso_year(),
+ date_time->iso_month(), date_time->iso_day(),
+ Handle<JSReceiver>(date_time->calendar(), isolate));
+}
+
+// #sec-temporal.now.plaindateiso
+MaybeHandle<JSTemporalPlainDate> JSTemporalPlainDate::NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.plainDateISO";
+ // 1. Let calendar be ! GetISO8601Calendar().
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalPlainDate);
+ // 2. Let dateTime be ? SystemDateTime(temporalTimeZoneLike, calendar).
+ Handle<JSTemporalPlainDateTime> date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_time,
+ SystemDateTime(isolate, temporal_time_zone_like, calendar, method_name),
+ JSTemporalPlainDate);
+ // 3. Return ! CreateTemporalDate(dateTime.[[ISOYear]], dateTime.[[ISOMonth]],
+ // dateTime.[[ISODay]], dateTime.[[Calendar]]).
+ return CreateTemporalDate(isolate, date_time->iso_year(),
+ date_time->iso_month(), date_time->iso_day(),
+ Handle<JSReceiver>(date_time->calendar(), isolate));
+}
+
+// #sec-temporal.plaindate.from
+MaybeHandle<JSTemporalPlainDate> JSTemporalPlainDate::From(
+ Isolate* isolate, Handle<Object> item, Handle<Object> options_obj) {
+ const char* method_name = "Temporal.PlainDate.from";
+ // 1. Set options to ? GetOptionsObject(options).
+ Handle<JSReceiver> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, GetOptionsObject(isolate, options_obj, method_name),
+ JSTemporalPlainDate);
+ // 2. If Type(item) is Object and item has an [[InitializedTemporalDate]]
+ // internal slot, then
+ if (item->IsJSTemporalPlainDate()) {
+ // a. Perform ? ToTemporalOverflow(options).
+ Maybe<ShowOverflow> maybe_overflow =
+ ToTemporalOverflow(isolate, options, method_name);
+ MAYBE_RETURN(maybe_overflow, Handle<JSTemporalPlainDate>());
+ // b. Return ? CreateTemporalDate(item.[[ISOYear]], item.[[ISOMonth]],
+ // item.[[ISODay]], item.[[Calendar]]).
+ Handle<JSTemporalPlainDate> date = Handle<JSTemporalPlainDate>::cast(item);
+ return CreateTemporalDate(isolate, date->iso_year(), date->iso_month(),
+ date->iso_day(),
+ Handle<JSReceiver>(date->calendar(), isolate));
+ }
+ // 3. Return ? ToTemporalDate(item, options).
+ return ToTemporalDate(isolate, item, options, method_name);
+}
+
+#define DEFINE_INT_FIELD(obj, str, field, item) \
+ CHECK(JSReceiver::CreateDataProperty( \
+ isolate, obj, factory->str##_string(), \
+ Handle<Smi>(Smi::FromInt(item->field()), isolate), \
+ Just(kThrowOnError)) \
+ .FromJust());
+
+// #sec-temporal.plaindate.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalPlainDate::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainDate> temporal_date) {
+ Factory* factory = isolate->factory();
+ // 1. Let temporalDate be the this value.
+ // 2. Perform ? RequireInternalSlot(temporalDate,
+ // [[InitializedTemporalDate]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 4. Perform ! CreateDataPropertyOrThrow(fields, "calendar",
+ // temporalDate.[[Calendar]]).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, fields, factory->calendar_string(),
+ Handle<JSReceiver>(temporal_date->calendar(), isolate),
+ Just(kThrowOnError))
+ .FromJust());
+ // 5. Perform ! CreateDataPropertyOrThrow(fields, "isoDay",
+ // 𝔽(temporalDate.[[ISODay]])).
+ // 6. Perform ! CreateDataPropertyOrThrow(fields, "isoMonth",
+ // 𝔽(temporalDate.[[ISOMonth]])).
+ // 7. Perform ! CreateDataPropertyOrThrow(fields, "isoYear",
+ // 𝔽(temporalDate.[[ISOYear]])).
+ DEFINE_INT_FIELD(fields, isoDay, iso_day, temporal_date)
+ DEFINE_INT_FIELD(fields, isoMonth, iso_month, temporal_date)
+ DEFINE_INT_FIELD(fields, isoYear, iso_year, temporal_date)
+ // 8. Return fields.
+ return fields;
+}
+
+// #sec-temporal-createtemporaldatetime
+MaybeHandle<JSTemporalPlainDateTime> JSTemporalPlainDateTime::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> iso_year_obj, Handle<Object> iso_month_obj,
+ Handle<Object> iso_day_obj, Handle<Object> hour_obj,
+ Handle<Object> minute_obj, Handle<Object> second_obj,
+ Handle<Object> millisecond_obj, Handle<Object> microsecond_obj,
+ Handle<Object> nanosecond_obj, Handle<Object> calendar_like) {
+ const char* method_name = "Temporal.PlainDateTime";
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (new_target->IsUndefined()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainDateTime);
+ }
+
+ TO_INT_THROW_ON_INFTY(iso_year, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(iso_month, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(iso_day, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(hour, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(minute, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(second, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(millisecond, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(microsecond, JSTemporalPlainDateTime);
+ TO_INT_THROW_ON_INFTY(nanosecond, JSTemporalPlainDateTime);
+
+ // 20. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_like, method_name),
+ JSTemporalPlainDateTime);
+
+ // 21. Return ? CreateTemporalDateTime(isoYear, isoMonth, isoDay, hour,
+ // minute, second, millisecond, microsecond, nanosecond, calendar, NewTarget).
+ return CreateTemporalDateTime(isolate, target, new_target, iso_year,
+ iso_month, iso_day, hour, minute, second,
+ millisecond, microsecond, nanosecond, calendar);
+}
+
+// #sec-temporal.plaindatetime.prototype.withcalendar
+MaybeHandle<JSTemporalPlainDateTime> JSTemporalPlainDateTime::WithCalendar(
+ Isolate* isolate, Handle<JSTemporalPlainDateTime> date_time,
+ Handle<Object> calendar_like) {
+ const char* method_name = "Temporal.PlainDateTime.prototype.withCalendar";
+ // 1. Let temporalDateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(temporalDateTime,
+ // [[InitializedTemporalDateTime]]).
+ // 3. Let calendar be ? ToTemporalCalendar(calendar).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ temporal::ToTemporalCalendar(isolate, calendar_like, method_name),
+ JSTemporalPlainDateTime);
+ // 4. Return ? CreateTemporalDateTime(temporalDateTime.[[ISOYear]],
+ // temporalDateTime.[[ISOMonth]], temporalDateTime.[[ISODay]],
+ // temporalDateTime.[[ISOHour]], temporalDateTime.[[ISOMinute]],
+ // temporalDateTime.[[ISOSecond]], temporalDateTime.[[ISOMillisecond]],
+ // temporalDateTime.[[ISOMicrosecond]], temporalDateTime.[[ISONanosecond]],
+ // calendar).
+ return temporal::CreateTemporalDateTime(
+ isolate, date_time->iso_year(), date_time->iso_month(),
+ date_time->iso_day(), date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond(), calendar);
+}
+
+// #sec-temporal.now.plaindatetime
+MaybeHandle<JSTemporalPlainDateTime> JSTemporalPlainDateTime::Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.plainDateTime";
+ // 1. Return ? SystemDateTime(temporalTimeZoneLike, calendarLike).
+ return SystemDateTime(isolate, temporal_time_zone_like, calendar_like,
+ method_name);
+}
+
+// #sec-temporal.now.plaindatetimeiso
+MaybeHandle<JSTemporalPlainDateTime> JSTemporalPlainDateTime::NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.plainDateTimeISO";
+ // 1. Let calendar be ! GetISO8601Calendar().
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalPlainDateTime);
+ // 2. Return ? SystemDateTime(temporalTimeZoneLike, calendar).
+ return SystemDateTime(isolate, temporal_time_zone_like, calendar,
+ method_name);
+}
+
+// #sec-temporal.plaindatetime.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalPlainDateTime::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainDateTime> date_time) {
+ Factory* factory = isolate->factory();
+ // 1. Let dateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(temporalDateTime,
+ // [[InitializedTemporalDateTime]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 4. Perform ! CreateDataPropertyOrThrow(fields, "calendar",
+ // temporalTime.[[Calendar]]).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, fields, factory->calendar_string(),
+ Handle<JSReceiver>(date_time->calendar(), isolate),
+ Just(kThrowOnError))
+ .FromJust());
+ // 5. Perform ! CreateDataPropertyOrThrow(fields, "isoDay",
+ // 𝔽(dateTime.[[ISODay]])).
+ // 6. Perform ! CreateDataPropertyOrThrow(fields, "isoHour",
+ // 𝔽(temporalTime.[[ISOHour]])).
+ // 7. Perform ! CreateDataPropertyOrThrow(fields, "isoMicrosecond",
+ // 𝔽(temporalTime.[[ISOMicrosecond]])).
+ // 8. Perform ! CreateDataPropertyOrThrow(fields, "isoMillisecond",
+ // 𝔽(temporalTime.[[ISOMillisecond]])).
+ // 9. Perform ! CreateDataPropertyOrThrow(fields, "isoMinute",
+ // 𝔽(temporalTime.[[ISOMinute]])).
+ // 10. Perform ! CreateDataPropertyOrThrow(fields, "isoMonth",
+ // 𝔽(temporalTime.[[ISOMonth]])).
+ // 11. Perform ! CreateDataPropertyOrThrow(fields, "isoNanosecond",
+ // 𝔽(temporalTime.[[ISONanosecond]])).
+ // 12. Perform ! CreateDataPropertyOrThrow(fields, "isoSecond",
+ // 𝔽(temporalTime.[[ISOSecond]])).
+ // 13. Perform ! CreateDataPropertyOrThrow(fields, "isoYear",
+ // 𝔽(temporalTime.[[ISOYear]])).
+ DEFINE_INT_FIELD(fields, isoDay, iso_day, date_time)
+ DEFINE_INT_FIELD(fields, isoHour, iso_hour, date_time)
+ DEFINE_INT_FIELD(fields, isoMicrosecond, iso_microsecond, date_time)
+ DEFINE_INT_FIELD(fields, isoMillisecond, iso_millisecond, date_time)
+ DEFINE_INT_FIELD(fields, isoMinute, iso_minute, date_time)
+ DEFINE_INT_FIELD(fields, isoMonth, iso_month, date_time)
+ DEFINE_INT_FIELD(fields, isoNanosecond, iso_nanosecond, date_time)
+ DEFINE_INT_FIELD(fields, isoSecond, iso_second, date_time)
+ DEFINE_INT_FIELD(fields, isoYear, iso_year, date_time)
+ // 14. Return fields.
+ return fields;
+}
+
+// #sec-temporal.plainmonthday
+MaybeHandle<JSTemporalPlainMonthDay> JSTemporalPlainMonthDay::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> iso_month_obj, Handle<Object> iso_day_obj,
+ Handle<Object> calendar_like, Handle<Object> reference_iso_year_obj) {
+ const char* method_name = "Temporal.PlainMonthDay";
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (new_target->IsUndefined()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainMonthDay);
+ }
+
+ // 3. Let m be ? ToIntegerThrowOnInfinity(isoMonth).
+ TO_INT_THROW_ON_INFTY(iso_month, JSTemporalPlainMonthDay);
+ // 5. Let d be ? ToIntegerThrowOnInfinity(isoDay).
+ TO_INT_THROW_ON_INFTY(iso_day, JSTemporalPlainMonthDay);
+ // 7. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_like, method_name),
+ JSTemporalPlainMonthDay);
+
+ // 2. If referenceISOYear is undefined, then
+ // a. Set referenceISOYear to 1972𝔽.
+ // ...
+ // 8. Let ref be ? ToIntegerThrowOnInfinity(referenceISOYear).
+ int32_t ref = 1972;
+ if (!reference_iso_year_obj->IsUndefined()) {
+ TO_INT_THROW_ON_INFTY(reference_iso_year, JSTemporalPlainMonthDay);
+ ref = reference_iso_year;
+ }
+
+ // 10. Return ? CreateTemporalMonthDay(y, m, calendar, ref, NewTarget).
+ return CreateTemporalMonthDay(isolate, target, new_target, iso_month, iso_day,
+ calendar, ref);
+}
+
+// #sec-temporal.plainmonthday.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalPlainMonthDay::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainMonthDay> month_day) {
+ Factory* factory = isolate->factory();
+ // 1. Let monthDay be the this value.
+ // 2. Perform ? RequireInternalSlot(monthDay,
+ // [[InitializedTemporalMonthDay]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields = factory->NewJSObject(isolate->object_function());
+ // 4. Perform ! CreateDataPropertyOrThrow(fields, "calendar",
+ // montyDay.[[Calendar]]).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, fields, factory->calendar_string(),
+ Handle<JSReceiver>(month_day->calendar(), isolate),
+ Just(kThrowOnError))
+ .FromJust());
+
+ // 5. Perform ! CreateDataPropertyOrThrow(fields, "isoDay",
+ // 𝔽(montyDay.[[ISODay]])).
+ // 6. Perform ! CreateDataPropertyOrThrow(fields, "isoMonth",
+ // 𝔽(montyDay.[[ISOMonth]])).
+ // 7. Perform ! CreateDataPropertyOrThrow(fields, "isoYear",
+ // 𝔽(montyDay.[[ISOYear]])).
+ DEFINE_INT_FIELD(fields, isoDay, iso_day, month_day)
+ DEFINE_INT_FIELD(fields, isoMonth, iso_month, month_day)
+ DEFINE_INT_FIELD(fields, isoYear, iso_year, month_day)
+ // 8. Return fields.
+ return fields;
+}
+
+MaybeHandle<JSTemporalPlainYearMonth> JSTemporalPlainYearMonth::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> iso_year_obj, Handle<Object> iso_month_obj,
+ Handle<Object> calendar_like, Handle<Object> reference_iso_day_obj) {
+ const char* method_name = "Temporal.PlainYearMonth";
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (new_target->IsUndefined()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainYearMonth);
+ }
+ // 7. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ // 10. Return ? CreateTemporalYearMonth(y, m, calendar, ref, NewTarget).
+
+ // 3. Let y be ? ToIntegerThrowOnInfinity(isoYear).
+ TO_INT_THROW_ON_INFTY(iso_year, JSTemporalPlainYearMonth);
+ // 5. Let m be ? ToIntegerThrowOnInfinity(isoMonth).
+ TO_INT_THROW_ON_INFTY(iso_month, JSTemporalPlainYearMonth);
+ // 7. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_like, method_name),
+ JSTemporalPlainYearMonth);
+
+ // 2. If referenceISODay is undefined, then
+ // a. Set referenceISODay to 1𝔽.
+ // ...
+ // 8. Let ref be ? ToIntegerThrowOnInfinity(referenceISODay).
+ int32_t ref = 1;
+ if (!reference_iso_day_obj->IsUndefined()) {
+ TO_INT_THROW_ON_INFTY(reference_iso_day, JSTemporalPlainYearMonth);
+ ref = reference_iso_day;
+ }
+
+ // 10. Return ? CreateTemporalYearMonth(y, m, calendar, ref, NewTarget).
+ return CreateTemporalYearMonth(isolate, target, new_target, iso_year,
+ iso_month, calendar, ref);
+}
+
+// #sec-temporal.plainyearmonth.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalPlainYearMonth::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainYearMonth> year_month) {
+ Factory* factory = isolate->factory();
+ // 1. Let yearMonth be the this value.
+ // 2. Perform ? RequireInternalSlot(yearMonth,
+ // [[InitializedTemporalYearMonth]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 4. Perform ! CreateDataPropertyOrThrow(fields, "calendar",
+ // yearMonth.[[Calendar]]).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, fields, factory->calendar_string(),
+ Handle<JSReceiver>(year_month->calendar(), isolate),
+ Just(kThrowOnError))
+ .FromJust());
+ // 5. Perform ! CreateDataPropertyOrThrow(fields, "isoDay",
+ // 𝔽(yearMonth.[[ISODay]])).
+ // 6. Perform ! CreateDataPropertyOrThrow(fields, "isoMonth",
+ // 𝔽(yearMonth.[[ISOMonth]])).
+ // 7. Perform ! CreateDataPropertyOrThrow(fields, "isoYear",
+ // 𝔽(yearMonth.[[ISOYear]])).
+ DEFINE_INT_FIELD(fields, isoDay, iso_day, year_month)
+ DEFINE_INT_FIELD(fields, isoMonth, iso_month, year_month)
+ DEFINE_INT_FIELD(fields, isoYear, iso_year, year_month)
+ // 8. Return fields.
+ return fields;
+}
+
+// #sec-temporal-plaintime-constructor
+MaybeHandle<JSTemporalPlainTime> JSTemporalPlainTime::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> hour_obj, Handle<Object> minute_obj,
+ Handle<Object> second_obj, Handle<Object> millisecond_obj,
+ Handle<Object> microsecond_obj, Handle<Object> nanosecond_obj) {
+ const char* method_name = "Temporal.PlainTime";
+ // 1. If NewTarget is undefined, then
+ // a. Throw a TypeError exception.
+ if (new_target->IsUndefined()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainTime);
+ }
+
+ TO_INT_THROW_ON_INFTY(hour, JSTemporalPlainTime);
+ TO_INT_THROW_ON_INFTY(minute, JSTemporalPlainTime);
+ TO_INT_THROW_ON_INFTY(second, JSTemporalPlainTime);
+ TO_INT_THROW_ON_INFTY(millisecond, JSTemporalPlainTime);
+ TO_INT_THROW_ON_INFTY(microsecond, JSTemporalPlainTime);
+ TO_INT_THROW_ON_INFTY(nanosecond, JSTemporalPlainTime);
+
+ // 14. Return ? CreateTemporalTime(hour, minute, second, millisecond,
+ // microsecond, nanosecond, NewTarget).
+ return CreateTemporalTime(isolate, target, new_target, hour, minute, second,
+ millisecond, microsecond, nanosecond);
+}
+
+// #sec-temporal.now.plaintimeiso
+MaybeHandle<JSTemporalPlainTime> JSTemporalPlainTime::NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.plainTimeISO";
+ // 1. Let calendar be ! GetISO8601Calendar().
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalPlainTime);
+ // 2. Let dateTime be ? SystemDateTime(temporalTimeZoneLike, calendar).
+ Handle<JSTemporalPlainDateTime> date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_time,
+ SystemDateTime(isolate, temporal_time_zone_like, calendar, method_name),
+ JSTemporalPlainTime);
+ // 3. Return ! CreateTemporalTime(dateTime.[[ISOHour]],
+ // dateTime.[[ISOMinute]], dateTime.[[ISOSecond]],
+ // dateTime.[[ISOMillisecond]], dateTime.[[ISOMicrosecond]],
+ // dateTime.[[ISONanosecond]]).
+ return CreateTemporalTime(
+ isolate, date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond());
+}
+
+// #sec-temporal.plaintime.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalPlainTime::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainTime> temporal_time) {
+ Factory* factory = isolate->factory();
+ // 1. Let temporalTime be the this value.
+ // 2. Perform ? RequireInternalSlot(temporalTime,
+ // [[InitializedTemporalTime]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 4. Perform ! CreateDataPropertyOrThrow(fields, "calendar",
+ // temporalTime.[[Calendar]]).
+ Handle<JSTemporalCalendar> iso8601_calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, iso8601_calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalPlainTime);
+ CHECK(JSReceiver::CreateDataProperty(isolate, fields,
+ factory->calendar_string(),
+ iso8601_calendar, Just(kThrowOnError))
+ .FromJust());
+
+ // 5. Perform ! CreateDataPropertyOrThrow(fields, "isoHour",
+ // 𝔽(temporalTime.[[ISOHour]])).
+ // 6. Perform ! CreateDataPropertyOrThrow(fields, "isoMicrosecond",
+ // 𝔽(temporalTime.[[ISOMicrosecond]])).
+ // 7. Perform ! CreateDataPropertyOrThrow(fields, "isoMillisecond",
+ // 𝔽(temporalTime.[[ISOMillisecond]])).
+ // 8. Perform ! CreateDataPropertyOrThrow(fields, "isoMinute",
+ // 𝔽(temporalTime.[[ISOMinute]])).
+ // 9. Perform ! CreateDataPropertyOrThrow(fields, "isoNanosecond",
+ // 𝔽(temporalTime.[[ISONanosecond]])).
+ // 10. Perform ! CreateDataPropertyOrThrow(fields, "isoSecond",
+ // 𝔽(temporalTime.[[ISOSecond]])).
+ DEFINE_INT_FIELD(fields, isoHour, iso_hour, temporal_time)
+ DEFINE_INT_FIELD(fields, isoMicrosecond, iso_microsecond, temporal_time)
+ DEFINE_INT_FIELD(fields, isoMillisecond, iso_millisecond, temporal_time)
+ DEFINE_INT_FIELD(fields, isoMinute, iso_minute, temporal_time)
+ DEFINE_INT_FIELD(fields, isoNanosecond, iso_nanosecond, temporal_time)
+ DEFINE_INT_FIELD(fields, isoSecond, iso_second, temporal_time)
+ // 11. Return fields.
+ return fields;
+}
+
+// #sec-temporal.zoneddatetime
+MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> epoch_nanoseconds_obj, Handle<Object> time_zone_like,
+ Handle<Object> calendar_like) {
+ const char* method_name = "Temporal.ZonedDateTime";
+ // 1. If NewTarget is undefined, then
+ if (new_target->IsUndefined()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalZonedDateTime);
+ }
+ // 2. Set epochNanoseconds to ? ToBigInt(epochNanoseconds).
+ Handle<BigInt> epoch_nanoseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, epoch_nanoseconds,
+ BigInt::FromObject(isolate, epoch_nanoseconds_obj),
+ JSTemporalZonedDateTime);
+ // 3. If ! IsValidEpochNanoseconds(epochNanoseconds) is false, throw a
+ // RangeError exception.
+ if (!IsValidEpochNanoseconds(isolate, epoch_nanoseconds)) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalZonedDateTime);
+ }
+
+ // 4. Let timeZone be ? ToTemporalTimeZone(timeZoneLike).
+ Handle<JSReceiver> time_zone;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, time_zone,
+ temporal::ToTemporalTimeZone(isolate, time_zone_like, method_name),
+ JSTemporalZonedDateTime);
+
+ // 5. Let calendar be ? ToTemporalCalendarWithISODefault(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ ToTemporalCalendarWithISODefault(isolate, calendar_like, method_name),
+ JSTemporalZonedDateTime);
+
+ // 6. Return ? CreateTemporalZonedDateTime(epochNanoseconds, timeZone,
+ // calendar, NewTarget).
+ return CreateTemporalZonedDateTime(isolate, target, new_target,
+ epoch_nanoseconds, time_zone, calendar);
+}
+
+// #sec-temporal.zoneddatetime.prototype.withcalendar
+MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::WithCalendar(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time,
+ Handle<Object> calendar_like) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.ZonedDateTime.prototype.withCalendar";
+ // 1. Let zonedDateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(zonedDateTime,
+ // [[InitializedTemporalZonedDateTime]]).
+ // 3. Let calendar be ? ToTemporalCalendar(calendarLike).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ temporal::ToTemporalCalendar(isolate, calendar_like, method_name),
+ JSTemporalZonedDateTime);
+
+ // 4. Return ? CreateTemporalZonedDateTime(zonedDateTime.[[Nanoseconds]],
+ // zonedDateTime.[[TimeZone]], calendar).
+ Handle<BigInt> nanoseconds = handle(zoned_date_time->nanoseconds(), isolate);
+ Handle<JSReceiver> time_zone = handle(zoned_date_time->time_zone(), isolate);
+ return CreateTemporalZonedDateTime(isolate, nanoseconds, time_zone, calendar);
+}
+
+// #sec-temporal.zoneddatetime.prototype.withtimezone
+MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::WithTimeZone(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time,
+ Handle<Object> time_zone_like) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.ZonedDateTime.prototype.withTimeZone";
+ // 1. Let zonedDateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(zonedDateTime,
+ // [[InitializedTemporalZonedDateTime]]).
+ // 3. Let timeZone be ? ToTemporalTimeZone(timeZoneLike).
+ Handle<JSReceiver> time_zone;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, time_zone,
+ temporal::ToTemporalTimeZone(isolate, time_zone_like, method_name),
+ JSTemporalZonedDateTime);
+
+ // 4. Return ? CreateTemporalZonedDateTime(zonedDateTime.[[Nanoseconds]],
+ // timeZone, zonedDateTime.[[Calendar]]).
+ Handle<BigInt> nanoseconds =
+ Handle<BigInt>(zoned_date_time->nanoseconds(), isolate);
+ Handle<JSReceiver> calendar =
+ Handle<JSReceiver>(zoned_date_time->calendar(), isolate);
+ return CreateTemporalZonedDateTime(isolate, nanoseconds, time_zone, calendar);
+}
+
+// #sec-temporal.now.zoneddatetime
+MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like) {
+ const char* method_name = "Temporal.Now.zonedDateTime";
+ // 1. Return ? SystemZonedDateTime(temporalTimeZoneLike, calendarLike).
+ return SystemZonedDateTime(isolate, temporal_time_zone_like, calendar_like,
+ method_name);
+}
+
+// #sec-temporal.now.zoneddatetimeiso
+MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.Now.zonedDateTimeISO";
+ // 1. Let calendar be ! GetISO8601Calendar().
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, calendar,
+ temporal::GetISO8601Calendar(isolate),
+ JSTemporalZonedDateTime);
+ // 2. Return ? SystemZonedDateTime(temporalTimeZoneLike, calendar).
+ return SystemZonedDateTime(isolate, temporal_time_zone_like, calendar,
+ method_name);
+}
+
+// #sec-temporal.zoneddatetime.prototype.getisofields
+MaybeHandle<JSReceiver> JSTemporalZonedDateTime::GetISOFields(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.ZonedDateTime.prototype.getISOFields";
+ Factory* factory = isolate->factory();
+ // 1. Let zonedDateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(zonedDateTime,
+ // [[InitializedTemporalZonedDateTime]]).
+ // 3. Let fields be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> fields =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // 4. Let timeZone be zonedDateTime.[[TimeZone]].
+ Handle<JSReceiver> time_zone =
+ Handle<JSReceiver>(zoned_date_time->time_zone(), isolate);
+ // 5. Let instant be ? CreateTemporalInstant(zonedDateTime.[[Nanoseconds]]).
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instant,
+ temporal::CreateTemporalInstant(
+ isolate, Handle<BigInt>(zoned_date_time->nanoseconds(), isolate)),
+ JSReceiver);
+
+ // 6. Let calendar be zonedDateTime.[[Calendar]].
+ Handle<JSReceiver> calendar =
+ Handle<JSReceiver>(zoned_date_time->calendar(), isolate);
+ // 7. Let dateTime be ? BuiltinTimeZoneGetPlainDateTimeFor(timeZone,
+ // instant, calendar).
+ Handle<JSTemporalPlainDateTime> date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(isolate, time_zone, instant,
+ calendar, method_name),
+ JSReceiver);
+ // 8. Let offset be ? BuiltinTimeZoneGetOffsetStringFor(timeZone, instant).
+ Handle<String> offset;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, offset,
+ BuiltinTimeZoneGetOffsetStringFor(
+ isolate, time_zone, instant, method_name),
+ JSReceiver);
+
+#define DEFINE_STRING_FIELD(obj, str, field) \
+ CHECK(JSReceiver::CreateDataProperty(isolate, obj, factory->str##_string(), \
+ field, Just(kThrowOnError)) \
+ .FromJust());
+
+ // 9. Perform ! CreateDataPropertyOrThrow(fields, "calendar", calendar).
+ // 10. Perform ! CreateDataPropertyOrThrow(fields, "isoDay",
+ // 𝔽(dateTime.[[ISODay]])).
+ // 11. Perform ! CreateDataPropertyOrThrow(fields, "isoHour",
+ // 𝔽(temporalTime.[[ISOHour]])).
+ // 12. Perform ! CreateDataPropertyOrThrow(fields, "isoMicrosecond",
+ // 𝔽(temporalTime.[[ISOMicrosecond]])).
+ // 13. Perform ! CreateDataPropertyOrThrow(fields, "isoMillisecond",
+ // 𝔽(temporalTime.[[ISOMillisecond]])).
+ // 14. Perform ! CreateDataPropertyOrThrow(fields, "isoMinute",
+ // 𝔽(temporalTime.[[ISOMinute]])).
+ // 15. Perform ! CreateDataPropertyOrThrow(fields, "isoMonth",
+ // 𝔽(temporalTime.[[ISOMonth]])).
+ // 16. Perform ! CreateDataPropertyOrThrow(fields, "isoNanosecond",
+ // 𝔽(temporalTime.[[ISONanosecond]])).
+ // 17. Perform ! CreateDataPropertyOrThrow(fields, "isoSecond",
+ // 𝔽(temporalTime.[[ISOSecond]])).
+ // 18. Perform ! CreateDataPropertyOrThrow(fields, "isoYear",
+ // 𝔽(temporalTime.[[ISOYear]])).
+ // 19. Perform ! CreateDataPropertyOrThrow(fields, "offset", offset).
+ // 20. Perform ! CreateDataPropertyOrThrow(fields, "timeZone", timeZone).
+ DEFINE_STRING_FIELD(fields, calendar, calendar)
+ DEFINE_INT_FIELD(fields, isoDay, iso_day, date_time)
+ DEFINE_INT_FIELD(fields, isoHour, iso_hour, date_time)
+ DEFINE_INT_FIELD(fields, isoMicrosecond, iso_microsecond, date_time)
+ DEFINE_INT_FIELD(fields, isoMillisecond, iso_millisecond, date_time)
+ DEFINE_INT_FIELD(fields, isoMinute, iso_minute, date_time)
+ DEFINE_INT_FIELD(fields, isoMonth, iso_month, date_time)
+ DEFINE_INT_FIELD(fields, isoNanosecond, iso_nanosecond, date_time)
+ DEFINE_INT_FIELD(fields, isoSecond, iso_second, date_time)
+ DEFINE_INT_FIELD(fields, isoYear, iso_year, date_time)
+ DEFINE_STRING_FIELD(fields, offset, offset)
+ DEFINE_STRING_FIELD(fields, timeZone, time_zone)
+ // 21. Return fields.
+ return fields;
+}
+
+// #sec-temporal.now.instant
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::Now(Isolate* isolate) {
+ TEMPORAL_ENTER_FUNC();
+ return SystemInstant(isolate);
+}
+
+// #sec-temporal.instant
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::Constructor(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<Object> epoch_nanoseconds_obj) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.Instant";
+ // 1. If NewTarget is undefined, then
+ if (new_target->IsUndefined()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalInstant);
+ }
+ // 2. Let epochNanoseconds be ? ToBigInt(epochNanoseconds).
+ Handle<BigInt> epoch_nanoseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, epoch_nanoseconds,
+ BigInt::FromObject(isolate, epoch_nanoseconds_obj),
+ JSTemporalInstant);
+ // 3. If ! IsValidEpochNanoseconds(epochNanoseconds) is false, throw a
+ // RangeError exception.
+ if (!IsValidEpochNanoseconds(isolate, epoch_nanoseconds)) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ // 4. Return ? CreateTemporalInstant(epochNanoseconds, NewTarget).
+ return temporal::CreateTemporalInstant(isolate, target, new_target,
+ epoch_nanoseconds);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/js-temporal-objects.h b/deps/v8/src/objects/js-temporal-objects.h
index 07aa843f32..8650584e15 100644
--- a/deps/v8/src/objects/js-temporal-objects.h
+++ b/deps/v8/src/objects/js-temporal-objects.h
@@ -40,9 +40,23 @@ namespace internal {
UNIMPLEMENTED(); \
}
+class JSTemporalPlainDate;
+class JSTemporalPlainMonthDay;
+class JSTemporalPlainYearMonth;
+
class JSTemporalCalendar
: public TorqueGeneratedJSTemporalCalendar<JSTemporalCalendar, JSObject> {
public:
+ // #sec-temporal.calendar
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalCalendar> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> identifier);
+
+ // #sec-temporal.calendar.prototype.tostring
+ static MaybeHandle<String> ToString(Isolate* isolate,
+ Handle<JSTemporalCalendar> calendar,
+ const char* method);
+
DECL_PRINTER(JSTemporalCalendar)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_CALENDAR_FLAGS()
@@ -55,6 +69,23 @@ class JSTemporalCalendar
class JSTemporalDuration
: public TorqueGeneratedJSTemporalDuration<JSTemporalDuration, JSObject> {
public:
+ // #sec-temporal.duration
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalDuration> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> years,
+ Handle<Object> months, Handle<Object> weeks, Handle<Object> days,
+ Handle<Object> hours, Handle<Object> minutes, Handle<Object> seconds,
+ Handle<Object> milliseconds, Handle<Object> microseconds,
+ Handle<Object> nanoseconds);
+
+ // #sec-get-temporal.duration.prototype.sign
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> Sign(
+ Isolate* isolate, Handle<JSTemporalDuration> duration);
+
+ // #sec-get-temporal.duration.prototype.blank
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Oddball> Blank(
+ Isolate* isolate, Handle<JSTemporalDuration> duration);
+
DECL_PRINTER(JSTemporalDuration)
TQ_OBJECT_CONSTRUCTORS(JSTemporalDuration)
@@ -63,6 +94,15 @@ class JSTemporalDuration
class JSTemporalInstant
: public TorqueGeneratedJSTemporalInstant<JSTemporalInstant, JSObject> {
public:
+ // #sec-temporal-instant-constructor
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> epoch_nanoseconds);
+
+ // #sec-temporal.now.instant
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant> Now(
+ Isolate* isolate);
+
DECL_PRINTER(JSTemporalInstant)
TQ_OBJECT_CONSTRUCTORS(JSTemporalInstant)
@@ -71,6 +111,34 @@ class JSTemporalInstant
class JSTemporalPlainDate
: public TorqueGeneratedJSTemporalPlainDate<JSTemporalPlainDate, JSObject> {
public:
+ // #sec-temporal-createtemporaldate
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> iso_year,
+ Handle<Object> iso_month, Handle<Object> iso_day,
+ Handle<Object> calendar_like);
+
+ // #sec-temporal.plaindate.prototype.withcalendar
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> WithCalendar(
+ Isolate* isolate, Handle<JSTemporalPlainDate> plain_date,
+ Handle<Object> calendar_like);
+
+ // #sec-temporal.plaindate.from
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> From(
+ Isolate* isolate, Handle<Object> item, Handle<Object> options);
+
+ // #sec-temporal.plaindate.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainDate> plain_date);
+
+ // #sec-temporal.now.plaindate
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like);
+
+ // #sec-temporal.now.plaindateiso
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like);
DECL_PRINTER(JSTemporalPlainDate)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_YEAR_MONTH_DAY()
@@ -84,6 +152,33 @@ class JSTemporalPlainDateTime
: public TorqueGeneratedJSTemporalPlainDateTime<JSTemporalPlainDateTime,
JSObject> {
public:
+ // #sec-temporal-createtemporaldatetime
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDateTime> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> iso_year,
+ Handle<Object> iso_month, Handle<Object> iso_day, Handle<Object> hour,
+ Handle<Object> minute, Handle<Object> second, Handle<Object> millisecond,
+ Handle<Object> microsecond, Handle<Object> nanosecond,
+ Handle<Object> calendar_like);
+
+ // #sec-temporal.plaindatetime.prototype.withcalendar
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDateTime>
+ WithCalendar(Isolate* isolate, Handle<JSTemporalPlainDateTime> date_time,
+ Handle<Object> calendar_like);
+
+ // #sec-temporal.plaindatetime.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainDateTime> date_time);
+
+ // #sec-temporal.now.plaindatetime
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDateTime> Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like);
+
+ // #sec-temporal.now.plaindatetimeiso
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDateTime> NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like);
+
DECL_PRINTER(JSTemporalPlainDateTime)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_YEAR_MONTH_DAY()
@@ -100,6 +195,17 @@ class JSTemporalPlainMonthDay
: public TorqueGeneratedJSTemporalPlainMonthDay<JSTemporalPlainMonthDay,
JSObject> {
public:
+ // ##sec-temporal.plainmonthday
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainMonthDay> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> iso_month,
+ Handle<Object> iso_day, Handle<Object> calendar_like,
+ Handle<Object> reference_iso_year);
+
+ // #sec-temporal.plainmonthday.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainMonthDay> month_day);
+
DECL_PRINTER(JSTemporalPlainMonthDay)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_YEAR_MONTH_DAY()
@@ -112,6 +218,21 @@ class JSTemporalPlainMonthDay
class JSTemporalPlainTime
: public TorqueGeneratedJSTemporalPlainTime<JSTemporalPlainTime, JSObject> {
public:
+ // #sec-temporal-plaintime-constructor
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainTime> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> hour, Handle<Object> minute,
+ Handle<Object> second, Handle<Object> millisecond,
+ Handle<Object> microsecond, Handle<Object> nanosecond);
+
+ // #sec-temporal.plaintime.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainTime> plain_time);
+
+ // #sec-temporal.now.plaintimeiso
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainTime> NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like);
+
DECL_PRINTER(JSTemporalPlainTime)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_HOUR_MINUTE_SECOND()
@@ -126,6 +247,19 @@ class JSTemporalPlainYearMonth
: public TorqueGeneratedJSTemporalPlainYearMonth<JSTemporalPlainYearMonth,
JSObject> {
public:
+ // ##sec-temporal.plainyearmonth
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainYearMonth>
+ Constructor(Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> iso_year,
+ Handle<Object> iso_month, Handle<Object> calendar_like,
+ Handle<Object> reference_iso_day);
+
+ // #sec-temporal.plainyearmonth.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalPlainYearMonth> year_month);
+
+ // Abstract Operations
+
DECL_PRINTER(JSTemporalPlainYearMonth)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_YEAR_MONTH_DAY()
@@ -138,13 +272,37 @@ class JSTemporalPlainYearMonth
class JSTemporalTimeZone
: public TorqueGeneratedJSTemporalTimeZone<JSTemporalTimeZone, JSObject> {
public:
+ // #sec-temporal.now.timezone
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalTimeZone> Now(
+ Isolate* isolate);
+
+ // #sec-temporal.timezone
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalTimeZone> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> identifier);
+
+ // #sec-temporal.timezone.prototype.tostring
+ static MaybeHandle<Object> ToString(Isolate* isolate,
+ Handle<JSTemporalTimeZone> time_zone,
+ const char* method);
+
DECL_PRINTER(JSTemporalTimeZone)
DEFINE_TORQUE_GENERATED_JS_TEMPORAL_TIME_ZONE_FLAGS()
+ DEFINE_TORQUE_GENERATED_JS_TEMPORAL_TIME_ZONE_SUB_MILLISECONDS()
DECL_BOOLEAN_ACCESSORS(is_offset)
DECL_INT_ACCESSORS(offset_milliseconds_or_time_zone_index)
+ DECLARE_TEMPORAL_INLINE_GETTER_SETTER(offset_milliseconds)
+ DECLARE_TEMPORAL_INLINE_GETTER_SETTER(offset_sub_milliseconds)
+
+ int32_t time_zone_index() const;
+ int64_t offset_nanoseconds() const;
+ void set_offset_nanoseconds(int64_t offset_nanoseconds);
+
+ MaybeHandle<String> id(Isolate* isolate) const;
+
TQ_OBJECT_CONSTRUCTORS(JSTemporalTimeZone)
};
@@ -152,11 +310,93 @@ class JSTemporalZonedDateTime
: public TorqueGeneratedJSTemporalZonedDateTime<JSTemporalZonedDateTime,
JSObject> {
public:
+ // #sec-temporal.zoneddatetime
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime> Constructor(
+ Isolate* isolate, Handle<JSFunction> target,
+ Handle<HeapObject> new_target, Handle<Object> epoch_nanoseconds,
+ Handle<Object> time_zone_like, Handle<Object> calendar_like);
+
+ // #sec-temporal.zoneddatetime.prototype.withcalendar
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime>
+ WithCalendar(Isolate* isolate,
+ Handle<JSTemporalZonedDateTime> zoned_date_time,
+ Handle<Object> calendar_like);
+
+ // #sec-temporal.zoneddatetime.prototype.withtimezone
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime>
+ WithTimeZone(Isolate* isolate,
+ Handle<JSTemporalZonedDateTime> zoned_date_time,
+ Handle<Object> time_zone_like);
+
+ // #sec-temporal.zoneddatetime.prototype.getisofields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time);
+
+ // #sec-temporal.now.zoneddatetime
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime> Now(
+ Isolate* isolate, Handle<Object> calendar_like,
+ Handle<Object> temporal_time_zone_like);
+
+ // #sec-temporal.now.zoneddatetimeiso
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime> NowISO(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like);
+
DECL_PRINTER(JSTemporalZonedDateTime)
TQ_OBJECT_CONSTRUCTORS(JSTemporalZonedDateTime)
};
+namespace temporal {
+
+// #sec-temporal-createtemporalinstant
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalInstant> CreateTemporalInstant(
+ Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
+ Handle<BigInt> epoch_nanoseconds);
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalInstant> CreateTemporalInstant(
+ Isolate* isolate, Handle<BigInt> epoch_nanoseconds);
+
+// #sec-temporal-calendaryear
+#define DECLARE_CALENDAR_ABSTRACT_OPERATION(Name) \
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Calendar##Name( \
+ Isolate* isolate, Handle<JSReceiver> calendar, \
+ Handle<JSReceiver> date_like);
+DECLARE_CALENDAR_ABSTRACT_OPERATION(Year)
+DECLARE_CALENDAR_ABSTRACT_OPERATION(Month)
+DECLARE_CALENDAR_ABSTRACT_OPERATION(MonthCode)
+DECLARE_CALENDAR_ABSTRACT_OPERATION(Day)
+
+#ifdef V8_INTL_SUPPORT
+DECLARE_CALENDAR_ABSTRACT_OPERATION(Era)
+DECLARE_CALENDAR_ABSTRACT_OPERATION(EraYear)
+#endif // V8_INTL_SUPPORT
+
+#undef DECLARE_CALENDAR_ABSTRACT_OPERATION
+
+// #sec-temporal-getiso8601calendar
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalCalendar> GetISO8601Calendar(
+ Isolate* isolate);
+
+// #sec-temporal-builtintimezonegetplaindatetimefor
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTemporalPlainDateTime>
+BuiltinTimeZoneGetPlainDateTimeFor(Isolate* isolate,
+ Handle<JSReceiver> time_zone,
+ Handle<JSTemporalInstant> instant,
+ Handle<JSReceiver> calendar,
+ const char* method);
+
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> InvokeCalendarMethod(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<String> name,
+ Handle<JSReceiver> temporal_like);
+
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> ToTemporalCalendar(
+ Isolate* isolate, Handle<Object> temporal_calendar_like,
+ const char* method);
+
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> ToTemporalTimeZone(
+ Isolate* isolate, Handle<Object> temporal_time_zone_like,
+ const char* method);
+
+} // namespace temporal
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/objects/js-temporal-objects.tq b/deps/v8/src/objects/js-temporal-objects.tq
index f440907770..4c3fb4e569 100644
--- a/deps/v8/src/objects/js-temporal-objects.tq
+++ b/deps/v8/src/objects/js-temporal-objects.tq
@@ -9,7 +9,7 @@ bitfield struct JSTemporalCalendarFlags extends uint31 {
}
bitfield struct JSTemporalYearMonthDay extends uint31 {
- iso_year: int32: 16 bit;
+ iso_year: int32: 20 bit;
iso_month: int32: 4 bit;
iso_day: int32: 5 bit;
}
@@ -30,6 +30,9 @@ bitfield struct JSTemporalTimeZoneFlags extends uint31 {
is_offset: bool: 1 bit;
offset_milliseconds_or_time_zone_index: int32: 28 bit;
}
+bitfield struct JSTemporalTimeZoneSubMilliseconds extends uint31 {
+ offset_sub_milliseconds: int32: 21 bit;
+}
extern class JSTemporalCalendar extends JSObject {
flags: SmiTagged<JSTemporalCalendarFlags>;
@@ -80,6 +83,7 @@ extern class JSTemporalPlainYearMonth extends JSObject {
extern class JSTemporalTimeZone extends JSObject {
flags: SmiTagged<JSTemporalTimeZoneFlags>;
+ details: SmiTagged<JSTemporalTimeZoneSubMilliseconds>;
}
extern class JSTemporalZonedDateTime extends JSObject {
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index dd087b49c5..790b9c0e08 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -15,6 +15,7 @@
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-struct-inl.h"
#include "src/objects/map-updater.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/property-details.h"
@@ -167,7 +168,7 @@ Handle<Map> LookupIterator::GetReceiverMap() const {
}
bool LookupIterator::HasAccess() const {
- // TRANSITION is true when being called from StoreOwnIC.
+ // TRANSITION is true when being called from DefineNamedOwnIC.
DCHECK(state_ == ACCESS_CHECK || state_ == TRANSITION);
return isolate_->MayAccess(handle(isolate_->context(), isolate_),
GetHolder<JSObject>());
@@ -587,7 +588,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
// Don't set enumeration index (it will be set during value store).
property_details_ =
PropertyDetails(PropertyKind::kData, attributes,
- PropertyCell::InitialType(isolate_, value));
+ PropertyCell::InitialType(isolate_, *value));
transition_ = isolate_->factory()->NewPropertyCell(
name(), property_details_, value);
has_property_ = true;
@@ -926,8 +927,8 @@ Handle<Object> LookupIterator::FetchValue(
field_index.is_inobject() && field_index.is_double()) {
return isolate_->factory()->undefined_value();
}
- return JSObject::FastPropertyAt(holder, property_details_.representation(),
- field_index);
+ return JSObject::FastPropertyAt(
+ isolate_, holder, property_details_.representation(), field_index);
} else {
result =
holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
@@ -1053,6 +1054,18 @@ Handle<Object> LookupIterator::GetDataValue(
return value;
}
+Handle<Object> LookupIterator::GetDataValue(SeqCstAccessTag tag) const {
+ DCHECK_EQ(DATA, state_);
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
+ // Currently only shared structs support sequentially consistent access.
+ Handle<JSSharedStruct> holder = GetHolder<JSSharedStruct>();
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ return JSObject::FastPropertyAt(
+ isolate_, holder, property_details_.representation(), field_index, tag);
+}
+
void LookupIterator::WriteDataValue(Handle<Object> value,
bool initializing_store) {
DCHECK_EQ(DATA, state_);
@@ -1061,6 +1074,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
// WasmObjects.
DCHECK(!holder_->IsWasmObject(isolate_));
#endif // V8_ENABLE_WEBASSEMBLY
+ DCHECK_IMPLIES(holder_->IsJSSharedStruct(), value->IsShared());
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement(*holder)) {
@@ -1111,6 +1125,18 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
}
}
+void LookupIterator::WriteDataValue(Handle<Object> value, SeqCstAccessTag tag) {
+ DCHECK_EQ(DATA, state_);
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
+ // Currently only shared structs support sequentially consistent access.
+ Handle<JSSharedStruct> holder = GetHolder<JSSharedStruct>();
+ DisallowGarbageCollection no_gc;
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ holder->FastPropertyAtPut(field_index, *value, tag);
+}
+
#if V8_ENABLE_WEBASSEMBLY
wasm::ValueType LookupIterator::wasm_value_type() const {
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index de678f35b0..fb62f407eb 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -188,6 +188,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<Object> GetDataValue(AllocationPolicy allocation_policy =
AllocationPolicy::kAllocationAllowed) const;
void WriteDataValue(Handle<Object> value, bool initializing_store);
+ Handle<Object> GetDataValue(SeqCstAccessTag tag) const;
+ void WriteDataValue(Handle<Object> value, SeqCstAccessTag tag);
inline void UpdateProtector();
static inline void UpdateProtector(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name);
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 37d189b67a..ac31be483d 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -688,8 +688,8 @@ bool Map::IsBooleanMap() const {
}
bool Map::IsNullOrUndefinedMap() const {
- return *this == GetReadOnlyRoots().null_map() ||
- *this == GetReadOnlyRoots().undefined_map();
+ auto roots = GetReadOnlyRoots();
+ return *this == roots.null_map() || *this == roots.undefined_map();
}
bool Map::IsPrimitiveMap() const {
@@ -768,8 +768,7 @@ void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
// static
Map Map::ElementsTransitionMap(Isolate* isolate, ConcurrencyMode cmode) {
- DisallowGarbageCollection no_gc;
- return TransitionsAccessor(isolate, *this, &no_gc,
+ return TransitionsAccessor(isolate, *this,
cmode == ConcurrencyMode::kConcurrent)
.SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 6d8b1cf482..34de7eb21e 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -277,7 +277,7 @@ Handle<Map> MapUpdater::UpdateImpl() {
}
DCHECK_EQ(kEnd, state_);
if (FLAG_fast_map_update) {
- TransitionsAccessor(isolate_, old_map_).SetMigrationTarget(*result_map_);
+ TransitionsAccessor::SetMigrationTarget(isolate_, old_map_, *result_map_);
}
return result_map_;
}
@@ -304,7 +304,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// be the last one in the transition tree).
DCHECK(!map.is_extensible());
Map previous = Map::cast(map.GetBackPointer(isolate));
- TransitionsAccessor last_transitions(isolate, previous, no_gc, is_concurrent);
+ TransitionsAccessor last_transitions(isolate, previous, is_concurrent);
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &info.integrity_level_symbol, &info.integrity_level)) {
// The last transition was not integrity level transition - just bail out.
@@ -322,7 +322,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// with integrity level transitions, just bail out.
while (!source_map.is_extensible()) {
previous = Map::cast(source_map.GetBackPointer(isolate));
- TransitionsAccessor transitions(isolate, previous, no_gc, is_concurrent);
+ TransitionsAccessor transitions(isolate, previous, is_concurrent);
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
}
@@ -390,7 +390,7 @@ base::Optional<Map> MapUpdater::TryUpdateNoLock(Isolate* isolate, Map old_map,
if (info.has_integrity_level_transition) {
// Now replay the integrity level transition.
- result = TransitionsAccessor(isolate, result, &no_gc,
+ result = TransitionsAccessor(isolate, result,
cmode == ConcurrencyMode::kConcurrent)
.SearchSpecial(info.integrity_level_symbol);
}
@@ -423,14 +423,13 @@ MapUpdater::State MapUpdater::Normalize(const char* reason) {
// static
void MapUpdater::CompleteInobjectSlackTracking(Isolate* isolate,
Map initial_map) {
- DisallowGarbageCollection no_gc;
// Has to be an initial map.
DCHECK(initial_map.GetBackPointer().IsUndefined(isolate));
const int slack = initial_map.ComputeMinObjectSlack(isolate);
DCHECK_GE(slack, 0);
- TransitionsAccessor transitions(isolate, initial_map, &no_gc);
+ TransitionsAccessor transitions(isolate, initial_map);
TransitionsAccessor::TraverseCallback callback;
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
@@ -518,7 +517,7 @@ bool MapUpdater::TrySaveIntegrityLevelTransitions() {
Handle<Map> previous =
handle(Map::cast(old_map_->GetBackPointer()), isolate_);
Symbol integrity_level_symbol;
- TransitionsAccessor last_transitions(isolate_, previous);
+ TransitionsAccessor last_transitions(isolate_, *previous);
if (!last_transitions.HasIntegrityLevelTransitionTo(
*old_map_, &integrity_level_symbol, &integrity_level_)) {
// The last transition was not integrity level transition - just bail out.
@@ -538,7 +537,7 @@ bool MapUpdater::TrySaveIntegrityLevelTransitions() {
while (!integrity_source_map_->is_extensible()) {
previous =
handle(Map::cast(integrity_source_map_->GetBackPointer()), isolate_);
- TransitionsAccessor transitions(isolate_, previous);
+ TransitionsAccessor transitions(isolate_, *previous);
if (!transitions.HasIntegrityLevelTransitionTo(*integrity_source_map_)) {
return false;
}
@@ -641,12 +640,11 @@ MapUpdater::State MapUpdater::FindTargetMap() {
int root_nof = root_map_->NumberOfOwnDescriptors();
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) {
PropertyDetails old_details = GetDetails(i);
- Map transition = TransitionsAccessor(isolate_, target_map_)
- .SearchTransition(GetKey(i), old_details.kind(),
- old_details.attributes());
- if (transition.is_null()) break;
- Handle<Map> tmp_map(transition, isolate_);
-
+ Handle<Map> tmp_map;
+ MaybeHandle<Map> maybe_tmp_map = TransitionsAccessor::SearchTransition(
+ isolate_, target_map_, GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (!maybe_tmp_map.ToHandle(&tmp_map)) break;
Handle<DescriptorArray> tmp_descriptors(
tmp_map->instance_descriptors(isolate_), isolate_);
@@ -727,10 +725,9 @@ MapUpdater::State MapUpdater::FindTargetMap() {
}
// We try to replay the integrity level transition here.
- Map transition = TransitionsAccessor(isolate_, target_map_)
- .SearchSpecial(*integrity_level_symbol_);
- if (!transition.is_null()) {
- result_map_ = handle(transition, isolate_);
+ MaybeHandle<Map> maybe_transition = TransitionsAccessor::SearchSpecial(
+ isolate_, target_map_, *integrity_level_symbol_);
+ if (maybe_transition.ToHandle(&result_map_)) {
state_ = kEnd;
return state_; // Done.
}
@@ -739,11 +736,11 @@ MapUpdater::State MapUpdater::FindTargetMap() {
// Find the last compatible target map in the transition tree.
for (InternalIndex i : InternalIndex::Range(target_nof, old_nof_)) {
PropertyDetails old_details = GetDetails(i);
- Map transition = TransitionsAccessor(isolate_, target_map_)
- .SearchTransition(GetKey(i), old_details.kind(),
- old_details.attributes());
- if (transition.is_null()) break;
- Handle<Map> tmp_map(transition, isolate_);
+ Handle<Map> tmp_map;
+ MaybeHandle<Map> maybe_tmp_map = TransitionsAccessor::SearchTransition(
+ isolate_, target_map_, GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (!maybe_tmp_map.ToHandle(&tmp_map)) break;
Handle<DescriptorArray> tmp_descriptors(
tmp_map->instance_descriptors(isolate_), isolate_);
#ifdef DEBUG
@@ -933,15 +930,13 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
}
Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
- DisallowGarbageCollection no_gc;
-
int root_nof = root_map_->NumberOfOwnDescriptors();
Map current = *root_map_;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) {
Name name = descriptors->GetKey(i);
PropertyDetails details = descriptors->GetDetails(i);
Map next =
- TransitionsAccessor(isolate_, current, &no_gc)
+ TransitionsAccessor(isolate_, current)
.SearchTransition(name, details.kind(), details.attributes());
if (next.is_null()) break;
DescriptorArray next_descriptors = next.instance_descriptors(isolate_);
@@ -981,21 +976,20 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
}
InternalIndex split_index(split_nof);
PropertyDetails split_details = GetDetails(split_index);
- TransitionsAccessor transitions(isolate_, split_map);
// Invalidate a transition target at |key|.
- Handle<Map> maybe_transition(
- transitions.SearchTransition(GetKey(split_index), split_details.kind(),
- split_details.attributes()),
- isolate_);
- if (!maybe_transition->is_null()) {
- maybe_transition->DeprecateTransitionTree(isolate_);
+ MaybeHandle<Map> maybe_transition = TransitionsAccessor::SearchTransition(
+ isolate_, split_map, GetKey(split_index), split_details.kind(),
+ split_details.attributes());
+ if (!maybe_transition.is_null()) {
+ maybe_transition.ToHandleChecked()->DeprecateTransitionTree(isolate_);
}
// If |maybe_transition| is not nullptr then the transition array already
// contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (maybe_transition->is_null() && !transitions.CanHaveMoreTransitions()) {
+ if (maybe_transition.is_null() &&
+ !TransitionsAccessor::CanHaveMoreTransitions(isolate_, split_map)) {
return Normalize("Normalize_CantHaveMoreTransitions");
}
@@ -1056,8 +1050,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
DCHECK_EQ(kAtIntegrityLevelSource, state_);
- TransitionsAccessor transitions(isolate_, target_map_);
- if (!transitions.CanHaveMoreTransitions()) {
+ if (!TransitionsAccessor::CanHaveMoreTransitions(isolate_, target_map_)) {
return Normalize("Normalize_CantHaveMoreTransitions");
}
@@ -1127,7 +1120,6 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
const MaybeObjectHandle& new_wrapped_type) {
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
- DisallowGarbageCollection no_gc;
PropertyDetails details =
map->instance_descriptors(isolate).GetDetails(descriptor);
if (details.location() != PropertyLocation::kField) return;
@@ -1144,7 +1136,7 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
Map current = backlog.front();
backlog.pop();
- TransitionsAccessor transitions(isolate, current, &no_gc);
+ TransitionsAccessor transitions(isolate, current);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
Map target = transitions.GetTarget(i);
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 47a0e36ee3..6aaccd72cc 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -129,6 +129,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitEmbedderDataArray;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ case REGISTERED_SYMBOL_TABLE_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
@@ -204,6 +206,9 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_DATA_VIEW_TYPE:
return kVisitJSDataView;
+ case JS_EXTERNAL_OBJECT_TYPE:
+ return kVisitJSExternalObject;
+
case JS_FUNCTION_TYPE:
case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
@@ -271,6 +276,8 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_SET_PROTOTYPE_TYPE:
case JS_SET_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_SHADOW_REALM_TYPE:
+ case JS_SHARED_STRUCT_TYPE:
case JS_STRING_ITERATOR_PROTOTYPE_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
@@ -306,7 +313,8 @@ VisitorId Map::GetVisitorId(Map map) {
case WASM_TABLE_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
#endif // V8_ENABLE_WEBASSEMBLY
- case JS_BOUND_FUNCTION_TYPE: {
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_WRAPPED_FUNCTION_TYPE: {
const bool has_raw_data_fields =
COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0;
return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
@@ -372,6 +380,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitWasmInternalFunction;
case WASM_JS_FUNCTION_DATA_TYPE:
return kVisitWasmJSFunctionData;
+ case WASM_ON_FULFILLED_DATA_TYPE:
+ return kVisitWasmOnFulfilledData;
case WASM_API_FUNCTION_REF_TYPE:
return kVisitWasmApiFunctionRef;
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
@@ -553,8 +563,7 @@ bool Map::HasOutOfObjectProperties() const {
void Map::DeprecateTransitionTree(Isolate* isolate) {
if (is_deprecated()) return;
- DisallowGarbageCollection no_gc;
- TransitionsAccessor transitions(isolate, *this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
transitions.GetTarget(i).DeprecateTransitionTree(isolate);
@@ -641,7 +650,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
Map target = old_map;
do {
- target = TransitionsAccessor(isolate, target, &no_gc).GetMigrationTarget();
+ target = TransitionsAccessor(isolate, target).GetMigrationTarget();
} while (!target.is_null() && target.is_deprecated());
if (target.is_null()) return Map();
@@ -689,8 +698,7 @@ MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
isolate, *old_map, ConcurrencyMode::kNotConcurrent);
if (!new_map.has_value()) return MaybeHandle<Map>();
if (FLAG_fast_map_update) {
- TransitionsAccessor(isolate, *old_map, &no_gc)
- .SetMigrationTarget(new_map.value());
+ TransitionsAccessor::SetMigrationTarget(isolate, old_map, new_map.value());
}
return handle(new_map.value(), isolate);
}
@@ -712,7 +720,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
- TransitionsAccessor(isolate, new_map, &no_gc, is_concurrent)
+ TransitionsAccessor(isolate, new_map, is_concurrent)
.SearchTransition(old_descriptors.GetKey(i), old_details.kind(),
old_details.attributes());
if (transition.is_null()) return Map();
@@ -922,6 +930,7 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind to_kind,
ConcurrencyMode cmode) {
+ DisallowGarbageCollection no_gc;
// Ensure we are requested to search elements kind transition "near the root".
DCHECK_EQ(map.FindRootMap(isolate).NumberOfOwnDescriptors(),
map.NumberOfOwnDescriptors());
@@ -1127,29 +1136,38 @@ bool Map::MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate) {
return false;
}
-Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
- int inobject_properties) {
+Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> src_handle,
+ int instance_size, int inobject_properties) {
Handle<Map> result = isolate->factory()->NewMap(
- map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
+ src_handle->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
inobject_properties);
- Handle<HeapObject> prototype(map->prototype(), isolate);
+ // We have to set the bitfields before any potential GCs could happen because
+ // heap verification might fail otherwise.
+ {
+ DisallowGarbageCollection no_gc;
+ Map src = *src_handle;
+ Map raw = *result;
+ raw.set_constructor_or_back_pointer(src.GetConstructor());
+ raw.set_bit_field(src.bit_field());
+ raw.set_bit_field2(src.bit_field2());
+ int new_bit_field3 = src.bit_field3();
+ new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
+ new_bit_field3 =
+ Bits3::NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
+ new_bit_field3 = Bits3::EnumLengthBits::update(new_bit_field3,
+ kInvalidEnumCacheSentinel);
+ new_bit_field3 = Bits3::IsDeprecatedBit::update(new_bit_field3, false);
+ new_bit_field3 =
+ Bits3::IsInRetainedMapListBit::update(new_bit_field3, false);
+ if (!src.is_dictionary_map()) {
+ new_bit_field3 = Bits3::IsUnstableBit::update(new_bit_field3, false);
+ }
+ // Same as bit_field comment above.
+ raw.set_bit_field3(new_bit_field3);
+ raw.clear_padding();
+ }
+ Handle<HeapObject> prototype(src_handle->prototype(), isolate);
Map::SetPrototype(isolate, result, prototype);
- result->set_constructor_or_back_pointer(map->GetConstructor());
- result->set_bit_field(map->bit_field());
- result->set_bit_field2(map->bit_field2());
- int new_bit_field3 = map->bit_field3();
- new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
- new_bit_field3 = Bits3::NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
- new_bit_field3 =
- Bits3::EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel);
- new_bit_field3 = Bits3::IsDeprecatedBit::update(new_bit_field3, false);
- new_bit_field3 = Bits3::IsInRetainedMapListBit::update(new_bit_field3, false);
- if (!map->is_dictionary_map()) {
- new_bit_field3 = Bits3::IsUnstableBit::update(new_bit_field3, false);
- }
- // Same as bit_field comment above.
- result->set_bit_field3(new_bit_field3);
- result->clear_padding();
return result;
}
@@ -1240,13 +1258,17 @@ Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
Handle<Map> result = RawCopy(
isolate, map, new_instance_size,
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
- // Clear the unused_property_fields explicitly as this field should not
- // be accessed for normalized maps.
- result->SetInObjectUnusedPropertyFields(0);
- result->set_is_dictionary_map(true);
- result->set_is_migration_target(false);
- result->set_may_have_interesting_symbols(true);
- result->set_construction_counter(kNoSlackTracking);
+ {
+ DisallowGarbageCollection no_gc;
+ Map raw = *result;
+ // Clear the unused_property_fields explicitly as this field should not
+ // be accessed for normalized maps.
+ raw.SetInObjectUnusedPropertyFields(0);
+ raw.set_is_dictionary_map(true);
+ raw.set_is_migration_target(false);
+ raw.set_may_have_interesting_symbols(true);
+ raw.set_construction_counter(kNoSlackTracking);
+ }
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) result->DictionaryMapVerify(isolate);
@@ -1400,7 +1422,7 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
LOG(isolate, MapEvent("Transition", parent, child, "prototype", name));
}
} else {
- TransitionsAccessor(isolate, parent).Insert(name, child, flag);
+ TransitionsAccessor::Insert(isolate, parent, name, child, flag);
if (FLAG_log_maps) {
LOG(isolate, MapEvent("Transition", parent, child, "", name));
}
@@ -1428,7 +1450,7 @@ Handle<Map> Map::CopyReplaceDescriptors(Isolate* isolate, Handle<Map> map,
result->InitializeDescriptors(isolate, *descriptors);
} else {
if (flag == INSERT_TRANSITION &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
+ TransitionsAccessor::CanHaveMoreTransitions(isolate, map)) {
result->InitializeDescriptors(isolate, *descriptors);
DCHECK(!maybe_name.is_null());
@@ -1540,7 +1562,7 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
bool insert_transition =
flag == INSERT_TRANSITION &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions() &&
+ TransitionsAccessor::CanHaveMoreTransitions(isolate, map) &&
maybe_elements_transition_map.is_null();
if (insert_transition) {
@@ -1574,10 +1596,10 @@ Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
Handle<Symbol> transition_symbol =
isolate->factory()->strict_function_transition_symbol();
- Map maybe_transition = TransitionsAccessor(isolate, initial_map)
- .SearchSpecial(*transition_symbol);
+ MaybeHandle<Map> maybe_transition = TransitionsAccessor::SearchSpecial(
+ isolate, initial_map, *transition_symbol);
if (!maybe_transition.is_null()) {
- return handle(maybe_transition, isolate);
+ return maybe_transition.ToHandleChecked();
}
initial_map->NotifyLeafMapLayoutChange(isolate);
@@ -1591,7 +1613,7 @@ Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
map->set_prototype(initial_map->prototype());
map->set_construction_counter(initial_map->construction_counter());
- if (TransitionsAccessor(isolate, initial_map).CanHaveMoreTransitions()) {
+ if (TransitionsAccessor::CanHaveMoreTransitions(isolate, initial_map)) {
Map::ConnectTransition(isolate, initial_map, map, transition_symbol,
SPECIAL_TRANSITION);
}
@@ -1633,9 +1655,11 @@ Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
}
Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
- Handle<Map> copy =
+ Handle<Map> copy_handle =
Copy(isolate, handle(isolate->object_function()->initial_map(), isolate),
"MapCreate");
+ DisallowGarbageCollection no_gc;
+ Map copy = *copy_handle;
// Check that we do not overflow the instance size when adding the extra
// inobject properties. If the instance size overflows, we allocate as many
@@ -1648,12 +1672,13 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
JSObject::kHeaderSize + kTaggedSize * inobject_properties;
// Adjust the map with the extra inobject properties.
- copy->set_instance_size(new_instance_size);
- copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
- DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
- copy->SetInObjectUnusedPropertyFields(inobject_properties);
- copy->set_visitor_id(Map::GetVisitorId(*copy));
- return copy;
+ copy.set_instance_size(new_instance_size);
+ copy.SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
+ DCHECK_EQ(copy.GetInObjectProperties(), inobject_properties);
+ copy.SetInObjectUnusedPropertyFields(inobject_properties);
+ copy.set_visitor_id(Map::GetVisitorId(copy));
+
+ return copy_handle;
}
Handle<Map> Map::CopyForPreventExtensions(
@@ -1803,11 +1828,10 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
// Migrate to the newest map before storing the property.
map = Update(isolate, map);
- Map maybe_transition =
- TransitionsAccessor(isolate, map)
- .SearchTransition(*name, PropertyKind::kData, attributes);
- if (!maybe_transition.is_null()) {
- Handle<Map> transition(maybe_transition, isolate);
+ MaybeHandle<Map> maybe_transition = TransitionsAccessor::SearchTransition(
+ isolate, map, *name, PropertyKind::kData, attributes);
+ Handle<Map> transition;
+ if (maybe_transition.ToHandle(&transition)) {
InternalIndex descriptor = transition->LastAdded();
DCHECK_EQ(attributes, transition->instance_descriptors(isolate)
@@ -1899,11 +1923,10 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- Map maybe_transition =
- TransitionsAccessor(isolate, map)
- .SearchTransition(*name, PropertyKind::kAccessor, attributes);
- if (!maybe_transition.is_null()) {
- Handle<Map> transition(maybe_transition, isolate);
+ MaybeHandle<Map> maybe_transition = TransitionsAccessor::SearchTransition(
+ isolate, map, *name, PropertyKind::kAccessor, attributes);
+ Handle<Map> transition;
+ if (maybe_transition.ToHandle(&transition)) {
DescriptorArray descriptors = transition->instance_descriptors(isolate);
InternalIndex last_descriptor = transition->LastAdded();
DCHECK(descriptors.GetKey(last_descriptor).Equals(*name));
@@ -1996,7 +2019,7 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
// Share descriptors only if map owns descriptors and it not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
!map->GetBackPointer().IsUndefined(isolate) &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
+ TransitionsAccessor::CanHaveMoreTransitions(isolate, map)) {
return ShareDescriptor(isolate, map, descriptors, descriptor);
}
@@ -2054,16 +2077,21 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
int Map::Hash() {
// For performance reasons we only hash the 2 most variable fields of a map:
- // prototype map and bit_field2. For predictability reasons we use objects'
- // offsets in respective pages for hashing instead of raw addresses. We use
- // the map of the prototype because the prototype itself could be compacted,
- // whereas the map will not be moved.
- // NOTE: If we want to compact maps, this hash function won't work as intended
- // anymore.
+ // prototype and bit_field2.
+
+ HeapObject prototype = this->prototype();
+ int prototype_hash;
- // Shift away the tag.
- int hash = ObjectAddressForHashing(prototype().map().ptr()) >> 2;
- return hash ^ bit_field2();
+ if (prototype.IsNull()) {
+ // No identity hash for null, so just pick a random number.
+ prototype_hash = 1;
+ } else {
+ JSReceiver receiver = JSReceiver::cast(prototype);
+ Isolate* isolate = GetIsolateFromWritableObject(receiver);
+ prototype_hash = receiver.GetOrCreateIdentityHash(isolate).value();
+ }
+
+ return prototype_hash ^ bit_field2();
}
namespace {
@@ -2143,12 +2171,11 @@ bool Map::EquivalentToForNormalization(const Map other,
}
int Map::ComputeMinObjectSlack(Isolate* isolate) {
- DisallowGarbageCollection no_gc;
// Has to be an initial map.
DCHECK(GetBackPointer().IsUndefined(isolate));
int slack = UnusedPropertyFields();
- TransitionsAccessor transitions(isolate, *this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this);
TransitionsAccessor::TraverseCallback callback = [&](Map map) {
slack = std::min(slack, map.UnusedPropertyFields());
};
@@ -2274,11 +2301,11 @@ void Map::StartInobjectSlackTracking() {
Handle<Map> Map::TransitionToPrototype(Isolate* isolate, Handle<Map> map,
Handle<HeapObject> prototype) {
Handle<Map> new_map =
- TransitionsAccessor(isolate, map).GetPrototypeTransition(prototype);
+ TransitionsAccessor::GetPrototypeTransition(isolate, map, prototype);
if (new_map.is_null()) {
new_map = Copy(isolate, map, "TransitionToPrototype");
- TransitionsAccessor(isolate, map)
- .PutPrototypeTransition(prototype, new_map);
+ TransitionsAccessor::PutPrototypeTransition(isolate, map, prototype,
+ new_map);
Map::SetPrototype(isolate, new_map, prototype);
}
return new_map;
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index fe2cdf150a..1e95302283 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -46,6 +46,7 @@ enum InstanceType : uint16_t;
V(JSApiObject) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
@@ -78,6 +79,7 @@ enum InstanceType : uint16_t;
IF_WASM(V, WasmInstanceObject) \
IF_WASM(V, WasmInternalFunction) \
IF_WASM(V, WasmJSFunctionData) \
+ IF_WASM(V, WasmOnFulfilledData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmTypeInfo) \
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index a1f4eb5368..cb80fe87e9 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -88,12 +88,31 @@ bool Name::IsHashFieldComputed(uint32_t raw_hash_field) {
return (raw_hash_field & kHashNotComputedMask) == 0;
}
+bool Name::IsHash(uint32_t raw_hash_field) {
+ return HashFieldTypeBits::decode(raw_hash_field) == HashFieldType::kHash;
+}
+
+bool Name::IsIntegerIndex(uint32_t raw_hash_field) {
+ return HashFieldTypeBits::decode(raw_hash_field) ==
+ HashFieldType::kIntegerIndex;
+}
+
+bool Name::IsForwardingIndex(uint32_t raw_hash_field) {
+ return HashFieldTypeBits::decode(raw_hash_field) ==
+ HashFieldType::kForwardingIndex;
+}
+
+uint32_t Name::CreateHashFieldValue(uint32_t hash, HashFieldType type) {
+ return HashBits::encode(hash & HashBits::kMax) |
+ HashFieldTypeBits::encode(type);
+}
+
bool Name::HasHashCode() const { return IsHashFieldComputed(raw_hash_field()); }
uint32_t Name::EnsureHash() {
// Fast case: has hash code already been computed?
uint32_t field = raw_hash_field();
- if (IsHashFieldComputed(field)) return field >> kHashShift;
+ if (IsHashFieldComputed(field)) return HashBits::decode(field);
// Slow case: compute hash code and set it. Has to be a string.
return String::cast(*this).ComputeAndSetHash();
}
@@ -101,7 +120,7 @@ uint32_t Name::EnsureHash() {
uint32_t Name::EnsureHash(const SharedStringAccessGuardIfNeeded& access_guard) {
// Fast case: has hash code already been computed?
uint32_t field = raw_hash_field();
- if (IsHashFieldComputed(field)) return field >> kHashShift;
+ if (IsHashFieldComputed(field)) return HashBits::decode(field);
// Slow case: compute hash code and set it. Has to be a string.
return String::cast(*this).ComputeAndSetHash(access_guard);
}
@@ -109,7 +128,7 @@ uint32_t Name::EnsureHash(const SharedStringAccessGuardIfNeeded& access_guard) {
uint32_t Name::hash() const {
uint32_t field = raw_hash_field();
DCHECK(IsHashFieldComputed(field));
- return field >> kHashShift;
+ return HashBits::decode(field);
}
DEF_GETTER(Name, IsInterestingSymbol, bool) {
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index a9f3932c4b..d762a49a8c 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -89,21 +89,30 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
void NameShortPrint();
int NameShortPrint(base::Vector<char> str);
- // Mask constant for checking if a name has a computed hash code
- // and if it is a string that is an integer index. The least significant bit
- // indicates whether a hash code has been computed. If the hash code has
- // been computed the 2nd bit tells whether the string can be used as an
- // integer index (up to MAX_SAFE_INTEGER).
- static const int kHashNotComputedMask = 1;
- static const int kIsNotIntegerIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
-
- // Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofHashBitFields;
-
- // Only these bits are relevant in the hash, since the top two are shifted
- // out.
- static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+ // Mask constant for checking if a name has a computed hash code and the type
+ // of information stored in the hash field. The least significant bit
+ // indicates whether the value can be used as a hash (i.e. different values
+ // imply different strings).
+ enum class HashFieldType : uint32_t {
+ kHash = 0b10,
+ kIntegerIndex = 0b00,
+ kForwardingIndex = 0b01,
+ kEmpty = 0b11
+ };
+
+ using HashFieldTypeBits = base::BitField<HashFieldType, 0, 2>;
+ using HashBits =
+ HashFieldTypeBits::Next<uint32_t, kBitsPerInt - HashFieldTypeBits::kSize>;
+
+ static constexpr int kHashNotComputedMask = 1;
+ // Value of empty hash field indicating that the hash is not computed.
+ static constexpr int kEmptyHashField =
+ HashFieldTypeBits::encode(HashFieldType::kEmpty);
+
+ // Empty hash and forwarding indices can not be used as hash.
+ STATIC_ASSERT((kEmptyHashField & kHashNotComputedMask) != 0);
+ STATIC_ASSERT((HashFieldTypeBits::encode(HashFieldType::kForwardingIndex) &
+ kHashNotComputedMask) != 0);
// Array index strings this short can keep their index in the hash field.
static const int kMaxCachedArrayIndexLength = 7;
@@ -124,16 +133,15 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
// the case for the string '0'. 24 bits are used for the array index value.
static const int kArrayIndexValueBits = 24;
static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+ kBitsPerInt - kArrayIndexValueBits - HashFieldTypeBits::kSize;
STATIC_ASSERT(kArrayIndexLengthBits > 0);
STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
using ArrayIndexValueBits =
- base::BitField<unsigned int, kNofHashBitFields, kArrayIndexValueBits>;
+ HashFieldTypeBits::Next<unsigned int, kArrayIndexValueBits>;
using ArrayIndexLengthBits =
- base::BitField<unsigned int, kNofHashBitFields + kArrayIndexValueBits,
- kArrayIndexLengthBits>;
+ ArrayIndexValueBits::Next<unsigned int, kArrayIndexLengthBits>;
// Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
// could use a mask to test if the length of string is less than or equal to
@@ -143,16 +151,19 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
// When any of these bits is set then the hash field does not contain a cached
// array index.
+ STATIC_ASSERT(HashFieldTypeBits::encode(HashFieldType::kIntegerIndex) == 0);
static const unsigned int kDoesNotContainCachedArrayIndexMask =
(~static_cast<unsigned>(kMaxCachedArrayIndexLength)
<< ArrayIndexLengthBits::kShift) |
- kIsNotIntegerIndexMask;
-
- // Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField =
- kIsNotIntegerIndexMask | kHashNotComputedMask;
+ HashFieldTypeBits::kMask;
static inline bool IsHashFieldComputed(uint32_t raw_hash_field);
+ static inline bool IsHash(uint32_t raw_hash_field);
+ static inline bool IsIntegerIndex(uint32_t raw_hash_field);
+ static inline bool IsForwardingIndex(uint32_t raw_hash_field);
+
+ static inline uint32_t CreateHashFieldValue(uint32_t hash,
+ HashFieldType type);
TQ_OBJECT_CONSTRUCTORS(Name)
};
diff --git a/deps/v8/src/objects/name.tq b/deps/v8/src/objects/name.tq
index 6fe141f90c..81566e7961 100644
--- a/deps/v8/src/objects/name.tq
+++ b/deps/v8/src/objects/name.tq
@@ -8,8 +8,7 @@ extern class Name extends PrimitiveHeapObject {
}
bitfield struct NameHash extends uint32 {
- hash_not_computed: bool: 1 bit;
- is_not_integer_index_mask: bool: 1 bit;
+ hash_field_type: HashFieldType: 2 bit;
array_index_value: uint32: 24 bit;
array_index_length: uint32: 6 bit;
}
@@ -35,25 +34,21 @@ extern class Symbol extends Name {
type PublicSymbol extends Symbol;
type PrivateSymbol extends Symbol;
-const kNameEmptyHashField: NameHash = NameHash{
- hash_not_computed: true,
- is_not_integer_index_mask: true,
- array_index_value: 0,
- array_index_length: 0
-};
-
const kMaxCachedArrayIndexLength: constexpr uint32
generates 'Name::kMaxCachedArrayIndexLength';
const kMaxArrayIndexSize: constexpr uint32
generates 'Name::kMaxArrayIndexSize';
const kNofHashBitFields: constexpr int31
- generates 'Name::kNofHashBitFields';
+ generates 'Name::HashFieldTypeBits::kSize';
const kArrayIndexValueBits: constexpr int31
generates 'Name::kArrayIndexValueBits';
const kDoesNotContainCachedArrayIndexMask: constexpr uint32
generates 'Name::kDoesNotContainCachedArrayIndexMask';
-const kIsNotIntegerIndexMask: constexpr uint32
- generates 'Name::kIsNotIntegerIndexMask';
+const kNameEmptyHashField: NameHash = NameHash{
+ hash_field_type: HashFieldType::kEmpty,
+ array_index_value: 0,
+ array_index_length: 0
+};
macro ContainsCachedArrayIndex(hash: uint32): bool {
return (hash & kDoesNotContainCachedArrayIndexMask) == 0;
@@ -72,16 +67,22 @@ macro TenToThe(exponent: uint32): uint32 {
return Unsigned(answer);
}
+macro IsIntegerIndex(hash: NameHash): bool {
+ return hash.hash_field_type == HashFieldType::kIntegerIndex;
+}
+
macro MakeArrayIndexHash(value: uint32, length: uint32): NameHash {
// This is in sync with StringHasher::MakeArrayIndexHash.
dcheck(length <= kMaxArrayIndexSize);
const one: uint32 = 1;
dcheck(TenToThe(kMaxCachedArrayIndexLength) < (one << kArrayIndexValueBits));
- let hash: uint32 = value;
- hash = (hash << kArrayIndexValueBitsShift) |
+ let rawHash: uint32 = value;
+ rawHash = (rawHash << kArrayIndexValueBitsShift) |
(length << kArrayIndexLengthBitsShift);
- dcheck((hash & kIsNotIntegerIndexMask) == 0);
dcheck(
- (length <= kMaxCachedArrayIndexLength) == ContainsCachedArrayIndex(hash));
- return %RawDownCast<NameHash>(hash);
+ (length <= kMaxCachedArrayIndexLength) ==
+ ContainsCachedArrayIndex(rawHash));
+ const hash: NameHash = %RawDownCast<NameHash>(rawHash);
+ dcheck(IsIntegerIndex(hash));
+ return hash;
}
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index b9d8733043..6c98766f29 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -74,188 +74,196 @@ class ZoneForwardList;
V(Number) \
V(Numeric)
-#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
- V(AbstractCode) \
- V(AccessCheckNeeded) \
- V(AllocationSite) \
- V(ArrayList) \
- V(BigInt) \
- V(BigIntBase) \
- V(BigIntWrapper) \
- V(ObjectBoilerplateDescription) \
- V(Boolean) \
- V(BooleanWrapper) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(CallHandlerInfo) \
- V(Callable) \
- V(Cell) \
- V(ClassBoilerplate) \
- V(Code) \
- V(CodeDataContainer) \
- V(CompilationCacheTable) \
- V(ConsString) \
- V(Constructor) \
- V(Context) \
- V(CoverageInfo) \
- V(ClosureFeedbackCellArray) \
- V(DataHandler) \
- V(DeoptimizationData) \
- V(DependentCode) \
- V(DescriptorArray) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(ExternalOneByteString) \
- V(ExternalString) \
- V(ExternalTwoByteString) \
- V(FeedbackCell) \
- V(FeedbackMetadata) \
- V(FeedbackVector) \
- V(Filler) \
- V(FixedArray) \
- V(FixedArrayBase) \
- V(FixedArrayExact) \
- V(FixedDoubleArray) \
- V(Foreign) \
- V(FreeSpace) \
- V(Function) \
- V(GlobalDictionary) \
- V(HandlerTable) \
- V(HeapNumber) \
- V(InternalizedString) \
- V(JSArgumentsObject) \
- V(JSArray) \
- V(JSArrayBuffer) \
- V(JSArrayBufferView) \
- V(JSArrayIterator) \
- V(JSAsyncFromSyncIterator) \
- V(JSAsyncFunctionObject) \
- V(JSAsyncGeneratorObject) \
- V(JSBoundFunction) \
- V(JSCollection) \
- V(JSCollectionIterator) \
- V(JSContextExtensionObject) \
- V(JSCustomElementsObject) \
- V(JSDataView) \
- V(JSDate) \
- V(JSError) \
- V(JSFinalizationRegistry) \
- V(JSFunction) \
- V(JSFunctionOrBoundFunction) \
- V(JSGeneratorObject) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(JSMap) \
- V(JSMapIterator) \
- V(JSMessageObject) \
- V(JSModuleNamespace) \
- V(JSObject) \
- V(JSPrimitiveWrapper) \
- V(JSPromise) \
- V(JSProxy) \
- V(JSReceiver) \
- V(JSRegExp) \
- V(JSRegExpStringIterator) \
- V(JSSet) \
- V(JSSetIterator) \
- V(JSSpecialObject) \
- V(JSStringIterator) \
- V(JSTemporalCalendar) \
- V(JSTemporalDuration) \
- V(JSTemporalInstant) \
- V(JSTemporalPlainDate) \
- V(JSTemporalPlainTime) \
- V(JSTemporalPlainDateTime) \
- V(JSTemporalPlainMonthDay) \
- V(JSTemporalPlainYearMonth) \
- V(JSTemporalTimeZone) \
- V(JSTemporalZonedDateTime) \
- V(JSTypedArray) \
- V(JSWeakCollection) \
- V(JSWeakRef) \
- V(JSWeakMap) \
- V(JSWeakSet) \
- V(LoadHandler) \
- V(Map) \
- V(MapCache) \
- V(MegaDomHandler) \
- V(Module) \
- V(Microtask) \
- V(Name) \
- V(NameDictionary) \
- V(NativeContext) \
- V(NormalizedMapCache) \
- V(NumberDictionary) \
- V(NumberWrapper) \
- V(ObjectHashSet) \
- V(ObjectHashTable) \
- V(Oddball) \
- V(OrderedHashMap) \
- V(OrderedHashSet) \
- V(OrderedNameDictionary) \
- V(OSROptimizedCodeCache) \
- V(PreparseData) \
- V(PrimitiveHeapObject) \
- V(PromiseReactionJobTask) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(RegExpMatchInfo) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- V(ScriptWrapper) \
- V(SeqOneByteString) \
- V(SeqString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SimpleNumberDictionary) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourceTextModule) \
- V(SourceTextModuleInfo) \
- V(StoreHandler) \
- V(String) \
- V(StringSet) \
- V(StringWrapper) \
- V(Struct) \
- V(SwissNameDictionary) \
- V(Symbol) \
- V(SymbolWrapper) \
- V(SyntheticModule) \
- V(TemplateInfo) \
- V(TemplateList) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledData) \
- V(UncompiledDataWithPreparseData) \
- V(UncompiledDataWithoutPreparseData) \
- V(UncompiledDataWithPreparseDataAndJob) \
- V(UncompiledDataWithoutPreparseDataWithJob) \
- V(Undetectable) \
- V(UniqueName) \
- IF_WASM(V, WasmApiFunctionRef) \
- IF_WASM(V, WasmArray) \
- IF_WASM(V, WasmCapiFunctionData) \
- IF_WASM(V, WasmTagObject) \
- IF_WASM(V, WasmExceptionPackage) \
- IF_WASM(V, WasmExportedFunctionData) \
- IF_WASM(V, WasmFunctionData) \
- IF_WASM(V, WasmGlobalObject) \
- IF_WASM(V, WasmInternalFunction) \
- IF_WASM(V, WasmInstanceObject) \
- IF_WASM(V, WasmJSFunctionData) \
- IF_WASM(V, WasmMemoryObject) \
- IF_WASM(V, WasmModuleObject) \
- IF_WASM(V, WasmObject) \
- IF_WASM(V, WasmStruct) \
- IF_WASM(V, WasmTypeInfo) \
- IF_WASM(V, WasmTableObject) \
- IF_WASM(V, WasmValueObject) \
- IF_WASM(V, WasmSuspenderObject) \
- V(WeakFixedArray) \
- V(WeakArrayList) \
- V(WeakCell) \
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(AbstractCode) \
+ V(AccessCheckNeeded) \
+ V(AllocationSite) \
+ V(ArrayList) \
+ V(BigInt) \
+ V(BigIntBase) \
+ V(BigIntWrapper) \
+ V(ObjectBoilerplateDescription) \
+ V(Boolean) \
+ V(BooleanWrapper) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(CallHandlerInfo) \
+ V(Callable) \
+ V(Cell) \
+ V(ClassBoilerplate) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(CompilationCacheTable) \
+ V(ConsString) \
+ V(Constructor) \
+ V(Context) \
+ V(CoverageInfo) \
+ V(ClosureFeedbackCellArray) \
+ V(DataHandler) \
+ V(DeoptimizationData) \
+ V(DependentCode) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(ExternalOneByteString) \
+ V(ExternalString) \
+ V(ExternalTwoByteString) \
+ V(FeedbackCell) \
+ V(FeedbackMetadata) \
+ V(FeedbackVector) \
+ V(Filler) \
+ V(FixedArray) \
+ V(FixedArrayBase) \
+ V(FixedArrayExact) \
+ V(FixedDoubleArray) \
+ V(Foreign) \
+ V(FreeSpace) \
+ V(Function) \
+ V(GlobalDictionary) \
+ V(HandlerTable) \
+ V(HeapNumber) \
+ V(InternalizedString) \
+ V(JSArgumentsObject) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSArrayIterator) \
+ V(JSAsyncFromSyncIterator) \
+ V(JSAsyncFunctionObject) \
+ V(JSAsyncGeneratorObject) \
+ V(JSBoundFunction) \
+ V(JSCollection) \
+ V(JSCollectionIterator) \
+ V(JSContextExtensionObject) \
+ V(JSCustomElementsObject) \
+ V(JSDataView) \
+ V(JSDate) \
+ V(JSError) \
+ V(JSExternalObject) \
+ V(JSFinalizationRegistry) \
+ V(JSFunction) \
+ V(JSFunctionOrBoundFunctionOrWrappedFunction) \
+ V(JSGeneratorObject) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(JSMap) \
+ V(JSMapIterator) \
+ V(JSMessageObject) \
+ V(JSModuleNamespace) \
+ V(JSObject) \
+ V(JSObjectWithEmbedderSlots) \
+ V(JSPrimitiveWrapper) \
+ V(JSPromise) \
+ V(JSProxy) \
+ V(JSReceiver) \
+ V(JSRegExp) \
+ V(JSRegExpStringIterator) \
+ V(JSSet) \
+ V(JSSetIterator) \
+ V(JSShadowRealm) \
+ V(JSSharedStruct) \
+ V(JSSpecialObject) \
+ V(JSStringIterator) \
+ V(JSTemporalCalendar) \
+ V(JSTemporalDuration) \
+ V(JSTemporalInstant) \
+ V(JSTemporalPlainDate) \
+ V(JSTemporalPlainTime) \
+ V(JSTemporalPlainDateTime) \
+ V(JSTemporalPlainMonthDay) \
+ V(JSTemporalPlainYearMonth) \
+ V(JSTemporalTimeZone) \
+ V(JSTemporalZonedDateTime) \
+ V(JSTypedArray) \
+ V(JSWeakCollection) \
+ V(JSWeakRef) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(JSWrappedFunction) \
+ V(LoadHandler) \
+ V(Map) \
+ V(MapCache) \
+ V(MegaDomHandler) \
+ V(Module) \
+ V(Microtask) \
+ V(Name) \
+ V(NameDictionary) \
+ V(NameToIndexHashTable) \
+ V(NativeContext) \
+ V(NormalizedMapCache) \
+ V(NumberDictionary) \
+ V(NumberWrapper) \
+ V(ObjectHashSet) \
+ V(ObjectHashTable) \
+ V(Oddball) \
+ V(OrderedHashMap) \
+ V(OrderedHashSet) \
+ V(OrderedNameDictionary) \
+ V(OSROptimizedCodeCache) \
+ V(PreparseData) \
+ V(PrimitiveHeapObject) \
+ V(PromiseReactionJobTask) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(RegExpMatchInfo) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(ScriptWrapper) \
+ V(SeqOneByteString) \
+ V(SeqString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SimpleNumberDictionary) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(SourceTextModuleInfo) \
+ V(StoreHandler) \
+ V(String) \
+ V(StringSet) \
+ V(RegisteredSymbolTable) \
+ V(StringWrapper) \
+ V(Struct) \
+ V(SwissNameDictionary) \
+ V(Symbol) \
+ V(SymbolWrapper) \
+ V(SyntheticModule) \
+ V(TemplateInfo) \
+ V(TemplateList) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledData) \
+ V(UncompiledDataWithPreparseData) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseDataAndJob) \
+ V(UncompiledDataWithoutPreparseDataWithJob) \
+ V(Undetectable) \
+ V(UniqueName) \
+ IF_WASM(V, WasmApiFunctionRef) \
+ IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmCapiFunctionData) \
+ IF_WASM(V, WasmTagObject) \
+ IF_WASM(V, WasmExceptionPackage) \
+ IF_WASM(V, WasmExportedFunctionData) \
+ IF_WASM(V, WasmFunctionData) \
+ IF_WASM(V, WasmGlobalObject) \
+ IF_WASM(V, WasmInternalFunction) \
+ IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmJSFunctionData) \
+ IF_WASM(V, WasmMemoryObject) \
+ IF_WASM(V, WasmModuleObject) \
+ IF_WASM(V, WasmObject) \
+ IF_WASM(V, WasmOnFulfilledData) \
+ IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmTypeInfo) \
+ IF_WASM(V, WasmTableObject) \
+ IF_WASM(V, WasmValueObject) \
+ IF_WASM(V, WasmSuspenderObject) \
+ V(WeakFixedArray) \
+ V(WeakArrayList) \
+ V(WeakCell) \
TORQUE_DEFINED_CLASS_LIST(V)
#ifdef V8_INTL_SUPPORT
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index e678ff5780..f54f296a80 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -16,10 +16,13 @@
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
#undef DECL_INT32_ACCESSORS
-#undef DECL_RELAXED_INT32_ACCESSORS
+#undef DECL_SANDBOXED_POINTER_ACCESSORS
#undef DECL_UINT16_ACCESSORS
#undef DECL_INT16_ACCESSORS
#undef DECL_UINT8_ACCESSORS
+#undef DECL_RELAXED_PRIMITIVE_ACCESSORS
+#undef DECL_RELAXED_INT32_ACCESSORS
+#undef DECL_RELAXED_UINT16_ACCESSORS
#undef DECL_GETTER
#undef DEF_GETTER
#undef DEF_RELAXED_GETTER
@@ -37,16 +40,18 @@
#undef DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS
#undef DECL_CAST
#undef CAST_ACCESSOR
+#undef DEF_PRIMITIVE_ACCESSORS
#undef INT_ACCESSORS
#undef INT32_ACCESSORS
-#undef RELAXED_INT32_ACCESSORS
#undef UINT16_ACCESSORS
#undef UINT8_ACCESSORS
+#undef RELAXED_INT32_ACCESSORS
+#undef RELAXED_UINT16_ACCESSORS
#undef ACCESSORS_CHECKED2
#undef ACCESSORS_CHECKED
#undef ACCESSORS
#undef RENAME_TORQUE_ACCESSORS
-#undef RENAME_UINT16_TORQUE_ACCESSORS
+#undef RENAME_PRIMITIVE_TORQUE_ACCESSORS
#undef ACCESSORS_RELAXED_CHECKED2
#undef ACCESSORS_RELAXED_CHECKED
#undef ACCESSORS_RELAXED
@@ -89,6 +94,7 @@
#undef CONDITIONAL_WRITE_BARRIER
#undef CONDITIONAL_WEAK_WRITE_BARRIER
#undef CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER
+#undef ACQUIRE_READ_INT8_FIELD
#undef ACQUIRE_READ_INT32_FIELD
#undef RELAXED_WRITE_INT8_FIELD
#undef RELAXED_READ_INT8_FIELD
@@ -99,6 +105,7 @@
#undef RELAXED_READ_UINT32_FIELD
#undef ACQUIRE_READ_UINT32_FIELD
#undef RELAXED_WRITE_UINT32_FIELD
+#undef RELEASE_WRITE_INT8_FIELD
#undef RELEASE_WRITE_UINT32_FIELD
#undef RELAXED_READ_INT32_FIELD
#undef RELEASE_WRITE_INT32_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 6fc9967789..70651aab83 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -57,21 +57,25 @@
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
-#define DECL_RELAXED_INT32_ACCESSORS(name) \
- inline int32_t name(RelaxedLoadTag) const; \
- inline void set_##name(int32_t value, RelaxedStoreTag);
+#define DECL_SANDBOXED_POINTER_ACCESSORS(name, type) \
+ DECL_PRIMITIVE_GETTER(name, type) \
+ DECL_PRIMITIVE_SETTER(name, type)
+
+#define DECL_UINT16_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, uint16_t)
+
+#define DECL_INT16_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int16_t)
-#define DECL_UINT16_ACCESSORS(name) \
- inline uint16_t name() const; \
- inline void set_##name(int value);
+#define DECL_UINT8_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, uint8_t)
-#define DECL_INT16_ACCESSORS(name) \
- inline int16_t name() const; \
- inline void set_##name(int16_t value);
+#define DECL_RELAXED_PRIMITIVE_ACCESSORS(name, type) \
+ inline type name(RelaxedLoadTag) const; \
+ inline void set_##name(type value, RelaxedStoreTag);
-#define DECL_UINT8_ACCESSORS(name) \
- inline uint8_t name() const; \
- inline void set_##name(int value);
+#define DECL_RELAXED_INT32_ACCESSORS(name) \
+ DECL_RELAXED_PRIMITIVE_ACCESSORS(name, int32_t)
+
+#define DECL_RELAXED_UINT16_ACCESSORS(name) \
+ DECL_RELAXED_PRIMITIVE_ACCESSORS(name, uint16_t)
// TODO(ishell): eventually isolate-less getters should not be used anymore.
// For full pointer-mode the C++ compiler should optimize away unused isolate
@@ -150,13 +154,21 @@
#define CAST_ACCESSOR(Type) \
Type Type::cast(Object object) { return Type(object.ptr()); }
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() const { return ReadField<int>(offset); } \
- void holder::set_##name(int value) { WriteField<int>(offset, value); }
+#define DEF_PRIMITIVE_ACCESSORS(holder, name, offset, type) \
+ type holder::name() const { return ReadField<type>(offset); } \
+ void holder::set_##name(type value) { WriteField<type>(offset, value); }
+
+#define INT_ACCESSORS(holder, name, offset) \
+ DEF_PRIMITIVE_ACCESSORS(holder, name, offset, int)
+
+#define INT32_ACCESSORS(holder, name, offset) \
+ DEF_PRIMITIVE_ACCESSORS(holder, name, offset, int32_t)
-#define INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { return ReadField<int32_t>(offset); } \
- void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
+#define UINT16_ACCESSORS(holder, name, offset) \
+ DEF_PRIMITIVE_ACCESSORS(holder, name, offset, uint16_t)
+
+#define UINT8_ACCESSORS(holder, name, offset) \
+ DEF_PRIMITIVE_ACCESSORS(holder, name, offset, uint8_t)
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name(RelaxedLoadTag) const { \
@@ -166,20 +178,12 @@
RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
-#define UINT16_ACCESSORS(holder, name, offset) \
- uint16_t holder::name() const { return ReadField<uint16_t>(offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint16_t>(-1)); \
- WriteField<uint16_t>(offset, value); \
- }
-
-#define UINT8_ACCESSORS(holder, name, offset) \
- uint8_t holder::name() const { return ReadField<uint8_t>(offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint8_t>(-1)); \
- WriteField<uint8_t>(offset, value); \
+#define RELAXED_UINT16_ACCESSORS(holder, name, offset) \
+ uint16_t holder::name(RelaxedLoadTag) const { \
+ return RELAXED_READ_UINT16_FIELD(*this, offset); \
+ } \
+ void holder::set_##name(uint16_t value, RelaxedStoreTag) { \
+ RELAXED_WRITE_UINT16_FIELD(*this, offset, value); \
}
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
@@ -209,13 +213,10 @@
TorqueGeneratedClass::set_##torque_name(value, mode); \
}
-#define RENAME_UINT16_TORQUE_ACCESSORS(holder, name, torque_name) \
- uint16_t holder::name() const { \
- return TorqueGeneratedClass::torque_name(); \
- } \
- void holder::set_##name(int value) { \
- DCHECK_EQ(value, static_cast<uint16_t>(value)); \
- TorqueGeneratedClass::set_##torque_name(value); \
+#define RENAME_PRIMITIVE_TORQUE_ACCESSORS(holder, name, torque_name, type) \
+ type holder::name() const { return TorqueGeneratedClass::torque_name(); } \
+ void holder::set_##name(type value) { \
+ TorqueGeneratedClass::set_##torque_name(value); \
}
#define ACCESSORS_RELAXED_CHECKED2(holder, name, type, offset, get_condition, \
@@ -411,6 +412,9 @@
#define FIELD_ADDR(p, offset) ((p).ptr() + offset - kHeapObjectTag)
+#define SEQ_CST_READ_FIELD(p, offset) \
+ TaggedField<Object>::SeqCst_Load(p, offset)
+
#define ACQUIRE_READ_FIELD(p, offset) \
TaggedField<Object>::Acquire_Load(p, offset)
@@ -423,6 +427,9 @@
#define WRITE_FIELD(p, offset, value) \
TaggedField<Object>::store(p, offset, value)
+#define SEQ_CST_WRITE_FIELD(p, offset, value) \
+ TaggedField<Object>::SeqCst_Store(p, offset, value)
+
#define RELEASE_WRITE_FIELD(p, offset, value) \
TaggedField<Object>::Release_Store(p, offset, value)
@@ -484,6 +491,8 @@
WriteBarrier::Marking(object, (object).RawField(offset), value); \
} \
GenerationalBarrier(object, (object).RawField(offset), value); \
+ } else { \
+ SLOW_DCHECK(!WriteBarrier::IsRequired(object, value)); \
} \
} while (false)
#endif
@@ -504,6 +513,8 @@
value); \
} \
GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ } else { \
+ SLOW_DCHECK(!WriteBarrier::IsRequired(object, value)); \
} \
} while (false)
#endif
@@ -522,6 +533,8 @@
} \
GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), \
value); \
+ } else { \
+ SLOW_DCHECK(!WriteBarrier::IsRequired(object, value)); \
} \
} while (false)
#endif
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 23c15fc4b3..e2eb0da133 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -11,8 +11,10 @@
#include "src/ic/handler-configuration.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/call-site-info.h"
#include "src/objects/cell.h"
#include "src/objects/data-handler.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/free-space-inl.h"
@@ -26,7 +28,6 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/source-text-module.h"
-#include "src/objects/stack-frame-info.h"
#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects.h"
@@ -83,22 +84,24 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
ObjectVisitor* v) {
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
- int header_size = JSObject::GetHeaderSize(map);
- int inobject_fields_offset = map.GetInObjectPropertyOffset(0);
+ int header_end_offset = JSObject::GetHeaderSize(map);
+ int inobject_fields_start_offset = map.GetInObjectPropertyOffset(0);
// We are always requested to process header and embedder fields.
- DCHECK_LE(inobject_fields_offset, end_offset);
+ DCHECK_LE(inobject_fields_start_offset, end_offset);
// Embedder fields are located between header and inobject properties.
- if (header_size < inobject_fields_offset) {
+ if (header_end_offset < inobject_fields_start_offset) {
// There are embedder fields.
- IteratePointers(obj, start_offset, header_size, v);
- // Iterate only tagged payload of the embedder slots and skip raw payload.
- DCHECK_EQ(header_size, JSObject::GetEmbedderFieldsStartOffset(map));
- for (int offset = header_size + EmbedderDataSlot::kTaggedPayloadOffset;
- offset < inobject_fields_offset; offset += kEmbedderDataSlotSize) {
- IteratePointer(obj, offset, v);
+ DCHECK_EQ(header_end_offset, JSObject::GetEmbedderFieldsStartOffset(map));
+ IteratePointers(obj, start_offset, header_end_offset, v);
+ for (int offset = header_end_offset; offset < inobject_fields_start_offset;
+ offset += kEmbedderDataSlotSize) {
+ IteratePointer(obj, offset + EmbedderDataSlot::kTaggedPayloadOffset, v);
+ v->VisitExternalPointer(
+ obj, obj.RawExternalPointerField(
+ offset + EmbedderDataSlot::kExternalPointerOffset));
}
// Proceed processing inobject properties.
- start_offset = inobject_fields_offset;
+ start_offset = inobject_fields_start_offset;
}
#else
// We store raw aligned pointers as Smis, so it's safe to iterate the whole
@@ -433,6 +436,22 @@ class JSDataView::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class JSExternalObject::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ v->VisitExternalPointer(obj, obj.RawExternalPointerField(kValueOffset));
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
template <typename Derived>
class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
: public BodyDescriptorBase {
@@ -635,6 +654,8 @@ class Foreign::BodyDescriptor final : public BodyDescriptorBase {
v->VisitExternalReference(
Foreign::cast(obj), reinterpret_cast<Address*>(
obj.RawField(kForeignAddressOffset).address()));
+ v->VisitExternalPointer(obj,
+ obj.RawExternalPointerField(kForeignAddressOffset));
}
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
@@ -783,7 +804,14 @@ class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {}
+ ObjectVisitor* v) {
+ ExternalString string = ExternalString::cast(obj);
+ v->VisitExternalPointer(obj,
+ string.RawExternalPointerField(kResourceOffset));
+ if (string.is_uncached()) return;
+ v->VisitExternalPointer(
+ obj, string.RawExternalPointerField(kResourceDataOffset));
+ }
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
@@ -794,7 +822,14 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {}
+ ObjectVisitor* v) {
+ ExternalString string = ExternalString::cast(obj);
+ v->VisitExternalPointer(obj,
+ string.RawExternalPointerField(kResourceOffset));
+ if (string.is_uncached()) return;
+ v->VisitExternalPointer(
+ obj, string.RawExternalPointerField(kResourceDataOffset));
+ }
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
@@ -916,6 +951,8 @@ class NativeContext::BodyDescriptor final : public BodyDescriptorBase {
NativeContext::kEndOfStrongFieldsOffset, v);
IterateCustomWeakPointers(obj, NativeContext::kStartOfWeakFieldsOffset,
NativeContext::kEndOfWeakFieldsOffset, v);
+ v->VisitExternalPointer(obj,
+ obj.RawExternalPointerField(kMicrotaskQueueOffset));
}
static inline int SizeOf(Map map, HeapObject object) {
@@ -941,6 +978,8 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
v->VisitCodePointer(obj, obj.RawCodeField(kCodeOffset));
+ v->VisitExternalPointer(
+ obj, obj.RawExternalPointerField(kCodeEntryPointOffset));
}
}
@@ -972,12 +1011,14 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
- // Iterate only tagged payload of the embedder slots and skip raw payload.
- for (int offset = EmbedderDataArray::OffsetOfElementAt(0) +
- EmbedderDataSlot::kTaggedPayloadOffset;
+ for (int offset = EmbedderDataArray::OffsetOfElementAt(0);
offset < object_size; offset += kEmbedderDataSlotSize) {
- IteratePointer(obj, offset, v);
+ IteratePointer(obj, offset + EmbedderDataSlot::kTaggedPayloadOffset, v);
+ v->VisitExternalPointer(
+ obj, obj.RawExternalPointerField(
+ offset + EmbedderDataSlot::kExternalPointerOffset));
}
+
#else
// We store raw aligned pointers as Smis, so it's safe to iterate the whole
// array.
@@ -1036,6 +1077,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ case REGISTERED_SYMBOL_TABLE_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
return CALL_APPLY(FixedArray);
case EPHEMERON_HASH_TABLE_TYPE:
@@ -1077,6 +1120,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(WasmInternalFunction);
case WASM_JS_FUNCTION_DATA_TYPE:
return CALL_APPLY(WasmJSFunctionData);
+ case WASM_ON_FULFILLED_DATA_TYPE:
+ return CALL_APPLY(WasmOnFulfilledData);
case WASM_STRUCT_TYPE:
return CALL_APPLY(WasmStruct);
case WASM_TYPE_INFO_TYPE:
@@ -1120,6 +1165,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case JS_SET_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
+ case JS_SHADOW_REALM_TYPE:
+ case JS_SHARED_STRUCT_TYPE:
case JS_STRING_ITERATOR_PROTOTYPE_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
@@ -1137,6 +1184,7 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_WRAPPED_FUNCTION_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
@@ -1179,6 +1227,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(JSDataView);
case JS_TYPED_ARRAY_TYPE:
return CALL_APPLY(JSTypedArray);
+ case JS_EXTERNAL_OBJECT_TYPE:
+ return CALL_APPLY(JSExternalObject);
case WEAK_CELL_TYPE:
return CALL_APPLY(WeakCell);
case JS_WEAK_REF_TYPE:
@@ -1265,6 +1315,12 @@ void HeapObject::IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
}
template <typename ObjectVisitor>
+void HeapObject::IterateFast(Map map, int object_size, ObjectVisitor* v) {
+ v->VisitMapPointer(*this);
+ IterateBodyFast(map, object_size, v);
+}
+
+template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
Map m = map(cage_base);
IterateBodyFast(m, SizeFromMap(m), v);
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index c31db239c5..eb1cab6664 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -140,9 +140,11 @@ namespace internal {
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
cached_template_object) \
+ V(_, CALL_SITE_INFO_TYPE, CallSiteInfo, call_site_info) \
V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
+ V(_, ERROR_STACK_DATA_TYPE, ErrorStackData, error_stack_data) \
V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index c079675d11..82d8776ef3 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -42,8 +42,8 @@
#include "src/objects/tagged-impl-inl.h"
#include "src/objects/tagged-index.h"
#include "src/objects/templates.h"
-#include "src/security/caged-pointer-inl.h"
-#include "src/security/external-pointer-inl.h"
+#include "src/sandbox/external-pointer-inl.h"
+#include "src/sandbox/sandboxed-pointer-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -159,6 +159,21 @@ T Object::Relaxed_ReadField(size_t offset) const {
reinterpret_cast<AtomicT*>(field_address(offset))));
}
+template <class T,
+ typename std::enable_if<(std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value) &&
+ !std::is_floating_point<T>::value,
+ int>::type>
+void Object::Relaxed_WriteField(size_t offset, T value) {
+ // Pointer compression causes types larger than kTaggedSize to be
+ // unaligned. Atomic stores must be aligned.
+ DCHECK_IMPLIES(COMPRESS_POINTERS_BOOL, sizeof(T) <= kTaggedSize);
+ using AtomicT = typename base::AtomicTypeFromByteWidth<sizeof(T)>::type;
+ base::AsAtomicImpl<AtomicT>::Relaxed_Store(
+ reinterpret_cast<AtomicT*>(field_address(offset)),
+ static_cast<AtomicT>(value));
+}
+
bool HeapObject::InSharedHeap() const {
if (IsReadOnlyHeapObject(*this)) return V8_SHARED_RO_HEAP_BOOL;
return InSharedWritableHeap();
@@ -190,7 +205,7 @@ DEF_GETTER(HeapObject, IsUniqueName, bool) {
}
DEF_GETTER(HeapObject, IsFunction, bool) {
- return IsJSFunctionOrBoundFunction();
+ return IsJSFunctionOrBoundFunctionOrWrappedFunction();
}
DEF_GETTER(HeapObject, IsCallable, bool) {
@@ -288,9 +303,8 @@ bool Object::IsNumeric(PtrComprCageBase cage_base) const {
}
DEF_GETTER(HeapObject, IsArrayList, bool) {
- ReadOnlyRoots roots = GetReadOnlyRoots(cage_base);
- return *this == roots.empty_fixed_array() ||
- map(cage_base) == roots.array_list_map();
+ return map(cage_base) ==
+ GetReadOnlyRoots(cage_base).unchecked_array_list_map();
}
DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
@@ -563,10 +577,11 @@ MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
}
// static
-MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
+MaybeHandle<Object> Object::ToPrimitive(Isolate* isolate, Handle<Object> input,
ToPrimitiveHint hint) {
if (input->IsPrimitive()) return input;
- return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
+ return JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(input),
+ hint);
}
// static
@@ -644,24 +659,26 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
-Address Object::ReadCagedPointerField(size_t offset,
- PtrComprCageBase cage_base) const {
- return i::ReadCagedPointerField(field_address(offset), cage_base);
+Address Object::ReadSandboxedPointerField(size_t offset,
+ PtrComprCageBase cage_base) const {
+ return i::ReadSandboxedPointerField(field_address(offset), cage_base);
}
-void Object::WriteCagedPointerField(size_t offset, PtrComprCageBase cage_base,
- Address value) {
- i::WriteCagedPointerField(field_address(offset), cage_base, value);
+void Object::WriteSandboxedPointerField(size_t offset,
+ PtrComprCageBase cage_base,
+ Address value) {
+ i::WriteSandboxedPointerField(field_address(offset), cage_base, value);
}
-void Object::WriteCagedPointerField(size_t offset, Isolate* isolate,
- Address value) {
- i::WriteCagedPointerField(field_address(offset), PtrComprCageBase(isolate),
- value);
+void Object::WriteSandboxedPointerField(size_t offset, Isolate* isolate,
+ Address value) {
+ i::WriteSandboxedPointerField(field_address(offset),
+ PtrComprCageBase(isolate), value);
}
-void Object::InitExternalPointerField(size_t offset, Isolate* isolate) {
- i::InitExternalPointerField(field_address(offset), isolate);
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
+ ExternalPointerTag tag) {
+ i::InitExternalPointerField(field_address(offset), isolate, tag);
}
void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
@@ -691,6 +708,10 @@ CodeObjectSlot HeapObject::RawCodeField(int byte_offset) const {
return CodeObjectSlot(field_address(byte_offset));
}
+ExternalPointer_t HeapObject::RawExternalPointerField(int byte_offset) const {
+ return ReadRawExternalPointerField(field_address(byte_offset));
+}
+
MapWord MapWord::FromMap(const Map map) {
DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
#ifdef V8_MAP_PACKING
@@ -721,7 +742,7 @@ HeapObject MapWord::ToForwardingAddress() {
HeapObject obj = HeapObject::FromAddress(value_);
// For objects allocated outside of the main pointer compression cage the
// variant with explicit cage base must be used.
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode());
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(obj));
return obj;
}
@@ -782,58 +803,70 @@ Map HeapObject::map(PtrComprCageBase cage_base) const {
}
void HeapObject::set_map(Map value) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !value.is_null()) {
- GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
- }
-#endif
- set_map_word(MapWord::FromMap(value), kRelaxedStore);
-#ifndef V8_DISABLE_WRITE_BARRIERS
- if (!value.is_null()) {
- // TODO(1600) We are passing kNullAddress as a slot because maps can never
- // be on an evacuation candidate.
- WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
- }
-#endif
+ set_map<EmitWriteBarrier::kYes>(value, kRelaxedStore,
+ VerificationMode::kPotentialLayoutChange);
}
-DEF_ACQUIRE_GETTER(HeapObject, map, Map) {
- return map_word(cage_base, kAcquireLoad).ToMap();
+void HeapObject::set_map(Map value, ReleaseStoreTag tag) {
+ set_map<EmitWriteBarrier::kYes>(value, kReleaseStore,
+ VerificationMode::kPotentialLayoutChange);
}
-void HeapObject::set_map(Map value, ReleaseStoreTag tag) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !value.is_null()) {
- GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
- }
-#endif
- set_map_word(MapWord::FromMap(value), tag);
-#ifndef V8_DISABLE_WRITE_BARRIERS
- if (!value.is_null()) {
- // TODO(1600) We are passing kNullAddress as a slot because maps can never
- // be on an evacuation candidate.
- WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
- }
-#endif
+void HeapObject::set_map_safe_transition(Map value) {
+ set_map<EmitWriteBarrier::kYes>(value, kRelaxedStore,
+ VerificationMode::kSafeMapTransition);
+}
+
+void HeapObject::set_map_safe_transition(Map value, ReleaseStoreTag tag) {
+ set_map<EmitWriteBarrier::kYes>(value, kReleaseStore,
+ VerificationMode::kSafeMapTransition);
}
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map value, RelaxedStoreTag tag) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !value.is_null()) {
- GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
- }
-#endif
- set_map_word(MapWord::FromMap(value), tag);
+ set_map<EmitWriteBarrier::kNo>(value, kRelaxedStore,
+ VerificationMode::kPotentialLayoutChange);
}
void HeapObject::set_map_no_write_barrier(Map value, ReleaseStoreTag tag) {
+ set_map<EmitWriteBarrier::kNo>(value, kReleaseStore,
+ VerificationMode::kPotentialLayoutChange);
+}
+
+template <HeapObject::EmitWriteBarrier emit_write_barrier, typename MemoryOrder>
+void HeapObject::set_map(Map value, MemoryOrder order, VerificationMode mode) {
+#if V8_ENABLE_WEBASSEMBLY
+ // In {WasmGraphBuilder::SetMap} and {WasmGraphBuilder::LoadMap}, we treat
+ // maps as immutable. Therefore we are not allowed to mutate them here.
+ DCHECK(!value.IsWasmStructMap() && !value.IsWasmArrayMap());
+#endif
+ // Object layout changes are currently not supported on background threads.
+ // This method might change object layout and therefore can't be used on
+ // background threads.
+ DCHECK_IMPLIES(mode != VerificationMode::kSafeMapTransition,
+ !LocalHeap::Current());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !value.is_null()) {
- GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
+ Heap* heap = GetHeapFromWritableObject(*this);
+ if (mode == VerificationMode::kSafeMapTransition) {
+ heap->VerifySafeMapTransition(*this, value);
+ } else {
+ DCHECK_EQ(mode, VerificationMode::kPotentialLayoutChange);
+ heap->VerifyObjectLayoutChange(*this, value);
+ }
+ }
+#endif
+ set_map_word(MapWord::FromMap(value), order);
+#ifndef V8_DISABLE_WRITE_BARRIERS
+ if (!value.is_null()) {
+ if (emit_write_barrier == EmitWriteBarrier::kYes) {
+ WriteBarrier::Marking(*this, map_slot(), value);
+ } else {
+ DCHECK_EQ(emit_write_barrier, EmitWriteBarrier::kNo);
+ SLOW_DCHECK(!WriteBarrier::IsRequired(*this, value));
+ }
}
#endif
- set_map_word(MapWord::FromMap(value), tag);
}
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
@@ -842,13 +875,17 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
- // TODO(1600) We are passing kNullAddress as a slot because maps can never
- // be on an evacuation candidate.
- WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
+ WriteBarrier::Marking(*this, map_slot(), value);
+ } else {
+ SLOW_DCHECK(!WriteBarrier::IsRequired(*this, value));
}
#endif
}
+DEF_ACQUIRE_GETTER(HeapObject, map, Map) {
+ return map_word(cage_base, kAcquireLoad).ToMap();
+}
+
ObjectSlot HeapObject::map_slot() const {
return ObjectSlot(MapField::address(*this));
}
@@ -1083,7 +1120,8 @@ Object Object::GetSimpleHash(Object object) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(object));
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (object.IsHeapNumber()) {
+ auto instance_type = HeapObject::cast(object).map().instance_type();
+ if (InstanceTypeChecker::IsHeapNumber(instance_type)) {
double num = HeapNumber::cast(object).value();
if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
// Use ComputeUnseededHash for all values in Signed32 range, including -0,
@@ -1096,20 +1134,16 @@ Object Object::GetSimpleHash(Object object) {
hash = ComputeLongHash(base::double_to_uint64(num));
}
return Smi::FromInt(hash & Smi::kMaxValue);
- }
- if (object.IsName()) {
+ } else if (InstanceTypeChecker::IsName(instance_type)) {
uint32_t hash = Name::cast(object).EnsureHash();
return Smi::FromInt(hash);
- }
- if (object.IsOddball()) {
+ } else if (InstanceTypeChecker::IsOddball(instance_type)) {
uint32_t hash = Oddball::cast(object).to_string().EnsureHash();
return Smi::FromInt(hash);
- }
- if (object.IsBigInt()) {
+ } else if (InstanceTypeChecker::IsBigInt(instance_type)) {
uint32_t hash = BigInt::cast(object).Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
- }
- if (object.IsSharedFunctionInfo()) {
+ } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
uint32_t hash = SharedFunctionInfo::cast(object).Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
}
@@ -1127,6 +1161,51 @@ Object Object::GetHash() {
return receiver.GetIdentityHash();
}
+bool Object::IsShared() const {
+ // This logic should be kept in sync with fast paths in
+ // CodeStubAssembler::SharedValueBarrier.
+
+ // Smis are trivially shared.
+ if (IsSmi()) return true;
+
+ HeapObject object = HeapObject::cast(*this);
+
+ // RO objects are shared when the RO space is shared.
+ if (IsReadOnlyHeapObject(object)) {
+ return ReadOnlyHeap::IsReadOnlySpaceShared();
+ }
+
+ // Check if this object is already shared.
+ switch (object.map().instance_type()) {
+ case SHARED_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
+ case JS_SHARED_STRUCT_TYPE:
+ DCHECK(object.InSharedHeap());
+ return true;
+ case INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ if (FLAG_shared_string_table) {
+ DCHECK(object.InSharedHeap());
+ return true;
+ }
+ return false;
+ case HEAP_NUMBER_TYPE:
+ return object.InSharedWritableHeap();
+ default:
+ return false;
+ }
+}
+
+// static
+MaybeHandle<Object> Object::Share(Isolate* isolate, Handle<Object> value,
+ ShouldThrow throw_if_cannot_be_shared) {
+ // Sharing values requires the RO space be shared.
+ DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
+ if (value->IsShared()) return value;
+ return ShareSlow(isolate, Handle<HeapObject>::cast(value),
+ throw_if_cannot_be_shared);
+}
+
Handle<Object> ObjectHashTableShape::AsHandle(Handle<Object> key) {
return key;
}
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 1d180f7e30..3fa06d921c 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -49,6 +49,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-table-inl.h"
@@ -110,7 +111,6 @@
#include "src/objects/property-descriptor.h"
#include "src/objects/prototype.h"
#include "src/objects/slots-atomic-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-comparator.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/struct-inl.h"
@@ -322,7 +322,7 @@ MaybeHandle<Object> Object::ConvertToNumberOrNumeric(Isolate* isolate,
}
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kNumber),
Object);
}
@@ -362,8 +362,8 @@ MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
Handle<Object> input) {
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
- Name);
+ isolate, input,
+ Object::ToPrimitive(isolate, input, ToPrimitiveHint::kString), Name);
if (input->IsName()) return Handle<Name>::cast(input);
return ToString(isolate, input);
}
@@ -374,7 +374,7 @@ MaybeHandle<Object> Object::ConvertToPropertyKey(Isolate* isolate,
Handle<Object> value) {
// 1. Let key be ToPrimitive(argument, hint String).
MaybeHandle<Object> maybe_key =
- Object::ToPrimitive(value, ToPrimitiveHint::kString);
+ Object::ToPrimitive(isolate, value, ToPrimitiveHint::kString);
// 2. ReturnIfAbrupt(key).
Handle<Object> key;
if (!maybe_key.ToHandle(&key)) return key;
@@ -412,7 +412,7 @@ MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
}
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kString),
String);
// The previous isString() check happened in Object::ToString and thus we
@@ -427,8 +427,9 @@ namespace {
bool IsErrorObject(Isolate* isolate, Handle<Object> object) {
if (!object->IsJSReceiver()) return false;
- Handle<Symbol> symbol = isolate->factory()->stack_trace_symbol();
- return JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol)
+ Handle<Symbol> symbol = isolate->factory()->error_stack_symbol();
+ return JSReceiver::HasOwnProperty(isolate, Handle<JSReceiver>::cast(object),
+ symbol)
.FromMaybe(false);
}
@@ -440,11 +441,11 @@ Handle<String> AsStringOrEmpty(Isolate* isolate, Handle<Object> object) {
Handle<String> NoSideEffectsErrorToString(Isolate* isolate,
Handle<JSReceiver> error) {
Handle<Name> name_key = isolate->factory()->name_string();
- Handle<Object> name = JSReceiver::GetDataProperty(error, name_key);
+ Handle<Object> name = JSReceiver::GetDataProperty(isolate, error, name_key);
Handle<String> name_str = AsStringOrEmpty(isolate, name);
Handle<Name> msg_key = isolate->factory()->message_string();
- Handle<Object> msg = JSReceiver::GetDataProperty(error, msg_key);
+ Handle<Object> msg = JSReceiver::GetDataProperty(isolate, error, msg_key);
Handle<String> msg_str = AsStringOrEmpty(isolate, msg);
if (name_str->length() == 0) return msg_str;
@@ -529,7 +530,7 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
// -- J S R e c e i v e r
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(input);
Handle<Object> to_string = JSReceiver::GetDataProperty(
- receiver, isolate->factory()->toString_string());
+ isolate, receiver, isolate->factory()->toString_string());
if (IsErrorObject(isolate, input) ||
*to_string == *isolate->error_to_string()) {
@@ -540,7 +541,7 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
Handle<JSReceiver>::cast(input));
} else if (*to_string == *isolate->object_to_string()) {
Handle<Object> ctor = JSReceiver::GetDataProperty(
- receiver, isolate->factory()->constructor_string());
+ isolate, receiver, isolate->factory()->constructor_string());
if (ctor->IsFunction()) {
Handle<String> ctor_name;
if (ctor->IsJSBoundFunction()) {
@@ -598,7 +599,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
Handle<String> builtin_tag = handle(receiver->class_name(), isolate);
Handle<Object> tag_obj = JSReceiver::GetDataProperty(
- receiver, isolate->factory()->to_string_tag_symbol());
+ isolate, receiver, isolate->factory()->to_string_tag_symbol());
Handle<String> tag =
tag_obj->IsString() ? Handle<String>::cast(tag_obj) : builtin_tag;
@@ -707,8 +708,8 @@ ComparisonResult Reverse(ComparisonResult result) {
Maybe<ComparisonResult> Object::Compare(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
// ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
- if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
- !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
+ if (!Object::ToPrimitive(isolate, x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
+ !Object::ToPrimitive(isolate, y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
return Nothing<ComparisonResult>();
}
if (x->IsString() && y->IsString()) {
@@ -768,7 +769,7 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
} else if (y->IsBigInt()) {
return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ if (!JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
}
@@ -790,7 +791,7 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
return BigInt::EqualToString(isolate, Handle<BigInt>::cast(y),
Handle<String>::cast(x));
} else if (y->IsJSReceiver()) {
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ if (!JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
}
@@ -811,7 +812,7 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
x = Oddball::ToNumber(isolate, Handle<Oddball>::cast(x));
return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ if (!JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
}
@@ -823,7 +824,7 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
if (y->IsSymbol()) {
return Just(x.is_identical_to(y));
} else if (y->IsJSReceiver()) {
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ if (!JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
}
@@ -842,7 +843,7 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
return Just(x->IsUndetectable());
} else if (y->IsBoolean()) {
y = Oddball::ToNumber(isolate, Handle<Oddball>::cast(y));
- } else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
+ } else if (!JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(x))
.ToHandle(&x)) {
return Nothing<bool>();
}
@@ -890,8 +891,10 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
Handle<String>::cast(rhs));
}
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(rhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(isolate, lhs),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(isolate, rhs),
+ Object);
if (lhs->IsString() || rhs->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToString(isolate, rhs),
Object);
@@ -2326,6 +2329,8 @@ bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
case ORDERED_HASH_SET_TYPE:
return false; // We'll rehash from the JSMap or JSSet referencing them.
case NAME_DICTIONARY_TYPE:
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ case REGISTERED_SYMBOL_TABLE_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
@@ -2354,6 +2359,8 @@ bool HeapObject::CanBeRehashed(PtrComprCageBase cage_base) const {
case ORDERED_NAME_DICTIONARY_TYPE:
return false;
case NAME_DICTIONARY_TYPE:
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ case REGISTERED_SYMBOL_TABLE_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
@@ -2383,6 +2390,12 @@ void HeapObject::RehashBasedOnMap(IsolateT* isolate) {
case NAME_DICTIONARY_TYPE:
NameDictionary::cast(*this).Rehash(isolate);
break;
+ case NAME_TO_INDEX_HASH_TABLE_TYPE:
+ NameToIndexHashTable::cast(*this).Rehash(isolate);
+ break;
+ case REGISTERED_SYMBOL_TABLE_TYPE:
+ RegisteredSymbolTable::cast(*this).Rehash(isolate);
+ break;
case SWISS_NAME_DICTIONARY_TYPE:
SwissNameDictionary::cast(*this).Rehash(isolate);
break;
@@ -2435,10 +2448,6 @@ void HeapObject::RehashBasedOnMap(IsolateT* isolate) {
template void HeapObject::RehashBasedOnMap(Isolate* isolate);
template void HeapObject::RehashBasedOnMap(LocalIsolate* isolate);
-bool HeapObject::IsExternal(Isolate* isolate) const {
- return map(isolate).FindRootMap(isolate) == isolate->heap()->external_map();
-}
-
void DescriptorArray::GeneralizeAllFields() {
int length = number_of_descriptors();
for (InternalIndex i : InternalIndex::Range(length)) {
@@ -2502,6 +2511,12 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Maybe<bool> result =
JSObject::SetPropertyWithInterceptor(it, should_throw, value);
if (result.IsNothing() || result.FromJust()) return result;
+ // Assuming that the callback have side effects, we use
+ // Object::SetSuperProperty() which works properly regardless on
+ // whether the property was present on the receiver or not when
+ // storing to the receiver.
+ // Proceed lookup from the next state.
+ it->Next();
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -2522,10 +2537,8 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
// property to the receiver.
it->NotFound();
}
- return Object::SetSuperProperty(it, value, store_origin,
- should_throw);
}
- break;
+ return Object::SetSuperProperty(it, value, store_origin, should_throw);
}
case LookupIterator::ACCESSOR: {
@@ -2815,7 +2828,16 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
} else // NOLINT(readability/braces)
#endif // V8_ENABLE_WEBASSEMBLY
- {
+ // clang-format off
+ if (V8_UNLIKELY(receiver->IsJSSharedStruct(isolate))) {
+ // clang-format on
+
+ // Shared structs can only point to primitives or shared values.
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, to_assign, Object::Share(isolate, to_assign, kThrowOnError),
+ Nothing<bool>());
+ it->WriteDataValue(to_assign, false);
+ } else {
// Possibly migrate to the most up-to-date map that will be able to store
// |value| under it->name().
it->PrepareForDataProperty(to_assign);
@@ -2907,6 +2929,30 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
return Just(true);
}
+// static
+MaybeHandle<Object> Object::ShareSlow(Isolate* isolate,
+ Handle<HeapObject> value,
+ ShouldThrow throw_if_cannot_be_shared) {
+ // Use Object::Share() if value might already be shared.
+ DCHECK(!value->IsShared());
+
+ if (value->IsString()) {
+ return String::Share(isolate, Handle<String>::cast(value));
+ }
+
+ if (value->IsHeapNumber()) {
+ uint64_t bits = HeapNumber::cast(*value).value_as_bits(kRelaxedLoad);
+ return isolate->factory()
+ ->NewHeapNumberFromBits<AllocationType::kSharedOld>(bits);
+ }
+
+ if (throw_if_cannot_be_shared == kThrowOnError) {
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kCannotBeShared, value), Object);
+ }
+ return MaybeHandle<Object>();
+}
+
template <class T>
static int AppendUniqueCallbacks(Isolate* isolate,
Handle<TemplateList> callbacks,
@@ -3016,7 +3062,7 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
// 7. If trap is undefined, then
if (trap->IsUndefined(isolate)) {
// 7a. Return target.[[HasProperty]](P).
- return JSReceiver::HasProperty(target, name);
+ return JSReceiver::HasProperty(isolate, target, name);
}
// 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, P»)).
Handle<Object> trap_result_obj;
@@ -3808,22 +3854,25 @@ Handle<DescriptorArray> DescriptorArray::CopyUpTo(Isolate* isolate,
}
Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
- Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
- PropertyAttributes attributes, int slack) {
+ Isolate* isolate, Handle<DescriptorArray> source_handle,
+ int enumeration_index, PropertyAttributes attributes, int slack) {
if (enumeration_index + slack == 0) {
return isolate->factory()->empty_descriptor_array();
}
int size = enumeration_index;
-
- Handle<DescriptorArray> descriptors =
+ Handle<DescriptorArray> copy_handle =
DescriptorArray::Allocate(isolate, size, slack);
+ DisallowGarbageCollection no_gc;
+ auto source = *source_handle;
+ auto copy = *copy_handle;
+
if (attributes != NONE) {
for (InternalIndex i : InternalIndex::Range(size)) {
- MaybeObject value_or_field_type = desc->GetValue(i);
- Name key = desc->GetKey(i);
- PropertyDetails details = desc->GetDetails(i);
+ MaybeObject value_or_field_type = source.GetValue(i);
+ Name key = source.GetKey(i);
+ PropertyDetails details = source.GetDetails(i);
// Bulk attribute changes never affect private properties.
if (!key.IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
@@ -3837,35 +3886,39 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
details = details.CopyAddAttributes(
static_cast<PropertyAttributes>(attributes & mask));
}
- descriptors->Set(i, key, value_or_field_type, details);
+ copy.Set(i, key, value_or_field_type, details);
}
} else {
for (InternalIndex i : InternalIndex::Range(size)) {
- descriptors->CopyFrom(i, *desc);
+ copy.CopyFrom(i, source);
}
}
- if (desc->number_of_descriptors() != enumeration_index) descriptors->Sort();
+ if (source.number_of_descriptors() != enumeration_index) copy.Sort();
- return descriptors;
+ return copy_handle;
}
// Create a new descriptor array with only enumerable, configurable, writeable
// data properties, but identical field locations.
Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
- Isolate* isolate, Handle<DescriptorArray> src, int enumeration_index,
+ Isolate* isolate, Handle<DescriptorArray> src_handle, int enumeration_index,
int slack) {
if (enumeration_index + slack == 0) {
return isolate->factory()->empty_descriptor_array();
}
int size = enumeration_index;
- Handle<DescriptorArray> descriptors =
+ Handle<DescriptorArray> descriptors_handle =
DescriptorArray::Allocate(isolate, size, slack);
+ DisallowGarbageCollection no_gc;
+ auto src = *src_handle;
+ auto descriptors = *descriptors_handle;
+
for (InternalIndex i : InternalIndex::Range(size)) {
- Name key = src->GetKey(i);
- PropertyDetails details = src->GetDetails(i);
+ Name key = src.GetKey(i);
+ PropertyDetails details = src.GetDetails(i);
Representation new_representation = details.representation();
DCHECK(!key.IsPrivateName());
@@ -3874,7 +3927,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
// If the new representation is an in-place changeable field, make it
// generic as possible (under in-place changes) to avoid type confusion if
// the source representation changes after this feedback has been collected.
- MaybeObject type = src->GetValue(i);
+ MaybeObject type = src.GetValue(i);
if (details.location() == PropertyLocation::kField) {
type = MaybeObject::FromObject(FieldType::Any());
// TODO(bmeurer,ishell): Igor suggested to use some kind of dynamic
@@ -3891,12 +3944,12 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
details.constness(), new_representation,
details.field_index());
- descriptors->Set(i, key, type, new_details);
+ descriptors.Set(i, key, type, new_details);
}
- descriptors->Sort();
+ descriptors.Sort();
- return descriptors;
+ return descriptors_handle;
}
bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
@@ -4048,15 +4101,11 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
// static
Handle<ArrayList> ArrayList::EnsureSpace(Isolate* isolate,
Handle<ArrayList> array, int length) {
- const bool empty = (array->length() == 0);
- Handle<FixedArray> ret =
- EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length);
- if (empty) {
- ret->set_map_no_write_barrier(array->GetReadOnlyRoots().array_list_map());
-
- Handle<ArrayList>::cast(ret)->SetLength(0);
- }
- return Handle<ArrayList>::cast(ret);
+ DCHECK_LT(0, length);
+ auto new_array = Handle<ArrayList>::cast(
+ EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length));
+ DCHECK_EQ(array->Length(), new_array->Length());
+ return new_array;
}
// static
@@ -4065,10 +4114,14 @@ Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
const MaybeObjectHandle& value) {
int length = array->length();
array = EnsureSpace(isolate, array, length + 1);
- // Reload length; GC might have removed elements from the array.
- length = array->length();
- array->Set(length, *value);
- array->set_length(length + 1);
+ {
+ DisallowGarbageCollection no_gc;
+ WeakArrayList raw = *array;
+ // Reload length; GC might have removed elements from the array.
+ length = raw.length();
+ raw.Set(length, *value);
+ raw.set_length(length + 1);
+ }
return array;
}
@@ -4078,11 +4131,15 @@ Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
const MaybeObjectHandle& value2) {
int length = array->length();
array = EnsureSpace(isolate, array, length + 2);
- // Reload length; GC might have removed elements from the array.
- length = array->length();
- array->Set(length, *value1);
- array->Set(length + 1, *value2);
- array->set_length(length + 2);
+ {
+ DisallowGarbageCollection no_gc;
+ WeakArrayList raw = *array;
+ // Reload length; GC might have removed elements from the array.
+ length = array->length();
+ raw.Set(length, *value1);
+ raw.Set(length + 1, *value2);
+ raw.set_length(length + 2);
+ }
return array;
}
@@ -4091,17 +4148,23 @@ Handle<WeakArrayList> WeakArrayList::Append(Isolate* isolate,
Handle<WeakArrayList> array,
const MaybeObjectHandle& value,
AllocationType allocation) {
- int length = array->length();
+ int length = 0;
+ int new_length = 0;
+ {
+ DisallowGarbageCollection no_gc;
+ WeakArrayList raw = *array;
+ length = raw.length();
- if (length < array->capacity()) {
- array->Set(length, *value);
- array->set_length(length + 1);
- return array;
- }
+ if (length < raw.capacity()) {
+ raw.Set(length, *value);
+ raw.set_length(length + 1);
+ return array;
+ }
- // Not enough space in the array left, either grow, shrink or
- // compact the array.
- int new_length = array->CountLiveElements() + 1;
+ // Not enough space in the array left, either grow, shrink or
+ // compact the array.
+ new_length = raw.CountLiveElements() + 1;
+ }
bool shrink = new_length < length / 4;
bool grow = 3 * (length / 4) < new_length;
@@ -4120,14 +4183,19 @@ Handle<WeakArrayList> WeakArrayList::Append(Isolate* isolate,
// Now append value to the array, there should always be enough space now.
DCHECK_LT(array->length(), array->capacity());
- // Reload length, allocation might have killed some weak refs.
- int index = array->length();
- array->Set(index, *value);
- array->set_length(index + 1);
+ {
+ DisallowGarbageCollection no_gc;
+ WeakArrayList raw = *array;
+ // Reload length, allocation might have killed some weak refs.
+ int index = raw.length();
+ raw.Set(index, *value);
+ raw.set_length(index + 1);
+ }
return array;
}
void WeakArrayList::Compact(Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
int length = this->length();
int new_length = 0;
@@ -4145,7 +4213,7 @@ void WeakArrayList::Compact(Isolate* isolate) {
set_length(new_length);
}
-bool WeakArrayList::IsFull() { return length() == capacity(); }
+bool WeakArrayList::IsFull() const { return length() == capacity(); }
// static
Handle<WeakArrayList> WeakArrayList::EnsureSpace(Isolate* isolate,
@@ -4684,7 +4752,7 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
value <<= String::ArrayIndexValueBits::kShift;
value |= length << String::ArrayIndexLengthBits::kShift;
- DCHECK_EQ(value & String::kIsNotIntegerIndexMask, 0);
+ DCHECK(String::IsIntegerIndex(value));
DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
Name::ContainsCachedArrayIndex(value));
return value;
@@ -4804,6 +4872,17 @@ bool Script::GetPositionInfo(Handle<Script> script, int position,
return script->GetPositionInfo(position, info, offset_flag);
}
+bool Script::IsSubjectToDebugging() const {
+ switch (type()) {
+ case TYPE_NORMAL:
+#if V8_ENABLE_WEBASSEMBLY
+ case TYPE_WASM:
+#endif // V8_ENABLE_WEBASSEMBLY
+ return true;
+ }
+ return false;
+}
+
bool Script::IsUserJavaScript() const { return type() == Script::TYPE_NORMAL; }
#if V8_ENABLE_WEBASSEMBLY
@@ -5107,10 +5186,9 @@ Maybe<bool> JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
// ES6: 9.5.2 [[SetPrototypeOf]] (V)
// static
-Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
- bool from_javascript,
+Maybe<bool> JSProxy::SetPrototype(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
- Isolate* isolate = proxy->GetIsolate();
STACK_CHECK(isolate, Nothing<bool>());
Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
// 1. Assert: Either Type(V) is Object or Type(V) is Null.
@@ -5134,7 +5212,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
Nothing<bool>());
// 7. If trap is undefined, then return target.[[SetPrototypeOf]]().
if (trap->IsUndefined(isolate)) {
- return JSReceiver::SetPrototype(target, value, from_javascript,
+ return JSReceiver::SetPrototype(isolate, target, value, from_javascript,
should_throw);
}
// 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, V»)).
@@ -5870,12 +5948,6 @@ InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(
}
}
-template <typename Derived, typename Shape>
-InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(Isolate* isolate,
- uint32_t hash) {
- return FindInsertionEntry(isolate, ReadOnlyRoots(isolate), hash);
-}
-
base::Optional<PropertyCell>
GlobalDictionary::TryFindPropertyCellForConcurrentLookupIterator(
Isolate* isolate, Handle<Name> name, RelaxedLoadTag tag) {
@@ -5926,6 +5998,21 @@ bool StringSet::Has(Isolate* isolate, Handle<String> name) {
return FindEntry(isolate, *name).is_found();
}
+Handle<RegisteredSymbolTable> RegisteredSymbolTable::Add(
+ Isolate* isolate, Handle<RegisteredSymbolTable> table, Handle<String> key,
+ Handle<Symbol> symbol) {
+ // Validate that the key is absent.
+ SLOW_DCHECK(table->FindEntry(isolate, key).is_not_found());
+
+ table = EnsureCapacity(isolate, table);
+ uint32_t hash = ShapeT::Hash(ReadOnlyRoots(isolate), key);
+ InternalIndex entry = table->FindInsertionEntry(isolate, hash);
+ table->set(EntryToIndex(entry), *key);
+ table->set(EntryToValueIndex(entry), *symbol);
+ table->ElementAdded();
+ return table;
+}
+
Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
Handle<ObjectHashSet> set,
Handle<Object> key) {
@@ -6216,6 +6303,18 @@ Object ObjectHashTableBase<Derived, Shape>::Lookup(PtrComprCageBase cage_base,
return this->get(Derived::EntryToIndex(entry) + 1);
}
+// The implementation should be in sync with
+// CodeStubAssembler::NameToIndexHashTableLookup.
+int NameToIndexHashTable::Lookup(Handle<Name> key) {
+ DisallowGarbageCollection no_gc;
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ ReadOnlyRoots roots = this->GetReadOnlyRoots(cage_base);
+
+ InternalIndex entry = this->FindEntry(cage_base, roots, key, key->hash());
+ if (entry.is_not_found()) return -1;
+ return Smi::cast(this->get(EntryToValueIndex(entry))).value();
+}
+
template <typename Derived, typename Shape>
Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowGarbageCollection no_gc;
@@ -6243,6 +6342,24 @@ Object ObjectHashTableBase<Derived, Shape>::ValueAt(InternalIndex entry) {
return this->get(EntryToValueIndex(entry));
}
+Object RegisteredSymbolTable::ValueAt(InternalIndex entry) {
+ return this->get(EntryToValueIndex(entry));
+}
+
+Object NameToIndexHashTable::ValueAt(InternalIndex entry) {
+ return this->get(EntryToValueIndex(entry));
+}
+
+int NameToIndexHashTable::IndexAt(InternalIndex entry) {
+ Object value = ValueAt(entry);
+ if (value.IsSmi()) {
+ int index = Smi::ToInt(value);
+ DCHECK_LE(0, index);
+ return index;
+ }
+ return -1;
+}
+
template <typename Derived, typename Shape>
Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Handle<Derived> table,
Handle<Object> key,
@@ -6277,7 +6394,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
}
// Rehash if more than 33% of the entries are deleted entries.
- // TODO(jochen): Consider to shrink the fixed array in place.
+ // TODO(verwaest): Consider to shrink the fixed array in place.
if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
table->Rehash(isolate);
}
@@ -6492,38 +6609,36 @@ Handle<PropertyCell> PropertyCell::InvalidateAndReplaceEntry(
return new_cell;
}
-static bool RemainsConstantType(Handle<PropertyCell> cell,
- Handle<Object> value) {
+static bool RemainsConstantType(PropertyCell cell, Object value) {
+ DisallowGarbageCollection no_gc;
// TODO(dcarney): double->smi and smi->double transition from kConstant
- if (cell->value().IsSmi() && value->IsSmi()) {
+ if (cell.value().IsSmi() && value.IsSmi()) {
return true;
- } else if (cell->value().IsHeapObject() && value->IsHeapObject()) {
- return HeapObject::cast(cell->value()).map() ==
- HeapObject::cast(*value).map() &&
- HeapObject::cast(*value).map().is_stable();
+ } else if (cell.value().IsHeapObject() && value.IsHeapObject()) {
+ Map map = HeapObject::cast(value).map();
+ return HeapObject::cast(cell.value()).map() == map && map.is_stable();
}
return false;
}
// static
-PropertyCellType PropertyCell::InitialType(Isolate* isolate,
- Handle<Object> value) {
- return value->IsUndefined(isolate) ? PropertyCellType::kUndefined
- : PropertyCellType::kConstant;
+PropertyCellType PropertyCell::InitialType(Isolate* isolate, Object value) {
+ return value.IsUndefined(isolate) ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
}
// static
-PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
- Handle<PropertyCell> cell,
- Handle<Object> value,
+PropertyCellType PropertyCell::UpdatedType(Isolate* isolate, PropertyCell cell,
+ Object value,
PropertyDetails details) {
- DCHECK(!value->IsTheHole(isolate));
- DCHECK(!cell->value().IsTheHole(isolate));
+ DisallowGarbageCollection no_gc;
+ DCHECK(!value.IsTheHole(isolate));
+ DCHECK(!cell.value().IsTheHole(isolate));
switch (details.cell_type()) {
case PropertyCellType::kUndefined:
return PropertyCellType::kConstant;
case PropertyCellType::kConstant:
- if (*value == cell->value()) return PropertyCellType::kConstant;
+ if (value == cell.value()) return PropertyCellType::kConstant;
V8_FALLTHROUGH;
case PropertyCellType::kConstantType:
if (RemainsConstantType(cell, value)) {
@@ -6541,9 +6656,9 @@ Handle<PropertyCell> PropertyCell::PrepareForAndSetValue(
Isolate* isolate, Handle<GlobalDictionary> dictionary, InternalIndex entry,
Handle<Object> value, PropertyDetails details) {
DCHECK(!value->IsTheHole(isolate));
- Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
- CHECK(!cell->value().IsTheHole(isolate));
- const PropertyDetails original_details = cell->property_details();
+ PropertyCell raw_cell = dictionary->CellAt(entry);
+ CHECK(!raw_cell.value().IsTheHole(isolate));
+ const PropertyDetails original_details = raw_cell.property_details();
// Data accesses could be cached in ics or optimized code.
bool invalidate = original_details.kind() == PropertyKind::kData &&
details.kind() == PropertyKind::kAccessor;
@@ -6552,9 +6667,11 @@ Handle<PropertyCell> PropertyCell::PrepareForAndSetValue(
details = details.set_index(index);
PropertyCellType new_type =
- UpdatedType(isolate, cell, value, original_details);
+ UpdatedType(isolate, raw_cell, *value, original_details);
details = details.set_cell_type(new_type);
+ Handle<PropertyCell> cell(raw_cell, isolate);
+
if (invalidate) {
cell = PropertyCell::InvalidateAndReplaceEntry(isolate, dictionary, entry,
details, value);
@@ -6817,6 +6934,8 @@ Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
EXTERN_DEFINE_HASH_TABLE(StringSet, StringSetShape)
EXTERN_DEFINE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
EXTERN_DEFINE_HASH_TABLE(ObjectHashSet, ObjectHashSetShape)
+EXTERN_DEFINE_HASH_TABLE(NameToIndexHashTable, NameToIndexShape)
+EXTERN_DEFINE_HASH_TABLE(RegisteredSymbolTable, RegisteredSymbolTableShape)
EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(ObjectHashTable, ObjectHashTableShape)
EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(EphemeronHashTable, ObjectHashTableShape)
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 82e1680b47..d57ad0a847 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -19,6 +19,8 @@
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
#include "src/common/message-template.h"
+#include "src/common/operation.h"
+#include "src/common/ptr-compr.h"
#include "src/flags/flags.h"
#include "src/objects/elements-kind.h"
#include "src/objects/field-index.h"
@@ -56,14 +58,17 @@
// - JSModuleNamespace
// - JSPrimitiveWrapper
// - JSDate
-// - JSFunctionOrBoundFunction
+// - JSFunctionOrBoundFunctionOrWrappedFunction
// - JSBoundFunction
// - JSFunction
+// - JSWrappedFunction
// - JSGeneratorObject
// - JSMapIterator
// - JSMessageObject
// - JSRegExp
// - JSSetIterator
+// - JSShadowRealm
+// - JSSharedStruct
// - JSStringIterator
// - JSTemporalCalendar
// - JSTemporalDuration
@@ -175,7 +180,7 @@
// - BreakPoint
// - BreakPointInfo
// - CachedTemplateObject
-// - StackFrameInfo
+// - CallSiteInfo
// - CodeCache
// - PropertyDescriptorObject
// - PrototypeInfo
@@ -190,6 +195,7 @@
// - SourceTextModule
// - SyntheticModule
// - SourceTextModuleInfoEntry
+// - StackFrameInfo
// - FeedbackCell
// - FeedbackVector
// - PreparseData
@@ -405,7 +411,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToPrimitive(
- Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+ Isolate* isolate, Handle<Object> input,
+ ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
// ES6 section 7.1.3 ToNumber
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumber(
@@ -667,18 +674,14 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
std::is_enum<T>::value,
int>::type = 0>
inline T ReadField(size_t offset) const {
- // Pointer compression causes types larger than kTaggedSize to be unaligned.
-#ifdef V8_COMPRESS_POINTERS
- constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
-#else
- constexpr bool v8_pointer_compression_unaligned = false;
-#endif
- if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
- // Bug(v8:8875) Double fields may be unaligned.
- return base::ReadUnalignedValue<T>(field_address(offset));
- } else {
- return base::Memory<T>(field_address(offset));
- }
+ return ReadMaybeUnalignedValue<T>(field_address(offset));
+ }
+
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value,
+ int>::type = 0>
+ inline void WriteField(size_t offset, T value) const {
+ return WriteMaybeUnalignedValue<T>(field_address(offset), value);
}
// Atomically reads a field using relaxed memory ordering. Can only be used
@@ -690,38 +693,31 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
int>::type = 0>
inline T Relaxed_ReadField(size_t offset) const;
- template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
- std::is_enum<T>::value,
- int>::type = 0>
- inline void WriteField(size_t offset, T value) const {
- // Pointer compression causes types larger than kTaggedSize to be unaligned.
-#ifdef V8_COMPRESS_POINTERS
- constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
-#else
- constexpr bool v8_pointer_compression_unaligned = false;
-#endif
- if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
- // Bug(v8:8875) Double fields may be unaligned.
- base::WriteUnalignedValue<T>(field_address(offset), value);
- } else {
- base::Memory<T>(field_address(offset)) = value;
- }
- }
+ // Atomically writes a field using relaxed memory ordering. Can only be used
+ // with integral types whose size is <= kTaggedSize (to guarantee alignment).
+ template <class T,
+ typename std::enable_if<(std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value) &&
+ !std::is_floating_point<T>::value,
+ int>::type = 0>
+ inline void Relaxed_WriteField(size_t offset, T value);
//
- // CagedPointer_t field accessors.
+ // SandboxedPointer_t field accessors.
//
- inline Address ReadCagedPointerField(size_t offset,
- PtrComprCageBase cage_base) const;
- inline void WriteCagedPointerField(size_t offset, PtrComprCageBase cage_base,
- Address value);
- inline void WriteCagedPointerField(size_t offset, Isolate* isolate,
- Address value);
+ inline Address ReadSandboxedPointerField(size_t offset,
+ PtrComprCageBase cage_base) const;
+ inline void WriteSandboxedPointerField(size_t offset,
+ PtrComprCageBase cage_base,
+ Address value);
+ inline void WriteSandboxedPointerField(size_t offset, Isolate* isolate,
+ Address value);
//
// ExternalPointer_t field accessors.
//
- inline void InitExternalPointerField(size_t offset, Isolate* isolate);
+ inline void InitExternalPointerField(size_t offset, Isolate* isolate,
+ ExternalPointerTag tag);
inline void InitExternalPointerField(size_t offset, Isolate* isolate,
Address value, ExternalPointerTag tag);
inline Address ReadExternalPointerField(size_t offset, Isolate* isolate,
@@ -736,6 +732,27 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
static bool CheckContextualStoreToJSGlobalObject(
LookupIterator* it, Maybe<ShouldThrow> should_throw);
+ // Returns whether the object is safe to share across Isolates.
+ //
+ // Currently, the following kinds of values can be safely shared across
+ // Isolates:
+ // - Smis
+ // - Objects in RO space when the RO space is shared
+ // - HeapNumbers in the shared old space
+ // - Strings for which String::IsShared() is true
+ // - JSSharedStructs
+ inline bool IsShared() const;
+
+ // Returns an equivalent value that's safe to share across Isolates if
+ // possible. Acts as the identity function when value->IsShared().
+ static inline MaybeHandle<Object> Share(
+ Isolate* isolate, Handle<Object> value,
+ ShouldThrow throw_if_cannot_be_shared);
+
+ static MaybeHandle<Object> ShareSlow(Isolate* isolate,
+ Handle<HeapObject> value,
+ ShouldThrow throw_if_cannot_be_shared);
+
protected:
inline Address field_address(size_t offset) const {
return ptr() + offset - kHeapObjectTag;
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index 95f5f294bb..c5a38dbb38 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -40,6 +40,11 @@ DEF_GETTER(HeapObject, IsBoolean, bool) {
((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
+bool Oddball::ToBool(Isolate* isolate) const {
+ DCHECK(IsBoolean(isolate));
+ return IsTrue(isolate);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index 30f6fa70f8..eb7b72c7e2 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -31,6 +31,8 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
Isolate* isolate, Handle<Oddball> input);
+ V8_INLINE bool ToBool(Isolate* isolate) const;
+
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
diff --git a/deps/v8/src/objects/option-utils.h b/deps/v8/src/objects/option-utils.h
index 5bb2c35701..16041a5cb2 100644
--- a/deps/v8/src/objects/option-utils.h
+++ b/deps/v8/src/objects/option-utils.h
@@ -65,6 +65,77 @@ V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
return Just(default_value);
}
+// A helper template to get string from option into a enum.
+// The enum in the enum_values is the corresponding value to the strings
+// in the str_values. If the option does not contains name,
+// default_value will be return.
+template <typename T>
+V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOrBooleanOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ const char* method, const std::vector<const char*>& str_values,
+ const std::vector<T>& enum_values, T true_value, T false_value,
+ T fallback_value) {
+ DCHECK_EQ(str_values.size(), enum_values.size());
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<T>());
+ // 2. If value is undefined, then return fallback.
+ if (value->IsUndefined(isolate)) {
+ return Just(fallback_value);
+ }
+ // 3. If value is true, then return trueValue.
+ if (value->IsTrue(isolate)) {
+ return Just(true_value);
+ }
+ // 4. Let valueBoolean be ToBoolean(value).
+ bool valueBoolean = value->BooleanValue(isolate);
+ // 5. If valueBoolean is false, then return valueBoolean.
+ if (!valueBoolean) {
+ return Just(false_value);
+ }
+
+ Handle<String> value_str;
+ // 6. Let value be ? ToString(value).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_str, Object::ToString(isolate, value), Nothing<T>());
+ // 7. If values does not contain an element equal to value, throw a
+ // RangeError exception.
+ // 8. Return value.
+ value_str = String::Flatten(isolate, value_str);
+ {
+ DisallowGarbageCollection no_gc;
+ const String::FlatContent& flat = value_str->GetFlatContent(no_gc);
+ int32_t length = value_str->length();
+ for (size_t i = 0; i < str_values.size(); i++) {
+ if (static_cast<int32_t>(strlen(str_values.at(i))) == length) {
+ if (flat.IsOneByte()) {
+ if (CompareCharsEqual(str_values.at(i),
+ flat.ToOneByteVector().begin(), length)) {
+ return Just(enum_values[i]);
+ }
+ } else {
+ if (CompareCharsEqual(str_values.at(i), flat.ToUC16Vector().begin(),
+ length)) {
+ return Just(enum_values[i]);
+ }
+ }
+ }
+ }
+ } // end of no_gc
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kValueOutOfRange, value,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ property_str),
+ Nothing<T>());
+}
+
// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
// ecma402/#sec-getoption
//
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index c6754db937..db97269d51 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -406,6 +406,13 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
return table;
}
+void OrderedHashMap::SetEntry(InternalIndex entry, Object key, Object value) {
+ DisallowGarbageCollection no_gc;
+ int index = EntryToIndex(entry);
+ this->set(index, key);
+ this->set(index + kValueOffset, value);
+}
+
template <typename IsolateT>
InternalIndex OrderedNameDictionary::FindEntry(IsolateT* isolate, Object key) {
DisallowGarbageCollection no_gc;
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 45682e45e9..ec30417054 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -331,6 +331,9 @@ class V8_EXPORT_PRIVATE OrderedHashMap
int new_capacity);
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table);
+
+ void SetEntry(InternalIndex entry, Object key, Object value);
+
Object ValueAt(InternalIndex entry);
// This takes and returns raw Address values containing tagged Object
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.cc b/deps/v8/src/objects/osr-optimized-code-cache.cc
index f6c8cdb2d6..4ffefd59a8 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.cc
+++ b/deps/v8/src/objects/osr-optimized-code-cache.cc
@@ -17,7 +17,7 @@ const int OSROptimizedCodeCache::kMaxLength;
void OSROptimizedCodeCache::AddOptimizedCode(
Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
- Handle<Code> code, BytecodeOffset osr_offset) {
+ Handle<CodeT> code, BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone());
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
STATIC_ASSERT(kEntryLength == 3);
@@ -90,16 +90,16 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
native_context->set_osr_code_cache(*new_osr_cache);
}
-Code OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
- BytecodeOffset osr_offset,
- Isolate* isolate) {
+CodeT OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
+ BytecodeOffset osr_offset,
+ Isolate* isolate) {
DisallowGarbageCollection no_gc;
int index = FindEntry(shared, osr_offset);
- if (index == -1) return Code();
- Code code = GetCodeFromEntry(index);
+ if (index == -1) return CodeT();
+ CodeT code = GetCodeFromEntry(index);
if (code.is_null()) {
ClearEntry(index, isolate);
- return code;
+ return CodeT();
}
DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
return code;
@@ -114,8 +114,7 @@ void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
HeapObject heap_object;
if (!code_entry->GetHeapObject(&heap_object)) continue;
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- Code code = FromCodeT(CodeT::cast(heap_object));
+ CodeT code = CodeT::cast(heap_object);
DCHECK(code.is_optimized_code());
if (!code.marked_for_deoptimization()) continue;
@@ -123,6 +122,18 @@ void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
}
}
+std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI(
+ SharedFunctionInfo shared) {
+ std::vector<int> bytecode_offsets;
+ DisallowGarbageCollection gc;
+ for (int index = 0; index < length(); index += kEntryLength) {
+ if (GetSFIFromEntry(index) == shared) {
+ bytecode_offsets.push_back(GetBytecodeOffsetFromEntry(index).ToInt());
+ }
+ }
+ return bytecode_offsets;
+}
+
int OSROptimizedCodeCache::GrowOSRCache(
Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache) {
@@ -140,14 +151,14 @@ int OSROptimizedCodeCache::GrowOSRCache(
return old_length;
}
-Code OSROptimizedCodeCache::GetCodeFromEntry(int index) {
+CodeT OSROptimizedCodeCache::GetCodeFromEntry(int index) {
DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
DCHECK_EQ(index % kEntryLength, 0);
HeapObject code_entry;
Get(index + OSRCodeCacheConstants::kCachedCodeOffset)
->GetHeapObject(&code_entry);
- if (code_entry.is_null()) return Code();
- return FromCodeT(CodeT::cast(code_entry));
+ if (code_entry.is_null()) return CodeT();
+ return CodeT::cast(code_entry);
}
SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
@@ -180,25 +191,43 @@ int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
}
void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
- Set(index + OSRCodeCacheConstants::kSharedOffset,
- HeapObjectReference::ClearedValue(isolate));
- Set(index + OSRCodeCacheConstants::kCachedCodeOffset,
- HeapObjectReference::ClearedValue(isolate));
- Set(index + OSRCodeCacheConstants::kOsrIdOffset,
- HeapObjectReference::ClearedValue(isolate));
+ SharedFunctionInfo shared = GetSFIFromEntry(index);
+ DCHECK_GT(shared.osr_code_cache_state(), kNotCached);
+ if (V8_LIKELY(shared.osr_code_cache_state() == kCachedOnce)) {
+ shared.set_osr_code_cache_state(kNotCached);
+ } else if (shared.osr_code_cache_state() == kCachedMultiple) {
+ int osr_code_cache_count = 0;
+ for (int index = 0; index < length(); index += kEntryLength) {
+ if (GetSFIFromEntry(index) == shared) {
+ osr_code_cache_count++;
+ }
+ }
+ if (osr_code_cache_count == 2) {
+ shared.set_osr_code_cache_state(kCachedOnce);
+ }
+ }
+ HeapObjectReference cleared_value =
+ HeapObjectReference::ClearedValue(isolate);
+ Set(index + OSRCodeCacheConstants::kSharedOffset, cleared_value);
+ Set(index + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
+ Set(index + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}
void OSROptimizedCodeCache::InitializeEntry(int entry,
SharedFunctionInfo shared,
- Code code,
+ CodeT code,
BytecodeOffset osr_offset) {
Set(entry + OSRCodeCacheConstants::kSharedOffset,
HeapObjectReference::Weak(shared));
- HeapObjectReference weak_code_entry =
- HeapObjectReference::Weak(ToCodeT(code));
+ HeapObjectReference weak_code_entry = HeapObjectReference::Weak(code);
Set(entry + OSRCodeCacheConstants::kCachedCodeOffset, weak_code_entry);
Set(entry + OSRCodeCacheConstants::kOsrIdOffset,
MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt())));
+ if (V8_LIKELY(shared.osr_code_cache_state() == kNotCached)) {
+ shared.set_osr_code_cache_state(kCachedOnce);
+ } else if (shared.osr_code_cache_state() == kCachedOnce) {
+ shared.set_osr_code_cache_state(kCachedMultiple);
+ }
}
void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
@@ -207,7 +236,11 @@ void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
Set(dst + OSRCodeCacheConstants::kCachedCodeOffset,
Get(src + OSRCodeCacheConstants::kCachedCodeOffset));
Set(dst + OSRCodeCacheConstants::kOsrIdOffset, Get(src + kOsrIdOffset));
- ClearEntry(src, isolate);
+ HeapObjectReference cleared_value =
+ HeapObjectReference::ClearedValue(isolate);
+ Set(src + OSRCodeCacheConstants::kSharedOffset, cleared_value);
+ Set(src + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
+ Set(src + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}
int OSROptimizedCodeCache::CapacityForLength(int curr_length) {
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.h b/deps/v8/src/objects/osr-optimized-code-cache.h
index 62e135b02e..7b28ba0001 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.h
+++ b/deps/v8/src/objects/osr-optimized-code-cache.h
@@ -12,6 +12,17 @@
namespace v8 {
namespace internal {
+// This enum are states that how many OSR code caches belong to a SFI. Without
+// this enum, need to check all OSR code cache entries to know whether a
+// JSFunction's SFI has OSR code cache. The enum value kCachedMultiple is for
+// doing time-consuming loop check only when the very unlikely state change
+// kCachedMultiple -> { kCachedOnce | kCachedMultiple }.
+enum OSRCodeCacheStateOfSFI : uint8_t {
+ kNotCached, // Likely state, no OSR code cache
+ kCachedOnce, // Unlikely state, one OSR code cache
+ kCachedMultiple, // Very unlikely state, multiple OSR code caches
+};
+
class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
public:
DECL_CAST(OSROptimizedCodeCache)
@@ -32,7 +43,7 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
// kOSRCodeCacheInitialLength entries.
static void AddOptimizedCode(Handle<NativeContext> context,
Handle<SharedFunctionInfo> shared,
- Handle<Code> code, BytecodeOffset osr_offset);
+ Handle<CodeT> code, BytecodeOffset osr_offset);
// Reduces the size of the OSR code cache if the number of valid entries are
// less than the current capacity of the cache.
static void Compact(Handle<NativeContext> context);
@@ -42,12 +53,16 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
// Returns the code corresponding to the shared function |shared| and
// BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
// object otherwise.
- Code GetOptimizedCode(Handle<SharedFunctionInfo> shared,
- BytecodeOffset osr_offset, Isolate* isolate);
+ CodeT GetOptimizedCode(Handle<SharedFunctionInfo> shared,
+ BytecodeOffset osr_offset, Isolate* isolate);
// Remove all code objects marked for deoptimization from OSR code cache.
void EvictMarkedCode(Isolate* isolate);
+ // Returns vector of bytecode offsets corresponding to the shared function
+ // |shared|
+ std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
+
private:
// Functions that implement heuristics on when to grow / shrink the cache.
static int CapacityForLength(int curr_capacity);
@@ -56,14 +71,14 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
Handle<OSROptimizedCodeCache>* osr_cache);
// Helper functions to get individual items from an entry in the cache.
- Code GetCodeFromEntry(int index);
+ CodeT GetCodeFromEntry(int index);
SharedFunctionInfo GetSFIFromEntry(int index);
BytecodeOffset GetBytecodeOffsetFromEntry(int index);
inline int FindEntry(Handle<SharedFunctionInfo> shared,
BytecodeOffset osr_offset);
inline void ClearEntry(int src, Isolate* isolate);
- inline void InitializeEntry(int entry, SharedFunctionInfo shared, Code code,
+ inline void InitializeEntry(int entry, SharedFunctionInfo shared, CodeT code,
BytecodeOffset osr_offset);
inline void MoveEntry(int src, int dst, Isolate* isolate);
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index df5f2a1502..3bc39847e6 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -38,6 +38,19 @@ Object PropertyArray::get(PtrComprCageBase cage_base, int index) const {
OffsetOfElementAt(index));
}
+Object PropertyArray::get(int index, SeqCstAccessTag tag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index, tag);
+}
+
+Object PropertyArray::get(PtrComprCageBase cage_base, int index,
+ SeqCstAccessTag tag) const {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length(kAcquireLoad)));
+ return TaggedField<Object>::SeqCst_Load(cage_base, *this,
+ OffsetOfElementAt(index));
+}
+
void PropertyArray::set(int index, Object value) {
DCHECK(IsPropertyArray());
DCHECK_LT(static_cast<unsigned>(index),
@@ -55,6 +68,19 @@ void PropertyArray::set(int index, Object value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
+void PropertyArray::set(int index, Object value, SeqCstAccessTag tag) {
+ DCHECK(IsPropertyArray());
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length(kAcquireLoad)));
+ DCHECK(value.IsShared());
+ int offset = OffsetOfElementAt(index);
+ SEQ_CST_WRITE_FIELD(*this, offset, value);
+ // JSSharedStructs are allocated in the shared old space, which is currently
+ // collected by stopping the world, so the incremental write barrier is not
+ // needed. They can only store Smis and other HeapObjects in the shared old
+ // space, so the generational write barrier is also not needed.
+}
+
ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
int PropertyArray::length() const {
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 03c2ccd005..9599ca4d1b 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -31,8 +31,12 @@ class PropertyArray
inline Object get(int index) const;
inline Object get(PtrComprCageBase cage_base, int index) const;
+ inline Object get(int index, SeqCstAccessTag tag) const;
+ inline Object get(PtrComprCageBase cage_base, int index,
+ SeqCstAccessTag tag) const;
inline void set(int index, Object value);
+ inline void set(int index, Object value, SeqCstAccessTag tag);
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index a85bc1e4df..7b607cd90b 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -41,14 +41,12 @@ class PropertyCell
// For protectors:
void InvalidateProtector();
- static PropertyCellType InitialType(Isolate* isolate, Handle<Object> value);
+ static PropertyCellType InitialType(Isolate* isolate, Object value);
// Computes the new type of the cell's contents for the given value, but
// without actually modifying the details.
- static PropertyCellType UpdatedType(Isolate* isolate,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- PropertyDetails details);
+ static PropertyCellType UpdatedType(Isolate* isolate, PropertyCell cell,
+ Object value, PropertyDetails details);
// Prepares property cell at given entry for receiving given value and sets
// that value. As a result the old cell could be invalidated and/or dependent
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index 46aac7e0ed..4878070d0d 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -63,7 +63,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
Handle<Object> value;
if (details.location() == PropertyLocation::kField) {
if (details.kind() == PropertyKind::kData) {
- value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
+ value = JSObject::FastPropertyAt(isolate, Handle<JSObject>::cast(obj),
details.representation(),
FieldIndex::ForDescriptor(*map, i));
} else {
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index 0837aac7f2..10ddabd389 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -32,6 +32,11 @@ enum PropertyAttributes {
// a non-existent property.
};
+V8_INLINE PropertyAttributes PropertyAttributesFromInt(int value) {
+ DCHECK_EQ(value & ~PropertyAttributes::ALL_ATTRIBUTES_MASK, 0);
+ return static_cast<PropertyAttributes>(value);
+}
+
// Number of distinct bits in PropertyAttributes.
static const int kPropertyAttributesBitsCount = 3;
diff --git a/deps/v8/src/objects/scope-info-inl.h b/deps/v8/src/objects/scope-info-inl.h
index a31f0e989b..ae8c60129f 100644
--- a/deps/v8/src/objects/scope-info-inl.h
+++ b/deps/v8/src/objects/scope-info-inl.h
@@ -9,6 +9,7 @@
#include "src/objects/fixed-array-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/string.h"
+#include "src/roots/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,6 +33,115 @@ int ScopeInfo::ContextLocalCount() const { return context_local_count(); }
ObjectSlot ScopeInfo::data_start() { return RawField(OffsetOfElementAt(0)); }
+bool ScopeInfo::HasInlinedLocalNames() const {
+ return ContextLocalCount() < kScopeInfoMaxInlinedLocalNamesSize;
+}
+
+template <typename ScopeInfoPtr>
+class ScopeInfo::LocalNamesRange {
+ public:
+ class Iterator {
+ public:
+ Iterator(const LocalNamesRange* range, InternalIndex index)
+ : range_(range), index_(index) {
+ DCHECK_NOT_NULL(range);
+ if (!range_->inlined()) advance_hashtable_index();
+ }
+
+ Iterator& operator++() {
+ DCHECK_LT(index_, range_->max_index());
+ ++index_;
+ if (range_->inlined()) return *this;
+ advance_hashtable_index();
+ return *this;
+ }
+
+ friend bool operator==(const Iterator& a, const Iterator& b) {
+ return a.range_ == b.range_ && a.index_ == b.index_;
+ }
+
+ friend bool operator!=(const Iterator& a, const Iterator& b) {
+ return !(a == b);
+ }
+
+ String name(PtrComprCageBase cage_base) const {
+ DCHECK_LT(index_, range_->max_index());
+ if (range_->inlined()) {
+ return scope_info()->ContextInlinedLocalName(cage_base,
+ index_.as_int());
+ }
+ return String::cast(table().KeyAt(cage_base, index_));
+ }
+
+ String name() const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*scope_info());
+ return name(cage_base);
+ }
+
+ const Iterator* operator*() const { return this; }
+
+ int index() const {
+ if (range_->inlined()) return index_.as_int();
+ return table().IndexAt(index_);
+ }
+
+ private:
+ const LocalNamesRange* range_;
+ InternalIndex index_;
+
+ ScopeInfoPtr scope_info() const { return range_->scope_info_; }
+
+ NameToIndexHashTable table() const {
+ return scope_info()->context_local_names_hashtable();
+ }
+
+ void advance_hashtable_index() {
+ DisallowGarbageCollection no_gc;
+ ReadOnlyRoots roots = scope_info()->GetReadOnlyRoots();
+ InternalIndex max = range_->max_index();
+ // Increment until iterator points to a valid key or max.
+ while (index_ < max) {
+ Object key = table().KeyAt(index_);
+ if (table().IsKey(roots, key)) break;
+ ++index_;
+ }
+ }
+
+ friend class LocalNamesRange;
+ };
+
+ bool inlined() const { return scope_info_->HasInlinedLocalNames(); }
+
+ InternalIndex max_index() const {
+ int max = inlined()
+ ? scope_info_->ContextLocalCount()
+ : scope_info_->context_local_names_hashtable().Capacity();
+ return InternalIndex(max);
+ }
+
+ explicit LocalNamesRange(ScopeInfoPtr scope_info) : scope_info_(scope_info) {}
+
+ inline Iterator begin() const { return Iterator(this, InternalIndex(0)); }
+
+ inline Iterator end() const { return Iterator(this, max_index()); }
+
+ private:
+ ScopeInfoPtr scope_info_;
+};
+
+// static
+ScopeInfo::LocalNamesRange<Handle<ScopeInfo>> ScopeInfo::IterateLocalNames(
+ Handle<ScopeInfo> scope_info) {
+ return LocalNamesRange<Handle<ScopeInfo>>(scope_info);
+}
+
+// static
+ScopeInfo::LocalNamesRange<ScopeInfo*> ScopeInfo::IterateLocalNames(
+ ScopeInfo* scope_info, const DisallowGarbageCollection& no_gc) {
+ USE(no_gc);
+ return LocalNamesRange<ScopeInfo*>(scope_info);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index e20493d468..70bc3de7b6 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -102,6 +102,11 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
// Make sure we allocate the correct amount.
DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
+ // If the number of locals is small, we inline directly
+ // in the scope info object.
+ bool has_inlined_local_names =
+ context_local_count < kScopeInfoMaxInlinedLocalNamesSize;
+
const bool has_new_target =
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->new_target_var() != nullptr;
@@ -133,9 +138,11 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
function_name_info = VariableAllocationInfo::NONE;
}
- const bool has_brand = scope->is_class_scope()
- ? scope->AsClassScope()->brand() != nullptr
- : false;
+ const bool has_brand =
+ scope->is_class_scope()
+ ? scope->AsClassScope()->brand() != nullptr
+ : scope->IsConstructorScope() &&
+ scope->AsDeclarationScope()->class_scope_has_private_brand();
const bool should_save_class_variable_index =
scope->is_class_scope()
? scope->AsClassScope()->should_save_class_variable_index()
@@ -161,7 +168,11 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(ASSERT_MATCHED_FIELD)
#undef ASSERT_MATCHED_FIELD
- const int length = kVariablePartIndex + 2 * context_local_count +
+ const int local_names_container_size =
+ has_inlined_local_names ? context_local_count : 1;
+
+ const int length = kVariablePartIndex + local_names_container_size +
+ context_local_count +
(should_save_class_variable_index ? 1 : 0) +
(has_function_name ? kFunctionNameEntries : 0) +
(has_inferred_function_name ? 1 : 0) +
@@ -171,6 +182,13 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
? 2 + kModuleVariableEntryLength * module_vars_count
: 0);
+ // Create hash table if local names are not inlined.
+ Handle<NameToIndexHashTable> local_names_hashtable;
+ if (!has_inlined_local_names) {
+ local_names_hashtable = NameToIndexHashTable::New(
+ isolate, context_local_count, AllocationType::kOld);
+ }
+
Handle<ScopeInfo> scope_info_handle =
isolate->factory()->NewScopeInfo(length);
int index = kVariablePartIndex;
@@ -203,9 +221,8 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
LanguageModeBit::encode(scope->language_mode()) |
DeclarationScopeBit::encode(scope->is_declaration_scope()) |
ReceiverVariableBits::encode(receiver_info) |
- HasClassBrandBit::encode(has_brand) |
- HasSavedClassVariableIndexBit::encode(
- should_save_class_variable_index) |
+ ClassScopeHasPrivateBrandBit::encode(has_brand) |
+ HasSavedClassVariableBit::encode(should_save_class_variable_index) |
HasNewTargetBit::encode(has_new_target) |
FunctionVariableBits::encode(function_name_info) |
HasInferredFunctionNameBit::encode(has_inferred_function_name) |
@@ -231,11 +248,15 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
if (scope->is_module_scope()) {
scope_info.set_module_variable_count(module_vars_count);
}
+ if (!has_inlined_local_names) {
+ scope_info.set_context_local_names_hashtable(*local_names_hashtable);
+ }
// Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
int context_local_base = index;
- int context_local_info_base = context_local_base + context_local_count;
+ int context_local_info_base =
+ context_local_base + local_names_container_size;
int module_var_entry = scope_info.ModuleVariableCountIndex() + 1;
for (Variable* var : *scope->locals()) {
@@ -253,7 +274,15 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
MaybeAssignedFlagBit::encode(var->maybe_assigned()) |
ParameterNumberBits::encode(ParameterNumberBits::kMax) |
IsStaticFlagBit::encode(var->is_static_flag());
- scope_info.set(context_local_base + local_index, *var->name(), mode);
+ if (has_inlined_local_names) {
+ scope_info.set(context_local_base + local_index, *var->name(),
+ mode);
+ } else {
+ Handle<NameToIndexHashTable> new_table = NameToIndexHashTable::Add(
+ isolate, local_names_hashtable, var->name(), local_index);
+ DCHECK_EQ(*new_table, *local_names_hashtable);
+ USE(new_table);
+ }
scope_info.set(context_local_info_base + local_index,
Smi::FromInt(info));
break;
@@ -306,7 +335,8 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
}
}
- index += 2 * context_local_count;
+ // Advance past local names and local names info.
+ index += local_names_container_size + context_local_count;
DCHECK_EQ(index, scope_info.SavedClassVariableInfoIndex());
// If the scope is a class scope and has used static private methods, save
@@ -315,7 +345,15 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
if (should_save_class_variable_index) {
Variable* class_variable = scope->AsClassScope()->class_variable();
DCHECK_EQ(class_variable->location(), VariableLocation::CONTEXT);
- scope_info.set(index++, Smi::FromInt(class_variable->index()));
+ int local_index;
+ if (has_inlined_local_names) {
+ local_index = class_variable->index();
+ } else {
+ Handle<Name> name = class_variable->name();
+ InternalIndex entry = local_names_hashtable->FindEntry(isolate, name);
+ local_index = entry.as_int();
+ }
+ scope_info.set(index++, Smi::FromInt(local_index));
}
// If present, add the function variable name and its index.
@@ -368,6 +406,7 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
DCHECK_EQ(index, scope_info_handle->length());
DCHECK_EQ(parameter_count, scope_info_handle->ParameterCount());
DCHECK_EQ(scope->num_heap_slots(), scope_info_handle->ContextLength());
+
return scope_info_handle;
}
@@ -396,9 +435,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
LanguageModeBit::encode(LanguageMode::kSloppy) |
DeclarationScopeBit::encode(false) |
ReceiverVariableBits::encode(VariableAllocationInfo::NONE) |
- HasClassBrandBit::encode(false) |
- HasSavedClassVariableIndexBit::encode(false) |
- HasNewTargetBit::encode(false) |
+ ClassScopeHasPrivateBrandBit::encode(false) |
+ HasSavedClassVariableBit::encode(false) | HasNewTargetBit::encode(false) |
FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
IsAsmModuleBit::encode(false) | HasSimpleParametersBit::encode(true) |
FunctionKindBits::encode(FunctionKind::kNormalFunction) |
@@ -453,6 +491,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
is_empty_function || is_native_context ? 0 : 1;
const bool has_inferred_function_name = is_empty_function;
const bool has_position_info = true;
+ // NOTE: Local names are always inlined here, since context_local_count < 2.
+ DCHECK_LT(context_local_count, kScopeInfoMaxInlinedLocalNamesSize);
const int length = kVariablePartIndex + 2 * context_local_count +
(is_empty_function ? kFunctionNameEntries : 0) +
(has_inferred_function_name ? 1 : 0) +
@@ -470,9 +510,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
DeclarationScopeBit::encode(true) |
ReceiverVariableBits::encode(is_script ? VariableAllocationInfo::CONTEXT
: VariableAllocationInfo::UNUSED) |
- HasClassBrandBit::encode(false) |
- HasSavedClassVariableIndexBit::encode(false) |
- HasNewTargetBit::encode(false) |
+ ClassScopeHasPrivateBrandBit::encode(false) |
+ HasSavedClassVariableBit::encode(false) | HasNewTargetBit::encode(false) |
FunctionVariableBits::encode(is_empty_function
? VariableAllocationInfo::UNUSED
: VariableAllocationInfo::NONE) |
@@ -681,12 +720,12 @@ bool ScopeInfo::HasAllocatedReceiver() const {
allocation == VariableAllocationInfo::CONTEXT;
}
-bool ScopeInfo::HasClassBrand() const {
- return HasClassBrandBit::decode(Flags());
+bool ScopeInfo::ClassScopeHasPrivateBrand() const {
+ return ClassScopeHasPrivateBrandBit::decode(Flags());
}
-bool ScopeInfo::HasSavedClassVariableIndex() const {
- return HasSavedClassVariableIndexBit::decode(Flags());
+bool ScopeInfo::HasSavedClassVariable() const {
+ return HasSavedClassVariableBit::decode(Flags());
}
bool ScopeInfo::HasNewTarget() const {
@@ -714,7 +753,7 @@ bool ScopeInfo::HasPositionInfo() const {
// static
bool ScopeInfo::NeedsPositionInfo(ScopeType type) {
return type == FUNCTION_SCOPE || type == SCRIPT_SCOPE || type == EVAL_SCOPE ||
- type == MODULE_SCOPE;
+ type == MODULE_SCOPE || type == CLASS_SCOPE;
}
bool ScopeInfo::HasSharedFunctionName() const {
@@ -817,10 +856,17 @@ SourceTextModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
return SourceTextModuleInfo::cast(module_info());
}
-String ScopeInfo::ContextLocalName(int var) const {
+String ScopeInfo::ContextInlinedLocalName(int var) const {
+ DCHECK(HasInlinedLocalNames());
return context_local_names(var);
}
+String ScopeInfo::ContextInlinedLocalName(PtrComprCageBase cage_base,
+ int var) const {
+ DCHECK(HasInlinedLocalNames());
+ return context_local_names(cage_base, var);
+}
+
VariableMode ScopeInfo::ContextLocalMode(int var) const {
int value = context_local_infos(var);
return VariableModeBits::decode(value);
@@ -890,41 +936,67 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
return 0;
}
-// static
-int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
+int ScopeInfo::InlinedLocalNamesLookup(String name) {
+ DisallowGarbageCollection no_gc;
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ int local_count = context_local_count();
+ for (int i = 0; i < local_count; ++i) {
+ if (name == ContextInlinedLocalName(cage_base, i)) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+int ScopeInfo::ContextSlotIndex(Handle<String> name,
VariableLookupResult* lookup_result) {
DisallowGarbageCollection no_gc;
- DCHECK(name.IsInternalizedString());
+ DCHECK(name->IsInternalizedString());
DCHECK_NOT_NULL(lookup_result);
- if (scope_info.IsEmpty()) return -1;
-
- int context_local_count = scope_info.context_local_count();
- for (int var = 0; var < context_local_count; ++var) {
- if (name != scope_info.context_local_names(var)) {
- continue;
- }
- lookup_result->mode = scope_info.ContextLocalMode(var);
- lookup_result->is_static_flag = scope_info.ContextLocalIsStaticFlag(var);
- lookup_result->init_flag = scope_info.ContextLocalInitFlag(var);
- lookup_result->maybe_assigned_flag =
- scope_info.ContextLocalMaybeAssignedFlag(var);
- lookup_result->is_repl_mode = scope_info.IsReplModeScope();
- int result = scope_info.ContextHeaderLength() + var;
-
- DCHECK_LT(result, scope_info.ContextLength());
- return result;
+ if (IsEmpty()) return -1;
+
+ int index = HasInlinedLocalNames()
+ ? InlinedLocalNamesLookup(*name)
+ : context_local_names_hashtable().Lookup(name);
+
+ if (index != -1) {
+ lookup_result->mode = ContextLocalMode(index);
+ lookup_result->is_static_flag = ContextLocalIsStaticFlag(index);
+ lookup_result->init_flag = ContextLocalInitFlag(index);
+ lookup_result->maybe_assigned_flag = ContextLocalMaybeAssignedFlag(index);
+ lookup_result->is_repl_mode = IsReplModeScope();
+ int context_slot = ContextHeaderLength() + index;
+ DCHECK_LT(context_slot, ContextLength());
+ return context_slot;
}
return -1;
}
-int ScopeInfo::SavedClassVariableContextLocalIndex() const {
- if (HasSavedClassVariableIndexBit::decode(Flags())) {
- int index = saved_class_variable_info();
- return index - Context::MIN_CONTEXT_SLOTS;
+int ScopeInfo::ContextSlotIndex(Handle<String> name) {
+ VariableLookupResult lookup_result;
+ return ContextSlotIndex(name, &lookup_result);
+}
+
+std::pair<String, int> ScopeInfo::SavedClassVariable() const {
+ DCHECK(HasSavedClassVariableBit::decode(Flags()));
+ if (HasInlinedLocalNames()) {
+ // The saved class variable info corresponds to the context slot index.
+ int index = saved_class_variable_info() - Context::MIN_CONTEXT_SLOTS;
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, ContextLocalCount());
+ String name = ContextInlinedLocalName(index);
+ return std::make_pair(name, index);
+ } else {
+ // The saved class variable info corresponds to the offset in the hash
+ // table storage.
+ InternalIndex entry(saved_class_variable_info());
+ NameToIndexHashTable table = context_local_names_hashtable();
+ Object name = table.KeyAt(entry);
+ DCHECK(name.IsString());
+ return std::make_pair(String::cast(name), table.IndexAt(entry));
}
- return -1;
}
int ScopeInfo::ReceiverContextSlotIndex() const {
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 5544efea9d..06363f2316 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -19,6 +19,9 @@
namespace v8 {
namespace internal {
+// scope-info-tq.inc uses NameToIndexHashTable.
+class NameToIndexHashTable;
+
#include "torque-generated/src/objects/scope-info-tq.inc"
template <typename T>
@@ -88,12 +91,15 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
// or context-allocated?
bool HasAllocatedReceiver() const;
- // Does this scope has class brand (for private methods)?
- bool HasClassBrand() const;
+ // Does this scope has class brand (for private methods)? If it's a class
+ // scope, this indicates whether the class has a private brand. If it's a
+ // constructor scope, this indicates whther it needs to initialize the
+ // brand.
+ bool ClassScopeHasPrivateBrand() const;
- // Does this scope contain a saved class variable context local slot index
- // for checking receivers of static private methods?
- bool HasSavedClassVariableIndex() const;
+ // Does this scope contain a saved class variable for checking receivers of
+ // static private methods?
+ bool HasSavedClassVariable() const;
// Does this scope declare a "new.target" binding?
bool HasNewTarget() const;
@@ -140,8 +146,22 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
SourceTextModuleInfo ModuleDescriptorInfo() const;
- // Return the name of the given context local.
- String ContextLocalName(int var) const;
+ // Return true if the local names are inlined in the scope info object.
+ inline bool HasInlinedLocalNames() const;
+
+ template <typename ScopeInfoPtr>
+ class LocalNamesRange;
+
+ static inline LocalNamesRange<Handle<ScopeInfo>> IterateLocalNames(
+ Handle<ScopeInfo> scope_info);
+
+ static inline LocalNamesRange<ScopeInfo*> IterateLocalNames(
+ ScopeInfo* scope_info, const DisallowGarbageCollection& no_gc);
+
+ // Return the name of a given context local.
+ // It should only be used if inlined local names.
+ String ContextInlinedLocalName(int var) const;
+ String ContextInlinedLocalName(PtrComprCageBase cage_base, int var) const;
// Return the mode of the given context local.
VariableMode ContextLocalMode(int var) const;
@@ -167,8 +187,9 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
// returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != nullptr, sets *mode to the corresponding
// mode for that variable.
- static int ContextSlotIndex(ScopeInfo scope_info, String name,
- VariableLookupResult* lookup_result);
+ int ContextSlotIndex(Handle<String> name);
+ int ContextSlotIndex(Handle<String> name,
+ VariableLookupResult* lookup_result);
// Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
// module variable with the given name (the index value of a MODULE variable
@@ -193,11 +214,10 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
// Returns the first parameter context slot index.
int ParametersStartIndex() const;
- // Lookup support for serialized scope info. Returns the index of the
- // saved class variable in context local slots if scope is a class scope
+ // Lookup support for serialized scope info. Returns the name and index of
+ // the saved class variable in context local slots if scope is a class scope
// and it contains static private methods that may be accessed.
- // Otherwise returns a value < 0.
- int SavedClassVariableContextLocalIndex() const;
+ std::pair<String, int> SavedClassVariable() const;
FunctionKind function_kind() const;
@@ -287,6 +307,8 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
private:
friend class WebSnapshotDeserializer;
+ int InlinedLocalNamesLookup(String name);
+
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int SavedClassVariableInfoIndex() const;
diff --git a/deps/v8/src/objects/scope-info.tq b/deps/v8/src/objects/scope-info.tq
index 1fefd8e87a..d03228063f 100644
--- a/deps/v8/src/objects/scope-info.tq
+++ b/deps/v8/src/objects/scope-info.tq
@@ -6,7 +6,11 @@ extern macro EmptyScopeInfoConstant(): ScopeInfo;
const kEmptyScopeInfo: ScopeInfo = EmptyScopeInfoConstant();
extern enum ScopeType extends uint32 {
- CLASS_SCOPE, // Also used for the empty scope (for NativeContext & builtins).
+ // The empty scope info for builtins and NativeContexts is allocated
+ // in a way that it gets the first scope type in line, see
+ // Heap::CreateInitialMaps(). It's always guarded with the IsEmpty
+ // bit, so it doesn't matter what scope type it gets.
+ CLASS_SCOPE,
EVAL_SCOPE,
FUNCTION_SCOPE,
MODULE_SCOPE,
@@ -53,8 +57,11 @@ bitfield struct ScopeFlags extends uint31 {
language_mode: LanguageMode: 1 bit;
declaration_scope: bool: 1 bit;
receiver_variable: VariableAllocationInfo: 2 bit;
- has_class_brand: bool: 1 bit;
- has_saved_class_variable_index: bool: 1 bit;
+ // In class scope, this indicates whether the class has a private brand.
+ // In constructor scope, this indicates whether the constructor needs
+ // private brand initialization.
+ class_scope_has_private_brand: bool: 1 bit;
+ has_saved_class_variable: bool: 1 bit;
has_new_target: bool: 1 bit;
// TODO(cbruni): Combine with function variable field when only storing the
// function name.
@@ -97,6 +104,9 @@ struct ModuleVariable {
properties: SmiTagged<VariableProperties>;
}
+const kMaxInlinedLocalNamesSize:
+ constexpr int32 generates 'kScopeInfoMaxInlinedLocalNamesSize';
+
@generateBodyDescriptor
extern class ScopeInfo extends HeapObject {
const flags: SmiTagged<ScopeFlags>;
@@ -108,10 +118,17 @@ extern class ScopeInfo extends HeapObject {
// context.
const context_local_count: Smi;
- // Contains the names of local variables and parameters that are allocated
- // in the context. They are stored in increasing order of the context slot
- // index starting with Context::MIN_CONTEXT_SLOTS.
- context_local_names[context_local_count]: String;
+ // Contains the names of inlined local variables and parameters that are
+ // allocated in the context. They are stored in increasing order of the
+ // context slot index starting with Context::MIN_CONTEXT_SLOTS.
+ context_local_names[Convert<intptr>(context_local_count) < kMaxInlinedLocalNamesSize ? context_local_count : 0]:
+ String;
+
+ // Contains a hash_map from local names to context slot index.
+ // This is only used when local names are not inlined in the scope info.
+ context_local_names_hashtable?
+ [kMaxInlinedLocalNamesSize <= Convert<intptr>(context_local_count)]:
+ NameToIndexHashTable;
// Contains the variable modes and initialization flags corresponding to
// the context locals in ContextLocalNames.
@@ -119,8 +136,9 @@ extern class ScopeInfo extends HeapObject {
// If the scope is a class scope and it has static private methods that
// may be accessed directly or through eval, one slot is reserved to hold
- // the context slot index for the class variable.
- saved_class_variable_info?[flags.has_saved_class_variable_index]: Smi;
+ // the offset in the field storage of the hash table (or the slot index if
+ // local names are inlined) for the class variable.
+ saved_class_variable_info?[flags.has_saved_class_variable]: Smi;
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
@@ -139,7 +157,9 @@ extern class ScopeInfo extends HeapObject {
[flags.scope_type == ScopeType::FUNCTION_SCOPE ||
flags.scope_type == ScopeType::SCRIPT_SCOPE ||
flags.scope_type == ScopeType::EVAL_SCOPE ||
- flags.scope_type == ScopeType::MODULE_SCOPE]: PositionInfo;
+ flags.scope_type == ScopeType::MODULE_SCOPE ||
+ (flags.is_empty ? false : flags.scope_type == ScopeType::CLASS_SCOPE)]:
+ PositionInfo;
outer_scope_info?[flags.has_outer_scope_info]: ScopeInfo|TheHole;
@@ -159,11 +179,11 @@ extern class ScopeInfo extends HeapObject {
ModuleVariable;
}
-// Returns the index of the named local in a ScopeInfo.
-// Assumes that the given name is internalized; uses pointer comparisons.
-@export
-macro IndexOfLocalName(scopeInfo: ScopeInfo, name: Name):
- intptr labels NotFound {
+extern macro NameToIndexHashTableLookup(
+ NameToIndexHashTable, Name): intptr labels NotFound;
+
+macro IndexOfInlinedLocalName(
+ scopeInfo: ScopeInfo, name: Name): intptr labels NotFound {
const count: intptr = Convert<intptr>(scopeInfo.context_local_count);
for (let i: intptr = 0; i < count; ++i) {
if (TaggedEqual(name, scopeInfo.context_local_names[i])) {
@@ -172,3 +192,17 @@ macro IndexOfLocalName(scopeInfo: ScopeInfo, name: Name):
}
goto NotFound;
}
+
+// Returns the index of the named local in a ScopeInfo.
+// Assumes that the given name is internalized; uses pointer comparisons.
+@export
+macro IndexOfLocalName(scopeInfo: ScopeInfo, name: Name):
+ intptr labels NotFound {
+ const count: intptr = Convert<intptr>(scopeInfo.context_local_count);
+ if (count < kMaxInlinedLocalNamesSize) {
+ return IndexOfInlinedLocalName(scopeInfo, name) otherwise goto NotFound;
+ } else {
+ return NameToIndexHashTableLookup(
+ scopeInfo.context_local_names_hashtable, name) otherwise goto NotFound;
+ }
+}
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 09a65dbb1b..776209f4d4 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -191,6 +191,13 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
V8_EXPORT_PRIVATE bool GetPositionInfo(int position, PositionInfo* info,
OffsetFlag offset_flag) const;
+ // Tells whether this script should be subject to debugging, e.g. for
+ // - scope inspection
+ // - internal break points
+ // - coverage and type profile
+ // - error stack trace
+ bool IsSubjectToDebugging() const;
+
bool IsUserJavaScript() const;
// Wrappers for GetPositionInfo
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index a54ea4599f..c8783c362e 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -122,8 +122,7 @@ DEF_ACQUIRE_GETTER(SharedFunctionInfo,
uint16_t SharedFunctionInfo::internal_formal_parameter_count_with_receiver()
const {
const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
- if (param_count == kDontAdaptArgumentsSentinel) return param_count;
- return param_count + (kJSArgcIncludesReceiver ? 0 : 1);
+ return param_count;
}
uint16_t SharedFunctionInfo::internal_formal_parameter_count_without_receiver()
@@ -139,8 +138,8 @@ void SharedFunctionInfo::set_internal_formal_parameter_count(int value) {
TorqueGeneratedClass::set_formal_parameter_count(value);
}
-RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
- function_token_offset)
+RENAME_PRIMITIVE_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
+ function_token_offset, uint16_t)
RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
int32_t SharedFunctionInfo::relaxed_flags() const {
@@ -225,7 +224,7 @@ bool SharedFunctionInfo::AreSourcePositionsAvailable(IsolateT* isolate) const {
template <typename IsolateT>
SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
- IsolateT* isolate, bool is_turboprop) const {
+ IsolateT* isolate) const {
if (!script().IsScript()) return kHasNoScript;
if (GetIsolate()->is_precise_binary_code_coverage() &&
@@ -244,11 +243,7 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
// inline.
if (!HasBytecodeArray()) return kHasNoBytecode;
- int max_inlined_size = FLAG_max_inlined_bytecode_size;
- if (is_turboprop) {
- max_inlined_size = max_inlined_size / FLAG_turboprop_inline_scaling_factor;
- }
- if (GetBytecodeArray(isolate).length() > max_inlined_size) {
+ if (GetBytecodeArray(isolate).length() > FLAG_max_inlined_bytecode_size) {
return kExceedsBytecodeLimit;
}
@@ -307,6 +302,17 @@ BailoutReason SharedFunctionInfo::disabled_optimization_reason() const {
return DisabledOptimizationReasonBits::decode(flags(kRelaxedLoad));
}
+OSRCodeCacheStateOfSFI SharedFunctionInfo::osr_code_cache_state() const {
+ return OsrCodeCacheStateBits::decode(flags(kRelaxedLoad));
+}
+
+void SharedFunctionInfo::set_osr_code_cache_state(
+ OSRCodeCacheStateOfSFI state) {
+ int hints = flags(kRelaxedLoad);
+ hints = OsrCodeCacheStateBits::update(hints, state);
+ set_flags(hints, kRelaxedStore);
+}
+
LanguageMode SharedFunctionInfo::language_mode() const {
STATIC_ASSERT(LanguageModeSize == 2);
return construct_language_mode(IsStrictBit::decode(flags(kRelaxedLoad)));
@@ -402,8 +408,6 @@ bool SharedFunctionInfo::IsDontAdaptArguments() const {
kDontAdaptArgumentsSentinel;
}
-bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
-
DEF_ACQUIRE_GETTER(SharedFunctionInfo, scope_info, ScopeInfo) {
Object maybe_scope_info = name_or_scope_info(cage_base, kAcquireLoad);
if (maybe_scope_info.IsScopeInfo(cage_base)) {
@@ -712,6 +716,10 @@ bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
return function_data(kAcquireLoad).IsWasmCapiFunctionData();
}
+bool SharedFunctionInfo::HasWasmOnFulfilledData() const {
+ return function_data(kAcquireLoad).IsWasmOnFulfilledData();
+}
+
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
return AsmWasmData::cast(function_data(kAcquireLoad));
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index a62688c6ee..624e7a26e6 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -79,12 +79,12 @@ CodeT SharedFunctionInfo::GetCode() const {
if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
- return isolate->builtins()->codet(builtin_id());
+ return isolate->builtins()->code(builtin_id());
}
if (data.IsBytecodeArray()) {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
- return isolate->builtins()->codet(Builtin::kInterpreterEntryTrampoline);
+ return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
}
if (data.IsCodeT()) {
// Having baseline Code means we are a compiled, baseline function.
@@ -95,34 +95,37 @@ CodeT SharedFunctionInfo::GetCode() const {
if (data.IsAsmWasmData()) {
// Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
- return isolate->builtins()->codet(Builtin::kInstantiateAsmJs);
+ return isolate->builtins()->code(Builtin::kInstantiateAsmJs);
}
if (data.IsWasmExportedFunctionData()) {
// Having a WasmExportedFunctionData means the code is in there.
DCHECK(HasWasmExportedFunctionData());
- return ToCodeT(wasm_exported_function_data().wrapper_code());
+ return wasm_exported_function_data().wrapper_code();
}
if (data.IsWasmJSFunctionData()) {
- return ToCodeT(wasm_js_function_data().wrapper_code());
+ return wasm_js_function_data().wrapper_code();
}
if (data.IsWasmCapiFunctionData()) {
- return ToCodeT(wasm_capi_function_data().wrapper_code());
+ return wasm_capi_function_data().wrapper_code();
+ }
+ if (data.IsWasmOnFulfilledData()) {
+ return isolate->builtins()->code(Builtin::kWasmResume);
}
#endif // V8_ENABLE_WEBASSEMBLY
if (data.IsUncompiledData()) {
// Having uncompiled data (with or without scope) means we need to compile.
DCHECK(HasUncompiledData());
- return isolate->builtins()->codet(Builtin::kCompileLazy);
+ return isolate->builtins()->code(Builtin::kCompileLazy);
}
if (data.IsFunctionTemplateInfo()) {
// Having a function template info means we are an API function.
DCHECK(IsApiFunction());
- return isolate->builtins()->codet(Builtin::kHandleApiCall);
+ return isolate->builtins()->code(Builtin::kHandleApiCall);
}
if (data.IsInterpreterData()) {
CodeT code = InterpreterTrampoline();
DCHECK(code.IsCodeT());
- DCHECK(FromCodeT(code).is_interpreter_trampoline_builtin());
+ DCHECK(code.is_interpreter_trampoline_builtin());
return code;
}
UNREACHABLE();
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index f7c27455e1..dd33a8ce12 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -15,6 +15,7 @@
#include "src/objects/function-kind.h"
#include "src/objects/function-syntax-kind.h"
#include "src/objects/objects.h"
+#include "src/objects/osr-optimized-code-cache.h"
#include "src/objects/script.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
@@ -210,10 +211,6 @@ class SharedFunctionInfo
template <typename IsolateT>
inline AbstractCode abstract_code(IsolateT* isolate);
- // Tells whether or not this shared function info has an attached
- // BytecodeArray.
- inline bool IsInterpreted() const;
-
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
V8_EXPORT_PRIVATE void SetScript(ReadOnlyRoots roots,
@@ -351,6 +348,7 @@ class SharedFunctionInfo
inline bool HasWasmExportedFunctionData() const;
inline bool HasWasmJSFunctionData() const;
inline bool HasWasmCapiFunctionData() const;
+ inline bool HasWasmOnFulfilledData() const;
inline AsmWasmData asm_wasm_data() const;
inline void set_asm_wasm_data(AsmWasmData data);
@@ -519,6 +517,10 @@ class SharedFunctionInfo
// shared function info.
void DisableOptimization(BailoutReason reason);
+ inline OSRCodeCacheStateOfSFI osr_code_cache_state() const;
+
+ inline void set_osr_code_cache_state(OSRCodeCacheStateOfSFI state);
+
// This class constructor needs to call out to an instance fields
// initializer. This flag is set when creating the
// SharedFunctionInfo as a reminder to emit the initializer call
@@ -577,7 +579,7 @@ class SharedFunctionInfo
};
// Returns the first value that applies (see enum definition for the order).
template <typename IsolateT>
- Inlineability GetInlineability(IsolateT* isolate, bool is_turboprop) const;
+ Inlineability GetInlineability(IsolateT* isolate) const;
// Source size of this function.
int SourceSize();
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index ab6eec747c..048e871e7e 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -17,6 +17,7 @@ extern class InterpreterData extends Struct {
type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';
+type OSRCodeCacheStateOfSFI extends uint8 constexpr 'OSRCodeCacheStateOfSFI';
bitfield struct SharedFunctionInfoFlags extends uint32 {
// Have FunctionKind first to make it cheaper to access.
@@ -37,6 +38,7 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
is_top_level: bool: 1 bit;
properties_are_final: bool: 1 bit;
private_name_lookup_skips_outer_class: bool: 1 bit;
+ osr_code_cache_state: OSRCodeCacheStateOfSFI: 2 bit;
}
bitfield struct SharedFunctionInfoFlags2 extends uint8 {
@@ -63,8 +65,7 @@ extern class SharedFunctionInfo extends HeapObject {
// [formal_parameter_count]: The number of declared parameters (or the special
// value kDontAdaptArgumentsSentinel to indicate that arguments are passed
// unaltered).
- // In contrast to [length], formal_parameter_count includes the receiver if
- // kJSArgcIncludesReceiver is true.
+ // In contrast to [length], formal_parameter_count includes the receiver.
formal_parameter_count: uint16;
function_token_offset: uint16;
// [expected_nof_properties]: Expected number of properties for the
@@ -83,16 +84,14 @@ extern class SharedFunctionInfo extends HeapObject {
const kDontAdaptArgumentsSentinel: constexpr int32
generates 'kDontAdaptArgumentsSentinel';
-const kJSArgcIncludesReceiver:
- constexpr bool generates 'kJSArgcIncludesReceiver';
+
@export
macro LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(
sfi: SharedFunctionInfo): uint16 {
let formalParameterCount = sfi.formal_parameter_count;
- if (kJSArgcIncludesReceiver) {
- if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
- formalParameterCount = Convert<uint16>(formalParameterCount - 1);
- }
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount =
+ Convert<uint16>(formalParameterCount - kJSArgcReceiverSlots);
}
return formalParameterCount;
}
@@ -100,13 +99,7 @@ macro LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(
@export
macro LoadSharedFunctionInfoFormalParameterCountWithReceiver(
sfi: SharedFunctionInfo): uint16 {
- let formalParameterCount = sfi.formal_parameter_count;
- if (!kJSArgcIncludesReceiver) {
- if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
- formalParameterCount = Convert<uint16>(formalParameterCount + 1);
- }
- }
- return formalParameterCount;
+ return sfi.formal_parameter_count;
}
@export
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index 3d8056bc5f..3dfcfac10d 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -1038,8 +1038,7 @@ MaybeHandle<Object> SourceTextModule::InnerModuleEvaluation(
DCHECK(!module->HasPendingAsyncDependencies());
// 9. Set module.[[AsyncParentModules]] to a new empty List.
- Handle<ArrayList> async_parent_modules = ArrayList::New(isolate, 0);
- module->set_async_parent_modules(*async_parent_modules);
+ module->set_async_parent_modules(ReadOnlyRoots(isolate).empty_array_list());
// 10. Set index to index + 1.
(*dfs_index)++;
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 9a75dd2d06..b4e28bd832 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -15,8 +15,8 @@
#include "src/objects/smi-inl.h"
#include "src/objects/string-table-inl.h"
#include "src/objects/string.h"
-#include "src/security/external-pointer-inl.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer-inl.h"
+#include "src/sandbox/external-pointer.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/utils.h"
@@ -190,9 +190,12 @@ bool StringShape::CanMigrateInParallel() const {
// Shared ThinStrings do not migrate.
return false;
default:
+ // TODO(v8:12007): Set is_shared to true on internalized string when
+ // FLAG_shared_string_table is removed.
+ //
// If you crashed here, you probably added a new shared string
// type. Explicitly handle all shared string cases above.
- DCHECK(!IsShared());
+ DCHECK((FLAG_shared_string_table && IsInternalized()) || !IsShared());
return false;
}
}
@@ -372,27 +375,26 @@ class SequentialStringKey final : public StringTableKey {
return s.IsEqualTo<String::EqualityType::kNoLengthCheck>(chars_, isolate);
}
- Handle<String> AsHandle(Isolate* isolate) {
+ template <typename IsolateT>
+ void PrepareForInsertion(IsolateT* isolate) {
if (sizeof(Char) == 1) {
- return isolate->factory()->NewOneByteInternalizedString(
+ internalized_string_ = isolate->factory()->NewOneByteInternalizedString(
base::Vector<const uint8_t>::cast(chars_), raw_hash_field());
+ } else {
+ internalized_string_ = isolate->factory()->NewTwoByteInternalizedString(
+ base::Vector<const uint16_t>::cast(chars_), raw_hash_field());
}
- return isolate->factory()->NewTwoByteInternalizedString(
- base::Vector<const uint16_t>::cast(chars_), raw_hash_field());
}
- Handle<String> AsHandle(LocalIsolate* isolate) {
- if (sizeof(Char) == 1) {
- return isolate->factory()->NewOneByteInternalizedString(
- base::Vector<const uint8_t>::cast(chars_), raw_hash_field());
- }
- return isolate->factory()->NewTwoByteInternalizedString(
- base::Vector<const uint16_t>::cast(chars_), raw_hash_field());
+ Handle<String> GetHandleForInsertion() {
+ DCHECK(!internalized_string_.is_null());
+ return internalized_string_;
}
private:
base::Vector<const Char> chars_;
bool convert_;
+ Handle<String> internalized_string_;
};
using OneByteStringKey = SequentialStringKey<uint8_t>;
@@ -440,7 +442,7 @@ class SeqSubStringKey final : public StringTableKey {
isolate);
}
- Handle<String> AsHandle(Isolate* isolate) {
+ void PrepareForInsertion(Isolate* isolate) {
if (sizeof(Char) == 1 || (sizeof(Char) == 2 && convert_)) {
Handle<SeqOneByteString> result =
isolate->factory()->AllocateRawOneByteInternalizedString(
@@ -448,7 +450,7 @@ class SeqSubStringKey final : public StringTableKey {
DisallowGarbageCollection no_gc;
CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
length());
- return result;
+ internalized_string_ = result;
}
Handle<SeqTwoByteString> result =
isolate->factory()->AllocateRawTwoByteInternalizedString(
@@ -456,13 +458,19 @@ class SeqSubStringKey final : public StringTableKey {
DisallowGarbageCollection no_gc;
CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
length());
- return result;
+ internalized_string_ = result;
+ }
+
+ Handle<String> GetHandleForInsertion() {
+ DCHECK(!internalized_string_.is_null());
+ return internalized_string_;
}
private:
Handle<typename CharTraits<Char>::String> string_;
int from_;
bool convert_;
+ Handle<String> internalized_string_;
};
using SeqOneByteSubStringKey = SeqSubStringKey<SeqOneByteString>;
@@ -633,6 +641,7 @@ const Char* String::GetChars(
access_guard);
}
+// static
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
DisallowGarbageCollection no_gc; // Unhandlified code.
@@ -662,6 +671,7 @@ Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
return handle(s, isolate);
}
+// static
Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
AllocationType allocation) {
// We should never pass non-flat strings to String::Flatten when off-thread.
@@ -707,7 +717,10 @@ String::FlatContent String::GetFlatContent(
{
Isolate* isolate;
// We don't have to check read only strings as those won't move.
- DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ //
+ // TODO(v8:12007): Currently character data is never overwritten for
+ // shared strings.
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate) && !InSharedHeap(),
ThreadId::Current() == isolate->thread_id());
}
#endif
@@ -715,6 +728,51 @@ String::FlatContent String::GetFlatContent(
return GetFlatContent(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
}
+String::FlatContent::FlatContent(const uint8_t* start, int length,
+ const DisallowGarbageCollection& no_gc)
+ : onebyte_start(start), length_(length), state_(ONE_BYTE), no_gc_(no_gc) {
+#ifdef ENABLE_SLOW_DCHECKS
+ checksum_ = ComputeChecksum();
+#endif
+}
+
+String::FlatContent::FlatContent(const base::uc16* start, int length,
+ const DisallowGarbageCollection& no_gc)
+ : twobyte_start(start), length_(length), state_(TWO_BYTE), no_gc_(no_gc) {
+#ifdef ENABLE_SLOW_DCHECKS
+ checksum_ = ComputeChecksum();
+#endif
+}
+
+String::FlatContent::~FlatContent() {
+ // When ENABLE_SLOW_DCHECKS, check the string contents did not change during
+ // the lifetime of the FlatContent. To avoid extra memory use, only the hash
+ // is checked instead of snapshotting the full character data.
+ //
+ // If you crashed here, it means something changed the character data of this
+ // FlatContent during its lifetime (e.g. GC relocated the string). This is
+ // almost always a bug. If you are certain it is not a bug, you can disable
+ // the checksum verification in the caller by calling
+ // UnsafeDisableChecksumVerification().
+ SLOW_DCHECK(checksum_ == kChecksumVerificationDisabled ||
+ checksum_ == ComputeChecksum());
+}
+
+#ifdef ENABLE_SLOW_DCHECKS
+uint32_t String::FlatContent::ComputeChecksum() const {
+ constexpr uint64_t hashseed = 1;
+ uint32_t hash;
+ if (state_ == ONE_BYTE) {
+ hash = StringHasher::HashSequentialString(onebyte_start, length_, hashseed);
+ } else {
+ DCHECK_EQ(TWO_BYTE, state_);
+ hash = StringHasher::HashSequentialString(twobyte_start, length_, hashseed);
+ }
+ DCHECK_NE(kChecksumVerificationDisabled, hash);
+ return hash;
+}
+#endif
+
String::FlatContent String::GetFlatContent(
const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) {
@@ -1046,13 +1104,15 @@ bool ExternalString::is_uncached() const {
}
void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kResourceOffset, isolate);
+ InitExternalPointerField(kResourceOffset, isolate,
+ kExternalStringResourceTag);
if (is_uncached()) return;
- InitExternalPointerField(kResourceDataOffset, isolate);
+ InitExternalPointerField(kResourceDataOffset, isolate,
+ kExternalStringResourceDataTag);
}
DEF_GETTER(ExternalString, resource_as_address, Address) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Isolate* isolate = GetIsolateForSandbox(*this);
return ReadExternalPointerField(kResourceOffset, isolate,
kExternalStringResourceTag);
}
@@ -1348,7 +1408,7 @@ bool String::AsArrayIndex(uint32_t* index) {
*index = ArrayIndexValueBits::decode(field);
return true;
}
- if (IsHashFieldComputed(field) && (field & kIsNotIntegerIndexMask)) {
+ if (IsHashFieldComputed(field) && !IsIntegerIndex(field)) {
return false;
}
return SlowAsArrayIndex(index);
@@ -1360,7 +1420,7 @@ bool String::AsIntegerIndex(size_t* index) {
*index = ArrayIndexValueBits::decode(field);
return true;
}
- if (IsHashFieldComputed(field) && (field & kIsNotIntegerIndexMask)) {
+ if (IsHashFieldComputed(field) && !IsIntegerIndex(field)) {
return false;
}
return SlowAsIntegerIndex(index);
@@ -1434,6 +1494,13 @@ bool String::IsInPlaceInternalizable(InstanceType instance_type) {
}
}
+// static
+bool String::IsInPlaceInternalizableExcludingExternal(
+ InstanceType instance_type) {
+ return IsInPlaceInternalizable(instance_type) &&
+ !InstanceTypeChecker::IsExternalString(instance_type);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
index 4a33dd8c5c..511a821216 100644
--- a/deps/v8/src/objects/string-table-inl.h
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -21,7 +21,7 @@ void StringTableKey::set_raw_hash_field(uint32_t raw_hash_field) {
}
uint32_t StringTableKey::hash() const {
- return raw_hash_field_ >> Name::kHashShift;
+ return Name::HashBits::decode(raw_hash_field_);
}
} // namespace internal
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index 7d9e9d898d..e20ed29484 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -361,55 +361,73 @@ class InternalizedStringKey final : public StringTableKey {
return string_->SlowEquals(string);
}
- Handle<String> AsHandle(Isolate* isolate) {
- // Internalize the string in-place if possible.
- MaybeHandle<Map> maybe_internalized_map;
+ void PrepareForInsertion(Isolate* isolate) {
StringTransitionStrategy strategy =
isolate->factory()->ComputeInternalizationStrategyForString(
- string_, &maybe_internalized_map);
+ string_, &maybe_internalized_map_);
switch (strategy) {
case StringTransitionStrategy::kCopy:
break;
case StringTransitionStrategy::kInPlace:
- // A relaxed write is sufficient here even with concurrent
- // internalization. Though it is not synchronizing, a thread that does
- // not see the relaxed write will wait on the string table write
- // mutex. When that thread acquires that mutex, the ordering of the
- // mutex's underlying memory access will force this map update to become
- // visible to it.
- string_->set_map_no_write_barrier(
- *maybe_internalized_map.ToHandleChecked());
- DCHECK(string_->IsInternalizedString());
- return string_;
+ // In-place transition will be done in GetHandleForInsertion, when we
+ // are sure that we are going to insert the string into the table.
+ return;
case StringTransitionStrategy::kAlreadyTransitioned:
// We can see already internalized strings here only when sharing the
// string table and allowing concurrent internalization.
DCHECK(FLAG_shared_string_table);
- return string_;
+ return;
}
+ // Copying the string here is always threadsafe, as no instance type
+ // requiring a copy can transition any further.
+ StringShape shape(*string_);
// External strings get special treatment, to avoid copying their
// contents as long as they are not uncached.
- StringShape shape(*string_);
if (shape.IsExternalOneByte() && !shape.IsUncachedExternal()) {
// TODO(syg): External strings not yet supported.
DCHECK(!FLAG_shared_string_table);
- return isolate->factory()
- ->InternalizeExternalString<ExternalOneByteString>(string_);
+ string_ =
+ isolate->factory()->InternalizeExternalString<ExternalOneByteString>(
+ string_);
} else if (shape.IsExternalTwoByte() && !shape.IsUncachedExternal()) {
// TODO(syg): External strings not yet supported.
DCHECK(!FLAG_shared_string_table);
- return isolate->factory()
- ->InternalizeExternalString<ExternalTwoByteString>(string_);
+ string_ =
+ isolate->factory()->InternalizeExternalString<ExternalTwoByteString>(
+ string_);
} else {
// Otherwise allocate a new internalized string.
- return isolate->factory()->NewInternalizedStringImpl(
+ string_ = isolate->factory()->NewInternalizedStringImpl(
string_, string_->length(), string_->raw_hash_field());
}
}
+ Handle<String> GetHandleForInsertion() {
+ Handle<Map> internalized_map;
+ // When preparing the string, the strategy was to in-place migrate it.
+ if (maybe_internalized_map_.ToHandle(&internalized_map)) {
+ // It is always safe to overwrite the map. The only transition possible
+ // is another thread migrated the string to internalized already.
+ // Migrations to thin are impossible, as we only call this method on table
+ // misses inside the critical section.
+ string_->set_map_no_write_barrier(*internalized_map);
+ DCHECK(string_->IsInternalizedString());
+ return string_;
+ }
+ // We prepared an internalized copy for the string or the string was already
+ // internalized.
+ // In theory we could have created a copy of a SeqString in young generation
+ // that has been promoted to old space by now. In that case we could
+ // in-place migrate the original string instead of internalizing the copy
+ // and migrating the original string to a ThinString. This scenario doesn't
+ // seem to be common enough to justify re-computing the strategy here.
+ return string_;
+ }
+
private:
Handle<String> string_;
+ MaybeHandle<Map> maybe_internalized_map_;
};
Handle<String> StringTable::LookupString(Isolate* isolate,
@@ -510,14 +528,7 @@ Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
}
// No entry found, so adding new string.
-
- // Allocate the string before the first insertion attempt, reuse this
- // allocated value on insertion retries. If another thread concurrently
- // allocates the same string, the insert will fail, the lookup above will
- // succeed, and this string will be discarded.
- Handle<String> new_string = key->AsHandle(isolate);
- DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
-
+ key->PrepareForInsertion(isolate);
{
base::MutexGuard table_write_guard(&write_mutex_);
@@ -531,12 +542,16 @@ Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
if (element == empty_element()) {
// This entry is empty, so write it and register that we added an
// element.
+ Handle<String> new_string = key->GetHandleForInsertion();
+ DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
data->Set(entry, *new_string);
data->ElementAdded();
return new_string;
} else if (element == deleted_element()) {
// This entry was deleted, so overwrite it and register that we
// overwrote a deleted element.
+ Handle<String> new_string = key->GetHandleForInsertion();
+ DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
data->Set(entry, *new_string);
data->DeletedElementOverwritten();
return new_string;
@@ -647,7 +662,7 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
.ptr();
}
- if ((raw_hash_field & Name::kIsNotIntegerIndexMask) == 0) {
+ if (Name::IsIntegerIndex(raw_hash_field)) {
// It is an index, but it's not cached.
return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
}
@@ -663,7 +678,15 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
}
String internalized = String::cast(string_table_data->Get(isolate, entry));
- string.MakeThin(isolate, internalized);
+ // string can be internalized here, if another thread internalized it.
+ // If we found and entry in the string table and string is not internalized,
+ // there is no way that it can transition to internalized later on. So a last
+ // check here is sufficient.
+ if (!string.IsInternalizedString()) {
+ string.MakeThin(isolate, internalized);
+ } else {
+ DCHECK(FLAG_shared_string_table);
+ }
return internalized.ptr();
}
@@ -671,7 +694,12 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
Address raw_string) {
String string = String::cast(Object(raw_string));
- DCHECK(!string.IsInternalizedString());
+ if (string.IsInternalizedString()) {
+ // string could be internalized, if the string table is shared and another
+ // thread internalized it.
+ DCHECK(FLAG_shared_string_table);
+ return raw_string;
+ }
// Valid array indices are >= 0, so they cannot be mixed up with any of
// the result sentinels, which are negative.
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index aea42741d2..ce72d499d4 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -288,7 +288,7 @@ StringMigrationResult MigrateStringMapUnderLockIfNeeded(
CHECK(string.release_compare_and_swap_map_word(
MapWord::FromMap(sentinel_map), MapWord::FromMap(target_map)));
} else {
- string.set_map(target_map, kReleaseStore);
+ string.set_map_safe_transition(target_map, kReleaseStore);
}
return StringMigrationResult::kThisThreadMigrated;
@@ -308,11 +308,12 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
Map initial_map = this->map(kAcquireLoad);
StringShape initial_shape(initial_map);
- // Another thread may have already migrated the string.
- if (initial_shape.IsThin()) {
- DCHECK(initial_shape.IsShared());
- return;
- }
+ // TODO(v8:12007): Support shared ThinStrings.
+ //
+ // Currently in-place migrations to ThinStrings are disabled for shared
+ // strings to unblock prototyping.
+ if (initial_shape.IsShared()) return;
+ DCHECK(!initial_shape.IsThin());
bool has_pointers = initial_shape.IsIndirect();
int old_size = this->SizeFromMap(initial_map);
@@ -336,7 +337,9 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
break;
case StringMigrationResult::kAnotherThreadMigrated:
// Nothing to do.
- return;
+ //
+ // TODO(v8:12007): Support shared ThinStrings.
+ UNREACHABLE();
}
ThinString thin = ThinString::cast(*this);
@@ -538,6 +541,10 @@ bool String::SupportsExternalization() {
return false;
}
+ // External strings in the shared heap conflicts with the heap sandbox at the
+ // moment. Disable it until supported.
+ if (InSharedHeap()) return false;
+
#ifdef V8_COMPRESS_POINTERS
// Small strings may not be in-place externalizable.
if (this->Size() < ExternalString::kUncachedSize) return false;
@@ -1629,7 +1636,7 @@ uint32_t String::ComputeAndSetHash(
// Check the hash code is there.
DCHECK(HasHashCode());
- uint32_t result = raw_hash_field >> kHashShift;
+ uint32_t result = HashBits::decode(raw_hash_field);
DCHECK_NE(result, 0); // Ensure that the hash value of 0 is never computed.
return result;
}
@@ -1640,7 +1647,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
if (length <= kMaxCachedArrayIndexLength) {
EnsureHash(); // Force computation of hash code.
uint32_t field = raw_hash_field();
- if ((field & kIsNotIntegerIndexMask) != 0) return false;
+ if (!IsIntegerIndex(field)) return false;
*index = ArrayIndexValueBits::decode(field);
return true;
}
@@ -1655,7 +1662,7 @@ bool String::SlowAsIntegerIndex(size_t* index) {
if (length <= kMaxCachedArrayIndexLength) {
EnsureHash(); // Force computation of hash code.
uint32_t field = raw_hash_field();
- if ((field & kIsNotIntegerIndexMask) != 0) return false;
+ if (!IsIntegerIndex(field)) return false;
*index = ArrayIndexValueBits::decode(field);
return true;
}
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 092e5e707b..838ef304d3 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -116,6 +116,8 @@ class String : public TorqueGeneratedString<String, Name> {
// FlatStringReader is relocatable.
class FlatContent {
public:
+ inline ~FlatContent();
+
// Returns true if the string is flat and this structure contains content.
bool IsFlat() const { return state_ != NON_FLAT; }
// Returns true if the structure contains one-byte content.
@@ -147,24 +149,27 @@ class String : public TorqueGeneratedString<String, Name> {
return onebyte_start == other.onebyte_start;
}
+ // It is almost always a bug if the contents of a FlatContent changes during
+ // its lifetime, which can happen due to GC or bugs in concurrent string
+ // access. Rarely, callers need the ability to GC and have ensured safety in
+ // other ways, such as in IrregexpInterpreter. Those callers can disable the
+ // checksum verification with this call.
+ void UnsafeDisableChecksumVerification() {
+#ifdef ENABLE_SLOW_DCHECKS
+ checksum_ = kChecksumVerificationDisabled;
+#endif
+ }
+
int length() const { return length_; }
private:
enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- FlatContent(const uint8_t* start, int length,
- const DisallowGarbageCollection& no_gc)
- : onebyte_start(start),
- length_(length),
- state_(ONE_BYTE),
- no_gc_(no_gc) {}
- FlatContent(const base::uc16* start, int length,
- const DisallowGarbageCollection& no_gc)
- : twobyte_start(start),
- length_(length),
- state_(TWO_BYTE),
- no_gc_(no_gc) {}
+ inline FlatContent(const uint8_t* start, int length,
+ const DisallowGarbageCollection& no_gc);
+ inline FlatContent(const base::uc16* start, int length,
+ const DisallowGarbageCollection& no_gc);
explicit FlatContent(const DisallowGarbageCollection& no_gc)
: onebyte_start(nullptr), length_(0), state_(NON_FLAT), no_gc_(no_gc) {}
@@ -176,6 +181,14 @@ class String : public TorqueGeneratedString<String, Name> {
State state_;
const DisallowGarbageCollection& no_gc_;
+ static constexpr uint32_t kChecksumVerificationDisabled = 0;
+
+#ifdef ENABLE_SLOW_DCHECKS
+ inline uint32_t ComputeChecksum() const;
+
+ uint32_t checksum_;
+#endif
+
friend class String;
friend class IterableSubString;
};
@@ -577,6 +590,9 @@ class String : public TorqueGeneratedString<String, Name> {
static inline bool IsInPlaceInternalizable(String string);
static inline bool IsInPlaceInternalizable(InstanceType instance_type);
+ static inline bool IsInPlaceInternalizableExcludingExternal(
+ InstanceType instance_type);
+
private:
friend class Name;
friend class StringTableInsertionKey;
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.h b/deps/v8/src/objects/swiss-hash-table-helpers.h
index 98a1abd39d..792742b243 100644
--- a/deps/v8/src/objects/swiss-hash-table-helpers.h
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.h
@@ -6,6 +6,7 @@
// container, like SwissNameDictionary. Taken almost in verbatim from Abseil,
// comments in this file indicate what is taken from what Abseil file.
+#include <climits>
#include <cstdint>
#include <type_traits>
diff --git a/deps/v8/src/objects/symbol-table.cc b/deps/v8/src/objects/symbol-table.cc
new file mode 100644
index 0000000000..4dcce78d6d
--- /dev/null
+++ b/deps/v8/src/objects/symbol-table.cc
@@ -0,0 +1,22 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/hash-table-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Object RegisteredSymbolTable::SlowReverseLookup(Object value) {
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ for (InternalIndex i : this->IterateEntries()) {
+ Object k;
+ if (!this->ToKey(roots, i, &k)) continue;
+ Object e = this->ValueAt(i);
+ if (e == value) return k;
+ }
+ return roots.undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index 5c3a18982e..e1b73bd2bb 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -202,6 +202,40 @@ Tagged_t TaggedField<T, kFieldOffset>::Release_CompareAndSwap(HeapObject host,
return result;
}
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Load(HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::SeqCst_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Load(PtrComprCageBase cage_base,
+ HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::SeqCst_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ return T(tagged_to_full(cage_base, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::SeqCst_Store(HeapObject host, T value) {
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
+ AsAtomicTagged::SeqCst_Store(location(host), full_to_tagged(ptr));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::SeqCst_Store(HeapObject host, int offset,
+ T value) {
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ AsAtomicTagged::SeqCst_Store(location(host, offset), full_to_tagged(ptr));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..8745d8dfe3 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
@@ -60,6 +60,13 @@ class TaggedField : public AllStatic {
static inline void Release_Store(HeapObject host, T value);
static inline void Release_Store(HeapObject host, int offset, T value);
+ static inline T SeqCst_Load(HeapObject host, int offset = 0);
+ static inline T SeqCst_Load(PtrComprCageBase cage_base, HeapObject host,
+ int offset = 0);
+
+ static inline void SeqCst_Store(HeapObject host, T value);
+ static inline void SeqCst_Store(HeapObject host, int offset, T value);
+
static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
T value);
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 85f19ad487..0bcca301d9 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -19,22 +19,27 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
Isolate* isolate, Handle<NativeContext> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id) {
- // Check the template weakmap to see if the template object already exists.
- Handle<EphemeronHashTable> template_weakmap =
- native_context->template_weakmap().IsUndefined(isolate)
- ? EphemeronHashTable::New(isolate, 0)
- : handle(EphemeronHashTable::cast(native_context->template_weakmap()),
- isolate);
-
uint32_t hash = shared_info->Hash();
- Object maybe_cached_template = template_weakmap->Lookup(shared_info, hash);
- while (!maybe_cached_template.IsTheHole()) {
- CachedTemplateObject cached_template =
- CachedTemplateObject::cast(maybe_cached_template);
- if (cached_template.slot_id() == slot_id)
- return handle(cached_template.template_object(), isolate);
- maybe_cached_template = cached_template.next();
+ // Check the template weakmap to see if the template object already exists.
+ Handle<EphemeronHashTable> template_weakmap;
+
+ if (native_context->template_weakmap().IsUndefined(isolate)) {
+ template_weakmap = EphemeronHashTable::New(isolate, 1);
+ } else {
+ DisallowGarbageCollection no_gc;
+ ReadOnlyRoots roots(isolate);
+ template_weakmap = handle(
+ EphemeronHashTable::cast(native_context->template_weakmap()), isolate);
+ Object maybe_cached_template = template_weakmap->Lookup(shared_info, hash);
+ while (!maybe_cached_template.IsTheHole(roots)) {
+ CachedTemplateObject cached_template =
+ CachedTemplateObject::cast(maybe_cached_template);
+ if (cached_template.slot_id() == slot_id) {
+ return handle(cached_template.template_object(), isolate);
+ }
+ maybe_cached_template = cached_template.next();
+ }
}
// Create the raw object from the {raw_strings}.
@@ -83,13 +88,17 @@ Handle<CachedTemplateObject> CachedTemplateObject::New(
Isolate* isolate, int slot_id, Handle<JSArray> template_object,
Handle<HeapObject> next) {
DCHECK(next->IsCachedTemplateObject() || next->IsTheHole());
- Factory* factory = isolate->factory();
- Handle<CachedTemplateObject> result = Handle<CachedTemplateObject>::cast(
- factory->NewStruct(CACHED_TEMPLATE_OBJECT_TYPE, AllocationType::kOld));
- result->set_slot_id(slot_id);
- result->set_template_object(*template_object);
- result->set_next(*next);
- return result;
+ Handle<CachedTemplateObject> result_handle =
+ Handle<CachedTemplateObject>::cast(isolate->factory()->NewStruct(
+ CACHED_TEMPLATE_OBJECT_TYPE, AllocationType::kOld));
+ {
+ DisallowGarbageCollection no_gc;
+ auto result = *result_handle;
+ result.set_slot_id(slot_id);
+ result.set_template_object(*template_object);
+ result.set_next(*next);
+ }
+ return result_handle;
}
} // namespace internal
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 16f50969ae..cfed97da44 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -17,9 +17,23 @@
namespace v8 {
namespace internal {
+// static
+TransitionArray TransitionsAccessor::GetTransitionArray(
+ Isolate* isolate, MaybeObject raw_transitions) {
+ DCHECK_EQ(kFullTransitionArray, GetEncoding(isolate, raw_transitions));
+ USE(isolate);
+ return TransitionArray::cast(raw_transitions.GetHeapObjectAssumeStrong());
+}
+
+// static
+TransitionArray TransitionsAccessor::GetTransitionArray(Isolate* isolate,
+ Handle<Map> map) {
+ MaybeObject raw_transitions = map->raw_transitions(isolate, kAcquireLoad);
+ return GetTransitionArray(isolate, raw_transitions);
+}
+
TransitionArray TransitionsAccessor::transitions() {
- DCHECK_EQ(kFullTransitionArray, encoding());
- return TransitionArray::cast(raw_transitions_->GetHeapObjectAssumeStrong());
+ return GetTransitionArray(isolate_, raw_transitions_);
}
OBJECT_CONSTRUCTORS_IMPL(TransitionArray, WeakFixedArray)
@@ -193,26 +207,13 @@ int TransitionArray::SearchName(Name name, bool concurrent_search,
}
TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Map map,
- DisallowGarbageCollection* no_gc,
- bool concurrent_access)
- : isolate_(isolate), map_(map), concurrent_access_(concurrent_access) {
- Initialize();
- USE(no_gc);
-}
-
-TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Handle<Map> map,
bool concurrent_access)
: isolate_(isolate),
- map_handle_(map),
- map_(*map),
+ map_(map),
+ raw_transitions_(map.raw_transitions(isolate_, kAcquireLoad)),
+ encoding_(GetEncoding(isolate_, raw_transitions_)),
concurrent_access_(concurrent_access) {
- Initialize();
-}
-
-void TransitionsAccessor::Reload() {
- DCHECK(!map_handle_.is_null());
- map_ = *map_handle_;
- Initialize();
+ DCHECK_IMPLIES(encoding_ == kMigrationTarget, map_.is_deprecated());
}
int TransitionsAccessor::Capacity() { return transitions().Capacity(); }
@@ -239,13 +240,36 @@ TransitionsAccessor::Encoding TransitionsAccessor::GetEncoding(
}
}
-void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
- encoding_ = GetEncoding(isolate_, raw_transitions_);
- DCHECK_IMPLIES(encoding_ == kMigrationTarget, map_.is_deprecated());
-#if DEBUG
- needs_reload_ = false;
-#endif
+// static
+TransitionsAccessor::Encoding TransitionsAccessor::GetEncoding(
+ Isolate* isolate, TransitionArray array) {
+ return GetEncoding(isolate, MaybeObject::FromObject(array));
+}
+
+// static
+TransitionsAccessor::Encoding TransitionsAccessor::GetEncoding(
+ Isolate* isolate, Handle<Map> map) {
+ MaybeObject raw_transitions = map->raw_transitions(isolate, kAcquireLoad);
+ return GetEncoding(isolate, raw_transitions);
+}
+
+// static
+MaybeHandle<Map> TransitionsAccessor::SearchTransition(
+ Isolate* isolate, Handle<Map> map, Name name, PropertyKind kind,
+ PropertyAttributes attributes) {
+ Map result = TransitionsAccessor(isolate, *map)
+ .SearchTransition(name, kind, attributes);
+ if (result.is_null()) return MaybeHandle<Map>();
+ return MaybeHandle<Map>(result, isolate);
+}
+
+// static
+MaybeHandle<Map> TransitionsAccessor::SearchSpecial(Isolate* isolate,
+ Handle<Map> map,
+ Symbol name) {
+ Map result = TransitionsAccessor(isolate, *map).SearchSpecial(name);
+ if (result.is_null()) return MaybeHandle<Map>();
+ return MaybeHandle<Map>(result, isolate);
}
int TransitionArray::number_of_transitions() const {
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 7e83392c86..9fe131ea31 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -12,10 +12,13 @@
namespace v8 {
namespace internal {
-Map TransitionsAccessor::GetSimpleTransition() {
- switch (encoding()) {
+// static
+Map TransitionsAccessor::GetSimpleTransition(Isolate* isolate,
+ Handle<Map> map) {
+ MaybeObject raw_transitions = map->raw_transitions(isolate, kAcquireLoad);
+ switch (GetEncoding(isolate, raw_transitions)) {
case kWeakRef:
- return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ return Map::cast(raw_transitions->GetHeapObjectAssumeWeak());
default:
return Map();
}
@@ -34,56 +37,56 @@ bool TransitionsAccessor::HasSimpleTransitionTo(Map map) {
UNREACHABLE();
}
-void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
+// static
+void TransitionsAccessor::Insert(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name, Handle<Map> target,
SimpleTransitionFlag flag) {
- DCHECK(!concurrent_access_);
- DCHECK(!map_handle_.is_null());
- DCHECK_NE(kPrototypeInfo, encoding());
- target->SetBackPointer(map_);
+ Encoding encoding = GetEncoding(isolate, map);
+ DCHECK_NE(kPrototypeInfo, encoding);
+ target->SetBackPointer(*map);
// If the map doesn't have any transitions at all yet, install the new one.
- if (encoding() == kUninitialized || encoding() == kMigrationTarget) {
+ if (encoding == kUninitialized || encoding == kMigrationTarget) {
if (flag == SIMPLE_PROPERTY_TRANSITION) {
- ReplaceTransitions(HeapObjectReference::Weak(*target));
+ ReplaceTransitions(isolate, map, HeapObjectReference::Weak(*target));
return;
}
// If the flag requires a full TransitionArray, allocate one.
Handle<TransitionArray> result =
- isolate_->factory()->NewTransitionArray(1, 0);
+ isolate->factory()->NewTransitionArray(1, 0);
result->Set(0, *name, HeapObjectReference::Weak(*target));
- ReplaceTransitions(MaybeObject::FromObject(*result));
- Reload();
- DCHECK_EQ(kFullTransitionArray, encoding());
+ ReplaceTransitions(isolate, map, result);
+ DCHECK_EQ(kFullTransitionArray, GetEncoding(isolate, *result));
return;
}
- if (encoding() == kWeakRef) {
- Map simple_transition = GetSimpleTransition();
+ if (encoding == kWeakRef) {
+ Map simple_transition = GetSimpleTransition(isolate, map);
DCHECK(!simple_transition.is_null());
if (flag == SIMPLE_PROPERTY_TRANSITION) {
Name key = GetSimpleTransitionKey(simple_transition);
- PropertyDetails old_details = GetSimpleTargetDetails(simple_transition);
+ PropertyDetails old_details =
+ simple_transition.GetLastDescriptorDetails(isolate);
PropertyDetails new_details = GetTargetDetails(*name, *target);
if (key.Equals(*name) && old_details.kind() == new_details.kind() &&
old_details.attributes() == new_details.attributes()) {
- ReplaceTransitions(HeapObjectReference::Weak(*target));
+ ReplaceTransitions(isolate, map, HeapObjectReference::Weak(*target));
return;
}
}
// Otherwise allocate a full TransitionArray with slack for a new entry.
- Handle<Map> map(simple_transition, isolate_);
Handle<TransitionArray> result =
- isolate_->factory()->NewTransitionArray(1, 1);
- // Reload state; allocations might have caused it to be cleared.
- Reload();
- simple_transition = GetSimpleTransition();
+ isolate->factory()->NewTransitionArray(1, 1);
+
+ // Reload `simple_transition`. Allocations might have caused it to be
+ // cleared.
+ simple_transition = GetSimpleTransition(isolate, map);
if (simple_transition.is_null()) {
result->Set(0, *name, HeapObjectReference::Weak(*target));
- ReplaceTransitions(MaybeObject::FromObject(*result));
- Reload();
- DCHECK_EQ(kFullTransitionArray, encoding());
+ ReplaceTransitions(isolate, map, result);
+ DCHECK_EQ(kFullTransitionArray, GetEncoding(isolate, *result));
return;
}
@@ -116,28 +119,27 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
result->SetRawTarget(insertion_index, HeapObjectReference::Weak(*target));
SLOW_DCHECK(result->IsSortedNoDuplicates());
- ReplaceTransitions(MaybeObject::FromObject(*result));
- Reload();
- DCHECK_EQ(kFullTransitionArray, encoding());
+ ReplaceTransitions(isolate, map, result);
+ DCHECK_EQ(kFullTransitionArray, GetEncoding(isolate, *result));
return;
}
// At this point, we know that the map has a full TransitionArray.
- DCHECK_EQ(kFullTransitionArray, encoding());
+ DCHECK_EQ(kFullTransitionArray, encoding);
int number_of_transitions = 0;
int new_nof = 0;
int insertion_index = kNotFound;
const bool is_special_transition = flag == SPECIAL_TRANSITION;
DCHECK_EQ(is_special_transition,
- IsSpecialTransition(ReadOnlyRoots(isolate_), *name));
+ IsSpecialTransition(ReadOnlyRoots(isolate), *name));
PropertyDetails details = is_special_transition
? PropertyDetails::Empty()
: GetTargetDetails(*name, *target);
{
DisallowGarbageCollection no_gc;
- TransitionArray array = transitions();
+ TransitionArray array = GetTransitionArray(isolate, map);
number_of_transitions = array.number_of_transitions();
int index =
@@ -148,7 +150,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate_->full_transition_array_access());
+ isolate->full_transition_array_access());
array.SetRawTarget(index, HeapObjectReference::Weak(*target));
return;
}
@@ -161,7 +163,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// If there is enough capacity, insert new entry into the existing array.
if (new_nof <= array.Capacity()) {
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate_->full_transition_array_access());
+ isolate->full_transition_array_access());
array.SetNumberOfTransitions(new_nof);
for (int i = number_of_transitions; i > insertion_index; --i) {
array.SetKey(i, array.GetKey(i - 1));
@@ -175,16 +177,15 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
// We're gonna need a bigger TransitionArray.
- Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(
+ Handle<TransitionArray> result = isolate->factory()->NewTransitionArray(
new_nof,
Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
// The map's transition array may have shrunk during the allocation above as
// it was weakly traversed, though it is guaranteed not to disappear. Trim the
// result copy if needed, and recompute variables.
- Reload();
DisallowGarbageCollection no_gc;
- TransitionArray array = transitions();
+ TransitionArray array = GetTransitionArray(isolate, map);
if (array.number_of_transitions() != number_of_transitions) {
DCHECK_LT(array.number_of_transitions(), number_of_transitions);
@@ -217,7 +218,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
SLOW_DCHECK(result->IsSortedNoDuplicates());
- ReplaceTransitions(MaybeObject::FromObject(*result));
+ ReplaceTransitions(isolate, map, result);
}
Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
@@ -305,10 +306,14 @@ void TransitionsAccessor::ForEachTransitionTo(
UNREACHABLE();
}
-bool TransitionsAccessor::CanHaveMoreTransitions() {
- if (map_.is_dictionary_map()) return false;
- if (encoding() == kFullTransitionArray) {
- return transitions().number_of_transitions() < kMaxNumberOfTransitions;
+// static
+bool TransitionsAccessor::CanHaveMoreTransitions(Isolate* isolate,
+ Handle<Map> map) {
+ if (map->is_dictionary_map()) return false;
+ MaybeObject raw_transitions = map->raw_transitions(isolate, kAcquireLoad);
+ if (GetEncoding(isolate, raw_transitions) == kFullTransitionArray) {
+ return GetTransitionArray(isolate, raw_transitions)
+ .number_of_transitions() < kMaxNumberOfTransitions;
}
return true;
}
@@ -375,31 +380,33 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
return array;
}
-void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
+// static
+void TransitionsAccessor::PutPrototypeTransition(Isolate* isolate,
+ Handle<Map> map,
+ Handle<Object> prototype,
Handle<Map> target_map) {
DCHECK(HeapObject::cast(*prototype).map().IsMap());
// Don't cache prototype transition if this map is either shared, or a map of
// a prototype.
- if (map_.is_prototype_map()) return;
- if (map_.is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
+ if (map->is_prototype_map()) return;
+ if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
const int header = TransitionArray::kProtoTransitionHeaderSize;
- Handle<WeakFixedArray> cache(GetPrototypeTransitions(), isolate_);
+ Handle<WeakFixedArray> cache(GetPrototypeTransitions(isolate, map), isolate);
int capacity = cache->length() - header;
int transitions = TransitionArray::NumberOfPrototypeTransitions(*cache) + 1;
base::SharedMutexGuard<base::kExclusive> scope(
- isolate_->full_transition_array_access());
+ isolate->full_transition_array_access());
if (transitions > capacity) {
// Grow the array if compacting it doesn't free space.
- if (!TransitionArray::CompactPrototypeTransitionArray(isolate_, *cache)) {
+ if (!TransitionArray::CompactPrototypeTransitionArray(isolate, *cache)) {
if (capacity == TransitionArray::kMaxCachedPrototypeTransitions) return;
cache = TransitionArray::GrowPrototypeTransitionArray(
- cache, 2 * transitions, isolate_);
- Reload();
- SetPrototypeTransitions(cache);
+ cache, 2 * transitions, isolate);
+ SetPrototypeTransitions(isolate, map, cache);
}
}
@@ -411,10 +418,11 @@ void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
TransitionArray::SetNumberOfPrototypeTransitions(*cache, last + 1);
}
+// static
Handle<Map> TransitionsAccessor::GetPrototypeTransition(
- Handle<Object> prototype) {
+ Isolate* isolate, Handle<Map> map, Handle<Object> prototype) {
DisallowGarbageCollection no_gc;
- WeakFixedArray cache = GetPrototypeTransitions();
+ WeakFixedArray cache = GetPrototypeTransitions(isolate, map);
int length = TransitionArray::NumberOfPrototypeTransitions(cache);
for (int i = 0; i < length; i++) {
MaybeObject target =
@@ -422,21 +430,28 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
DCHECK(target->IsWeakOrCleared());
HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
- Map map = Map::cast(heap_object);
- if (map.prototype() == *prototype) {
- return handle(map, isolate_);
+ Map target_map = Map::cast(heap_object);
+ if (target_map.prototype() == *prototype) {
+ return handle(target_map, isolate);
}
}
}
return Handle<Map>();
}
-WeakFixedArray TransitionsAccessor::GetPrototypeTransitions() {
- if (encoding() != kFullTransitionArray ||
- !transitions().HasPrototypeTransitions()) {
- return ReadOnlyRoots(isolate_).empty_weak_fixed_array();
- }
- return transitions().GetPrototypeTransitions();
+// static
+WeakFixedArray TransitionsAccessor::GetPrototypeTransitions(Isolate* isolate,
+ Handle<Map> map) {
+ MaybeObject raw_transitions = map->raw_transitions(isolate, kAcquireLoad);
+ if (GetEncoding(isolate, raw_transitions) != kFullTransitionArray) {
+ return ReadOnlyRoots(isolate).empty_weak_fixed_array();
+ }
+ TransitionArray transition_array =
+ GetTransitionArray(isolate, raw_transitions);
+ if (!transition_array.HasPrototypeTransitions()) {
+ return ReadOnlyRoots(isolate).empty_weak_fixed_array();
+ }
+ return transition_array.GetPrototypeTransitions();
}
// static
@@ -461,14 +476,15 @@ int TransitionsAccessor::NumberOfTransitions() {
UNREACHABLE();
}
-void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
+// static
+void TransitionsAccessor::SetMigrationTarget(Isolate* isolate, Handle<Map> map,
+ Map migration_target) {
// We only cache the migration target for maps with empty transitions for GC's
// sake.
- if (encoding() != kUninitialized) return;
- DCHECK(map_.is_deprecated());
- map_.set_raw_transitions(MaybeObject::FromObject(migration_target),
+ if (GetEncoding(isolate, map) != kUninitialized) return;
+ DCHECK(map->is_deprecated());
+ map->set_raw_transitions(MaybeObject::FromObject(migration_target),
kReleaseStore);
- MarkNeedsReload();
}
Map TransitionsAccessor::GetMigrationTarget() {
@@ -478,44 +494,60 @@ Map TransitionsAccessor::GetMigrationTarget() {
return Map();
}
-void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
- if (encoding() == kFullTransitionArray) {
+// static
+void TransitionsAccessor::ReplaceTransitions(Isolate* isolate, Handle<Map> map,
+ MaybeObject new_transitions) {
#if DEBUG
- TransitionArray old_transitions = transitions();
+ if (GetEncoding(isolate, map) == kFullTransitionArray) {
CheckNewTransitionsAreConsistent(
- old_transitions, new_transitions->GetHeapObjectAssumeStrong());
- DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
-#endif
+ isolate, map, new_transitions->GetHeapObjectAssumeStrong());
+ DCHECK_NE(GetTransitionArray(isolate, map),
+ new_transitions->GetHeapObjectAssumeStrong());
}
- map_.set_raw_transitions(new_transitions, kReleaseStore);
- MarkNeedsReload();
+#endif
+ map->set_raw_transitions(new_transitions, kReleaseStore);
+ USE(isolate);
}
+// static
+void TransitionsAccessor::ReplaceTransitions(
+ Isolate* isolate, Handle<Map> map,
+ Handle<TransitionArray> new_transitions) {
+ ReplaceTransitions(isolate, map, MaybeObject::FromObject(*new_transitions));
+}
+
+// static
void TransitionsAccessor::SetPrototypeTransitions(
+ Isolate* isolate, Handle<Map> map,
Handle<WeakFixedArray> proto_transitions) {
- EnsureHasFullTransitionArray();
- transitions().SetPrototypeTransitions(*proto_transitions);
+ EnsureHasFullTransitionArray(isolate, map);
+ GetTransitionArray(isolate, map->raw_transitions(isolate, kAcquireLoad))
+ .SetPrototypeTransitions(*proto_transitions);
}
-void TransitionsAccessor::EnsureHasFullTransitionArray() {
- if (encoding() == kFullTransitionArray) return;
+// static
+void TransitionsAccessor::EnsureHasFullTransitionArray(Isolate* isolate,
+ Handle<Map> map) {
+ Encoding encoding =
+ GetEncoding(isolate, map->raw_transitions(isolate, kAcquireLoad));
+ if (encoding == kFullTransitionArray) return;
int nof =
- (encoding() == kUninitialized || encoding() == kMigrationTarget) ? 0 : 1;
- Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(nof);
- Reload(); // Reload after possible GC.
+ (encoding == kUninitialized || encoding == kMigrationTarget) ? 0 : 1;
+ Handle<TransitionArray> result = isolate->factory()->NewTransitionArray(nof);
+ // Reload encoding after possible GC.
+ encoding = GetEncoding(isolate, map->raw_transitions(isolate, kAcquireLoad));
if (nof == 1) {
- if (encoding() == kUninitialized) {
+ if (encoding == kUninitialized) {
// If allocation caused GC and cleared the target, trim the new array.
result->SetNumberOfTransitions(0);
} else {
// Otherwise populate the new array.
- Handle<Map> target(GetSimpleTransition(), isolate_);
- Name key = GetSimpleTransitionKey(*target);
- result->Set(0, key, HeapObjectReference::Weak(*target));
+ Map target = GetSimpleTransition(isolate, map);
+ Name key = GetSimpleTransitionKey(target);
+ result->Set(0, key, HeapObjectReference::Weak(target));
}
}
- ReplaceTransitions(MaybeObject::FromObject(*result));
- Reload(); // Reload after replacing transitions.
+ ReplaceTransitions(isolate, map, result);
}
void TransitionsAccessor::TraverseTransitionTreeInternal(
@@ -575,18 +607,21 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
}
#ifdef DEBUG
-void TransitionsAccessor::CheckNewTransitionsAreConsistent(
- TransitionArray old_transitions, Object transitions) {
+// static
+void TransitionsAccessor::CheckNewTransitionsAreConsistent(Isolate* isolate,
+ Handle<Map> map,
+ Object transitions) {
// This function only handles full transition arrays.
- DCHECK_EQ(kFullTransitionArray, encoding());
+ TransitionArray old_transitions = GetTransitionArray(isolate, map);
+ DCHECK_EQ(kFullTransitionArray, GetEncoding(isolate, old_transitions));
TransitionArray new_transitions = TransitionArray::cast(transitions);
for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
Map target = old_transitions.GetTarget(i);
- if (target.instance_descriptors(isolate_) ==
- map_.instance_descriptors(isolate_)) {
+ if (target.instance_descriptors(isolate) ==
+ map->instance_descriptors(isolate)) {
Name key = old_transitions.GetKey(i);
int new_target_index;
- if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
+ if (IsSpecialTransition(ReadOnlyRoots(isolate), key)) {
new_target_index = new_transitions.SearchSpecial(Symbol::cast(key));
} else {
PropertyDetails details = GetTargetDetails(key, target);
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index c59c48c757..bda8c30d1e 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -50,20 +50,23 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
// in background threads. It acquires a reader lock for critical paths, as
// well as blocking the accessor from modifying the TransitionsArray.
inline TransitionsAccessor(Isolate* isolate, Map map,
- DisallowGarbageCollection* no_gc,
- bool concurrent_access = false);
- inline TransitionsAccessor(Isolate* isolate, Handle<Map> map,
bool concurrent_access = false);
+
// Insert a new transition into |map|'s transition array, extending it
- // as necessary.
- // Requires the constructor that takes a Handle<Map> to have been used.
- // This TransitionsAccessor instance is unusable after this operation.
- void Insert(Handle<Name> name, Handle<Map> target, SimpleTransitionFlag flag);
+ // as necessary. This can trigger GC.
+ static void Insert(Isolate* isolate, Handle<Map> map, Handle<Name> name,
+ Handle<Map> target, SimpleTransitionFlag flag);
Map SearchTransition(Name name, PropertyKind kind,
PropertyAttributes attributes);
+ static inline MaybeHandle<Map> SearchTransition(
+ Isolate* isolate, Handle<Map> map, Name name, PropertyKind kind,
+ PropertyAttributes attributes);
Map SearchSpecial(Symbol name);
+ static inline MaybeHandle<Map> SearchSpecial(Isolate* isolate,
+ Handle<Map> map, Symbol name);
+
// Returns true for non-property transitions like elements kind, or
// or frozen/sealed transitions.
static bool IsSpecialTransition(ReadOnlyRoots roots, Name name);
@@ -92,11 +95,12 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
// object space. Otherwise ClearNonLiveReferences would leak memory while
// applying in-place right trimming.
static const int kMaxNumberOfTransitions = 1024 + 512;
- bool CanHaveMoreTransitions();
inline Name GetKey(int transition_number);
inline Map GetTarget(int transition_number);
static inline PropertyDetails GetTargetDetails(Name name, Map target);
+ static bool CanHaveMoreTransitions(Isolate* isolate, Handle<Map> map);
+
static bool IsMatchingMap(Map target, Name name, PropertyKind kind,
PropertyAttributes attributes);
@@ -124,15 +128,20 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
// prototype is set, rather than creating a new map every time. The
// transitions are in the form of a map where the keys are prototype objects
// and the values are the maps they transition to.
- void PutPrototypeTransition(Handle<Object> prototype, Handle<Map> target_map);
- Handle<Map> GetPrototypeTransition(Handle<Object> prototype);
+ // PutPrototypeTransition can trigger GC.
+ static void PutPrototypeTransition(Isolate* isolate, Handle<Map>,
+ Handle<Object> prototype,
+ Handle<Map> target_map);
+ static Handle<Map> GetPrototypeTransition(Isolate* isolate, Handle<Map> map,
+ Handle<Object> prototype);
// During the first-time Map::Update and Map::TryUpdate, the migration target
// map could be cached in the raw_transitions slot of the old map that is
// deprecated from the map transition tree. The next time old map is updated,
// we will check this cache slot as a shortcut to get the migration target
// map.
- void SetMigrationTarget(Map migration_target);
+ static void SetMigrationTarget(Isolate* isolate, Handle<Map> map,
+ Map migration_target);
Map GetMigrationTarget();
#if DEBUG || OBJECT_PRINT
@@ -143,8 +152,9 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
DisallowGarbageCollection* no_gc);
#endif
#if DEBUG
- void CheckNewTransitionsAreConsistent(TransitionArray old_transitions,
- Object transitions);
+ static void CheckNewTransitionsAreConsistent(Isolate* isolate,
+ Handle<Map> map,
+ Object transitions);
bool IsConsistentWithBackPointers();
bool IsSortedNoDuplicates();
#endif
@@ -159,17 +169,14 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
kFullTransitionArray,
};
- inline void Reload();
-
- inline Encoding encoding() {
- DCHECK(!needs_reload_);
- return encoding_;
- }
+ inline Encoding encoding() { return encoding_; }
inline int Capacity();
inline TransitionArray transitions();
+ DISALLOW_GARBAGE_COLLECTION(no_gc_)
+
private:
friend class MarkCompactCollector; // For HasSimpleTransitionTo.
friend class third_party_heap::Impl;
@@ -177,44 +184,44 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
static inline Encoding GetEncoding(Isolate* isolate,
MaybeObject raw_transitions);
+ static inline Encoding GetEncoding(Isolate* isolate, TransitionArray array);
+ static inline Encoding GetEncoding(Isolate* isolate, Handle<Map> map);
- inline PropertyDetails GetSimpleTargetDetails(Map transition);
+ static inline TransitionArray GetTransitionArray(Isolate* isolate,
+ MaybeObject raw_transitions);
+ static inline TransitionArray GetTransitionArray(Isolate* isolate,
+ Handle<Map> map);
+ static inline Map GetSimpleTransition(Isolate* isolate, Handle<Map> map);
static inline Name GetSimpleTransitionKey(Map transition);
+ inline PropertyDetails GetSimpleTargetDetails(Map transition);
static inline Map GetTargetFromRaw(MaybeObject raw);
- void MarkNeedsReload() {
-#if DEBUG
- needs_reload_ = true;
-#endif
- }
+ static void EnsureHasFullTransitionArray(Isolate* isolate, Handle<Map> map);
+ static void SetPrototypeTransitions(Isolate* isolate, Handle<Map> map,
+ Handle<WeakFixedArray> proto_transitions);
+ static WeakFixedArray GetPrototypeTransitions(Isolate* isolate,
+ Handle<Map> map);
- inline void Initialize();
+ static inline void ReplaceTransitions(Isolate* isolate, Handle<Map> map,
+ MaybeObject new_transitions);
+ static inline void ReplaceTransitions(
+ Isolate* isolate, Handle<Map> map,
+ Handle<TransitionArray> new_transitions);
- inline Map GetSimpleTransition();
bool HasSimpleTransitionTo(Map map);
- void ReplaceTransitions(MaybeObject new_transitions);
-
inline Map GetTargetMapFromWeakRef();
- void EnsureHasFullTransitionArray();
- void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
- WeakFixedArray GetPrototypeTransitions();
-
void TraverseTransitionTreeInternal(const TraverseCallback& callback,
DisallowGarbageCollection* no_gc);
Isolate* isolate_;
- Handle<Map> map_handle_;
Map map_;
MaybeObject raw_transitions_;
Encoding encoding_;
bool concurrent_access_;
-#if DEBUG
- bool needs_reload_;
-#endif
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionsAccessor);
};
diff --git a/deps/v8/src/objects/turbofan-types.tq b/deps/v8/src/objects/turbofan-types.tq
index 05e93918a0..861346db49 100644
--- a/deps/v8/src/objects/turbofan-types.tq
+++ b/deps/v8/src/objects/turbofan-types.tq
@@ -4,6 +4,11 @@
#include "src/objects/turbofan-types.h"
+const kMaxIntPtr: constexpr IntegerLiteral
+ generates 'IntegerLiteral(ca_.Is64() ? 0x7FFFFFFFFFFFFFFF : 0x7FFFFFFF)';
+const kMinIntPtr: constexpr IntegerLiteral
+ generates 'IntegerLiteral(ca_.Is64() ? 0x8000000000000000 : 0x80000000)';
+
@export
@abstract
class TurbofanType extends HeapObject {
@@ -33,7 +38,8 @@ bitfield struct TurbofanTypeLowBits extends uint32 {
other_undetectable: bool: 1 bit;
callable_proxy: bool: 1 bit;
other_proxy: bool: 1 bit;
- function: bool: 1 bit;
+ callable_function: bool: 1 bit;
+ class_constructor: bool: 1 bit;
bound_function: bool: 1 bit;
hole: bool: 1 bit;
other_internal: bool: 1 bit;
@@ -43,12 +49,11 @@ bitfield struct TurbofanTypeLowBits extends uint32 {
other_unsigned_big_int_64: bool: 1 bit;
negative_big_int_63: bool: 1 bit;
other_big_int: bool: 1 bit;
- sandboxed_external_pointer: bool: 1 bit;
- caged_pointer: bool: 1 bit;
+ wasm_object: bool: 1 bit;
}
bitfield struct TurbofanTypeHighBits extends uint32 {
- wasm_object: bool: 1 bit;
+ sandboxed_pointer: bool: 1 bit;
}
@export
@@ -80,7 +85,7 @@ class TurbofanOtherNumberConstantType extends TurbofanType {
}
macro IsMinusZero(x: float64): bool {
- return x == 0 && 1 / x < 0;
+ return x == 0 && 1.0 / x < 0;
}
macro TestTurbofanBitsetType(
@@ -94,13 +99,13 @@ macro TestTurbofanBitsetType(
if (IsInteger(value)) {
if (IsMinusZero(valueF)) {
return bitsetLow.minus_zero;
- } else if (valueF < Convert<float64>(-0x80000000)) {
+ } else if (valueF < -0x80000000) {
return bitsetLow.other_number;
} else if (valueF < -0x40000000) {
return bitsetLow.other_signed32;
} else if (valueF < 0) {
return bitsetLow.negative31;
- } else if (valueF < Convert<float64>(0x40000000)) {
+ } else if (valueF < 0x40000000) {
return bitsetLow.unsigned30;
} else if (valueF < 0x80000000) {
return bitsetLow.other_unsigned31;
@@ -138,8 +143,12 @@ macro TestTurbofanBitsetType(
return Is<Callable>(proxy) ? bitsetLow.callable_proxy :
bitsetLow.other_proxy;
}
- case (JSFunction): {
- return bitsetLow.function;
+ case (fun: JSFunction): {
+ if (fun.shared_function_info.flags.is_class_constructor) {
+ return bitsetLow.class_constructor;
+ } else {
+ return bitsetLow.callable_function;
+ }
}
case (JSBoundFunction): {
return bitsetLow.bound_function;
@@ -150,11 +159,37 @@ macro TestTurbofanBitsetType(
case (JSArray): {
return bitsetLow.array;
}
- case (BigInt): {
- // TODO (tebbi): Distinguish different BigInt types.
- return bitsetLow.unsigned_big_int_63 |
- bitsetLow.other_unsigned_big_int_64 | bitsetLow.negative_big_int_63 |
- bitsetLow.other_big_int;
+ case (bi: BigInt): {
+ dcheck(!bitsetLow.other_big_int || bitsetLow.other_unsigned_big_int_64);
+ dcheck(!bitsetLow.other_big_int || bitsetLow.negative_big_int_63);
+ dcheck(
+ !bitsetLow.other_unsigned_big_int_64 ||
+ bitsetLow.unsigned_big_int_63);
+ dcheck(!bitsetLow.negative_big_int_63 || bitsetLow.unsigned_big_int_63);
+
+ // On 32 bit architectures, [Un]signedBigInt64 types are not used, yet.
+ if (!Is64()) {
+ return bitsetLow.other_big_int;
+ }
+
+ const length = bigint::ReadBigIntLength(bi);
+ if (length > 1) {
+ return bitsetLow.other_big_int;
+ } else if (length == 0) {
+ return bitsetLow.unsigned_big_int_63;
+ }
+ dcheck(length == 1);
+ const sign = bigint::ReadBigIntSign(bi);
+ const digit = bigint::LoadBigIntDigit(bi, 0);
+ if (sign == bigint::kPositiveSign) {
+ return bitsetLow.other_unsigned_big_int_64 ||
+ (digit <= Convert<uintptr>(kMaxIntPtr) &&
+ bitsetLow.unsigned_big_int_63);
+ } else {
+ return bitsetLow.other_big_int ||
+ (digit <= Convert<uintptr>(kMinIntPtr) &&
+ bitsetLow.negative_big_int_63);
+ }
}
case (object: JSObject): {
if (object.map.IsUndetectable()) {
@@ -167,7 +202,7 @@ macro TestTurbofanBitsetType(
}
@if(V8_ENABLE_WEBASSEMBLY)
case (WasmObject): {
- return bitsetHigh.wasm_object;
+ return bitsetLow.wasm_object;
}
case (Object): {
return false;
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index a82582a48d..7e7a93999e 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -25,6 +25,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-struct-inl.h"
#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -49,6 +50,8 @@ namespace internal {
// Version 12: regexp and string objects share normal string encoding
// Version 13: host objects have an explicit tag (rather than handling all
// unknown tags)
+// Version 14: flags for JSArrayBufferViews
+// Version 15: support for shared objects with an explicit tag
//
// WARNING: Increasing this value is a change which cannot safely be rolled
// back without breaking compatibility with data stored on disk. It is
@@ -57,7 +60,7 @@ namespace internal {
//
// Recent changes are routinely reverted in preparation for branch, and this
// has been the cause of at least one bug in the past.
-static const uint32_t kLatestVersion = 13;
+static const uint32_t kLatestVersion = 15;
static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(),
"Exported format version must match latest version.");
@@ -153,6 +156,8 @@ enum class SerializationTag : uint8_t {
kArrayBufferView = 'V',
// Shared array buffer. transferID:uint32_t
kSharedArrayBuffer = 'u',
+ // A HeapObject shared across Isolates. sharedValueID:uint32_t
+ kSharedObject = 'p',
// A wasm module object transfer. next value is its index.
kWasmModuleTransfer = 'w',
// The delegate is responsible for processing all following data.
@@ -244,6 +249,7 @@ ValueSerializer::ValueSerializer(Isolate* isolate,
v8::ValueSerializer::Delegate* delegate)
: isolate_(isolate),
delegate_(delegate),
+ supports_shared_values_(delegate && delegate->SupportsSharedValues()),
zone_(isolate->allocator(), ZONE_NAME),
id_map_(isolate->heap(), ZoneAllocationPolicy(&zone_)),
array_buffer_transfer_map_(isolate->heap(),
@@ -413,7 +419,8 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
}
DCHECK(object->IsHeapObject());
- InstanceType instance_type = HeapObject::cast(*object).map().instance_type();
+ InstanceType instance_type =
+ HeapObject::cast(*object).map(isolate_).instance_type();
switch (instance_type) {
case ODDBALL_TYPE:
WriteOddball(Oddball::cast(*object));
@@ -443,13 +450,16 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
}
default:
if (InstanceTypeChecker::IsString(instance_type)) {
- WriteString(Handle<String>::cast(object));
+ auto string = Handle<String>::cast(object);
+ if (FLAG_shared_string_table && supports_shared_values_) {
+ return WriteSharedObject(String::Share(isolate_, string));
+ }
+ WriteString(string);
return ThrowIfOutOfMemory();
} else if (InstanceTypeChecker::IsJSReceiver(instance_type)) {
return WriteJSReceiver(Handle<JSReceiver>::cast(object));
} else {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
}
}
}
@@ -530,8 +540,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
InstanceType instance_type = receiver->map().instance_type();
if (receiver->IsCallable() || (IsSpecialReceiverInstanceType(instance_type) &&
instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
}
// If we are at the end of the stack, abort. This function may recurse.
@@ -554,7 +563,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
case JS_API_OBJECT_TYPE: {
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
- if (JSObject::GetEmbedderFieldCount(js_object->map())) {
+ if (JSObject::GetEmbedderFieldCount(js_object->map(isolate_))) {
return WriteHostObject(js_object);
} else {
return WriteJSObject(js_object);
@@ -582,6 +591,8 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
case JS_ERROR_TYPE:
return WriteJSError(Handle<JSObject>::cast(receiver));
+ case JS_SHARED_STRUCT_TYPE:
+ return WriteJSSharedStruct(Handle<JSSharedStruct>::cast(receiver));
#if V8_ENABLE_WEBASSEMBLY
case WASM_MODULE_OBJECT_TYPE:
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
@@ -597,14 +608,13 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
break;
}
- ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
}
Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
DCHECK(!object->map().IsCustomElementsReceiverMap());
const bool can_serialize_fast =
- object->HasFastProperties() && object->elements().length() == 0;
+ object->HasFastProperties(isolate_) && object->elements().length() == 0;
if (!can_serialize_fast) return WriteJSObjectSlow(object);
Handle<Map> map(object->map(), isolate_);
@@ -616,7 +626,7 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
bool map_changed = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
Handle<Name> key(map->instance_descriptors(isolate_).GetKey(i), isolate_);
- if (!key->IsString()) continue;
+ if (!key->IsString(isolate_)) continue;
PropertyDetails details = map->instance_descriptors(isolate_).GetDetails(i);
if (details.IsDontEnum()) continue;
@@ -626,8 +636,8 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
details.location() == PropertyLocation::kField)) {
DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- value = JSObject::FastPropertyAt(object, details.representation(),
- field_index);
+ value = JSObject::FastPropertyAt(isolate_, object,
+ details.representation(), field_index);
} else {
// This logic should essentially match WriteJSObjectPropertiesSlow.
// If the property is no longer found, do not serialize it.
@@ -785,24 +795,28 @@ void ValueSerializer::WriteJSDate(JSDate date) {
Maybe<bool> ValueSerializer::WriteJSPrimitiveWrapper(
Handle<JSPrimitiveWrapper> value) {
- Object inner_value = value->value();
- if (inner_value.IsTrue(isolate_)) {
- WriteTag(SerializationTag::kTrueObject);
- } else if (inner_value.IsFalse(isolate_)) {
- WriteTag(SerializationTag::kFalseObject);
- } else if (inner_value.IsNumber()) {
- WriteTag(SerializationTag::kNumberObject);
- WriteDouble(inner_value.Number());
- } else if (inner_value.IsBigInt()) {
- WriteTag(SerializationTag::kBigIntObject);
- WriteBigIntContents(BigInt::cast(inner_value));
- } else if (inner_value.IsString()) {
- WriteTag(SerializationTag::kStringObject);
- WriteString(handle(String::cast(inner_value), isolate_));
- } else {
- DCHECK(inner_value.IsSymbol());
- ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
- return Nothing<bool>();
+ PtrComprCageBase cage_base(isolate_);
+ {
+ DisallowGarbageCollection no_gc;
+ Object inner_value = value->value();
+ if (inner_value.IsTrue(isolate_)) {
+ WriteTag(SerializationTag::kTrueObject);
+ } else if (inner_value.IsFalse(isolate_)) {
+ WriteTag(SerializationTag::kFalseObject);
+ } else if (inner_value.IsNumber(cage_base)) {
+ WriteTag(SerializationTag::kNumberObject);
+ WriteDouble(inner_value.Number());
+ } else if (inner_value.IsBigInt(cage_base)) {
+ WriteTag(SerializationTag::kBigIntObject);
+ WriteBigIntContents(BigInt::cast(inner_value));
+ } else if (inner_value.IsString(cage_base)) {
+ WriteTag(SerializationTag::kStringObject);
+ WriteString(handle(String::cast(inner_value), isolate_));
+ } else {
+ AllowGarbageCollection allow_gc;
+ DCHECK(inner_value.IsSymbol());
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
+ }
}
return ThrowIfOutOfMemory();
}
@@ -813,20 +827,22 @@ void ValueSerializer::WriteJSRegExp(Handle<JSRegExp> regexp) {
WriteVarint(static_cast<uint32_t>(regexp->flags()));
}
-Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
+Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> js_map) {
// First copy the key-value pairs, since getters could mutate them.
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()), isolate_);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(js_map->table()), isolate_);
int length = table->NumberOfElements() * 2;
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowGarbageCollection no_gc;
+ OrderedHashMap raw_table = *table;
+ FixedArray raw_entries = *entries;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int result_index = 0;
- for (InternalIndex entry : table->IterateEntries()) {
- Object key = table->KeyAt(entry);
+ for (InternalIndex entry : raw_table.IterateEntries()) {
+ Object key = raw_table.KeyAt(entry);
if (key == the_hole) continue;
- entries->set(result_index++, key);
- entries->set(result_index++, table->ValueAt(entry));
+ raw_entries.set(result_index++, key);
+ raw_entries.set(result_index++, raw_table.ValueAt(entry));
}
DCHECK_EQ(result_index, length);
}
@@ -843,19 +859,21 @@ Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
return ThrowIfOutOfMemory();
}
-Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
+Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> js_set) {
// First copy the element pointers, since getters could mutate them.
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()), isolate_);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(js_set->table()), isolate_);
int length = table->NumberOfElements();
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowGarbageCollection no_gc;
+ OrderedHashSet raw_table = *table;
+ FixedArray raw_entries = *entries;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int result_index = 0;
- for (InternalIndex entry : table->IterateEntries()) {
- Object key = table->KeyAt(entry);
+ for (InternalIndex entry : raw_table.IterateEntries()) {
+ Object key = raw_table.KeyAt(entry);
if (key == the_hole) continue;
- entries->set(result_index++, key);
+ raw_entries.set(result_index++, key);
}
DCHECK_EQ(result_index, length);
}
@@ -876,8 +894,8 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
Handle<JSArrayBuffer> array_buffer) {
if (array_buffer->is_shared()) {
if (!delegate_) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError,
+ array_buffer);
}
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
@@ -890,9 +908,8 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
return ThrowIfOutOfMemory();
}
if (!array_buffer->is_detachable()) {
- ThrowDataCloneError(
+ return ThrowDataCloneError(
MessageTemplate::kDataCloneErrorNonDetachableArrayBuffer);
- return Nothing<bool>();
}
uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
@@ -902,13 +919,12 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
return ThrowIfOutOfMemory();
}
if (array_buffer->was_detached()) {
- ThrowDataCloneError(MessageTemplate::kDataCloneErrorDetachedArrayBuffer);
- return Nothing<bool>();
+ return ThrowDataCloneError(
+ MessageTemplate::kDataCloneErrorDetachedArrayBuffer);
}
double byte_length = array_buffer->byte_length();
if (byte_length > std::numeric_limits<uint32_t>::max()) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
}
WriteTag(SerializationTag::kArrayBuffer);
WriteVarint<uint32_t>(byte_length);
@@ -938,11 +954,7 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
WriteVarint(static_cast<uint8_t>(tag));
WriteVarint(static_cast<uint32_t>(view.byte_offset()));
WriteVarint(static_cast<uint32_t>(view.byte_length()));
- // TODO(crbug.com/v8/12532): Re-enable the flags serialization logic below.
- // Bump the serialization format version number when doing so, and preserve
- // logic and tests for reading from the old format.
- //
- // WriteVarint(static_cast<uint32_t>(view.bit_field()));
+ WriteVarint(static_cast<uint32_t>(view.bit_field()));
return ThrowIfOutOfMemory();
}
@@ -1016,11 +1028,16 @@ Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
return ThrowIfOutOfMemory();
}
+Maybe<bool> ValueSerializer::WriteJSSharedStruct(
+ Handle<JSSharedStruct> shared_struct) {
+ // TODO(v8:12547): Support copying serialization for shared structs as well.
+ return WriteSharedObject(shared_struct);
+}
+
#if V8_ENABLE_WEBASSEMBLY
Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
if (delegate_ == nullptr) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
}
// TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
@@ -1040,8 +1057,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
if (!object->array_buffer().is_shared()) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
}
GlobalBackingStoreRegistry::Register(
@@ -1053,6 +1069,22 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+Maybe<bool> ValueSerializer::WriteSharedObject(Handle<HeapObject> object) {
+ DCHECK(object->IsShared());
+ DCHECK(supports_shared_values_);
+ DCHECK_NOT_NULL(delegate_);
+ DCHECK(delegate_->SupportsSharedValues());
+
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Maybe<uint32_t> index =
+ delegate_->GetSharedValueId(v8_isolate, Utils::ToLocal(object));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+
+ WriteTag(SerializationTag::kSharedObject);
+ WriteVarint(index.FromJust());
+ return ThrowIfOutOfMemory();
+}
+
Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
WriteTag(SerializationTag::kHostObject);
if (!delegate_) {
@@ -1096,21 +1128,21 @@ Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
return Just(properties_written);
}
-void ValueSerializer::ThrowDataCloneError(MessageTemplate template_index) {
- return ThrowDataCloneError(template_index,
- isolate_->factory()->empty_string());
-}
-
Maybe<bool> ValueSerializer::ThrowIfOutOfMemory() {
if (out_of_memory_) {
- ThrowDataCloneError(MessageTemplate::kDataCloneErrorOutOfMemory);
- return Nothing<bool>();
+ return ThrowDataCloneError(MessageTemplate::kDataCloneErrorOutOfMemory);
}
return Just(true);
}
-void ValueSerializer::ThrowDataCloneError(MessageTemplate index,
- Handle<Object> arg0) {
+Maybe<bool> ValueSerializer::ThrowDataCloneError(
+ MessageTemplate template_index) {
+ return ThrowDataCloneError(template_index,
+ isolate_->factory()->empty_string());
+}
+
+Maybe<bool> ValueSerializer::ThrowDataCloneError(MessageTemplate index,
+ Handle<Object> arg0) {
Handle<String> message = MessageFormatter::Format(isolate_, index, arg0);
if (delegate_) {
delegate_->ThrowDataCloneError(Utils::ToLocal(message));
@@ -1121,6 +1153,7 @@ void ValueSerializer::ThrowDataCloneError(MessageTemplate index,
if (isolate_->has_scheduled_exception()) {
isolate_->PromoteScheduledException();
}
+ return Nothing<bool>();
}
ValueDeserializer::ValueDeserializer(Isolate* isolate,
@@ -1130,6 +1163,7 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
delegate_(delegate),
position_(data.begin()),
end_(data.end()),
+ supports_shared_values_(delegate && delegate->SupportsSharedValues()),
id_map_(isolate->global_handles()->Create(
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
@@ -1139,6 +1173,7 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
delegate_(nullptr),
position_(data),
end_(data + size),
+ supports_shared_values_(false),
id_map_(isolate->global_handles()->Create(
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
@@ -1200,17 +1235,66 @@ Maybe<T> ValueDeserializer::ReadVarint() {
// See also https://developers.google.com/protocol-buffers/docs/encoding
static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
"Only unsigned integer types can be read as varints.");
+ if (sizeof(T) > 4) return ReadVarintLoop<T>();
+ auto max_read_position = position_ + sizeof(T) + 1;
+ if (V8_UNLIKELY(max_read_position >= end_)) return ReadVarintLoop<T>();
+#ifdef DEBUG
+ // DCHECK code to make sure the manually unrolled loop yields the exact
+ // same end state and result.
+ auto previous_position = position_;
+ T expected_value = ReadVarintLoop<T>().ToChecked();
+ auto expected_position = position_;
+ position_ = previous_position;
+#endif // DEBUG
+#define EXIT_DCHECK() \
+ DCHECK_LE(position_, end_); \
+ DCHECK_EQ(position_, expected_position); \
+ DCHECK_EQ(value, expected_value)
+
+ T value = 0;
+#define ITERATION_SHIFTED(shift) \
+ if (shift < sizeof(T) * 8) { \
+ uint8_t byte = *position_; \
+ position_++; \
+ if (byte < 0x80) { \
+ value |= static_cast<T>(byte) << shift; \
+ EXIT_DCHECK(); \
+ return Just(value); \
+ } else { \
+ value |= static_cast<T>(byte & 0x7F) << shift; \
+ } \
+ }
+ // Manually unroll the loop to achieve the best measured peformance.
+ // This is ~15% faster than ReadVarintLoop.
+ ITERATION_SHIFTED(0);
+ ITERATION_SHIFTED(7);
+ ITERATION_SHIFTED(14);
+ ITERATION_SHIFTED(21);
+ ITERATION_SHIFTED(28);
+
+ EXIT_DCHECK();
+ return Just(value);
+#undef ITERATION_SHIFTED
+#undef EXIT_DCHECK
+}
+
+template <typename T>
+Maybe<T> ValueDeserializer::ReadVarintLoop() {
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be read as varints.");
T value = 0;
unsigned shift = 0;
bool has_another_byte;
do {
if (position_ >= end_) return Nothing<T>();
uint8_t byte = *position_;
+ has_another_byte = byte & 0x80;
if (V8_LIKELY(shift < sizeof(T) * 8)) {
value |= static_cast<T>(byte & 0x7F) << shift;
shift += 7;
+ } else {
+ DCHECK(!has_another_byte);
}
- has_another_byte = byte & 0x80;
position_++;
} while (has_another_byte);
return Just(value);
@@ -1235,8 +1319,9 @@ template EXPORT_TEMPLATE_DEFINE(
Maybe<double> ValueDeserializer::ReadDouble() {
// Warning: this uses host endianness.
- if (sizeof(double) > static_cast<unsigned>(end_ - position_))
+ if (sizeof(double) > static_cast<unsigned>(end_ - position_)) {
return Nothing<double>();
+ }
double value;
memcpy(&value, position_, sizeof(double));
position_ += sizeof(double);
@@ -1244,8 +1329,11 @@ Maybe<double> ValueDeserializer::ReadDouble() {
return Just(value);
}
-Maybe<base::Vector<const uint8_t>> ValueDeserializer::ReadRawBytes(int size) {
- if (size > end_ - position_) return Nothing<base::Vector<const uint8_t>>();
+Maybe<base::Vector<const uint8_t>> ValueDeserializer::ReadRawBytes(
+ size_t size) {
+ if (size > static_cast<size_t>(end_ - position_)) {
+ return Nothing<base::Vector<const uint8_t>>();
+ }
const uint8_t* start = position_;
position_ += size;
return Just(base::Vector<const uint8_t>(start, size));
@@ -1397,6 +1485,13 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
#endif // V8_ENABLE_WEBASSEMBLY
case SerializationTag::kHostObject:
return ReadHostObject();
+ case SerializationTag::kSharedObject:
+ if (version_ >= 15 && supports_shared_values_) {
+ return ReadSharedObject();
+ }
+ // If the delegate doesn't support shared values (e.g. older version, or
+ // is for deserializing from storage), treat the tag as unknown.
+ V8_FALLTHROUGH;
default:
// Before there was an explicit tag for host objects, all unknown tags
// were delegated to the host.
@@ -1411,7 +1506,7 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
MaybeHandle<String> ValueDeserializer::ReadString() {
if (version_ < 12) return ReadUtf8String();
Handle<Object> object;
- if (!ReadObject().ToHandle(&object) || !object->IsString()) {
+ if (!ReadObject().ToHandle(&object) || !object->IsString(isolate_)) {
return MaybeHandle<String>();
}
return Handle<String>::cast(object);
@@ -1428,38 +1523,34 @@ MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() {
return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage);
}
-MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
+MaybeHandle<String> ValueDeserializer::ReadUtf8String(
+ AllocationType allocation) {
uint32_t utf8_length;
+ if (!ReadVarint<uint32_t>().To(&utf8_length)) return {};
+ // utf8_length is checked in ReadRawBytes.
base::Vector<const uint8_t> utf8_bytes;
- if (!ReadVarint<uint32_t>().To(&utf8_length) ||
- utf8_length >
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- !ReadRawBytes(utf8_length).To(&utf8_bytes)) {
- return MaybeHandle<String>();
- }
+ if (!ReadRawBytes(utf8_length).To(&utf8_bytes)) return {};
return isolate_->factory()->NewStringFromUtf8(
- base::Vector<const char>::cast(utf8_bytes));
+ base::Vector<const char>::cast(utf8_bytes), allocation);
}
-MaybeHandle<String> ValueDeserializer::ReadOneByteString() {
+MaybeHandle<String> ValueDeserializer::ReadOneByteString(
+ AllocationType allocation) {
uint32_t byte_length;
base::Vector<const uint8_t> bytes;
- if (!ReadVarint<uint32_t>().To(&byte_length) ||
- byte_length >
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- !ReadRawBytes(byte_length).To(&bytes)) {
- return MaybeHandle<String>();
- }
- return isolate_->factory()->NewStringFromOneByte(bytes);
+ if (!ReadVarint<uint32_t>().To(&byte_length)) return {};
+ // byte_length is checked in ReadRawBytes.
+ if (!ReadRawBytes(byte_length).To(&bytes)) return {};
+ return isolate_->factory()->NewStringFromOneByte(bytes, allocation);
}
-MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
+MaybeHandle<String> ValueDeserializer::ReadTwoByteString(
+ AllocationType allocation) {
uint32_t byte_length;
base::Vector<const uint8_t> bytes;
- if (!ReadVarint<uint32_t>().To(&byte_length) ||
- byte_length >
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- byte_length % sizeof(base::uc16) != 0 ||
+ if (!ReadVarint<uint32_t>().To(&byte_length)) return {};
+ // byte_length is checked in ReadRawBytes.
+ if (byte_length % sizeof(base::uc16) != 0 ||
!ReadRawBytes(byte_length).To(&bytes)) {
return MaybeHandle<String>();
}
@@ -1469,7 +1560,7 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
if (byte_length == 0) return isolate_->factory()->empty_string();
Handle<SeqTwoByteString> string;
if (!isolate_->factory()
- ->NewRawTwoByteString(byte_length / sizeof(base::uc16))
+ ->NewRawTwoByteString(byte_length / sizeof(base::uc16), allocation)
.ToHandle(&string)) {
return MaybeHandle<String>();
}
@@ -1489,10 +1580,13 @@ bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
SerializationTag tag;
uint32_t byte_length;
base::Vector<const uint8_t> bytes;
- if (!ReadTag().To(&tag) || !ReadVarint<uint32_t>().To(&byte_length) ||
- byte_length >
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- !ReadRawBytes(byte_length).To(&bytes)) {
+ if (!ReadTag().To(&tag) || !ReadVarint<uint32_t>().To(&byte_length)) {
+ return {};
+ }
+ // Length is also checked in ReadRawBytes.
+ DCHECK_LE(byte_length,
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
+ if (!ReadRawBytes(byte_length).To(&bytes)) {
position_ = original_position;
return false;
}
@@ -1600,6 +1694,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
AddObjectWithID(id, array);
Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
+ auto elements_length = static_cast<uint32_t>(elements->length());
for (uint32_t i = 0; i < length; i++) {
SerializationTag tag;
if (PeekTag().To(&tag) && tag == SerializationTag::kTheHole) {
@@ -1616,9 +1711,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
if (version_ < 11 && element->IsUndefined(isolate_)) continue;
// Safety check.
- if (i >= static_cast<uint32_t>(elements->length())) {
- return MaybeHandle<JSArray>();
- }
+ if (i >= elements_length) return MaybeHandle<JSArray>();
elements->set(i, *element);
}
@@ -1868,8 +1961,6 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
uint32_t byte_offset = 0;
uint32_t byte_length = 0;
uint32_t flags = 0;
- // TODO(crbug.com/v8/12532): Read `flags` from the serialized value, when we
- // restore the logic for serializing them.
if (!ReadVarint<uint8_t>().To(&tag) ||
!ReadVarint<uint32_t>().To(&byte_offset) ||
!ReadVarint<uint32_t>().To(&byte_length) ||
@@ -1877,6 +1968,9 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
byte_length > buffer_byte_length - byte_offset) {
return MaybeHandle<JSArrayBufferView>();
}
+ if (version_ >= 14 && !ReadVarint<uint32_t>().To(&flags)) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
uint32_t id = next_id_++;
ExternalArrayType external_array_type = kExternalInt8Array;
unsigned element_size = 0;
@@ -1909,6 +2003,8 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
MaybeHandle<Object> ValueDeserializer::ReadJSError() {
+ uint32_t id = next_id_++;
+
Handle<Object> message = isolate_->factory()->undefined_value();
Handle<Object> options = isolate_->factory()->undefined_value();
Handle<Object> stack = isolate_->factory()->undefined_value();
@@ -1978,20 +2074,16 @@ MaybeHandle<Object> ValueDeserializer::ReadJSError() {
}
}
- Handle<Object> error;
+ Handle<JSObject> error;
if (!ErrorUtils::Construct(isolate_, constructor, constructor, message,
options, SKIP_NONE, no_caller,
- ErrorUtils::StackTraceCollection::kNone)
+ ErrorUtils::StackTraceCollection::kDisabled)
.ToHandle(&error)) {
return MaybeHandle<Object>();
}
- if (Object::SetProperty(
- isolate_, error, isolate_->factory()->stack_trace_symbol(), stack,
- StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
- .is_null()) {
- return MaybeHandle<Object>();
- }
+ ErrorUtils::SetFormattedStack(isolate_, error, stack);
+ AddObjectWithID(id, error);
return error;
}
@@ -2039,13 +2131,34 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
}
Handle<WasmMemoryObject> result =
- WasmMemoryObject::New(isolate_, buffer, maximum_pages);
+ WasmMemoryObject::New(isolate_, buffer, maximum_pages).ToHandleChecked();
AddObjectWithID(id, result);
return result;
}
#endif // V8_ENABLE_WEBASSEMBLY
+MaybeHandle<HeapObject> ValueDeserializer::ReadSharedObject() {
+ STACK_CHECK(isolate_, MaybeHandle<HeapObject>());
+ DCHECK_GE(version_, 15);
+ DCHECK(supports_shared_values_);
+ DCHECK_NOT_NULL(delegate_);
+ DCHECK(delegate_->SupportsSharedValues());
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ uint32_t shared_value_id;
+ Local<Value> shared_value;
+ if (!ReadVarint<uint32_t>().To(&shared_value_id) ||
+ !delegate_->GetSharedValueFromId(v8_isolate, shared_value_id)
+ .ToLocal(&shared_value)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, HeapObject);
+ return MaybeHandle<HeapObject>();
+ }
+ Handle<HeapObject> shared_object =
+ Handle<HeapObject>::cast(Utils::OpenHandle(*shared_value));
+ DCHECK(shared_object->IsShared());
+ return shared_object;
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
if (!delegate_) return MaybeHandle<JSObject>();
STACK_CHECK(isolate_, MaybeHandle<JSObject>());
@@ -2078,8 +2191,11 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
}
}
-static bool IsValidObjectKey(Handle<Object> value) {
- return value->IsName() || value->IsNumber();
+static bool IsValidObjectKey(Object value, Isolate* isolate) {
+ if (value.IsSmi()) return true;
+ auto instance_type = HeapObject::cast(value).map(isolate).instance_type();
+ return InstanceTypeChecker::IsName(instance_type) ||
+ InstanceTypeChecker::IsHeapNumber(instance_type);
}
Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
@@ -2112,20 +2228,25 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
// transition was found.
Handle<Object> key;
Handle<Map> target;
- TransitionsAccessor transitions(isolate_, map);
- Handle<String> expected_key = transitions.ExpectedTransitionKey();
+ Handle<String> expected_key;
+ {
+ TransitionsAccessor transitions(isolate_, *map);
+ expected_key = transitions.ExpectedTransitionKey();
+ if (!expected_key.is_null()) {
+ target = transitions.ExpectedTransitionTarget();
+ }
+ }
if (!expected_key.is_null() && ReadExpectedString(expected_key)) {
key = expected_key;
- target = transitions.ExpectedTransitionTarget();
} else {
- if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+ if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(*key, isolate_)) {
return Nothing<uint32_t>();
}
- if (key->IsString()) {
+ if (key->IsString(isolate_)) {
key =
isolate_->factory()->InternalizeString(Handle<String>::cast(key));
// Don't reuse |transitions| because it could be stale.
- transitioning = TransitionsAccessor(isolate_, map)
+ transitioning = TransitionsAccessor(isolate_, *map)
.FindTransitionToField(Handle<String>::cast(key))
.ToHandle(&target);
} else {
@@ -2202,7 +2323,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
}
Handle<Object> key;
- if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+ if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(*key, isolate_)) {
return Nothing<uint32_t>();
}
Handle<Object> value;
@@ -2253,7 +2374,7 @@ static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
uint32_t num_properties) {
for (unsigned i = 0; i < 2 * num_properties; i += 2) {
Handle<Object> key = data[i];
- if (!IsValidObjectKey(key)) return Nothing<bool>();
+ if (!IsValidObjectKey(*key, isolate)) return Nothing<bool>();
Handle<Object> value = data[i + 1];
PropertyKey lookup_key(isolate, key);
LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index c6363e67c6..7dc8842a09 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_VALUE_SERIALIZER_H_
#include <cstdint>
-#include <vector>
#include "include/v8-value-serializer.h"
#include "src/base/compiler-specific.h"
@@ -31,6 +30,7 @@ class JSMap;
class JSPrimitiveWrapper;
class JSRegExp;
class JSSet;
+class JSSharedStruct;
class Object;
class Oddball;
class Smi;
@@ -132,12 +132,16 @@ class ValueSerializer {
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView array_buffer);
Maybe<bool> WriteJSError(Handle<JSObject> error) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSSharedStruct(Handle<JSSharedStruct> shared_struct)
+ V8_WARN_UNUSED_RESULT;
#if V8_ENABLE_WEBASSEMBLY
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
V8_WARN_UNUSED_RESULT;
#endif // V8_ENABLE_WEBASSEMBLY
+ Maybe<bool> WriteSharedObject(Handle<HeapObject> object)
+ V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteHostObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
/*
@@ -152,9 +156,11 @@ class ValueSerializer {
* Asks the delegate to handle an error that occurred during data cloning, by
* throwing an exception appropriate for the host.
*/
- void ThrowDataCloneError(MessageTemplate template_index);
- V8_NOINLINE void ThrowDataCloneError(MessageTemplate template_index,
- Handle<Object> arg0);
+ V8_NOINLINE Maybe<bool> ThrowDataCloneError(MessageTemplate template_index)
+ V8_WARN_UNUSED_RESULT;
+ V8_NOINLINE Maybe<bool> ThrowDataCloneError(MessageTemplate template_index,
+ Handle<Object> arg0)
+ V8_WARN_UNUSED_RESULT;
Maybe<bool> ThrowIfOutOfMemory();
@@ -163,6 +169,7 @@ class ValueSerializer {
uint8_t* buffer_ = nullptr;
size_t buffer_size_ = 0;
size_t buffer_capacity_ = 0;
+ const bool supports_shared_values_;
bool treat_array_buffer_views_as_host_objects_ = false;
bool out_of_memory_ = false;
Zone zone_;
@@ -241,11 +248,13 @@ class ValueDeserializer {
void ConsumeTag(SerializationTag peeked_tag);
Maybe<SerializationTag> ReadTag() V8_WARN_UNUSED_RESULT;
template <typename T>
- Maybe<T> ReadVarint() V8_WARN_UNUSED_RESULT;
+ V8_INLINE Maybe<T> ReadVarint() V8_WARN_UNUSED_RESULT;
+ template <typename T>
+ V8_NOINLINE Maybe<T> ReadVarintLoop() V8_WARN_UNUSED_RESULT;
template <typename T>
Maybe<T> ReadZigZag() V8_WARN_UNUSED_RESULT;
Maybe<double> ReadDouble() V8_WARN_UNUSED_RESULT;
- Maybe<base::Vector<const uint8_t>> ReadRawBytes(int size)
+ Maybe<base::Vector<const uint8_t>> ReadRawBytes(size_t size)
V8_WARN_UNUSED_RESULT;
// Reads a string if it matches the one provided.
@@ -264,9 +273,12 @@ class ValueDeserializer {
// Reading V8 objects of specific kinds.
// The tag is assumed to have already been read.
MaybeHandle<BigInt> ReadBigInt() V8_WARN_UNUSED_RESULT;
- MaybeHandle<String> ReadUtf8String() V8_WARN_UNUSED_RESULT;
- MaybeHandle<String> ReadOneByteString() V8_WARN_UNUSED_RESULT;
- MaybeHandle<String> ReadTwoByteString() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadUtf8String(
+ AllocationType allocation = AllocationType::kYoung) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadOneByteString(
+ AllocationType allocation = AllocationType::kYoung) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadTwoByteString(
+ AllocationType allocation = AllocationType::kYoung) V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadJSObject() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArray> ReadSparseJSArray() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArray> ReadDenseJSArray() V8_WARN_UNUSED_RESULT;
@@ -287,6 +299,7 @@ class ValueDeserializer {
MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
#endif // V8_ENABLE_WEBASSEMBLY
+ MaybeHandle<HeapObject> ReadSharedObject() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadHostObject() V8_WARN_UNUSED_RESULT;
/*
@@ -306,6 +319,7 @@ class ValueDeserializer {
v8::ValueDeserializer::Delegate* const delegate_;
const uint8_t* position_;
const uint8_t* const end_;
+ const bool supports_shared_values_;
uint32_t version_ = 0;
uint32_t next_id_ = 0;
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index f8a98e7e12..2e4327caff 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -42,6 +42,7 @@ class CodeDataContainer;
V(kWrapperTracing, "(Wrapper tracing)") \
V(kWriteBarrier, "(Write barrier)") \
V(kRetainMaps, "(Retain maps)") \
+ V(kClientHeap, "(Client heap)") \
V(kUnknown, "(Unknown)")
class VisitorSynchronization : public AllStatic {
@@ -180,6 +181,10 @@ class ObjectVisitor {
// Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
+
+ // Visits an external pointer. This is currently only guaranteed to be called
+ // when the sandbox is enabled.
+ virtual void VisitExternalPointer(HeapObject host, ExternalPointer_t ptr) {}
};
// Helper version of ObjectVisitor that also takes care of caching base values
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 13586e139c..069e31491c 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -1,4 +1,3 @@
-gsathya@chromium.org
leszeks@chromium.org
marja@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index c49fae7519..8a4affa0b8 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -758,6 +758,8 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
kind == FunctionKind::kArrowFunction);
DCHECK(this->CanBeDeclaration());
DCHECK(!this->IsCertainlyDeclaration());
+ // clear last next_arrow_function_info tracked strict parameters error.
+ parser->next_arrow_function_info_.ClearStrictParameterError();
}
ArrowHeadParsingScope(const ArrowHeadParsingScope&) = delete;
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index b61224fbbb..aff064b8c8 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -60,7 +60,7 @@ AstConsString* FuncNameInferrer::MakeNameFromStack() {
continue;
}
// Add name. Separate names with ".".
- Zone* zone = ast_value_factory_->zone();
+ Zone* zone = ast_value_factory_->single_parse_zone();
if (!result->IsEmpty()) {
result->AddString(zone, ast_value_factory_->dot_string());
}
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index a8ebe59474..2c7f1d5817 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -8,23 +8,29 @@
#include <vector>
#include "src/base/macros.h"
-#include "src/utils/pointer-with-payload.h"
+#include "src/base/pointer-with-payload.h"
namespace v8 {
+
+namespace internal {
+class AstRawString;
+}
+
+namespace base {
+template <>
+struct PointerWithPayloadTraits<v8::internal::AstRawString> {
+ static constexpr int kAvailableBits = 2;
+};
+} // namespace base
+
namespace internal {
class AstConsString;
-class AstRawString;
class AstValueFactory;
class FunctionLiteral;
enum class InferName { kYes, kNo };
-template <>
-struct PointerWithPayloadTraits<AstRawString> {
- static constexpr int value = 2;
-};
-
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
@@ -105,7 +111,7 @@ class FuncNameInferrer {
Name(const AstRawString* name, NameType type)
: name_and_type_(name, type) {}
- PointerWithPayload<const AstRawString, NameType, 2> name_and_type_;
+ base::PointerWithPayload<const AstRawString, NameType, 2> name_and_type_;
inline const AstRawString* name() const {
return name_and_type_.GetPointer();
}
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 048948ed3c..e706704d26 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -176,9 +176,12 @@ ReusableUnoptimizedCompileState::ReusableUnoptimizedCompileState(
logger_(isolate->logger()),
dispatcher_(isolate->lazy_compile_dispatcher()),
ast_string_constants_(isolate->ast_string_constants()),
- zone_(allocator_, "unoptimized-compile-zone"),
+ ast_raw_string_zone_(allocator_,
+ "unoptimized-compile-ast-raw-string-zone"),
+ single_parse_zone_(allocator_, "unoptimized-compile-parse-zone"),
ast_value_factory_(
- new AstValueFactory(zone(), ast_string_constants(), hash_seed())) {}
+ new AstValueFactory(ast_raw_string_zone(), single_parse_zone(),
+ ast_string_constants(), hash_seed())) {}
ReusableUnoptimizedCompileState::ReusableUnoptimizedCompileState(
LocalIsolate* isolate)
@@ -187,9 +190,12 @@ ReusableUnoptimizedCompileState::ReusableUnoptimizedCompileState(
logger_(isolate->main_thread_logger()),
dispatcher_(isolate->lazy_compile_dispatcher()),
ast_string_constants_(isolate->ast_string_constants()),
- zone_(allocator_, "unoptimized-compile-zone"),
+ ast_raw_string_zone_(allocator_,
+ "unoptimized-compile-ast-raw-string-zone"),
+ single_parse_zone_(allocator_, "unoptimized-compile-parse-zone"),
ast_value_factory_(
- new AstValueFactory(zone(), ast_string_constants(), hash_seed())) {}
+ new AstValueFactory(ast_raw_string_zone(), single_parse_zone(),
+ ast_string_constants(), hash_seed())) {}
ReusableUnoptimizedCompileState::~ReusableUnoptimizedCompileState() = default;
@@ -235,7 +241,7 @@ ParseInfo::ParseInfo(LocalIsolate* isolate, const UnoptimizedCompileFlags flags,
: ParseInfo(flags, state, reusable_state, stack_limit,
isolate->runtime_call_stats()) {}
-ParseInfo::~ParseInfo() = default;
+ParseInfo::~ParseInfo() { reusable_state_->NotifySingleParseCompleted(); }
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 8ceccdd514..9a942a21a1 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -5,9 +5,7 @@
#ifndef V8_PARSING_PARSE_INFO_H_
#define V8_PARSING_PARSE_INFO_H_
-#include <map>
#include <memory>
-#include <vector>
#include "src/base/bit-field.h"
#include "src/base/export-template.h"
@@ -184,7 +182,21 @@ class V8_EXPORT_PRIVATE ReusableUnoptimizedCompileState {
explicit ReusableUnoptimizedCompileState(LocalIsolate* isolate);
~ReusableUnoptimizedCompileState();
- Zone* zone() { return &zone_; }
+ // The AstRawString Zone stores the AstRawStrings in the AstValueFactory that
+ // can be reused across parses, and thereforce should stay alive between
+ // parses that reuse this reusable state and its AstValueFactory.
+ Zone* ast_raw_string_zone() { return &ast_raw_string_zone_; }
+
+ // The single parse Zone stores the data of a single parse, and can be cleared
+ // when that parse completes.
+ //
+ // This is in "reusable" state despite being wiped per-parse, because it
+ // allows us to reuse the Zone itself, and e.g. keep the same single parse
+ // Zone pointer in the AstValueFactory.
+ Zone* single_parse_zone() { return &single_parse_zone_; }
+
+ void NotifySingleParseCompleted() { single_parse_zone_.Reset(); }
+
AstValueFactory* ast_value_factory() const {
return ast_value_factory_.get();
}
@@ -202,7 +214,8 @@ class V8_EXPORT_PRIVATE ReusableUnoptimizedCompileState {
Logger* logger_;
LazyCompileDispatcher* dispatcher_;
const AstStringConstants* ast_string_constants_;
- Zone zone_;
+ Zone ast_raw_string_zone_;
+ Zone single_parse_zone_;
std::unique_ptr<AstValueFactory> ast_value_factory_;
};
@@ -226,7 +239,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
ScriptOriginOptions origin_options,
NativesFlag natives = NOT_NATIVES_CODE);
- Zone* zone() const { return reusable_state_->zone(); }
+ Zone* zone() const { return reusable_state_->single_parse_zone(); }
const UnoptimizedCompileFlags& flags() const { return flags_; }
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 7240e64777..df829ff8ca 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -15,6 +15,7 @@
#include "src/ast/scopes.h"
#include "src/base/flags.h"
#include "src/base/hashmap.h"
+#include "src/base/pointer-with-payload.h"
#include "src/base/v8-fallthrough.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
@@ -28,7 +29,6 @@
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
#include "src/regexp/regexp.h"
-#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -300,7 +300,7 @@ class ParserBase {
void ResetFunctionLiteralId() { function_literal_id_ = 0; }
// The Zone where the parsing outputs are stored.
- Zone* main_zone() const { return ast_value_factory()->zone(); }
+ Zone* main_zone() const { return ast_value_factory()->single_parse_zone(); }
// The current Zone, which might be the main zone or a temporary Zone.
Zone* zone() const { return zone_; }
@@ -482,7 +482,7 @@ class ParserBase {
}
private:
- PointerWithPayload<FunctionState, bool, 1> state_and_prev_value_;
+ base::PointerWithPayload<FunctionState, bool, 1> state_and_prev_value_;
};
class V8_NODISCARD LoopScope final {
@@ -1234,13 +1234,11 @@ class ParserBase {
ExpressionT ParseArrowFunctionLiteral(const FormalParametersT& parameters);
void ParseAsyncFunctionBody(Scope* scope, StatementListT* body);
ExpressionT ParseAsyncFunctionLiteral();
- ExpressionT ParseClassLiteral(IdentifierT name,
+ ExpressionT ParseClassExpression(Scope* outer_scope);
+ ExpressionT ParseClassLiteral(Scope* outer_scope, IdentifierT name,
Scanner::Location class_name_location,
bool name_is_strict_reserved,
int class_token_pos);
- ExpressionT DoParseClassLiteral(ClassScope* class_scope, IdentifierT name,
- Scanner::Location class_name_location,
- bool is_anonymous, int class_token_pos);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged);
ExpressionT ParseSuperExpression();
@@ -1974,7 +1972,11 @@ ParserBase<Impl>::ParsePrimaryExpression() {
case Token::LPAREN: {
Consume(Token::LPAREN);
+
if (Check(Token::RPAREN)) {
+ // clear last next_arrow_function_info tracked strict parameters error.
+ next_arrow_function_info_.ClearStrictParameterError();
+
// ()=>x. The continuation that consumes the => is in
// ParseAssignmentExpressionCoverGrammar.
if (peek() != Token::ARROW) ReportUnexpectedToken(Token::RPAREN);
@@ -2006,19 +2008,7 @@ ParserBase<Impl>::ParsePrimaryExpression() {
}
case Token::CLASS: {
- Consume(Token::CLASS);
- int class_token_pos = position();
- IdentifierT name = impl()->NullIdentifier();
- bool is_strict_reserved_name = false;
- Scanner::Location class_name_location = Scanner::Location::invalid();
- if (peek_any_identifier()) {
- name = ParseAndClassifyIdentifier(Next());
- class_name_location = scanner()->location();
- is_strict_reserved_name =
- Token::IsStrictReservedWord(scanner()->current_token());
- }
- return ParseClassLiteral(name, class_name_location,
- is_strict_reserved_name, class_token_pos);
+ return ParseClassExpression(scope());
}
case Token::TEMPLATE_SPAN:
@@ -3776,9 +3766,9 @@ ParserBase<Impl>::ParseSuperExpression() {
impl()->ReportMessage(MessageTemplate::kOptionalChainingNoSuper);
return impl()->FailureExpression();
}
- scope->RecordSuperPropertyUsage();
+ Scope* home_object_scope = scope->RecordSuperPropertyUsage();
UseThis();
- return impl()->NewSuperPropertyReference(pos);
+ return impl()->NewSuperPropertyReference(home_object_scope, pos);
}
// super() is only allowed in derived constructor. new super() is never
// allowed; it's reported as an error by
@@ -4262,7 +4252,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
}
ExpressionParsingScope no_expression_scope(impl());
- ExpressionT value = ParseClassLiteral(name, scanner()->location(),
+ ExpressionT value = ParseClassLiteral(scope(), name, scanner()->location(),
is_strict_reserved, class_token_pos);
no_expression_scope.ValidateExpression();
int end_pos = position();
@@ -4419,6 +4409,16 @@ void ParserBase<Impl>::ParseFunctionBody(
impl()->ReportVarRedeclarationIn(conflict, inner_scope);
}
}
+
+ // According to ES#sec-functiondeclarationinstantiation step 27,28
+ // when hasParameterExpressions is true, we need bind var declared
+ // arguments to "arguments exotic object", so we here first declare
+ // "arguments exotic object", then var declared arguments will be
+ // initialized with "arguments exotic object"
+ if (!IsArrowFunction(kind)) {
+ function_scope->DeclareArguments(ast_value_factory());
+ }
+
impl()->InsertShadowingVarBindingInitializers(inner_block);
}
}
@@ -4427,9 +4427,6 @@ void ParserBase<Impl>::ParseFunctionBody(
allow_duplicate_parameters);
if (!IsArrowFunction(kind)) {
- // Declare arguments after parsing the function since lexical 'arguments'
- // masks the arguments object. Declare arguments before declaring the
- // function var since the arguments object masks 'function arguments'.
function_scope->DeclareArguments(ast_value_factory());
}
@@ -4668,8 +4665,26 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassExpression(
+ Scope* outer_scope) {
+ Consume(Token::CLASS);
+ int class_token_pos = position();
+ IdentifierT name = impl()->NullIdentifier();
+ bool is_strict_reserved_name = false;
+ Scanner::Location class_name_location = Scanner::Location::invalid();
+ if (peek_any_identifier()) {
+ name = ParseAndClassifyIdentifier(Next());
+ class_name_location = scanner()->location();
+ is_strict_reserved_name =
+ Token::IsStrictReservedWord(scanner()->current_token());
+ }
+ return ParseClassLiteral(outer_scope, name, class_name_location,
+ is_strict_reserved_name, class_token_pos);
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
- IdentifierT name, Scanner::Location class_name_location,
+ Scope* outer_scope, IdentifierT name, Scanner::Location class_name_location,
bool name_is_strict_reserved, int class_token_pos) {
bool is_anonymous = impl()->IsNull(name);
@@ -4687,16 +4702,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
}
- ClassScope* class_scope = NewClassScope(scope(), is_anonymous);
- return DoParseClassLiteral(class_scope, name, class_name_location,
- is_anonymous, class_token_pos);
-}
-
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::DoParseClassLiteral(
- ClassScope* class_scope, IdentifierT name,
- Scanner::Location class_name_location, bool is_anonymous,
- int class_token_pos) {
+ ClassScope* class_scope = NewClassScope(outer_scope, is_anonymous);
BlockState block_state(&scope_, class_scope);
RaiseLanguageMode(LanguageMode::kStrict);
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 21a1f695cc..6ee70886a9 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -283,7 +283,8 @@ Expression* Parser::NewThrowError(Runtime::FunctionId id,
return factory()->NewThrow(call_constructor, pos);
}
-Expression* Parser::NewSuperPropertyReference(int pos) {
+Expression* Parser::NewSuperPropertyReference(Scope* home_object_scope,
+ int pos) {
const AstRawString* home_object_name;
if (IsStatic(scope()->GetReceiverScope()->function_kind())) {
home_object_name = ast_value_factory_->dot_static_home_object_string();
@@ -291,7 +292,9 @@ Expression* Parser::NewSuperPropertyReference(int pos) {
home_object_name = ast_value_factory_->dot_home_object_string();
}
return factory()->NewSuperPropertyReference(
- NewUnresolved(home_object_name, pos), pos);
+ home_object_scope->NewHomeObjectVariableProxy(factory(), home_object_name,
+ pos),
+ pos);
}
Expression* Parser::NewSuperCallReference(int pos) {
@@ -829,8 +832,40 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
if (shared_info->HasOuterScopeInfo()) {
maybe_outer_scope_info = handle(shared_info->GetOuterScopeInfo(), isolate);
}
- DeserializeScopeChain(isolate, info, maybe_outer_scope_info,
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+
+ MaybeHandle<ScopeInfo> deserialize_start_scope = maybe_outer_scope_info;
+ bool needs_script_scope_finalization = false;
+ // If the function is a class member initializer and there isn't a
+ // scope mismatch, we will only deserialize up to the outer scope of
+ // the class scope, and regenerate the class scope during reparsing.
+ if (flags().function_kind() ==
+ FunctionKind::kClassMembersInitializerFunction &&
+ shared_info->HasOuterScopeInfo() &&
+ maybe_outer_scope_info.ToHandleChecked()->scope_type() == CLASS_SCOPE &&
+ maybe_outer_scope_info.ToHandleChecked()->StartPosition() ==
+ start_position) {
+ Handle<ScopeInfo> outer_scope_info =
+ maybe_outer_scope_info.ToHandleChecked();
+ if (outer_scope_info->HasOuterScopeInfo()) {
+ deserialize_start_scope =
+ handle(outer_scope_info->OuterScopeInfo(), isolate);
+ } else {
+ // If the class scope doesn't have an outer scope to deserialize, we need
+ // to finalize the script scope without using
+ // Scope::DeserializeScopeChain().
+ deserialize_start_scope = MaybeHandle<ScopeInfo>();
+ needs_script_scope_finalization = true;
+ }
+ }
+
+ DeserializeScopeChain(isolate, info, deserialize_start_scope,
Scope::DeserializationMode::kIncludingVariables);
+ if (needs_script_scope_finalization) {
+ DCHECK_EQ(original_scope_, info->script_scope());
+ Scope::SetScriptScopeInfo(isolate, info->script_scope());
+ }
DCHECK_EQ(factory()->zone(), info->zone());
Handle<Script> script = handle(Script::cast(shared_info->script()), isolate);
@@ -838,8 +873,6 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
maybe_wrapped_arguments_ = handle(script->wrapped_arguments(), isolate);
}
- int start_position = shared_info->StartPosition();
- int end_position = shared_info->EndPosition();
int function_literal_id = shared_info->function_literal_id();
if V8_UNLIKELY (script->type() == Script::TYPE_WEB_SNAPSHOT) {
// Function literal IDs for inner functions haven't been allocated when
@@ -864,11 +897,11 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// will be correctly inherited from the outer scope.
ClassScope::HeritageParsingScope heritage(original_scope_->AsClassScope());
result = DoParseDeserializedFunction(
- isolate, shared_info, info, start_position, end_position,
+ isolate, maybe_outer_scope_info, info, start_position, end_position,
function_literal_id, info->function_name());
} else {
result = DoParseDeserializedFunction(
- isolate, shared_info, info, start_position, end_position,
+ isolate, maybe_outer_scope_info, info, start_position, end_position,
function_literal_id, info->function_name());
}
MaybeProcessSourceRanges(info, result, stack_limit_);
@@ -1033,37 +1066,31 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
}
FunctionLiteral* Parser::DoParseDeserializedFunction(
- Isolate* isolate, Handle<SharedFunctionInfo> shared_info, ParseInfo* info,
- int start_position, int end_position, int function_literal_id,
- const AstRawString* raw_name) {
- if (flags().function_kind() !=
+ Isolate* isolate, MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ ParseInfo* info, int start_position, int end_position,
+ int function_literal_id, const AstRawString* raw_name) {
+ if (flags().function_kind() ==
FunctionKind::kClassMembersInitializerFunction) {
- return DoParseFunction(isolate, info, start_position, end_position,
- function_literal_id, raw_name);
- }
-
- // Reparse the outer class while skipping the non-fields to get a list of
- // ClassLiteralProperty and create a InitializeClassMembersStatement for
- // the synthetic instance initializer function.
- FunctionLiteral* result = ParseClassForInstanceMemberInitialization(
- isolate, original_scope_->AsClassScope(), start_position,
- function_literal_id);
- DCHECK_EQ(result->kind(), FunctionKind::kClassMembersInitializerFunction);
- DCHECK_EQ(result->function_literal_id(), function_literal_id);
- DCHECK_EQ(result->end_position(), shared_info->EndPosition());
-
- // The private_name_lookup_skips_outer_class bit should be set by
- // PostProcessParseResult() during scope analysis later.
- return result;
+ return ParseClassForInstanceMemberInitialization(
+ isolate, maybe_outer_scope_info, start_position, function_literal_id,
+ end_position);
+ }
+
+ return DoParseFunction(isolate, info, start_position, end_position,
+ function_literal_id, raw_name);
}
FunctionLiteral* Parser::ParseClassForInstanceMemberInitialization(
- Isolate* isolate, ClassScope* original_scope, int initializer_pos,
- int initializer_id) {
+ Isolate* isolate, MaybeHandle<ScopeInfo> maybe_class_scope_info,
+ int initializer_pos, int initializer_id, int initializer_end_pos) {
+ // When the function is a kClassMembersInitializerFunction, we record the
+ // source range of the entire class as its positions in its SFI, so at this
+ // point the scanner should be rewound to the position of the class token.
int class_token_pos = initializer_pos;
+ DCHECK_EQ(peek_position(), class_token_pos);
// Insert a FunctionState with the closest outer Declaration scope
- DeclarationScope* nearest_decl_scope = original_scope->GetDeclarationScope();
+ DeclarationScope* nearest_decl_scope = original_scope_->GetDeclarationScope();
DCHECK_NOT_NULL(nearest_decl_scope);
FunctionState function_state(&function_state_, &scope_, nearest_decl_scope);
// We will reindex the function literals later.
@@ -1075,40 +1102,10 @@ FunctionLiteral* Parser::ParseClassForInstanceMemberInitialization(
ExpressionParsingScope no_expression_scope(impl());
- // We will reparse the entire class because we want to know if
- // the class is anonymous.
- // When the function is a kClassMembersInitializerFunction, we record the
- // source range of the entire class as its positions in its SFI, so at this
- // point the scanner should be rewound to the position of the class token.
- DCHECK_EQ(peek(), Token::CLASS);
- Expect(Token::CLASS);
-
- const AstRawString* class_name = NullIdentifier();
- const AstRawString* variable_name = NullIdentifier();
- // It's a reparse so we don't need to check for default export or
- // whether the names are reserved.
- if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
- GetDefaultStrings(&class_name, &variable_name);
- } else {
- class_name = ParseIdentifier();
- variable_name = class_name;
- }
- bool is_anonymous = class_name == nullptr || class_name->IsEmpty();
-
- // Create a new ClassScope for the parser to create the inner scopes,
- // the variable resolution would be done in the original scope, however.
- // TODO(joyee): see if we can reset the original scope to a state that
- // can be reused directly and avoid creating this temporary scope.
- ClassScope* reparsed_scope =
- NewClassScope(original_scope->outer_scope(), is_anonymous);
+ // Reparse the class as an expression to build the instance member
+ // initializer function.
+ Expression* expr = ParseClassExpression(original_scope_);
-#ifdef DEBUG
- original_scope->SetScopeName(class_name);
-#endif
-
- Expression* expr =
- DoParseClassLiteral(reparsed_scope, class_name, scanner()->location(),
- is_anonymous, class_token_pos);
DCHECK(expr->IsClassLiteral());
ClassLiteral* literal = expr->AsClassLiteral();
FunctionLiteral* initializer =
@@ -1121,11 +1118,25 @@ FunctionLiteral* Parser::ParseClassForInstanceMemberInitialization(
no_expression_scope.ValidateExpression();
- // Fix up the scope chain and the references used by the instance member
- // initializer.
- reparsed_scope->ReplaceReparsedClassScope(isolate, ast_value_factory(),
- original_scope);
+ // If the class scope was not optimized away, we know that it allocated
+ // some variables and we need to fix up the allocation info for them.
+ bool needs_allocation_fixup =
+ !maybe_class_scope_info.is_null() &&
+ maybe_class_scope_info.ToHandleChecked()->scope_type() == CLASS_SCOPE &&
+ maybe_class_scope_info.ToHandleChecked()->StartPosition() ==
+ class_token_pos;
+
+ ClassScope* reparsed_scope = literal->scope();
+ reparsed_scope->FinalizeReparsedClassScope(isolate, maybe_class_scope_info,
+ ast_value_factory(),
+ needs_allocation_fixup);
original_scope_ = reparsed_scope;
+
+ DCHECK_EQ(initializer->kind(),
+ FunctionKind::kClassMembersInitializerFunction);
+ DCHECK_EQ(initializer->function_literal_id(), initializer_id);
+ DCHECK_EQ(initializer->end_position(), initializer_end_pos);
+
return initializer;
}
@@ -2972,8 +2983,6 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block,
args.Add(factory()->NewVariableProxy(
function_state_->scope()->generator_object_var()));
args.Add(factory()->NewVariableProxy(catch_scope->catch_variable()));
- args.Add(factory()->NewBooleanLiteral(function_state_->CanSuspend(),
- kNoSourcePosition));
reject_promise = factory()->NewCallRuntime(
Runtime::kInlineAsyncFunctionReject, args, kNoSourcePosition);
}
@@ -3393,7 +3402,8 @@ void Parser::UpdateStatistics(
void Parser::ParseOnBackground(LocalIsolate* isolate, ParseInfo* info,
int start_position, int end_position,
int function_literal_id) {
- RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kParseProgram,
+ RuntimeCallStats::CounterMode::kThreadSpecific);
parsing_on_main_thread_ = false;
DCHECK_NULL(info->literal());
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index c85f2afad7..1d8fa2515d 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -12,6 +12,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/compiler-specific.h"
+#include "src/base/pointer-with-payload.h"
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
#include "src/common/globals.h"
@@ -20,7 +21,6 @@
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
-#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -51,7 +51,7 @@ struct ParserFormalParameters : FormalParametersBase {
position(position),
initializer_end_position(initializer_end_position) {}
- PointerWithPayload<Expression, bool, 1> initializer_and_is_rest;
+ base::PointerWithPayload<Expression, bool, 1> initializer_and_is_rest;
Expression* pattern;
Expression* initializer() const {
@@ -238,13 +238,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* raw_name);
FunctionLiteral* DoParseDeserializedFunction(
- Isolate* isolate, Handle<SharedFunctionInfo> shared_info, ParseInfo* info,
- int start_position, int end_position, int function_literal_id,
- const AstRawString* raw_name);
+ Isolate* isolate, MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ ParseInfo* info, int start_position, int end_position,
+ int function_literal_id, const AstRawString* raw_name);
FunctionLiteral* ParseClassForInstanceMemberInitialization(
- Isolate* isolate, ClassScope* scope, int initializer_pos,
- int initializer_id);
+ Isolate* isolate, MaybeHandle<ScopeInfo> maybe_class_scope_info,
+ int initializer_pos, int initializer_id, int initializer_end_pos);
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Isolate* isolate, ParseInfo* info);
@@ -800,7 +800,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return factory()->NewThisExpression(pos);
}
- Expression* NewSuperPropertyReference(int pos);
+ Expression* NewSuperPropertyReference(Scope* home_object_scope, int pos);
Expression* NewSuperCallReference(int pos);
Expression* NewTargetExpression(int pos);
Expression* ImportMetaExpression(int pos);
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 3ad36e20c1..9739c374e7 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -8,6 +8,7 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
+#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
@@ -529,7 +530,8 @@ class OnHeapProducedPreparseData final : public ProducedPreparseData {
Handle<PreparseData> Serialize(LocalIsolate* isolate) final {
DCHECK(!data_->is_null());
- DCHECK(isolate->heap()->ContainsLocalHandle(data_.location()));
+ DCHECK_IMPLIES(!isolate->is_main_thread(),
+ isolate->heap()->ContainsLocalHandle(data_.location()));
return data_;
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 746802a9aa..ff64f9a5f5 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -1532,7 +1532,8 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::This();
}
- V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
+ V8_INLINE PreParserExpression
+ NewSuperPropertyReference(Scope* home_object_scope, int pos) {
return PreParserExpression::Default();
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 34a8788c57..1bf8be2a09 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -337,20 +337,17 @@ class RelocatingCharacterStream final
RelocatingCharacterStream(Isolate* isolate, size_t pos, TArgs... args)
: UnbufferedCharacterStream<OnHeapStream>(pos, args...),
isolate_(isolate) {
- isolate->heap()->AddGCEpilogueCallback(UpdateBufferPointersCallback,
- v8::kGCTypeAll, this);
+ isolate->main_thread_local_heap()->AddGCEpilogueCallback(
+ UpdateBufferPointersCallback, this);
}
private:
~RelocatingCharacterStream() final {
- isolate_->heap()->RemoveGCEpilogueCallback(UpdateBufferPointersCallback,
- this);
+ isolate_->main_thread_local_heap()->RemoveGCEpilogueCallback(
+ UpdateBufferPointersCallback, this);
}
- static void UpdateBufferPointersCallback(v8::Isolate* v8_isolate,
- v8::GCType type,
- v8::GCCallbackFlags flags,
- void* stream) {
+ static void UpdateBufferPointersCallback(void* stream) {
reinterpret_cast<RelocatingCharacterStream*>(stream)
->UpdateBufferPointers();
}
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index d3a3095c93..646970faee 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -497,15 +497,17 @@ Token::Value Scanner::ScanPrivateName() {
next().literal_chars.Start();
DCHECK_EQ(c0_, '#');
DCHECK(!IsIdentifierStart(kEndOfInput));
- if (!IsIdentifierStart(Peek())) {
- ReportScannerError(source_pos(),
- MessageTemplate::kInvalidOrUnexpectedToken);
- return Token::ILLEGAL;
+ int pos = source_pos();
+ Advance();
+ if (IsIdentifierStart(c0_) ||
+ (CombineSurrogatePair() && IsIdentifierStart(c0_))) {
+ AddLiteralChar('#');
+ Token::Value token = ScanIdentifierOrKeywordInner();
+ return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
}
- AddLiteralCharAdvance();
- Token::Value token = ScanIdentifierOrKeywordInner();
- return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
+ ReportScannerError(pos, MessageTemplate::kInvalidOrUnexpectedToken);
+ return Token::ILLEGAL;
}
Token::Value Scanner::ScanTemplateSpan() {
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index cc2ef52ce3..0fe907632d 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -21,7 +21,6 @@
#include "src/strings/char-predicates.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"
-#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 99dbd9f9c1..bbe85ba823 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -40,7 +40,7 @@ class CpuSampler : public sampler::Sampler {
void SampleStack(const v8::RegisterState& regs) override {
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::WasEverUsed() &&
+ if (isolate->was_locker_ever_used() &&
(!isolate->thread_manager()->IsLockedByThread(
perThreadData_->thread_id()) ||
perThreadData_->thread_state() != nullptr)) {
@@ -268,8 +268,7 @@ SamplingEventsProcessor::ProcessOneSample() {
void SamplingEventsProcessor::Run() {
base::MutexGuard guard(&running_mutex_);
while (running_.load(std::memory_order_relaxed)) {
- base::TimeTicks nextSampleTime =
- base::TimeTicks::HighResolutionNow() + period_;
+ base::TimeTicks nextSampleTime = base::TimeTicks::Now() + period_;
base::TimeTicks now;
SampleProcessingResult result;
// Keep processing existing events until we need to do next sample
@@ -281,7 +280,7 @@ void SamplingEventsProcessor::Run() {
// processed, proceed to the next code event.
ProcessCodeEvent();
}
- now = base::TimeTicks::HighResolutionNow();
+ now = base::TimeTicks::Now();
} while (result != NoSamplesInQueue && now < nextSampleTime);
if (nextSampleTime > now) {
@@ -290,7 +289,7 @@ void SamplingEventsProcessor::Run() {
nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
// Do not use Sleep on Windows as it is very imprecise, with up to 16ms
// jitter, which is unacceptable for short profile intervals.
- while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
+ while (base::TimeTicks::Now() < nextSampleTime) {
}
} else // NOLINT
#else
@@ -307,7 +306,7 @@ void SamplingEventsProcessor::Run() {
if (!running_.load(std::memory_order_relaxed)) {
break;
}
- now = base::TimeTicks::HighResolutionNow();
+ now = base::TimeTicks::Now();
}
}
}
@@ -413,7 +412,7 @@ void ProfilerCodeObserver::LogBuiltins() {
++builtin) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
rec->instruction_start = code.InstructionStart();
rec->instruction_size = code.InstructionSize();
rec->builtin = builtin;
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index dbe48876d2..2fb6640470 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -264,9 +264,12 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
CombinedHeapObjectIterator heap_iterator(
heap(), HeapObjectIterator::kFilterUnreachable);
+ PtrComprCageBase cage_base(isolate());
for (HeapObject heap_obj = heap_iterator.Next(); !heap_obj.is_null();
heap_obj = heap_iterator.Next()) {
- if (!heap_obj.IsJSObject() || heap_obj.IsExternal(isolate())) continue;
+ if (!heap_obj.IsJSObject(cage_base) ||
+ heap_obj.IsJSExternalObject(cage_base))
+ continue;
v8::Local<v8::Object> v8_obj(
Utils::ToLocal(handle(JSObject::cast(heap_obj), isolate())));
if (!predicate->Filter(v8_obj)) continue;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index d7e384494d..6fba987818 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -42,6 +42,152 @@
namespace v8 {
namespace internal {
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+class HeapEntryVerifier {
+ public:
+ HeapEntryVerifier(HeapSnapshotGenerator* generator, HeapObject obj)
+ : generator_(generator),
+ primary_object_(obj),
+ reference_summary_(
+ ReferenceSummary::SummarizeReferencesFrom(generator->heap(), obj)) {
+ generator->set_verifier(this);
+ }
+ ~HeapEntryVerifier() {
+ CheckAllReferencesWereChecked();
+ generator_->set_verifier(nullptr);
+ }
+
+ // Checks that `host` retains `target`, according to the marking visitor. This
+ // allows us to verify, when adding edges to the snapshot, that they
+ // correspond to real retaining relationships.
+ void CheckStrongReference(HeapObject host, HeapObject target) {
+ // All references should be from the current primary object.
+ CHECK_EQ(host, primary_object_);
+
+ checked_objects_.insert(target);
+
+ // Check whether there is a direct strong reference from host to target.
+ if (reference_summary_.strong_references().find(target) !=
+ reference_summary_.strong_references().end()) {
+ return;
+ }
+
+ // There is no direct reference from host to target, but sometimes heap
+ // snapshots include references that skip one, two, or three objects, such
+ // as __proto__ on a JSObject referring to its Map's prototype, or a
+ // property getter that bypasses the property array and accessor info. At
+ // this point, we must check for those indirect references.
+ for (size_t level = 0; level < 3; ++level) {
+ const std::unordered_set<HeapObject, Object::Hasher>& indirect =
+ GetIndirectStrongReferences(level);
+ if (indirect.find(target) != indirect.end()) {
+ return;
+ }
+ }
+
+ FATAL("Could not find any matching reference");
+ }
+
+ // Checks that `host` has a weak reference to `target`, according to the
+ // marking visitor.
+ void CheckWeakReference(HeapObject host, HeapObject target) {
+ // All references should be from the current primary object.
+ CHECK_EQ(host, primary_object_);
+
+ checked_objects_.insert(target);
+ CHECK_NE(reference_summary_.weak_references().find(target),
+ reference_summary_.weak_references().end());
+ }
+
+ // Marks the relationship between `host` and `target` as checked, even if the
+ // marking visitor found no such relationship. This is necessary for
+ // ephemerons, where a pair of objects is required to retain the target.
+ // Use this function with care, since it bypasses verification.
+ void MarkReferenceCheckedWithoutChecking(HeapObject host, HeapObject target) {
+ if (host == primary_object_) {
+ checked_objects_.insert(target);
+ }
+ }
+
+ // Verifies that all of the references found by the marking visitor were
+ // checked via a call to CheckStrongReference or CheckWeakReference, or
+ // deliberately skipped via a call to MarkReferenceCheckedWithoutChecking.
+ // This ensures that there aren't retaining relationships found by the marking
+ // visitor which were omitted from the heap snapshot.
+ void CheckAllReferencesWereChecked() {
+ // Both loops below skip pointers to read-only objects, because the heap
+ // snapshot deliberately omits many of those (see IsEssentialObject).
+ // Read-only objects can't ever retain normal read-write objects, so these
+ // are fine to skip.
+ for (HeapObject obj : reference_summary_.strong_references()) {
+ if (!BasicMemoryChunk::FromHeapObject(obj)->InReadOnlySpace()) {
+ CHECK_NE(checked_objects_.find(obj), checked_objects_.end());
+ }
+ }
+ for (HeapObject obj : reference_summary_.weak_references()) {
+ if (!BasicMemoryChunk::FromHeapObject(obj)->InReadOnlySpace()) {
+ CHECK_NE(checked_objects_.find(obj), checked_objects_.end());
+ }
+ }
+ }
+
+ private:
+ const std::unordered_set<HeapObject, Object::Hasher>&
+ GetIndirectStrongReferences(size_t level) {
+ CHECK_GE(indirect_strong_references_.size(), level);
+
+ if (indirect_strong_references_.size() == level) {
+ // Expansion is needed.
+ indirect_strong_references_.resize(level + 1);
+ const std::unordered_set<HeapObject, Object::Hasher>& previous =
+ level == 0 ? reference_summary_.strong_references()
+ : indirect_strong_references_[level - 1];
+ for (HeapObject obj : previous) {
+ if (BasicMemoryChunk::FromHeapObject(obj)->InReadOnlySpace()) {
+ // Marking visitors don't expect to visit objects in read-only space,
+ // and will fail DCHECKs if they are used on those objects. Read-only
+ // objects can never retain anything outside read-only space, so
+ // skipping those objects doesn't weaken verification.
+ continue;
+ }
+
+ // Indirect references should only bypass internal structures, not
+ // user-visible objects or contexts.
+ if (obj.IsJSReceiver() || obj.IsString() || obj.IsContext()) {
+ continue;
+ }
+
+ ReferenceSummary summary =
+ ReferenceSummary::SummarizeReferencesFrom(generator_->heap(), obj);
+ indirect_strong_references_[level].insert(
+ summary.strong_references().begin(),
+ summary.strong_references().end());
+ }
+ }
+
+ return indirect_strong_references_[level];
+ }
+
+ DISALLOW_GARBAGE_COLLECTION(no_gc)
+ HeapSnapshotGenerator* generator_;
+ HeapObject primary_object_;
+
+ // All objects referred to by primary_object_, according to a marking visitor.
+ ReferenceSummary reference_summary_;
+
+ // Objects that have been checked via a call to CheckStrongReference or
+ // CheckWeakReference, or deliberately skipped via a call to
+ // MarkReferenceCheckedWithoutChecking.
+ std::unordered_set<HeapObject, Object::Hasher> checked_objects_;
+
+ // Objects transitively retained by the primary object. The objects in the set
+ // at index i are retained by the primary object via a chain of i+1
+ // intermediate objects.
+ std::vector<std::unordered_set<HeapObject, Object::Hasher>>
+ indirect_strong_references_;
+};
+#endif
+
HeapGraphEdge::HeapGraphEdge(Type type, const char* name, HeapEntry* from,
HeapEntry* to)
: bit_field_(TypeField::encode(type) |
@@ -78,29 +224,84 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot, int index, Type type,
DCHECK_GE(index, 0);
}
-void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
- const char* name,
- HeapEntry* entry) {
+void HeapEntry::VerifyReference(HeapGraphEdge::Type type, HeapEntry* entry,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification) {
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+ if (verification == kOffHeapPointer || generator->verifier() == nullptr) {
+ // Off-heap pointers are outside the scope of this verification; we just
+ // trust the embedder to provide accurate data. If the verifier is null,
+ // then verification is disabled.
+ return;
+ }
+ if (verification == kCustomWeakPointer) {
+ // The caller declared that this is a weak pointer ignored by the marking
+ // visitor. All we can verify at this point is that the edge type declares
+ // it to be weak.
+ CHECK_EQ(type, HeapGraphEdge::kWeak);
+ return;
+ }
+ Address from_address =
+ reinterpret_cast<Address>(generator->FindHeapThingForHeapEntry(this));
+ Address to_address =
+ reinterpret_cast<Address>(generator->FindHeapThingForHeapEntry(entry));
+ if (from_address == kNullAddress || to_address == kNullAddress) {
+ // One of these entries doesn't correspond to a real heap object.
+ // Verification is not possible.
+ return;
+ }
+ HeapObject from_obj = HeapObject::cast(Object(from_address));
+ HeapObject to_obj = HeapObject::cast(Object(to_address));
+ if (BasicMemoryChunk::FromHeapObject(to_obj)->InReadOnlySpace()) {
+ // We can't verify pointers into read-only space, because marking visitors
+ // might not mark those. For example, every Map has a pointer to the
+ // MetaMap, but marking visitors don't bother with following that link.
+ // Read-only objects are immortal and can never point to things outside of
+ // read-only space, so ignoring these objects is safe from the perspective
+ // of ensuring accurate retaining paths for normal read-write objects.
+ // Therefore, do nothing.
+ } else if (verification == kEphemeron) {
+ // Ephemerons can't be verified because they aren't marked directly by the
+ // marking visitor.
+ generator->verifier()->MarkReferenceCheckedWithoutChecking(from_obj,
+ to_obj);
+ } else if (type == HeapGraphEdge::kWeak) {
+ generator->verifier()->CheckWeakReference(from_obj, to_obj);
+ } else {
+ generator->verifier()->CheckStrongReference(from_obj, to_obj);
+ }
+#endif
+}
+
+void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, const char* name,
+ HeapEntry* entry,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification) {
++children_count_;
snapshot_->edges().emplace_back(type, name, this, entry);
+ VerifyReference(type, entry, generator, verification);
}
-void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
- int index,
- HeapEntry* entry) {
+void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, int index,
+ HeapEntry* entry,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification) {
++children_count_;
snapshot_->edges().emplace_back(type, index, this, entry);
+ VerifyReference(type, entry, generator, verification);
}
void HeapEntry::SetNamedAutoIndexReference(HeapGraphEdge::Type type,
const char* description,
HeapEntry* child,
- StringsStorage* names) {
+ StringsStorage* names,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification) {
int index = children_count_ + 1;
const char* name = description
? names->GetFormatted("%d / %s", index, description)
: names->GetName(index);
- SetNamedReference(type, name, child);
+ SetNamedReference(type, name, child, generator, verification);
}
void HeapEntry::Print(const char* prefix, const char* edge_name, int max_depth,
@@ -577,7 +778,7 @@ void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject object) {
} else if (object.IsJSObject()) {
JSObject obj = JSObject::cast(object);
- JSFunction maybe_constructor = GetConstructor(obj);
+ JSFunction maybe_constructor = GetConstructor(heap_->isolate(), obj);
if (!maybe_constructor.is_null()) {
ExtractLocationForJSFunction(entry, maybe_constructor);
@@ -608,8 +809,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
JSRegExp re = JSRegExp::cast(object);
return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.source()));
} else if (object.IsJSObject()) {
+ // TODO(v8:12674) Fix and run full gcmole.
+ DisableGCMole no_gcmole;
const char* name = names_->GetName(
- GetConstructorName(JSObject::cast(object)));
+ GetConstructorName(heap_->isolate(), JSObject::cast(object)));
if (object.IsJSGlobalObject()) {
auto it = global_object_tag_map_.find(JSGlobalObject::cast(object));
if (it != global_object_tag_map_.end()) {
@@ -1019,9 +1222,11 @@ void V8HeapExplorer::ExtractEphemeronHashTableReferences(
key_entry->name(), key_entry->id(), value_entry->name(),
value_entry->id(), table_entry->id());
key_entry->SetNamedAutoIndexReference(HeapGraphEdge::kInternal, edge_name,
- value_entry, names_);
- table_entry->SetNamedAutoIndexReference(HeapGraphEdge::kInternal,
- edge_name, value_entry, names_);
+ value_entry, names_, generator_,
+ HeapEntry::kEphemeron);
+ table_entry->SetNamedAutoIndexReference(
+ HeapGraphEdge::kInternal, edge_name, value_entry, names_, generator_,
+ HeapEntry::kEphemeron);
}
}
}
@@ -1040,14 +1245,13 @@ static const struct {
void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
Context context) {
+ DisallowGarbageCollection no_gc;
if (!context.IsNativeContext() && context.is_declaration_context()) {
ScopeInfo scope_info = context.scope_info();
// Add context allocated locals.
- int context_locals = scope_info.ContextLocalCount();
- for (int i = 0; i < context_locals; ++i) {
- String local_name = scope_info.ContextLocalName(i);
- int idx = scope_info.ContextHeaderLength() + i;
- SetContextReference(entry, local_name, context.get(idx),
+ for (auto it : ScopeInfo::IterateLocalNames(&scope_info, no_gc)) {
+ int idx = scope_info.ContextHeaderLength() + it->index();
+ SetContextReference(entry, it->name(), context.get(idx),
Context::OffsetOfElementAt(idx));
}
if (scope_info.HasContextAllocatedFunctionName()) {
@@ -1083,11 +1287,12 @@ void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
SetWeakReference(entry, "optimized_code_list",
context.get(Context::OPTIMIZED_CODE_LIST),
- Context::OffsetOfElementAt(Context::OPTIMIZED_CODE_LIST));
- SetWeakReference(
- entry, "deoptimized_code_list",
- context.get(Context::DEOPTIMIZED_CODE_LIST),
- Context::OffsetOfElementAt(Context::DEOPTIMIZED_CODE_LIST));
+ Context::OffsetOfElementAt(Context::OPTIMIZED_CODE_LIST),
+ HeapEntry::kCustomWeakPointer);
+ SetWeakReference(entry, "deoptimized_code_list",
+ context.get(Context::DEOPTIMIZED_CODE_LIST),
+ Context::OffsetOfElementAt(Context::DEOPTIMIZED_CODE_LIST),
+ HeapEntry::kCustomWeakPointer);
STATIC_ASSERT(Context::OPTIMIZED_CODE_LIST == Context::FIRST_WEAK_SLOT);
STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
Context::NATIVE_CONTEXT_SLOTS);
@@ -1164,10 +1369,10 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapEntry* entry, SharedFunctionInfo shared) {
std::unique_ptr<char[]> name = shared.DebugNameCStr();
if (name[0] != '\0') {
- TagObject(shared.GetCode(),
+ TagObject(FromCodeT(shared.GetCode()),
names_->GetFormatted("(code for %s)", name.get()));
} else {
- TagObject(shared.GetCode(),
+ TagObject(FromCodeT(shared.GetCode()),
names_->GetFormatted("(%s code)",
CodeKindToString(shared.GetCode().kind())));
}
@@ -1237,9 +1442,8 @@ void V8HeapExplorer::ExtractWeakCellReferences(HeapEntry* entry,
WeakCell::kUnregisterTokenOffset);
}
-void V8HeapExplorer::TagBuiltinCodeObject(Object code, const char* name) {
- DCHECK(code.IsCode() || (V8_EXTERNAL_CODE_SPACE_BOOL && code.IsCodeT()));
- TagObject(code, names_->GetFormatted("(%s builtin)", name));
+void V8HeapExplorer::TagBuiltinCodeObject(CodeT code, const char* name) {
+ TagObject(FromCodeT(code), names_->GetFormatted("(%s builtin)", name));
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
@@ -1336,7 +1540,7 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
HeapEntry* data_entry =
generator_->FindOrAddEntry(buffer.backing_store(), &allocator);
entry->SetNamedReference(HeapGraphEdge::kInternal, "backing_store",
- data_entry);
+ data_entry, generator_, HeapEntry::kOffHeapPointer);
}
void V8HeapExplorer::ExtractJSPromiseReferences(HeapEntry* entry,
@@ -1388,7 +1592,8 @@ void V8HeapExplorer::ExtractNumberReference(HeapEntry* entry, Object number) {
SnapshotObjectId id = heap_object_map_->get_next_id();
HeapEntry* child_entry =
snapshot_->AddEntry(HeapEntry::kString, name, id, 0, 0);
- entry->SetNamedReference(HeapGraphEdge::kInternal, "value", child_entry);
+ entry->SetNamedReference(HeapGraphEdge::kInternal, "value", child_entry,
+ generator_);
}
void V8HeapExplorer::ExtractFeedbackVectorReferences(
@@ -1557,24 +1762,23 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject js_obj,
}
}
-JSFunction V8HeapExplorer::GetConstructor(JSReceiver receiver) {
- Isolate* isolate = receiver.GetIsolate();
+JSFunction V8HeapExplorer::GetConstructor(Isolate* isolate,
+ JSReceiver receiver) {
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
MaybeHandle<JSFunction> maybe_constructor =
- JSReceiver::GetConstructor(handle(receiver, isolate));
+ JSReceiver::GetConstructor(isolate, handle(receiver, isolate));
if (maybe_constructor.is_null()) return JSFunction();
return *maybe_constructor.ToHandleChecked();
}
-String V8HeapExplorer::GetConstructorName(JSObject object) {
- Isolate* isolate = object.GetIsolate();
+String V8HeapExplorer::GetConstructorName(Isolate* isolate, JSObject object) {
if (object.IsJSFunction()) return ReadOnlyRoots(isolate).closure_string();
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
- return *JSReceiver::GetConstructorName(handle(object, isolate));
+ return *JSReceiver::GetConstructorName(isolate, handle(object, isolate));
}
HeapEntry* V8HeapExplorer::GetEntry(Object obj) {
@@ -1599,7 +1803,7 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot object) override {
if (root == Root::kBuiltins) {
- explorer_->TagBuiltinCodeObject(*object, description);
+ explorer_->TagBuiltinCodeObject(CodeT::cast(*object), description);
}
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
*object);
@@ -1696,6 +1900,18 @@ bool V8HeapExplorer::IterateAndExtractReferences(
visited_fields_.resize(max_pointer, false);
}
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+ std::unique_ptr<HeapEntryVerifier> verifier;
+ // MarkingVisitorBase doesn't expect that we will ever visit read-only
+ // objects, and fails DCHECKs if we attempt to. Read-only objects can
+ // never retain read-write objects, so there is no risk in skipping
+ // verification for them.
+ if (FLAG_heap_snapshot_verify &&
+ !BasicMemoryChunk::FromHeapObject(obj)->InReadOnlySpace()) {
+ verifier = std::make_unique<HeapEntryVerifier>(generator, obj);
+ }
+#endif
+
HeapEntry* entry = GetEntry(obj);
ExtractReferences(entry, obj);
SetInternalReference(entry, "map", obj.map(cage_base),
@@ -1759,7 +1975,8 @@ void V8HeapExplorer::SetContextReference(HeapEntry* parent_entry,
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
parent_entry->SetNamedReference(HeapGraphEdge::kContextVariable,
- names_->GetName(reference_name), child_entry);
+ names_->GetName(reference_name), child_entry,
+ generator_);
MarkVisitedField(field_offset);
}
@@ -1776,15 +1993,15 @@ void V8HeapExplorer::SetNativeBindReference(HeapEntry* parent_entry,
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
parent_entry->SetNamedReference(HeapGraphEdge::kShortcut, reference_name,
- child_entry);
+ child_entry, generator_);
}
void V8HeapExplorer::SetElementReference(HeapEntry* parent_entry, int index,
Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
- parent_entry->SetIndexedReference(HeapGraphEdge::kElement, index,
- child_entry);
+ parent_entry->SetIndexedReference(HeapGraphEdge::kElement, index, child_entry,
+ generator_);
}
void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
@@ -1796,7 +2013,7 @@ void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(HeapGraphEdge::kInternal, reference_name,
- child_entry);
+ child_entry, generator_);
MarkVisitedField(field_offset);
}
@@ -1808,7 +2025,8 @@ void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry, int index,
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(HeapGraphEdge::kInternal,
- names_->GetName(index), child_entry);
+ names_->GetName(index), child_entry,
+ generator_);
MarkVisitedField(field_offset);
}
@@ -1824,20 +2042,20 @@ void V8HeapExplorer::SetHiddenReference(HeapObject parent_obj,
DCHECK_NOT_NULL(child_entry);
if (IsEssentialHiddenReference(parent_obj, field_offset)) {
parent_entry->SetIndexedReference(HeapGraphEdge::kHidden, index,
- child_entry);
+ child_entry, generator_);
}
}
-void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
- const char* reference_name,
- Object child_obj, int field_offset) {
+void V8HeapExplorer::SetWeakReference(
+ HeapEntry* parent_entry, const char* reference_name, Object child_obj,
+ int field_offset, HeapEntry::ReferenceVerification verification) {
if (!IsEssentialObject(child_obj)) {
return;
}
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(HeapGraphEdge::kWeak, reference_name,
- child_entry);
+ child_entry, generator_, verification);
MarkVisitedField(field_offset);
}
@@ -1849,8 +2067,9 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
}
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
- parent_entry->SetNamedReference(
- HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kWeak,
+ names_->GetFormatted("%d", index),
+ child_entry, generator_);
if (field_offset.has_value()) {
MarkVisitedField(*field_offset);
}
@@ -1887,25 +2106,25 @@ void V8HeapExplorer::SetPropertyReference(HeapEntry* parent_entry,
.get())
: names_->GetName(reference_name);
- parent_entry->SetNamedReference(type, name, child_entry);
+ parent_entry->SetNamedReference(type, name, child_entry, generator_);
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetRootGcRootsReference() {
- snapshot_->root()->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- snapshot_->gc_roots());
+ snapshot_->root()->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement, snapshot_->gc_roots(), generator_);
}
void V8HeapExplorer::SetUserGlobalReference(Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
- snapshot_->root()->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
- nullptr, child_entry, names_);
+ snapshot_->root()->SetNamedAutoIndexReference(
+ HeapGraphEdge::kShortcut, nullptr, child_entry, names_, generator_);
}
void V8HeapExplorer::SetGcRootsReference(Root root) {
snapshot_->gc_roots()->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement, snapshot_->gc_subroot(root));
+ HeapGraphEdge::kElement, snapshot_->gc_subroot(root), generator_);
}
void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
@@ -1923,11 +2142,11 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
HeapGraphEdge::Type edge_type =
is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kInternal;
if (name != nullptr) {
- snapshot_->gc_subroot(root)->SetNamedReference(edge_type, name,
- child_entry);
+ snapshot_->gc_subroot(root)->SetNamedReference(edge_type, name, child_entry,
+ generator_);
} else {
snapshot_->gc_subroot(root)->SetNamedAutoIndexReference(
- edge_type, description, child_entry, names_);
+ edge_type, description, child_entry, names_, generator_);
}
// For full heap snapshots we do not emit user roots but rather rely on
@@ -2224,7 +2443,8 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
if (auto* entry = EntryForEmbedderGraphNode(node.get())) {
if (node->IsRootNode()) {
snapshot_->root()->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement, entry);
+ HeapGraphEdge::kElement, entry, generator_,
+ HeapEntry::kOffHeapPointer);
}
if (node->WrapperNode()) {
MergeNodeIntoEntry(entry, node.get(), node->WrapperNode());
@@ -2240,10 +2460,13 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
HeapEntry* to = EntryForEmbedderGraphNode(edge.to);
if (!to) continue;
if (edge.name == nullptr) {
- from->SetIndexedAutoIndexReference(HeapGraphEdge::kElement, to);
+ from->SetIndexedAutoIndexReference(HeapGraphEdge::kElement, to,
+ generator_,
+ HeapEntry::kOffHeapPointer);
} else {
from->SetNamedReference(HeapGraphEdge::kInternal,
- names_->GetCopy(edge.name), to);
+ names_->GetCopy(edge.name), to, generator_,
+ HeapEntry::kOffHeapPointer);
}
}
}
@@ -2252,16 +2475,13 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
}
HeapSnapshotGenerator::HeapSnapshotGenerator(
- HeapSnapshot* snapshot,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver,
- Heap* heap)
+ HeapSnapshot* snapshot, v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver, Heap* heap)
: snapshot_(snapshot),
control_(control),
v8_heap_explorer_(snapshot_, this, resolver),
dom_explorer_(snapshot_, this),
- heap_(heap) {
-}
+ heap_(heap) {}
namespace {
class V8_NODISCARD NullContextForSnapshotScope {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index a4f7f5ac48..3c3918ea2a 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -24,6 +24,10 @@
#include "src/profiler/strings-storage.h"
#include "src/strings/string-hasher.h"
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+#include "src/heap/reference-summarizer.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -141,17 +145,40 @@ class HeapEntry {
}
uint8_t detachedness() const { return detachedness_; }
- void SetIndexedReference(
- HeapGraphEdge::Type type, int index, HeapEntry* entry);
- void SetNamedReference(
- HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- HeapEntry* child) {
- SetIndexedReference(type, children_count_ + 1, child);
+ enum ReferenceVerification {
+ // Verify that the reference can be found via marking, if verification is
+ // enabled.
+ kVerify,
+
+ // Skip verifying that the reference can be found via marking, for any of
+ // the following reasons:
+
+ kEphemeron,
+ kOffHeapPointer,
+ kCustomWeakPointer,
+ };
+
+ void VerifyReference(HeapGraphEdge::Type type, HeapEntry* entry,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification);
+ void SetIndexedReference(HeapGraphEdge::Type type, int index,
+ HeapEntry* entry, HeapSnapshotGenerator* generator,
+ ReferenceVerification verification = kVerify);
+ void SetNamedReference(HeapGraphEdge::Type type, const char* name,
+ HeapEntry* entry, HeapSnapshotGenerator* generator,
+ ReferenceVerification verification = kVerify);
+ void SetIndexedAutoIndexReference(
+ HeapGraphEdge::Type type, HeapEntry* child,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification = kVerify) {
+ SetIndexedReference(type, children_count_ + 1, child, generator,
+ verification);
}
void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
const char* description, HeapEntry* child,
- StringsStorage* strings);
+ StringsStorage* strings,
+ HeapSnapshotGenerator* generator,
+ ReferenceVerification verification = kVerify);
V8_EXPORT_PRIVATE void Print(const char* prefix, const char* edge_name,
int max_depth, int indent) const;
@@ -358,14 +385,14 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void CollectGlobalObjectsTags();
void MakeGlobalObjectTagMap(const SafepointScope& safepoint_scope);
- void TagBuiltinCodeObject(Object code, const char* name);
+ void TagBuiltinCodeObject(CodeT code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
const char* name,
size_t size);
- static JSFunction GetConstructor(JSReceiver receiver);
- static String GetConstructorName(JSObject object);
+ static JSFunction GetConstructor(Isolate* isolate, JSReceiver receiver);
+ static String GetConstructorName(Isolate* isolate, JSObject object);
private:
void MarkVisitedField(int offset);
@@ -438,8 +465,10 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
int field_offset = -1);
void SetHiddenReference(HeapObject parent_obj, HeapEntry* parent_entry,
int index, Object child, int field_offset);
- void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
- Object child_obj, int field_offset);
+ void SetWeakReference(
+ HeapEntry* parent_entry, const char* reference_name, Object child_obj,
+ int field_offset,
+ HeapEntry::ReferenceVerification verification = HeapEntry::kVerify);
void SetWeakReference(HeapEntry* parent_entry, int index, Object child_obj,
base::Optional<int> field_offset);
void SetPropertyReference(HeapEntry* parent_entry, Name reference_name,
@@ -511,6 +540,8 @@ class NativeObjectsExplorer {
friend class GlobalHandlesExtractor;
};
+class HeapEntryVerifier;
+
class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
public:
// The HeapEntriesMap instance is used to track a mapping between
@@ -539,10 +570,33 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
}
HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- return entries_map_.emplace(ptr, allocator->AllocateEntry(ptr))
- .first->second;
+ HeapEntry* result =
+ entries_map_.emplace(ptr, allocator->AllocateEntry(ptr)).first->second;
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+ if (FLAG_heap_snapshot_verify) {
+ reverse_entries_map_.emplace(result, ptr);
+ }
+#endif
+ return result;
}
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+ HeapThing FindHeapThingForHeapEntry(HeapEntry* entry) {
+ // The reverse lookup map is only populated if the verification flag is
+ // enabled.
+ DCHECK(FLAG_heap_snapshot_verify);
+
+ auto it = reverse_entries_map_.find(entry);
+ return it == reverse_entries_map_.end() ? nullptr : it->second;
+ }
+
+ HeapEntryVerifier* verifier() const { return verifier_; }
+ void set_verifier(HeapEntryVerifier* verifier) {
+ DCHECK_IMPLIES(verifier_, !verifier);
+ verifier_ = verifier;
+ }
+#endif
+
HeapEntry* AddEntry(Smi smi, HeapEntriesAllocator* allocator) {
return smis_map_.emplace(smi.value(), allocator->AllocateEntry(smi))
.first->second;
@@ -558,6 +612,8 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
return entry != nullptr ? entry : AddEntry(smi, allocator);
}
+ Heap* heap() const { return heap_; }
+
private:
bool FillReferences();
void ProgressStep() override;
@@ -575,6 +631,11 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
uint32_t progress_counter_;
uint32_t progress_total_;
Heap* heap_;
+
+#ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
+ std::unordered_map<HeapEntry*, HeapThing> reverse_entries_map_;
+ HeapEntryVerifier* verifier_ = nullptr;
+#endif
};
class OutputStreamWriter;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 1a4665b874..fc7e080f37 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -578,7 +578,7 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
: title_(title),
options_(options),
delegate_(std::move(delegate)),
- start_time_(base::TimeTicks::HighResolutionNow()),
+ start_time_(base::TimeTicks::Now()),
top_down_(profiler->isolate(), profiler->code_entries()),
profiler_(profiler),
streaming_next_sample_(0),
@@ -750,7 +750,7 @@ void CpuProfile::StreamPendingTraceEvents() {
}
void CpuProfile::FinishProfile() {
- end_time_ = base::TimeTicks::HighResolutionNow();
+ end_time_ = base::TimeTicks::Now();
// Stop tracking context movements after profiling stops.
context_filter_.set_native_context_address(kNullAddress);
StreamPendingTraceEvents();
@@ -1005,6 +1005,13 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
bool accepts_context = context_filter.Accept(native_context_address);
bool accepts_embedder_context =
context_filter.Accept(embedder_native_context_address);
+
+ // if FilterContext is set, do not propagate StateTag if not accepted.
+ // GC is exception because native context address is guaranteed to be empty.
+ DCHECK(state != StateTag::GC || native_context_address == kNullAddress);
+ if (!accepts_context && state != StateTag::GC) {
+ state = StateTag::IDLE;
+ }
profile->AddPath(timestamp, accepts_context ? path : empty_path, src_line,
update_stats, sampling_interval, state,
accepts_embedder_context ? embedder_state_tag
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index af8581a8ac..90c6963afe 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -321,10 +321,7 @@ void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
}
void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
- Address pc, int fp_to_sp_delta,
- bool reuse_code) {
- // When reuse_code is true it is just a bailout and not an actual deopt.
- if (reuse_code) return;
+ Address pc, int fp_to_sp_delta) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeDeopt);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*code, pc);
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index bcdd1b7b05..5ac86eb493 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -6,7 +6,6 @@
#define V8_PROFILER_PROFILER_LISTENER_H_
#include <memory>
-#include <vector>
#include "include/v8-profiler.h"
#include "src/logging/code-events.h"
@@ -64,7 +63,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener,
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) override;
+ int fp_to_sp_delta) override;
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 37197a5918..cec67433ae 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -135,7 +135,7 @@ namespace {
inline uint32_t ComputeStringHash(const char* str, int len) {
uint32_t raw_hash_field =
StringHasher::HashSequentialString(str, len, kZeroHashSeed);
- return raw_hash_field >> Name::kHashShift;
+ return Name::HashBits::decode(raw_hash_field);
}
} // namespace
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 20732bfb76..cd65f9d651 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -200,7 +200,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
tos = nullptr;
}
sampling_interval_ = sampling_interval;
- timestamp = base::TimeTicks::HighResolutionNow();
+ timestamp = base::TimeTicks::Now();
}
bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
@@ -226,6 +226,13 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
sample_info->embedder_state = embedder_state->GetState();
}
+ Context top_context = isolate->context();
+ if (top_context.ptr() != i::Context::kNoContext &&
+ top_context.ptr() != i::Context::kInvalidContext) {
+ NativeContext top_native_context = top_context.native_context();
+ sample_info->context = reinterpret_cast<void*>(top_native_context.ptr());
+ }
+
i::Address js_entry_sp = isolate->js_entry_sp();
if (js_entry_sp == 0) return true; // Not executing JS now.
@@ -293,13 +300,6 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
reinterpret_cast<i::Address>(regs->lr),
js_entry_sp);
- Context top_context = isolate->context();
- if (top_context.ptr() != i::Context::kNoContext &&
- top_context.ptr() != i::Context::kInvalidContext) {
- NativeContext top_native_context = top_context.native_context();
- sample_info->context = reinterpret_cast<void*>(top_native_context.ptr());
- }
-
if (it.done()) return true;
size_t i = 0;
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 72770779b9..78be35552e 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -702,10 +702,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Start new stack frame.
// Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
- RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
+ RegList registers_to_retain = {r4, r5, r6, r7, r8, r9, r10, fp};
+ RegList argument_registers = {r0, r1, r2, r3};
+ __ stm(db_w, sp, argument_registers | registers_to_retain | lr);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
@@ -922,7 +921,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables..
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
- __ ldm(ia_w, sp, registers_to_retain | pc.bit());
+ __ ldm(ia_w, sp, registers_to_retain | pc);
// Backtrack code (branch target for conditional backtracks).
if (backtrack_label_.is_linked()) {
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
index 3db9a90c29..c60a714339 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -667,13 +667,12 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Order here should correspond to order of offset constants in header file.
// TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
// or dont save.
- RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
- s4.bit() | s5.bit() | s6.bit() | s7.bit();
- RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7};
+ RegList argument_registers = {a0, a1, a2, a3};
- argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+ argument_registers |= {a4, a5, a6, a7};
- __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
+ __ MultiPush({ra}, {fp}, argument_registers | registers_to_retain);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
// TODO(plind): this 8 is the # of argument regs, should have definition.
@@ -894,7 +893,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables..
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
- __ MultiPop(ra.bit(), fp.bit(), registers_to_retain);
+ __ MultiPop({ra}, {fp}, registers_to_retain);
__ Ret();
// Backtrack code (branch target for conditional backtracks).
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 74a42ef815..dafc657f81 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -681,10 +681,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Start new stack frame.
// Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
- s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
- RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+ RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7, fp};
+ RegList argument_registers = {a0, a1, a2, a3};
+ __ MultiPush(argument_registers | registers_to_retain | ra);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
@@ -905,7 +904,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables..
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
- __ MultiPop(registers_to_retain | ra.bit());
+ __ MultiPop(registers_to_retain | ra);
__ Ret();
// Backtrack code (branch target for conditional backtracks).
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index bee0e57501..17546ed52d 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -715,13 +715,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Order here should correspond to order of offset constants in header file.
// TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
// or dont save.
- RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
- s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
- RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7, fp};
+ RegList argument_registers = {a0, a1, a2, a3};
- argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+ argument_registers |= {a4, a5, a6, a7};
- __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+ __ MultiPush(argument_registers | registers_to_retain | ra);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
// TODO(plind): this 8 is the # of argument regs, should have definition.
@@ -942,7 +941,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables..
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
- __ MultiPop(registers_to_retain | ra.bit());
+ __ MultiPop(registers_to_retain | ra);
__ Ret();
// Backtrack code (branch target for conditional backtracks).
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index fda0060e47..fb9425f008 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -737,13 +737,13 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Ensure register assigments are consistent with callee save mask
- DCHECK(r25.bit() & kRegExpCalleeSaved);
- DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
- DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
- DCHECK(current_character().bit() & kRegExpCalleeSaved);
- DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
- DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
- DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+ DCHECK(kRegExpCalleeSaved.has(r25));
+ DCHECK(kRegExpCalleeSaved.has(code_pointer()));
+ DCHECK(kRegExpCalleeSaved.has(current_input_offset()));
+ DCHECK(kRegExpCalleeSaved.has(current_character()));
+ DCHECK(kRegExpCalleeSaved.has(backtrack_stackpointer()));
+ DCHECK(kRegExpCalleeSaved.has(end_of_input_address()));
+ DCHECK(kRegExpCalleeSaved.has(frame_pointer()));
// Actually emit code to start a new stack frame.
// Push arguments
@@ -752,8 +752,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
RegList registers_to_retain = kRegExpCalleeSaved;
- RegList argument_registers = r3.bit() | r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit();
+ RegList argument_registers = {r3, r4, r5, r6, r7, r8, r9, r10};
__ mflr(r0);
__ push(r0);
__ MultiPush(argument_registers | registers_to_retain);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index b1ed035134..5760809d96 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -215,8 +215,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
};
// Set of non-volatile registers saved/restored by generated regexp code.
-const RegList kRegExpCalleeSaved =
- 1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
+const RegList kRegExpCalleeSaved = {r25, r26, r27, r28, r29, r30, fp};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index d8c0d24732..4cb9e8e689 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -519,12 +519,29 @@ int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
#ifdef V8_INTL_SUPPORT
-// Case Insensitve comparesion
-int CompareFirstCharCaseInsensitve(RegExpTree* const* a, RegExpTree* const* b) {
+int CompareCaseInsensitive(const icu::UnicodeString& a,
+ const icu::UnicodeString& b) {
+ return a.caseCompare(b, U_FOLD_CASE_DEFAULT);
+}
+
+int CompareFirstCharCaseInsensitive(RegExpTree* const* a,
+ RegExpTree* const* b) {
RegExpAtom* atom1 = (*a)->AsAtom();
RegExpAtom* atom2 = (*b)->AsAtom();
- icu::UnicodeString character1(atom1->data().at(0));
- return character1.caseCompare(atom2->data().at(0), U_FOLD_CASE_DEFAULT);
+ return CompareCaseInsensitive(icu::UnicodeString{atom1->data().at(0)},
+ icu::UnicodeString{atom2->data().at(0)});
+}
+
+bool Equals(bool ignore_case, const icu::UnicodeString& a,
+ const icu::UnicodeString& b) {
+ if (a == b) return true;
+ if (ignore_case) return CompareCaseInsensitive(a, b) == 0;
+ return false; // Case-sensitive equality already checked above.
+}
+
+bool CharAtEquals(bool ignore_case, int index, const RegExpAtom* a,
+ const RegExpAtom* b) {
+ return Equals(ignore_case, a->data().at(index), b->data().at(index));
}
#else
@@ -540,20 +557,43 @@ unibrow::uchar Canonical(
return canonical;
}
-int CompareFirstCharCaseIndependent(
+int CompareCaseInsensitive(
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ unibrow::uchar a, unibrow::uchar b) {
+ if (a == b) return 0;
+ if (a >= 'a' || b >= 'a') {
+ a = Canonical(canonicalize, a);
+ b = Canonical(canonicalize, b);
+ }
+ return static_cast<int>(a) - static_cast<int>(b);
+}
+
+int CompareFirstCharCaseInsensitive(
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
RegExpTree* const* a, RegExpTree* const* b) {
RegExpAtom* atom1 = (*a)->AsAtom();
RegExpAtom* atom2 = (*b)->AsAtom();
- unibrow::uchar character1 = atom1->data().at(0);
- unibrow::uchar character2 = atom2->data().at(0);
- if (character1 == character2) return 0;
- if (character1 >= 'a' || character2 >= 'a') {
- character1 = Canonical(canonicalize, character1);
- character2 = Canonical(canonicalize, character2);
- }
- return static_cast<int>(character1) - static_cast<int>(character2);
+ return CompareCaseInsensitive(canonicalize, atom1->data().at(0),
+ atom2->data().at(0));
+}
+
+bool Equals(bool ignore_case,
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ unibrow::uchar a, unibrow::uchar b) {
+ if (a == b) return true;
+ if (ignore_case) {
+ return CompareCaseInsensitive(canonicalize, a, b) == 0;
+ }
+ return false; // Case-sensitive equality already checked above.
+}
+
+bool CharAtEquals(bool ignore_case,
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ int index, const RegExpAtom* a, const RegExpAtom* b) {
+ return Equals(ignore_case, canonicalize, a->data().at(index),
+ b->data().at(index));
}
+
#endif // V8_INTL_SUPPORT
} // namespace
@@ -591,14 +631,14 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LE(first_atom, i);
if (IsIgnoreCase(compiler->flags())) {
#ifdef V8_INTL_SUPPORT
- alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
+ alternatives->StableSort(CompareFirstCharCaseInsensitive, first_atom,
i - first_atom);
#else
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
auto compare_closure = [canonicalize](RegExpTree* const* a,
RegExpTree* const* b) {
- return CompareFirstCharCaseIndependent(canonicalize, a, b);
+ return CompareFirstCharCaseInsensitive(canonicalize, a, b);
};
alternatives->StableSort(compare_closure, first_atom, i - first_atom);
#endif // V8_INTL_SUPPORT
@@ -615,6 +655,7 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
Zone* zone = compiler->zone();
ZoneList<RegExpTree*>* alternatives = this->alternatives();
int length = alternatives->length();
+ const bool ignore_case = IsIgnoreCase(compiler->flags());
int write_posn = 0;
int i = 0;
@@ -629,7 +670,12 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
#ifdef V8_INTL_SUPPORT
icu::UnicodeString common_prefix(atom->data().at(0));
#else
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* const canonicalize =
+ compiler->isolate()->regexp_macro_assembler_canonicalize();
unibrow::uchar common_prefix = atom->data().at(0);
+ if (ignore_case) {
+ common_prefix = Canonical(canonicalize, common_prefix);
+ }
#endif // V8_INTL_SUPPORT
int first_with_prefix = i;
int prefix_length = atom->length();
@@ -640,21 +686,10 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
RegExpAtom* const alt_atom = alternative->AsAtom();
#ifdef V8_INTL_SUPPORT
icu::UnicodeString new_prefix(alt_atom->data().at(0));
- if (new_prefix != common_prefix) {
- if (!IsIgnoreCase(compiler->flags())) break;
- if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
- break;
- }
+ if (!Equals(ignore_case, new_prefix, common_prefix)) break;
#else
unibrow::uchar new_prefix = alt_atom->data().at(0);
- if (new_prefix != common_prefix) {
- if (!IsIgnoreCase(compiler->flags())) break;
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- compiler->isolate()->regexp_macro_assembler_canonicalize();
- new_prefix = Canonical(canonicalize, new_prefix);
- common_prefix = Canonical(canonicalize, common_prefix);
- if (new_prefix != common_prefix) break;
- }
+ if (!Equals(ignore_case, canonicalize, new_prefix, common_prefix)) break;
#endif // V8_INTL_SUPPORT
prefix_length = std::min(prefix_length, alt_atom->length());
i++;
@@ -672,7 +707,11 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
RegExpAtom* old_atom =
alternatives->at(j + first_with_prefix)->AsAtom();
for (int k = 1; k < prefix_length; k++) {
- if (alt_atom->data().at(k) != old_atom->data().at(k)) {
+#ifdef V8_INTL_SUPPORT
+ if (!CharAtEquals(ignore_case, k, alt_atom, old_atom)) {
+#else
+ if (!CharAtEquals(ignore_case, canonicalize, k, alt_atom, old_atom)) {
+#endif // V8_INTL_SUPPORT
prefix_length = k;
break;
}
@@ -778,6 +817,8 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
+ compiler->ToNodeMaybeCheckForStackOverflow();
+
ZoneList<RegExpTree*>* alternatives = this->alternatives();
if (alternatives->length() > 2) {
@@ -1089,6 +1130,8 @@ class AssertionSequenceRewriter final {
RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
+ compiler->ToNodeMaybeCheckForStackOverflow();
+
ZoneList<RegExpTree*>* children = nodes();
AssertionSequenceRewriter::MaybeRewrite(children, compiler->zone());
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index c3ecff9d43..df15764dff 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -3950,5 +3950,11 @@ RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
return node;
}
+void RegExpCompiler::ToNodeCheckForStackOverflow() {
+ if (StackLimitCheck{isolate()}.HasOverflowed()) {
+ FatalProcessOutOfMemory(isolate(), "RegExpCompiler");
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 832a966217..421fc9457c 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -550,6 +550,18 @@ class RegExpCompiler {
current_expansion_factor_ = value;
}
+ // The recursive nature of ToNode node generation means we may run into stack
+ // overflow issues. We introduce periodic checks to detect these, and the
+ // tick counter helps limit overhead of these checks.
+ // TODO(jgruber): This is super hacky and should be replaced by an abort
+ // mechanism or iterative node generation.
+ void ToNodeMaybeCheckForStackOverflow() {
+ if ((to_node_overflow_check_ticks_++ % 16 == 0)) {
+ ToNodeCheckForStackOverflow();
+ }
+ }
+ void ToNodeCheckForStackOverflow();
+
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
@@ -567,6 +579,7 @@ class RegExpCompiler {
bool one_byte_;
bool reg_exp_too_big_;
bool limiting_recursion_;
+ int to_node_overflow_check_ticks_ = 0;
bool optimize_;
bool read_backward_;
int current_expansion_factor_;
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index e1549f95be..bf7769b86e 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -1088,6 +1088,10 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
base::uc16 previous_char = '\n';
String::FlatContent subject_content = subject_string.GetFlatContent(no_gc);
+ // Because interrupts can result in GC and string content relocation, the
+ // checksum verification in FlatContent may fail even though this code is
+ // safe. See (2) above.
+ subject_content.UnsafeDisableChecksumVerification();
if (subject_content.IsOneByte()) {
base::Vector<const uint8_t> subject_vector =
subject_content.ToOneByteVector();
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 675df8de58..5d71527eba 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -428,11 +428,6 @@ void RegExpParserImpl<CharT>::Advance() {
FATAL("Aborting on stack overflow");
}
ReportError(RegExpError::kStackOverflow);
- } else if (zone()->excess_allocation()) {
- if (FLAG_correctness_fuzzer_suppressions) {
- FATAL("Aborting on excess zone allocation");
- }
- ReportError(RegExpError::kTooLarge);
} else {
current_ = ReadNext<true>();
}
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index dabe5ee4a2..b26007191d 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -49,7 +49,8 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
Handle<Object> value_as_object =
isolate->factory()->NewNumberFromInt64(value);
if (HasInitialRegExpMap(isolate, *recv)) {
- JSRegExp::cast(*recv).set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
+ JSRegExp::cast(*recv).set_last_index(*value_as_object,
+ UPDATE_WRITE_BARRIER);
return recv;
} else {
return Object::SetProperty(
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index c1b8fc4fd9..df50034b16 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -569,7 +569,6 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<FixedArray> data =
Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
if (compile_data.compilation_target == RegExpCompilationTarget::kNative) {
- // TODO(ishell): avoid roundtrips between cdc and code.
Code code = Code::cast(*compile_data.code);
data->set(JSRegExp::code_index(is_one_byte), ToCodeT(code));
@@ -583,9 +582,9 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
// Store code generated by compiler in bytecode and trampoline to
// interpreter in code.
data->set(JSRegExp::bytecode_index(is_one_byte), *compile_data.code);
- Handle<Code> trampoline =
+ Handle<CodeT> trampoline =
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
- data->set(JSRegExp::code_index(is_one_byte), ToCodeT(*trampoline));
+ data->set(JSRegExp::code_index(is_one_byte), *trampoline);
}
Handle<FixedArray> capture_name_map =
RegExp::CreateCaptureNameMap(isolate, compile_data.named_captures);
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
index 74fb625e61..8f6b5e278d 100644
--- a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
@@ -221,16 +221,16 @@ void RegExpMacroAssemblerRISCV::CheckGreedyLoop(Label* on_equal) {
// Push (pop) caller-saved registers used by irregexp.
void RegExpMacroAssemblerRISCV::PushCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
+ RegList caller_saved_regexp = {current_input_offset(), current_character(),
+ end_of_input_address(),
+ backtrack_stackpointer()};
__ MultiPush(caller_saved_regexp);
}
void RegExpMacroAssemblerRISCV::PopCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
+ RegList caller_saved_regexp = {current_input_offset(), current_character(),
+ end_of_input_address(),
+ backtrack_stackpointer()};
__ MultiPop(caller_saved_regexp);
}
@@ -696,10 +696,9 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Order here should correspond to order of offset constants in header file.
// TODO(plind): we save fp..s11, but ONLY use s3 here - use the regs
// or dont save.
- RegList registers_to_retain =
- fp.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit() | s8.bit() /*| s9.bit() | s10.bit() | s11.bit()*/;
- DCHECK(NumRegs(registers_to_retain) == kNumCalleeRegsToRetain);
+ RegList registers_to_retain = {fp, s1, s2, s3, s4,
+ s5, s6, s7, s8 /*, s9, s10, s11*/};
+ DCHECK(registers_to_retain.Count() == kNumCalleeRegsToRetain);
// The remaining arguments are passed in registers, e.g.by calling the code
// entry as cast to a function with the signature:
@@ -713,17 +712,16 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// int call_origin, // a6
// Isolate* isolate, // a7
// Address regexp); // on the stack
- RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit() |
- a4.bit() | a5.bit() | a6.bit() | a7.bit();
+ RegList argument_registers = {a0, a1, a2, a3, a4, a5, a6, a7};
// According to MultiPush implementation, registers will be pushed in the
// order of ra, fp, then s8, ..., s1, and finally a7,...a0
- __ MultiPush(ra.bit() | registers_to_retain | argument_registers);
+ __ MultiPush(RegList{ra} | registers_to_retain | argument_registers);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Add64(frame_pointer(), sp,
- Operand(NumRegs(argument_registers) * kSystemPointerSize));
+ Operand(argument_registers.Count() * kSystemPointerSize));
STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
__ mv(a0, zero_reg);
@@ -844,8 +842,8 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
DCHECK_EQ(0, num_saved_registers_ % 2);
// Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
+ // unroll the loop once to add an operation between a load of a
+ // register and the following use of that register.
for (int i = 0; i < num_saved_registers_; i += 2) {
__ Ld(a2, register_location(i));
__ Ld(a3, register_location(i + 1));
@@ -928,7 +926,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ mv(sp, frame_pointer());
// Restore registers fp..s11 and return (restoring ra to pc).
- __ MultiPop(registers_to_retain | ra.bit());
+ __ MultiPop(registers_to_retain | ra);
__ Ret();
@@ -1144,9 +1142,9 @@ void RegExpMacroAssemblerRISCV::ClearRegisters(int reg_from, int reg_to) {
__ Sd(a0, register_location(reg));
}
}
-
+#ifdef RISCV_HAS_NO_UNALIGNED
bool RegExpMacroAssemblerRISCV::CanReadUnaligned() const { return false; }
-
+#endif
// Private methods:
void RegExpMacroAssemblerRISCV::CallCheckStackGuardState(Register scratch) {
@@ -1328,20 +1326,40 @@ void RegExpMacroAssemblerRISCV::CheckStackLimit() {
void RegExpMacroAssemblerRISCV::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
+
+ // If unaligned load/stores are not supported then this function must only
+ // be used to load a single character at a time.
+ if (!CanReadUnaligned()) {
+ DCHECK_EQ(1, characters);
+ }
if (cp_offset != 0) {
- // s3 is not being used to store the capture start index at this point.
- __ Add64(s3, current_input_offset(), Operand(cp_offset * char_size()));
- offset = s3;
+ // t3 is not being used to store the capture start index at this point.
+ __ Add64(t3, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t3;
}
- // We assume that we cannot do unaligned loads on RISC-V, so this function
- // must only be used to load a single character at a time.
- DCHECK_EQ(1, characters);
- __ Add64(t1, end_of_input_address(), Operand(offset));
+
if (mode_ == LATIN1) {
- __ Lbu(current_character(), MemOperand(t1, 0));
+ if (characters == 4) {
+ __ Add64(kScratchReg, end_of_input_address(), offset);
+ __ Lwu(current_character(), MemOperand(kScratchReg));
+ } else if (characters == 2) {
+ __ Add64(kScratchReg, end_of_input_address(), offset);
+ __ Lhu(current_character(), MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Add64(kScratchReg, end_of_input_address(), offset);
+ __ Lbu(current_character(), MemOperand(kScratchReg));
+ }
} else {
DCHECK(mode_ == UC16);
- __ Lhu(current_character(), MemOperand(t1, 0));
+ if (characters == 2) {
+ __ Add64(kScratchReg, end_of_input_address(), offset);
+ __ Lwu(current_character(), MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Add64(kScratchReg, end_of_input_address(), offset);
+ __ Lhu(current_character(), MemOperand(kScratchReg));
+ }
}
}
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
index 121569849a..7613b47b3e 100644
--- a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
@@ -83,8 +83,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
void ClearRegisters(int reg_from, int reg_to) override;
void WriteStackPointerToRegister(int reg) override;
+#ifdef RISCV_HAS_NO_UNALIGNED
bool CanReadUnaligned() const override;
-
+#endif
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 3b80858f0e..bf22b69222 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -708,13 +708,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Ensure register assigments are consistent with callee save mask
- DCHECK(r6.bit() & kRegExpCalleeSaved);
- DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
- DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
- DCHECK(current_character().bit() & kRegExpCalleeSaved);
- DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
- DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
- DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+ DCHECK(kRegExpCalleeSaved.has(r6));
+ DCHECK(kRegExpCalleeSaved.has(code_pointer()));
+ DCHECK(kRegExpCalleeSaved.has(current_input_offset()));
+ DCHECK(kRegExpCalleeSaved.has(current_character()));
+ DCHECK(kRegExpCalleeSaved.has(backtrack_stackpointer()));
+ DCHECK(kRegExpCalleeSaved.has(end_of_input_address()));
+ DCHECK(kRegExpCalleeSaved.has(frame_pointer()));
// zLinux ABI
// Incoming parameters:
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index bb9ac110f7..645b01faa5 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -215,8 +215,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
};
// Set of non-volatile registers saved/restored by generated regexp code.
-const RegList kRegExpCalleeSaved =
- 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 13;
+const RegList kRegExpCalleeSaved = {r6, r7, r8, r9, r10, fp, r13};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
index 71342bef80..0b6f77e8fa 100644
--- a/deps/v8/src/roots/OWNERS
+++ b/deps/v8/src/roots/OWNERS
@@ -1,5 +1,5 @@
bmeurer@chromium.org
-delphick@chromium.org
+cbruni@chromium.org
hpayer@chromium.org
ishell@chromium.org
jgruber@chromium.org
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 623ee23298..75eb6b7700 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -96,6 +96,8 @@ class Symbol;
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, ordered_hash_map_map, OrderedHashMapMap) \
V(Map, ordered_hash_set_map, OrderedHashSetMap) \
+ V(Map, name_to_index_hash_table_map, NameToIndexHashTableMap) \
+ V(Map, registered_symbol_table_map, RegisteredSymbolTableMap) \
V(Map, ordered_name_dictionary_map, OrderedNameDictionaryMap) \
V(Map, preparse_data_map, PreparseDataMap) \
V(Map, property_array_map, PropertyArrayMap) \
@@ -117,6 +119,7 @@ class Symbol;
WasmExportedFunctionDataMap) \
IF_WASM(V, Map, wasm_internal_function_map, WasmInternalFunctionMap) \
IF_WASM(V, Map, wasm_js_function_data_map, WasmJSFunctionDataMap) \
+ IF_WASM(V, Map, wasm_onfulfilled_data_map, WasmOnFulfilledDataMap) \
IF_WASM(V, Map, wasm_type_info_map, WasmTypeInfoMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
@@ -185,6 +188,7 @@ class Symbol;
V(SwissNameDictionary, empty_swiss_property_dictionary, \
EmptySwissPropertyDictionary) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
+ V(ArrayList, empty_array_list, EmptyArrayList) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
/* Special numbers */ \
@@ -291,34 +295,35 @@ class Symbol;
V(SharedFunctionInfo, proxy_revoke_shared_fun, ProxyRevokeSharedFun)
// These root references can be updated by the mutator.
-#define STRONG_MUTABLE_MOVABLE_ROOT_LIST(V) \
- /* Caches */ \
- V(FixedArray, number_string_cache, NumberStringCache) \
- /* Lists and dictionaries */ \
- V(NameDictionary, public_symbol_table, PublicSymbolTable) \
- V(NameDictionary, api_symbol_table, ApiSymbolTable) \
- V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
- V(WeakArrayList, script_list, ScriptList) \
- V(FixedArray, materialized_objects, MaterializedObjects) \
- V(WeakArrayList, detached_contexts, DetachedContexts) \
- V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
- /* Feedback vectors that we need for code coverage or type profile */ \
- V(Object, feedback_vectors_for_profiling_tools, \
- FeedbackVectorsForProfilingTools) \
- V(FixedArray, serialized_objects, SerializedObjects) \
- V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
- V(TemplateList, message_listeners, MessageListeners) \
- /* Support for async stack traces */ \
- V(HeapObject, current_microtask, CurrentMicrotask) \
- /* KeepDuringJob set for JS WeakRefs */ \
- V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
- V(HeapObject, interpreter_entry_trampoline_for_profiling, \
- InterpreterEntryTrampolineForProfiling) \
- V(Object, pending_optimize_for_test_bytecode, \
- PendingOptimizeForTestBytecode) \
- V(ArrayList, basic_block_profiling_data, BasicBlockProfilingData) \
- V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) \
- IF_WASM(V, HeapObject, active_continuation, ActiveContinuation)
+#define STRONG_MUTABLE_MOVABLE_ROOT_LIST(V) \
+ /* Caches */ \
+ V(FixedArray, number_string_cache, NumberStringCache) \
+ /* Lists and dictionaries */ \
+ V(RegisteredSymbolTable, public_symbol_table, PublicSymbolTable) \
+ V(RegisteredSymbolTable, api_symbol_table, ApiSymbolTable) \
+ V(RegisteredSymbolTable, api_private_symbol_table, ApiPrivateSymbolTable) \
+ V(WeakArrayList, script_list, ScriptList) \
+ V(FixedArray, materialized_objects, MaterializedObjects) \
+ V(WeakArrayList, detached_contexts, DetachedContexts) \
+ V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
+ /* Feedback vectors that we need for code coverage or type profile */ \
+ V(Object, feedback_vectors_for_profiling_tools, \
+ FeedbackVectorsForProfilingTools) \
+ V(FixedArray, serialized_objects, SerializedObjects) \
+ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
+ V(TemplateList, message_listeners, MessageListeners) \
+ /* Support for async stack traces */ \
+ V(HeapObject, current_microtask, CurrentMicrotask) \
+ /* KeepDuringJob set for JS WeakRefs */ \
+ V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
+ V(HeapObject, interpreter_entry_trampoline_for_profiling, \
+ InterpreterEntryTrampolineForProfiling) \
+ V(Object, pending_optimize_for_test_bytecode, \
+ PendingOptimizeForTestBytecode) \
+ V(ArrayList, basic_block_profiling_data, BasicBlockProfilingData) \
+ V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) \
+ IF_WASM(V, HeapObject, active_continuation, ActiveContinuation) \
+ IF_WASM(V, HeapObject, active_suspender, ActiveSuspender)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index fbf0dfe508..fc03476f2a 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -25,8 +25,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Map, to_map, 1);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Map> to_map = args.at<Map>(1);
ElementsKind to_kind = to_map->elements_kind();
if (ElementsAccessor::ForKind(to_kind)
->TransitionElementsKind(object, to_map)
@@ -43,9 +43,8 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
RUNTIME_FUNCTION(Runtime_TransitionElementsKindWithKind) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, elements_kind_smi, 1);
- ElementsKind to_kind = static_cast<ElementsKind>(elements_kind_smi->value());
+ Handle<JSObject> object = args.at<JSObject>(0);
+ ElementsKind to_kind = static_cast<ElementsKind>(args.smi_value_at(1));
JSObject::TransitionElementsKind(object, to_kind);
return *object;
}
@@ -56,9 +55,9 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
int const argc = args.length() - 3;
// argv points to the arguments constructed by the JavaScript call.
JavaScriptArguments argv(argc, args.address_of_arg_at(0));
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, argc);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
+ Handle<JSFunction> constructor = args.at<JSFunction>(argc);
+ Handle<JSReceiver> new_target = args.at<JSReceiver>(argc + 1);
+ Handle<HeapObject> type_info = args.at<HeapObject>(argc + 2);
// TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
Handle<AllocationSite> site = type_info->IsAllocationSite()
? Handle<AllocationSite>::cast(type_info)
@@ -157,7 +156,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
+ Handle<JSObject> array = args.at<JSObject>(0);
CHECK(!array->HasTypedArrayElements());
CHECK(!array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
@@ -169,8 +168,8 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Object> key = args.at(1);
uint32_t index;
if (key->IsSmi()) {
int value = Smi::ToInt(*key);
@@ -204,7 +203,7 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
Maybe<bool> result = Object::IsArray(object);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
@@ -213,14 +212,14 @@ RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(obj.IsJSArray());
}
RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
+ Handle<Object> original_array = args.at(0);
RETURN_RESULT_OR_FAILURE(
isolate, Object::ArraySpeciesConstructor(isolate, original_array));
}
@@ -229,8 +228,8 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
HandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
+ Handle<Object> search_element = args.at(1);
+ Handle<Object> from_index = args.at(2);
// Let O be ? ToObject(this value).
Handle<JSReceiver> object;
@@ -330,8 +329,8 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
HandleScope hs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
+ Handle<Object> search_element = args.at(1);
+ Handle<Object> from_index = args.at(2);
// Let O be ? ToObject(this value).
Handle<JSReceiver> object;
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 1fb80f780d..31f1c8f743 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -9,6 +9,7 @@
#include "src/logging/counters.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-struct-inl.h"
#include "src/runtime/runtime-utils.h"
// Implement Atomic accesses to ArrayBuffers and SharedArrayBuffers.
@@ -396,9 +397,9 @@ Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate,
const char* method_name) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
+ Handle<Object> value_obj = args.at(2);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
@@ -444,8 +445,8 @@ Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate,
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
@@ -464,9 +465,9 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
+ Handle<Object> value_obj = args.at(2);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
@@ -496,10 +497,10 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
+ Handle<Object> old_value_obj = args.at(2);
+ Handle<Object> new_value_obj = args.at(3);
CHECK_LT(index, sta->length());
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
@@ -607,5 +608,43 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
// || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
+
+RUNTIME_FUNCTION(Runtime_AtomicsLoadSharedStructField) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
+ Handle<Name> field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
+ Object::ToName(isolate, args.at(1)));
+ // Shared structs are prototypeless.
+ LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
+ if (it.IsFound()) return *it.GetDataValue(kSeqCstAccess);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructField) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
+ Handle<Name> field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
+ Object::ToName(isolate, args.at(1)));
+ Handle<Object> shared_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
+ // Shared structs are prototypeless.
+ LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
+ if (it.IsFound()) {
+ it.WriteDataValue(shared_value, kSeqCstAccess);
+ return *shared_value;
+ }
+ // Shared structs are non-extensible. Instead of duplicating logic, call
+ // Object::AddDataProperty to handle the error case.
+ CHECK(Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
+ StoreOrigin::kMaybeKeyed)
+ .IsNothing());
+ return ReadOnlyRoots(isolate).exception();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index a9b21c19bd..ad5c9768a0 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -14,10 +14,10 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_BigIntCompareToBigInt) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 2);
- bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ int mode = args.smi_value_at(0);
+ Handle<BigInt> lhs = args.at<BigInt>(1);
+ Handle<BigInt> rhs = args.at<BigInt>(2);
+ bool result = ComparisonResultToBool(static_cast<Operation>(mode),
BigInt::CompareToBigInt(lhs, rhs));
return *isolate->factory()->ToBoolean(result);
}
@@ -25,10 +25,10 @@ RUNTIME_FUNCTION(Runtime_BigIntCompareToBigInt) {
RUNTIME_FUNCTION(Runtime_BigIntCompareToNumber) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 2);
- bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ int mode = args.smi_value_at(0);
+ Handle<BigInt> lhs = args.at<BigInt>(1);
+ Handle<Object> rhs = args.at(2);
+ bool result = ComparisonResultToBool(static_cast<Operation>(mode),
BigInt::CompareToNumber(lhs, rhs));
return *isolate->factory()->ToBoolean(result);
}
@@ -36,13 +36,13 @@ RUNTIME_FUNCTION(Runtime_BigIntCompareToNumber) {
RUNTIME_FUNCTION(Runtime_BigIntCompareToString) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, rhs, 2);
+ int mode = args.smi_value_at(0);
+ Handle<BigInt> lhs = args.at<BigInt>(1);
+ Handle<String> rhs = args.at<String>(2);
Maybe<ComparisonResult> maybe_result =
BigInt::CompareToString(isolate, lhs, rhs);
MAYBE_RETURN(maybe_result, ReadOnlyRoots(isolate).exception());
- bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ bool result = ComparisonResultToBool(static_cast<Operation>(mode),
maybe_result.FromJust());
return *isolate->factory()->ToBoolean(result);
}
@@ -50,8 +50,8 @@ RUNTIME_FUNCTION(Runtime_BigIntCompareToString) {
RUNTIME_FUNCTION(Runtime_BigIntEqualToBigInt) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 1);
+ Handle<BigInt> lhs = args.at<BigInt>(0);
+ Handle<BigInt> rhs = args.at<BigInt>(1);
bool result = BigInt::EqualToBigInt(*lhs, *rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -59,8 +59,8 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToBigInt) {
RUNTIME_FUNCTION(Runtime_BigIntEqualToNumber) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<BigInt> lhs = args.at<BigInt>(0);
+ Handle<Object> rhs = args.at(1);
bool result = BigInt::EqualToNumber(lhs, rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -68,8 +68,8 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToNumber) {
RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
+ Handle<BigInt> lhs = args.at<BigInt>(0);
+ Handle<String> rhs = args.at<String>(1);
Maybe<bool> maybe_result = BigInt::EqualToString(isolate, lhs, rhs);
MAYBE_RETURN(maybe_result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(maybe_result.FromJust());
@@ -78,30 +78,30 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
RUNTIME_FUNCTION(Runtime_BigIntToBoolean) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, bigint, 0);
+ Handle<BigInt> bigint = args.at<BigInt>(0);
return *isolate->factory()->ToBoolean(bigint->ToBoolean());
}
RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
+ Handle<BigInt> x = args.at<BigInt>(0);
return *BigInt::ToNumber(isolate, x);
}
RUNTIME_FUNCTION(Runtime_ToBigInt) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ Handle<Object> x = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
}
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, left_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, right_obj, 1);
- CONVERT_SMI_ARG_CHECKED(opcode, 2);
+ Handle<Object> left_obj = args.at(0);
+ Handle<Object> right_obj = args.at(1);
+ int opcode = args.smi_value_at(2);
Operation op = static_cast<Operation>(opcode);
if (!left_obj->IsBigInt() || !right_obj->IsBigInt()) {
@@ -157,8 +157,8 @@ RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
RUNTIME_FUNCTION(Runtime_BigIntUnaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
- CONVERT_SMI_ARG_CHECKED(opcode, 1);
+ Handle<BigInt> x = args.at<BigInt>(0);
+ int opcode = args.smi_value_at(1);
Operation op = static_cast<Operation>(opcode);
MaybeHandle<BigInt> result;
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 11992b5a96..02597852c8 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -39,7 +39,7 @@ RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ Handle<JSFunction> constructor = args.at<JSFunction>(0);
Handle<String> name(constructor->shared().Name(), isolate);
Handle<Context> context = handle(constructor->native_context(), isolate);
@@ -115,8 +115,8 @@ Object ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
RUNTIME_FUNCTION(Runtime_ThrowNotSuperConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ Handle<Object> constructor = args.at(0);
+ Handle<JSFunction> function = args.at<JSFunction>(1);
return ThrowNotSuperConstructor(isolate, constructor, function);
}
@@ -425,7 +425,7 @@ bool AddDescriptorsByTemplate(
int key_index = ComputedEntryFlags::KeyIndexBits::decode(flags);
Smi value = Smi::FromInt(key_index + 1); // Value follows name.
- Handle<Object> key = args.at<Object>(key_index);
+ Handle<Object> key = args.at(key_index);
DCHECK(key->IsName());
uint32_t element;
Handle<Name> name = Handle<Name>::cast(key);
@@ -664,9 +664,9 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_DefineClass) {
HandleScope scope(isolate);
DCHECK_LE(ClassBoilerplate::kFirstDynamicArgumentIndex, args.length());
- CONVERT_ARG_HANDLE_CHECKED(ClassBoilerplate, class_boilerplate, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 2);
+ Handle<ClassBoilerplate> class_boilerplate = args.at<ClassBoilerplate>(0);
+ Handle<JSFunction> constructor = args.at<JSFunction>(1);
+ Handle<Object> super_class = args.at(2);
DCHECK_EQ(class_boilerplate->arguments_count(), args.length());
RETURN_RESULT_OR_FAILURE(
@@ -718,9 +718,9 @@ MaybeHandle<Object> LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
+ Handle<Object> receiver = args.at(0);
+ Handle<JSObject> home_object = args.at<JSObject>(1);
+ Handle<Name> name = args.at<Name>(2);
PropertyKey key(isolate, name);
@@ -732,11 +732,11 @@ RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ Handle<Object> receiver = args.at(0);
+ Handle<JSObject> home_object = args.at<JSObject>(1);
// TODO(ishell): To improve performance, consider performing the to-string
// conversion of {key} before calling into the runtime.
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
+ Handle<Object> key = args.at(2);
bool success;
PropertyKey lookup_key(isolate, key, &success);
@@ -767,10 +767,10 @@ MaybeHandle<Object> StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
RUNTIME_FUNCTION(Runtime_StoreToSuper) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
+ Handle<Object> receiver = args.at(0);
+ Handle<JSObject> home_object = args.at<JSObject>(1);
+ Handle<Name> name = args.at<Name>(2);
+ Handle<Object> value = args.at(3);
PropertyKey key(isolate, name);
@@ -782,12 +782,12 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper) {
RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ Handle<Object> receiver = args.at(0);
+ Handle<JSObject> home_object = args.at<JSObject>(1);
// TODO(ishell): To improve performance, consider performing the to-string
// conversion of {key} before calling into the runtime.
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
+ Handle<Object> key = args.at(2);
+ Handle<Object> value = args.at(3);
bool success;
PropertyKey lookup_key(isolate, key, &success);
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 7a67c78db1..38279e2d0e 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -23,7 +23,7 @@ RUNTIME_FUNCTION(Runtime_TheHole) {
RUNTIME_FUNCTION(Runtime_SetGrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<JSSet> holder = args.at<JSSet>(0);
Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()), isolate);
MaybeHandle<OrderedHashSet> table_candidate =
OrderedHashSet::EnsureGrowable(isolate, table);
@@ -40,7 +40,7 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
RUNTIME_FUNCTION(Runtime_SetShrink) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<JSSet> holder = args.at<JSSet>(0);
Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()), isolate);
table = OrderedHashSet::Shrink(isolate, table);
holder->set_table(*table);
@@ -50,7 +50,7 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
RUNTIME_FUNCTION(Runtime_MapShrink) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<JSMap> holder = args.at<JSMap>(0);
Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()), isolate);
table = OrderedHashMap::Shrink(isolate, table);
holder->set_table(*table);
@@ -60,7 +60,7 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
RUNTIME_FUNCTION(Runtime_MapGrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<JSMap> holder = args.at<JSMap>(0);
Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()), isolate);
MaybeHandle<OrderedHashMap> table_candidate =
OrderedHashMap::EnsureGrowable(isolate, table);
@@ -77,9 +77,9 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_SMI_ARG_CHECKED(hash, 2)
+ Handle<JSWeakCollection> weak_collection = args.at<JSWeakCollection>(0);
+ Handle<Object> key = args.at(1);
+ int hash = args.smi_value_at(2);
#ifdef DEBUG
DCHECK(key->IsJSReceiver());
@@ -99,10 +99,10 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_SMI_ARG_CHECKED(hash, 3)
+ Handle<JSWeakCollection> weak_collection = args.at<JSWeakCollection>(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
+ int hash = args.smi_value_at(3);
#ifdef DEBUG
DCHECK(key->IsJSReceiver());
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index a3f7872bca..9c7686a14a 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -30,23 +30,20 @@ namespace internal {
namespace {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
- ConcurrencyMode mode) {
+ CodeKind target_kind, ConcurrencyMode mode) {
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
- return isolate->StackOverflow();
- }
+ // Concurrent optimization runs on another thread, thus no additional gap.
+ const int gap = mode == ConcurrencyMode::kConcurrent
+ ? 0
+ : kStackSpaceRequiredForCompilation * KB;
+ if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
- // Compile for the next tier.
- if (!Compiler::CompileOptimized(isolate, function, mode,
- function->NextTier())) {
- return ReadOnlyRoots(isolate).exception();
- }
+ Compiler::CompileOptimized(isolate, function, mode, target_kind);
// As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin.
DCHECK(function->is_compiled());
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- return ToCodeT(function->code());
+ return function->code();
}
} // namespace
@@ -54,7 +51,7 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
RUNTIME_FUNCTION(Runtime_CompileLazy) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
@@ -76,79 +73,74 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- return ToCodeT(function->code());
+ return function->code();
}
RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
DCHECK(sfi->HasBaselineCode());
IsCompiledScope is_compiled_scope(*sfi, isolate);
DCHECK(!function->HasAvailableOptimizedCode());
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ JSFunction::CreateAndAttachFeedbackVector(isolate, function,
+ &is_compiled_scope);
CodeT baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return baseline_code;
}
-RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
+RUNTIME_FUNCTION(Runtime_CompileMaglev_Concurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return CompileOptimized(isolate, function, ConcurrencyMode::kConcurrent);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ return CompileOptimized(isolate, function, CodeKind::MAGLEV,
+ ConcurrencyMode::kConcurrent);
}
-RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
+RUNTIME_FUNCTION(Runtime_CompileMaglev_NotConcurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ return CompileOptimized(isolate, function, CodeKind::MAGLEV,
+ ConcurrencyMode::kNotConcurrent);
}
-RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
+RUNTIME_FUNCTION(Runtime_CompileTurbofan_Concurrent) {
HandleScope scope(isolate);
- StackLimitCheck check(isolate);
DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
+ ConcurrencyMode::kConcurrent);
+}
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- DCHECK_EQ(function->feedback_vector().optimization_marker(),
- OptimizationMarker::kLogFirstExecution);
- DCHECK(FLAG_log_function_events);
- Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- Handle<String> name = SharedFunctionInfo::DebugName(sfi);
- LOG(isolate,
- FunctionEvent("first-execution", Script::cast(sfi->script()).id(), 0,
- sfi->StartPosition(), sfi->EndPosition(), *name));
- function->feedback_vector().ClearOptimizationMarker();
- // Return the code to continue execution, we don't care at this point whether
- // this is for lazy compilation or has been eagerly complied.
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- return ToCodeT(function->code());
+RUNTIME_FUNCTION(Runtime_CompileTurbofan_NotConcurrent) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
+ ConcurrencyMode::kNotConcurrent);
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
SealHandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
DCHECK(function->shared().is_compiled());
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->raw_feedback_cell(), function->shared(),
- "Runtime_HealOptimizedCodeSlot");
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- return ToCodeT(function->code());
+ function->shared(), "Runtime_HealOptimizedCodeSlot");
+ return function->code();
}
RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 4);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
Handle<JSReceiver> stdlib;
if (args[1].IsJSReceiver()) {
@@ -197,7 +189,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
// code object from deoptimizer.
Handle<Code> optimized_code = deoptimizer->compiled_code();
DeoptimizeKind type = deoptimizer->deopt_kind();
- bool should_reuse_code = deoptimizer->should_reuse_code();
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
@@ -212,11 +203,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
- if (should_reuse_code) {
- optimized_code->increment_deoptimization_count();
- return ReadOnlyRoots(isolate).undefined_value();
- }
-
// Invalidate the underlying optimized code on eager and soft deopts.
if (type == DeoptimizeKind::kEager || type == DeoptimizeKind::kSoft) {
Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
@@ -230,7 +216,7 @@ RUNTIME_FUNCTION(Runtime_ObserveNode) {
// code compiled by TurboFan.
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ Handle<Object> obj = args.at(0);
return *obj;
}
@@ -238,7 +224,7 @@ RUNTIME_FUNCTION(Runtime_VerifyType) {
// %VerifyType has no effect in the interpreter.
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ Handle<Object> obj = args.at(0);
return *obj;
}
@@ -314,7 +300,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
BytecodeOffset osr_offset = DetermineEntryAndDisarmOSRForUnoptimized(frame);
DCHECK(!osr_offset.IsNone());
- MaybeHandle<Code> maybe_result;
+ MaybeHandle<CodeT> maybe_result;
Handle<JSFunction> function(frame->function(), isolate);
if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
@@ -328,7 +314,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
}
// Check whether we ended up with usable optimized code.
- Handle<Code> result;
+ Handle<CodeT> result;
if (maybe_result.ToHandle(&result) &&
CodeKindIsOptimizedJSFunction(result->kind())) {
DeoptimizationData data =
@@ -374,7 +360,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
function->PrintName(scope.file());
PrintF(scope.file(), " for non-concurrent optimization]\n");
}
- function->SetOptimizationMarker(OptimizationMarker::kCompileOptimized);
+ function->SetOptimizationMarker(
+ OptimizationMarker::kCompileTurbofan_NotConcurrent);
}
return *result;
}
@@ -447,14 +434,13 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
return *callee;
}
- DCHECK(args[3].IsSmi());
- DCHECK(is_valid_language_mode(args.smi_at(3)));
- LanguageMode language_mode = static_cast<LanguageMode>(args.smi_at(3));
- DCHECK(args[4].IsSmi());
+ DCHECK(is_valid_language_mode(args.smi_value_at(3)));
+ LanguageMode language_mode = static_cast<LanguageMode>(args.smi_value_at(3));
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
return CompileGlobalEval(isolate, args.at<Object>(1), outer_info,
- language_mode, args.smi_at(4), args.smi_at(5));
+ language_mode, args.smi_value_at(4),
+ args.smi_value_at(5));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 38f3ef7d90..db4cf1f49a 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -47,7 +47,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ Handle<Object> value = args.at(0);
HandleScope scope(isolate);
// Return value can be changed by debugger. Last set value will be used as
@@ -112,7 +112,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
USE(function);
DCHECK(function->shared().HasDebugInfo());
@@ -354,7 +354,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<Symbol> memory_symbol =
isolate->factory()->array_buffer_wasm_memory_symbol();
Handle<Object> memory_object =
- JSObject::GetDataProperty(js_array_buffer, memory_symbol);
+ JSObject::GetDataProperty(isolate, js_array_buffer, memory_symbol);
if (!memory_object->IsUndefined(isolate)) {
result = ArrayList::Add(isolate, result,
isolate->factory()->NewStringFromAsciiChecked(
@@ -385,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
if (!args[0].IsJSGeneratorObject()) return Smi::zero();
// Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+ Handle<JSGeneratorObject> gen = args.at<JSGeneratorObject>(0);
// Only inspect suspended generator scopes.
if (!gen->is_suspended()) {
@@ -410,8 +410,8 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
}
// Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ Handle<JSGeneratorObject> gen = args.at<JSGeneratorObject>(0);
+ int index = NumberToInt32(args[1]);
// Only inspect suspended generator scopes.
if (!gen->is_suspended()) {
@@ -453,10 +453,10 @@ static bool SetScopeVariableValue(ScopeIterator* it, int index,
RUNTIME_FUNCTION(Runtime_SetGeneratorScopeVariableValue) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_value, 3);
+ Handle<JSGeneratorObject> gen = args.at<JSGeneratorObject>(0);
+ int index = NumberToInt32(args[1]);
+ Handle<String> variable_name = args.at<String>(2);
+ Handle<Object> new_value = args.at(3);
ScopeIterator it(isolate, gen);
bool res = SetScopeVariableValue(&it, index, variable_name, new_value);
return isolate->heap()->ToBoolean(res);
@@ -467,7 +467,7 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ Handle<JSFunction> fun = args.at<JSFunction>(0);
Handle<SharedFunctionInfo> shared(fun->shared(), isolate);
// Find the number of break points
@@ -487,7 +487,7 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
+ uint32_t type_arg = NumberToUint32(args[0]);
ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
bool result = isolate->debug()->IsBreakOnException(type);
@@ -529,7 +529,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, f, 0);
+ Object f = args[0];
if (f.IsJSFunction()) {
return JSFunction::cast(f).shared().inferred_name();
}
@@ -670,10 +670,10 @@ bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
- CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
- CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
+ int32_t scriptid = NumberToInt32(args[0]);
+ Handle<Object> opt_line = args.at(1);
+ Handle<Object> opt_column = args.at(2);
+ int32_t offset = NumberToInt32(args[3]);
Handle<Script> script;
CHECK(GetScriptById(isolate, scriptid, &script));
@@ -686,8 +686,8 @@ RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ Handle<JSFunction> fun = args.at<JSFunction>(0);
+ Handle<Object> receiver = args.at(1);
if (isolate->debug()->needs_check_on_function_call()) {
// Ensure that the callee will perform debug check on function call too.
Handle<SharedFunctionInfo> shared(fun->shared(), isolate);
@@ -716,7 +716,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ Handle<JSObject> promise = args.at<JSObject>(0);
isolate->PushPromise(promise);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -800,7 +800,7 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
SealHandleScope shs(isolate);
- CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
+ bool enable = Oddball::cast(args[0]).ToBool(isolate);
Coverage::SelectMode(isolate, enable ? debug::CoverageMode::kPreciseCount
: debug::CoverageMode::kBestEffort);
return ReadOnlyRoots(isolate).undefined_value();
@@ -808,7 +808,7 @@ RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
SealHandleScope shs(isolate);
- CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
+ bool enable = Oddball::cast(args[0]).ToBool(isolate);
Coverage::SelectMode(isolate, enable ? debug::CoverageMode::kBlockCount
: debug::CoverageMode::kBestEffort);
return ReadOnlyRoots(isolate).undefined_value();
@@ -818,42 +818,58 @@ RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
UNREACHABLE(); // Never called. See the IncBlockCounter builtin instead.
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionEntered) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->RunPromiseHook(PromiseHookType::kInit, promise,
- isolate->factory()->undefined_value());
- if (isolate->debug()->is_active()) isolate->PushPromise(promise);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(5, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->PopPromise();
- isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionResumed) {
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<JSPromise> outer_promise = args.at<JSPromise>(1);
+ Handle<JSFunction> reject_handler = args.at<JSFunction>(2);
+ Handle<JSGeneratorObject> generator = args.at<JSGeneratorObject>(3);
+ bool is_predicted_as_caught = Oddball::cast(args[4]).ToBool(isolate);
+
+ // Allocate the throwaway promise and fire the appropriate init
+ // hook for the throwaway promise (passing the {promise} as its
+ // parent).
+ Handle<JSPromise> throwaway = isolate->factory()->NewJSPromiseWithoutHook();
+ isolate->OnAsyncFunctionSuspended(throwaway, promise);
+
+ // The Promise will be thrown away and not handled, but it
+ // shouldn't trigger unhandled reject events as its work is done
+ throwaway->set_has_handler(true);
+
+ // Enable proper debug support for promises.
+ if (isolate->debug()->is_active()) {
+ Object::SetProperty(isolate, reject_handler,
+ isolate->factory()->promise_forwarding_handler_symbol(),
+ isolate->factory()->true_value(),
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
+ .Check();
+ promise->set_handled_hint(is_predicted_as_caught);
+
+ // Mark the dependency to {outer_promise} in case the {throwaway}
+ // Promise is found on the Promise stack
+ Object::SetProperty(isolate, throwaway,
+ isolate->factory()->promise_handled_by_symbol(),
+ outer_promise, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
+ .Check();
+
+ Object::SetProperty(
+ isolate, promise, isolate->factory()->promise_awaited_by_symbol(),
+ generator, StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
+ .Check();
+ }
+
+ return *throwaway;
+}
+
+RUNTIME_FUNCTION(Runtime_DebugPromiseThen) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->PushPromise(promise);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
- DCHECK_EQ(2, args.length());
- HandleScope scope(isolate);
- CONVERT_BOOLEAN_ARG_CHECKED(has_suspend, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
- isolate->PopPromise();
- if (has_suspend) {
- isolate->OnAsyncFunctionStateChanged(promise,
- debug::kAsyncFunctionFinished);
+ Handle<JSReceiver> promise = args.at<JSReceiver>(0);
+ if (promise->IsJSPromise()) {
+ isolate->OnPromiseThen(Handle<JSPromise>::cast(promise));
}
return *promise;
}
@@ -861,8 +877,8 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, script_function, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
+ Handle<JSFunction> script_function = args.at<JSFunction>(0);
+ Handle<String> new_source = args.at<String>(1);
Handle<Script> script(Script::cast(script_function->shared().script()),
isolate);
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index b3cb9d2fd3..103724d603 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -119,7 +119,7 @@ MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
RETURN_RESULT_OR_FAILURE(isolate, Enumerate(isolate, receiver));
}
@@ -127,8 +127,8 @@ RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ Handle<Object> key = args.at(1);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, HasEnumerableProperty(isolate, receiver, key));
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 98df87ceed..94a96d9d6f 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -17,7 +17,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_FunctionGetScriptSource) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ Handle<JSReceiver> function = args.at<JSReceiver>(0);
if (function->IsJSFunction()) {
Handle<Object> script(Handle<JSFunction>::cast(function)->shared().script(),
@@ -30,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSource) {
RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ Handle<JSReceiver> function = args.at<JSReceiver>(0);
if (function->IsJSFunction()) {
Handle<Object> script(Handle<JSFunction>::cast(function)->shared().script(),
@@ -45,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ Handle<JSReceiver> function = args.at<JSReceiver>(0);
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(
Handle<JSFunction>::cast(function)->shared(), isolate);
@@ -59,7 +59,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ auto fun = JSFunction::cast(args[0]);
int pos = fun.shared().StartPosition();
return Smi::FromInt(pos);
}
@@ -69,7 +69,7 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ auto f = JSFunction::cast(args[0]);
return isolate->heap()->ToBoolean(f.shared().IsApiFunction());
}
@@ -78,8 +78,8 @@ RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
int const argc = args.length() - 2;
- CONVERT_ARG_HANDLE_CHECKED(Object, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ Handle<Object> target = args.at(0);
+ Handle<Object> receiver = args.at(1);
base::ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
argv[i] = args.at(2 + i);
@@ -92,7 +92,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
+ Object object = args[0];
return isolate->heap()->ToBoolean(object.IsFunction());
}
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index cb7facf110..da4e7a0fb6 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -23,8 +23,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
CHECK(!sta->WasDetached());
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, sta->length());
@@ -44,8 +44,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumAsyncWaitersForTesting) {
RUNTIME_FUNCTION(Runtime_AtomicsNumUnresolvedAsyncPromisesForTesting) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
+ Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
+ size_t index = NumberToSize(args[1]);
CHECK(!sta->WasDetached());
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, sta->length());
@@ -61,7 +61,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumUnresolvedAsyncPromisesForTesting) {
RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(set, 0);
+ bool set = Oddball::cast(args[0]).ToBool(isolate);
isolate->set_allow_atomics_wait(set);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index f9e60c64b3..d57ebe12ee 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -46,8 +46,8 @@ RUNTIME_FUNCTION(Runtime_AsyncFunctionResolve) {
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ Handle<Object> receiver = args.at(1);
CHECK_IMPLIES(IsAsyncFunction(function->shared().kind()),
IsAsyncGeneratorFunction(function->shared().kind()));
CHECK(IsResumableFunction(function->shared().kind()));
@@ -83,7 +83,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorClose) {
RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ Handle<JSGeneratorObject> generator = args.at<JSGeneratorObject>(0);
return generator->function();
}
@@ -129,7 +129,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
DisallowGarbageCollection no_gc_scope;
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSAsyncGeneratorObject, generator, 0);
+ auto generator = JSAsyncGeneratorObject::cast(args[0]);
int state = generator.continuation();
DCHECK_NE(state, JSAsyncGeneratorObject::kGeneratorExecuting);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index dce6cc4086..b1c1b1c5a8 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -7,17 +7,14 @@
#include "src/api/api.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
-#include "src/baseline/baseline-batch-compiler.h"
-#include "src/baseline/baseline.h"
#include "src/builtins/builtins.h"
-#include "src/codegen/compiler.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
-#include "src/execution/runtime-profiler.h"
+#include "src/execution/tiering-manager.h"
#include "src/handles/maybe-handles.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -44,7 +41,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_AccessCheck) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<JSObject> object = args.at<JSObject>(0);
if (!isolate->MayAccess(handle(isolate->context(), isolate), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -100,7 +97,7 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
#define THROW_ERROR(isolate, args, call) \
HandleScope scope(isolate); \
DCHECK_LE(1, args.length()); \
- CONVERT_SMI_ARG_CHECKED(message_id_smi, 0); \
+ int message_id_smi = args.smi_value_at(0); \
\
Handle<Object> undefined = isolate->factory()->undefined_value(); \
Handle<Object> arg0 = (args.length() > 1) ? args.at(1) : undefined; \
@@ -114,7 +111,7 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
RUNTIME_FUNCTION(Runtime_ThrowRangeError) {
if (FLAG_correctness_fuzzer_suppressions) {
DCHECK_LE(1, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+ int message_id_smi = args.smi_value_at(0);
// If the result of a BigInt computation is truncated to 64 bit, Turbofan
// can sometimes truncate intermediate results already, which can prevent
@@ -166,8 +163,8 @@ const char* ElementsKindToType(ElementsKind fixed_elements_kind) {
RUNTIME_FUNCTION(Runtime_ThrowInvalidTypedArrayAlignment) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Map, map, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, problem_string, 1);
+ Handle<Map> map = args.at<Map>(0);
+ Handle<String> problem_string = args.at<String>(1);
ElementsKind kind = map->elements_kind();
@@ -200,7 +197,7 @@ RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ Handle<Object> name = args.at(0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
@@ -208,7 +205,7 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
RUNTIME_FUNCTION(Runtime_ThrowAccessedUninitializedVariable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ Handle<Object> name = args.at(0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewReferenceError(MessageTemplate::kAccessedUninitializedVariable, name));
@@ -217,33 +214,36 @@ RUNTIME_FUNCTION(Runtime_ThrowAccessedUninitializedVariable) {
RUNTIME_FUNCTION(Runtime_NewError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_INT32_ARG_CHECKED(template_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ int template_index = args.smi_value_at(0);
+ Handle<Object> arg0 = args.at(1);
MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewError(message_template, arg0);
}
+RUNTIME_FUNCTION(Runtime_NewForeign) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ return *isolate->factory()->NewForeign(kNullAddress);
+}
+
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
DCHECK_LE(args.length(), 4);
DCHECK_GE(args.length(), 1);
- CONVERT_INT32_ARG_CHECKED(template_index, 0);
+ int template_index = args.smi_value_at(0);
MessageTemplate message_template = MessageTemplateFromInt(template_index);
Handle<Object> arg0;
if (args.length() >= 2) {
- CHECK(args[1].IsObject());
arg0 = args.at<Object>(1);
}
Handle<Object> arg1;
if (args.length() >= 3) {
- CHECK(args[2].IsObject());
arg1 = args.at<Object>(2);
}
Handle<Object> arg2;
if (args.length() >= 4) {
- CHECK(args[3].IsObject());
arg2 = args.at<Object>(3);
}
@@ -253,8 +253,8 @@ RUNTIME_FUNCTION(Runtime_NewTypeError) {
RUNTIME_FUNCTION(Runtime_NewReferenceError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_INT32_ARG_CHECKED(template_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ int template_index = args.smi_value_at(0);
+ Handle<Object> arg0 = args.at(1);
MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewReferenceError(message_template, arg0);
}
@@ -262,8 +262,8 @@ RUNTIME_FUNCTION(Runtime_NewReferenceError) {
RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_INT32_ARG_CHECKED(template_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ int template_index = args.smi_value_at(0);
+ Handle<Object> arg0 = args.at(1);
MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
@@ -276,7 +276,7 @@ RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ Handle<Object> value = args.at(0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
@@ -299,7 +299,7 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
}
@@ -307,7 +307,7 @@ RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
Handle<String> type = Object::TypeOf(isolate, object);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
@@ -330,7 +330,7 @@ RUNTIME_FUNCTION(Runtime_StackGuard) {
RUNTIME_FUNCTION(Runtime_StackGuardWithGap) {
SealHandleScope shs(isolate);
DCHECK_EQ(args.length(), 1);
- CONVERT_UINT32_ARG_CHECKED(gap, 0);
+ uint32_t gap = args.positive_smi_value_at(0);
TRACE_EVENT0("v8.execute", "V8.StackGuard");
// First check if this is a real stack overflow.
@@ -342,45 +342,10 @@ RUNTIME_FUNCTION(Runtime_StackGuardWithGap) {
return isolate->stack_guard()->HandleInterrupts();
}
-namespace {
-
-void BytecodeBudgetInterruptFromBytecode(Isolate* isolate,
- Handle<JSFunction> function) {
- function->SetInterruptBudget();
- bool should_mark_for_optimization = function->has_feedback_vector();
- if (!function->has_feedback_vector()) {
- IsCompiledScope is_compiled_scope(
- function->shared().is_compiled_scope(isolate));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- DCHECK(is_compiled_scope.is_compiled());
- // Also initialize the invocation count here. This is only really needed for
- // OSR. When we OSR functions with lazy feedback allocation we want to have
- // a non zero invocation count so we can inline functions.
- function->feedback_vector().set_invocation_count(1, kRelaxedStore);
- }
- if (CanCompileWithBaseline(isolate, function->shared()) &&
- !function->ActiveTierIsBaseline()) {
- if (FLAG_baseline_batch_compilation) {
- isolate->baseline_batch_compiler()->EnqueueFunction(function);
- } else {
- IsCompiledScope is_compiled_scope(
- function->shared().is_compiled_scope(isolate));
- Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope);
- }
- }
- if (should_mark_for_optimization) {
- SealHandleScope shs(isolate);
- isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->MarkCandidatesForOptimizationFromBytecode();
- }
-}
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode) {
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheck) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
TRACE_EVENT0("v8.execute", "V8.BytecodeBudgetInterruptWithStackCheck");
// Check for stack interrupts here so that we can fold the interrupt check
@@ -399,35 +364,17 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode) {
}
}
- BytecodeBudgetInterruptFromBytecode(isolate, function);
+ isolate->tiering_manager()->OnInterruptTick(function);
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
TRACE_EVENT0("v8.execute", "V8.BytecodeBudgetInterrupt");
- BytecodeBudgetInterruptFromBytecode(isolate, function);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 0);
-
- // TODO(leszeks): Consider checking stack interrupts here, and removing
- // those checks for code that can have budget interrupts.
-
- DCHECK(feedback_cell->value().IsFeedbackVector());
-
- FeedbackVector::SetInterruptBudget(*feedback_cell);
-
- SealHandleScope shs(isolate);
- isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->MarkCandidatesForOptimizationFromCode();
+ isolate->tiering_manager()->OnInterruptTick(function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -462,16 +409,14 @@ class SaveAndClearThreadInWasmFlag {};
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(size, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
+ int size = args.smi_value_at(0);
+ int flags = args.smi_value_at(1);
AllocationAlignment alignment =
AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
- CHECK(FLAG_young_generation_large_objects ||
- size <= kMaxRegularHeapObjectSize);
if (!allow_large_object_allocation) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
@@ -496,8 +441,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(size, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
+ int size = args.smi_value_at(0);
+ int flags = args.smi_value_at(1);
AllocationAlignment alignment =
AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
@@ -514,7 +459,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(length, 0);
+ int length = args.smi_value_at(0);
DCHECK_LT(0, length);
return *isolate->factory()->NewByteArray(length);
}
@@ -522,7 +467,7 @@ RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(length, 0);
+ int length = args.smi_value_at(0);
if (length == 0) return ReadOnlyRoots(isolate).empty_string();
Handle<SeqOneByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -533,7 +478,7 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(length, 0);
+ int length = args.smi_value_at(0);
if (length == 0) return ReadOnlyRoots(isolate).empty_string();
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -544,23 +489,23 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
RUNTIME_FUNCTION(Runtime_ThrowIteratorError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
return isolate->Throw(*ErrorUtils::NewIteratorError(isolate, object));
}
RUNTIME_FUNCTION(Runtime_ThrowSpreadArgError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+ int message_id_smi = args.smi_value_at(0);
MessageTemplate message_id = MessageTemplateFromInt(message_id_smi);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ Handle<Object> object = args.at(1);
return ErrorUtils::ThrowSpreadArgError(isolate, message_id, object);
}
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
return isolate->Throw(
*ErrorUtils::NewCalledNonCallableError(isolate, object));
}
@@ -568,7 +513,7 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
return isolate->Throw(
*ErrorUtils::NewConstructedNonConstructable(isolate, object));
}
@@ -576,7 +521,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
RUNTIME_FUNCTION(Runtime_ThrowPatternAssignmentNonCoercible) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, object,
MaybeHandle<Object>());
}
@@ -594,7 +539,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::CreateListFromArrayLike(
isolate, object, ElementTypes::kAll));
}
@@ -602,7 +547,7 @@ RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(counter, 0);
+ int counter = args.smi_value_at(0);
isolate->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(counter));
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -629,18 +574,18 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
std::FILE* f;
if (args[0].IsString()) {
// With a string argument, the results are appended to that file.
- CONVERT_ARG_HANDLE_CHECKED(String, filename, 0);
+ Handle<String> filename = args.at<String>(0);
f = std::fopen(filename->ToCString().get(), "a");
DCHECK_NOT_NULL(f);
} else {
// With an integer argument, the results are written to stdout/stderr.
- CONVERT_SMI_ARG_CHECKED(fd, 0);
+ int fd = args.smi_value_at(0);
DCHECK(fd == 1 || fd == 2);
f = fd == 1 ? stdout : stderr;
}
// The second argument (if any) is a message header to be printed.
if (args.length() >= 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, message, 1);
+ Handle<String> message = args.at<String>(1);
message->PrintOn(f);
std::fputc('\n', f);
std::fflush(f);
@@ -660,8 +605,8 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, callable, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ Handle<Object> callable = args.at(0);
+ Handle<Object> object = args.at(1);
RETURN_RESULT_OR_FAILURE(
isolate, Object::OrdinaryHasInstance(isolate, callable, object));
}
@@ -669,14 +614,14 @@ RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
RUNTIME_FUNCTION(Runtime_Typeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
return *Object::TypeOf(isolate, object);
}
RUNTIME_FUNCTION(Runtime_AllowDynamicFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ Handle<JSFunction> target = args.at<JSFunction>(0);
Handle<JSObject> global_proxy(target->global_proxy(), isolate);
return *isolate->factory()->ToBoolean(
Builtins::AllowDynamicFunction(isolate, target, global_proxy));
@@ -686,7 +631,7 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, sync_iterator, 0);
+ Handle<Object> sync_iterator = args.at(0);
if (!sync_iterator->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -706,9 +651,10 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared_info, 1);
- CONVERT_SMI_ARG_CHECKED(slot_id, 2);
+ Handle<TemplateObjectDescription> description =
+ args.at<TemplateObjectDescription>(0);
+ Handle<SharedFunctionInfo> shared_info = args.at<SharedFunctionInfo>(1);
+ int slot_id = args.smi_value_at(2);
Handle<NativeContext> native_context(isolate->context().native_context(),
isolate);
@@ -723,7 +669,7 @@ RUNTIME_FUNCTION(Runtime_ReportMessageFromMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, exception, 0);
+ Handle<Object> exception = args.at(0);
DCHECK(!isolate->has_pending_exception());
isolate->set_pending_exception(*exception);
@@ -739,17 +685,19 @@ RUNTIME_FUNCTION(Runtime_GetInitializerFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, constructor, 0);
+ Handle<JSReceiver> constructor = args.at<JSReceiver>(0);
Handle<Symbol> key = isolate->factory()->class_fields_symbol();
- Handle<Object> initializer = JSReceiver::GetDataProperty(constructor, key);
+ Handle<Object> initializer =
+ JSReceiver::GetDataProperty(isolate, constructor, key);
return *initializer;
}
RUNTIME_FUNCTION(Runtime_DoubleToStringWithRadix) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- CONVERT_INT32_ARG_CHECKED(radix, 1);
+ double number = args.number_value_at(0);
+ int32_t radix = 0;
+ CHECK(args[1].ToInt32(&radix));
char* const str = DoubleToRadixCString(number, radix);
Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
@@ -757,5 +705,15 @@ RUNTIME_FUNCTION(Runtime_DoubleToStringWithRadix) {
return *result;
}
+RUNTIME_FUNCTION(Runtime_SharedValueBarrierSlow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<HeapObject> value = args.at<HeapObject>(0);
+ Handle<Object> shared_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, shared_value, Object::ShareSlow(isolate, value, kThrowOnError));
+ return *shared_value;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index de27dca8a3..1750ae6828 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -36,8 +36,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_FormatList) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSListFormat, list_format, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, list, 1);
+ Handle<JSListFormat> list_format = args.at<JSListFormat>(0);
+ Handle<FixedArray> list = args.at<FixedArray>(1);
RETURN_RESULT_OR_FAILURE(
isolate, JSListFormat::FormatList(isolate, list_format, list));
}
@@ -46,8 +46,8 @@ RUNTIME_FUNCTION(Runtime_FormatList) {
RUNTIME_FUNCTION(Runtime_FormatListToParts) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSListFormat, list_format, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, list, 1);
+ Handle<JSListFormat> list_format = args.at<JSListFormat>(0);
+ Handle<FixedArray> list = args.at<FixedArray>(1);
RETURN_RESULT_OR_FAILURE(
isolate, JSListFormat::FormatListToParts(isolate, list_format, list));
}
@@ -55,7 +55,7 @@ RUNTIME_FUNCTION(Runtime_FormatListToParts) {
RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ Handle<String> s = args.at<String>(0);
s = String::Flatten(isolate, s);
RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToLower(isolate, s));
}
@@ -63,7 +63,7 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ Handle<String> s = args.at<String>(0);
s = String::Flatten(isolate, s);
RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToUpper(isolate, s));
}
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 7d4e0e0924..4ac519c397 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -584,10 +584,11 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(0);
+ int literals_index = args.tagged_index_value_at(1);
+ Handle<ObjectBoilerplateDescription> description =
+ args.at<ObjectBoilerplateDescription>(2);
+ int flags = args.smi_value_at(3);
Handle<FeedbackVector> vector;
if (maybe_vector->IsFeedbackVector()) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
@@ -602,8 +603,9 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
RUNTIME_FUNCTION(Runtime_CreateObjectLiteralWithoutAllocationSite) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
+ Handle<ObjectBoilerplateDescription> description =
+ args.at<ObjectBoilerplateDescription>(0);
+ int flags = args.smi_value_at(1);
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteralWithoutAllocationSite<ObjectLiteralHelper>(
isolate, description, flags));
@@ -612,8 +614,9 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteralWithoutAllocationSite) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteralWithoutAllocationSite) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, description, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
+ Handle<ArrayBoilerplateDescription> description =
+ args.at<ArrayBoilerplateDescription>(0);
+ int flags = args.smi_value_at(1);
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteralWithoutAllocationSite<ArrayLiteralHelper>(
isolate, description, flags));
@@ -622,10 +625,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralWithoutAllocationSite) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(0);
+ int literals_index = args.tagged_index_value_at(1);
+ Handle<ArrayBoilerplateDescription> elements =
+ args.at<ArrayBoilerplateDescription>(2);
+ int flags = args.smi_value_at(3);
Handle<FeedbackVector> vector;
if (maybe_vector->IsFeedbackVector()) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
@@ -640,10 +644,10 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(0);
+ int index = args.tagged_index_value_at(1);
+ Handle<String> pattern = args.at<String>(2);
+ int flags = args.smi_value_at(3);
if (maybe_vector->IsUndefined()) {
// We don't have a vector; don't create a boilerplate, simply construct a
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 9adde80fd9..740965d9e7 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -28,12 +28,11 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
DCHECK_GE(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, specifier, 1);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ Handle<Object> specifier = args.at(1);
MaybeHandle<Object> import_assertions;
if (args.length() == 3) {
- CHECK(args[2].IsObject());
import_assertions = args.at<Object>(2);
}
@@ -47,7 +46,7 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(module_request, 0);
+ int module_request = args.smi_value_at(0);
Handle<SourceTextModule> module(isolate->context().module(), isolate);
return *SourceTextModule::GetModuleNamespace(isolate, module, module_request);
}
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 38349bd507..e988e3cc1a 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -16,7 +16,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_StringToNumber) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ Handle<String> subject = args.at<String>(0);
return *String::ToNumber(isolate, subject);
}
@@ -25,8 +25,8 @@ RUNTIME_FUNCTION(Runtime_StringToNumber) {
RUNTIME_FUNCTION(Runtime_StringParseInt) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, string, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, radix, 1);
+ Handle<Object> string = args.at(0);
+ Handle<Object> radix = args.at(1);
// Convert {string} to a String first, and flatten it.
Handle<String> subject;
@@ -53,7 +53,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
RUNTIME_FUNCTION(Runtime_StringParseFloat) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ Handle<String> subject = args.at<String>(0);
double value = StringToDouble(isolate, subject, ALLOW_TRAILING_JUNK,
std::numeric_limits<double>::quiet_NaN());
@@ -64,9 +64,8 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
RUNTIME_FUNCTION(Runtime_NumberToStringSlow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
-
- return *isolate->factory()->NumberToString(number, NumberCacheMode::kSetOnly);
+ return *isolate->factory()->NumberToString(args.at(0),
+ NumberCacheMode::kSetOnly);
}
RUNTIME_FUNCTION(Runtime_MaxSmi) {
@@ -79,7 +78,7 @@ RUNTIME_FUNCTION(Runtime_MaxSmi) {
RUNTIME_FUNCTION(Runtime_IsSmi) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(obj.IsSmi());
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 47bb8f0f56..9d7cf09241 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -3,10 +3,13 @@
// found in the LICENSE file.
#include "src/ast/prettyprinter.h"
+#include "src/base/macros.h"
+#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
#include "src/handles/maybe-handles.h"
@@ -77,7 +80,7 @@ MaybeHandle<Object> Runtime::HasProperty(Isolate* isolate,
Object);
// Lookup the {name} on {receiver}.
- Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
+ Maybe<bool> maybe = JSReceiver::HasProperty(isolate, receiver, name);
if (maybe.IsNothing()) return MaybeHandle<Object>();
return maybe.FromJust() ? ReadOnlyRoots(isolate).true_value_handle()
: ReadOnlyRoots(isolate).false_value_handle();
@@ -117,7 +120,7 @@ void GeneralizeAllTransitionsToFieldAsMutable(Isolate* isolate, Handle<Map> map,
// Collect all outgoing field transitions.
{
DisallowGarbageCollection no_gc;
- TransitionsAccessor transitions(isolate, *map, &no_gc);
+ TransitionsAccessor transitions(isolate, *map);
transitions.ForEachTransitionTo(
*name,
[&](Map target) {
@@ -422,8 +425,8 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
RUNTIME_FUNCTION(Runtime_HasOwnConstDataProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, property, 1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> property = args.at(1);
bool success;
PropertyKey key(isolate, property, &success);
@@ -478,6 +481,42 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
return *value;
}
+RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 4);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ Handle<Symbol> brand = args.at<Symbol>(1);
+ Handle<Context> context = args.at<Context>(2);
+ int depth = args.smi_value_at(3);
+ DCHECK(brand->is_private_name());
+
+ LookupIterator it(isolate, receiver, brand, LookupIterator::OWN);
+
+ if (it.IsFound()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateBrandReinitialization,
+ brand));
+ }
+
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ // Look for the context in |depth| in the context chain to store it
+ // in the instance with the brand variable as key, which is needed by
+ // the debugger for retrieving names of private methods.
+ DCHECK_GE(depth, 0);
+ for (; depth > 0; depth--) {
+ context =
+ handle(Context::cast(context->get(Context::PREVIOUS_INDEX)), isolate);
+ }
+ DCHECK_EQ(context->scope_info().scope_type(), ScopeType::CLASS_SCOPE);
+ CHECK(Object::AddDataProperty(&it, context, attributes, Just(kDontThrow),
+ StoreOrigin::kMaybeKeyed)
+ .FromJust());
+ return *receiver;
+}
+
// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
// TODO(verwaest): Support the common cases with precached map directly in
// an Object.create stub.
@@ -587,18 +626,19 @@ MaybeHandle<Object> Runtime::DefineObjectOwnProperty(
RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- MAYBE_RETURN(JSReceiver::SetPrototype(obj, prototype, false, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
+ Handle<JSReceiver> obj = args.at<JSReceiver>(0);
+ Handle<Object> prototype = args.at(1);
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(isolate, obj, prototype, false, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
return *obj;
}
RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_SMI_ARG_CHECKED(properties, 1);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ int properties = args.smi_value_at(1);
// Conservative upper limit to prevent fuzz tests from going OOM.
if (properties > 100000) return isolate->ThrowIllegalOperation();
if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
@@ -612,7 +652,7 @@ RUNTIME_FUNCTION(Runtime_ObjectValues) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
Handle<FixedArray> values;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -626,7 +666,7 @@ RUNTIME_FUNCTION(Runtime_ObjectValuesSkipFastPath) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
Handle<FixedArray> value;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -640,7 +680,7 @@ RUNTIME_FUNCTION(Runtime_ObjectEntries) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
Handle<FixedArray> entries;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -654,7 +694,7 @@ RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
Handle<FixedArray> entries;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -667,7 +707,7 @@ RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
RUNTIME_FUNCTION(Runtime_ObjectIsExtensible) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
Maybe<bool> result =
object->IsJSReceiver()
@@ -680,7 +720,7 @@ RUNTIME_FUNCTION(Runtime_ObjectIsExtensible) {
RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsThrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
kThrowOnError),
@@ -691,7 +731,7 @@ RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsThrow) {
RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsDontThrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
Maybe<bool> result = JSReceiver::PreventExtensions(
Handle<JSReceiver>::cast(object), kDontThrow);
@@ -702,7 +742,7 @@ RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsDontThrow) {
RUNTIME_FUNCTION(Runtime_JSReceiverGetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
RETURN_RESULT_OR_FAILURE(isolate,
JSReceiver::GetPrototype(isolate, receiver));
@@ -712,11 +752,12 @@ RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfThrow) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Object> proto = args.at(1);
- MAYBE_RETURN(JSReceiver::SetPrototype(object, proto, true, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(isolate, object, proto, true, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
return *object;
}
@@ -725,11 +766,11 @@ RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfDontThrow) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Object> proto = args.at(1);
Maybe<bool> result =
- JSReceiver::SetPrototype(object, proto, true, kDontThrow);
+ JSReceiver::SetPrototype(isolate, object, proto, true, kDontThrow);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -737,11 +778,10 @@ RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfDontThrow) {
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 3 || args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, lookup_start_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
+ Handle<Object> lookup_start_obj = args.at(0);
+ Handle<Object> key_obj = args.at(1);
Handle<Object> receiver_obj = lookup_start_obj;
if (args.length() == 3) {
- CHECK(args[2].IsObject());
receiver_obj = args.at<Object>(2);
}
@@ -845,9 +885,9 @@ RUNTIME_FUNCTION(Runtime_SetKeyedProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, object, key, value,
@@ -858,9 +898,9 @@ RUNTIME_FUNCTION(Runtime_DefineObjectOwnProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::DefineObjectOwnProperty(isolate, object, key, value,
@@ -871,26 +911,28 @@ RUNTIME_FUNCTION(Runtime_SetNamedProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, object, key, value,
StoreOrigin::kNamed));
}
-// Similar to DefineDataPropertyInLiteral, but does not update feedback, and
+// Similar to DefineKeyedOwnPropertyInLiteral, but does not update feedback, and
// and does not have a flags parameter for performing SetFunctionName().
//
-// Currently, this is used for ObjectLiteral spread properties.
-RUNTIME_FUNCTION(Runtime_StoreDataPropertyInLiteral) {
+// Currently, this is used for ObjectLiteral spread properties in CloneObjectIC
+// and for array literal creations in StoreInArrayLiteralIC.
+// TODO(v8:12548): merge this into DefineKeyedOwnPropertyInLiteral.
+RUNTIME_FUNCTION(Runtime_DefineKeyedOwnPropertyInLiteral_Simple) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
PropertyKey lookup_key(isolate, key);
LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
@@ -923,9 +965,9 @@ Object DeleteProperty(Isolate* isolate, Handle<Object> object,
RUNTIME_FUNCTION(Runtime_DeleteProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_SMI_ARG_CHECKED(language_mode, 2);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
+ int language_mode = args.smi_value_at(2);
return DeleteProperty(isolate, object, key,
static_cast<LanguageMode>(language_mode));
}
@@ -933,7 +975,7 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
RUNTIME_FUNCTION(Runtime_ShrinkNameDictionary) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(NameDictionary, dictionary, 0);
+ Handle<NameDictionary> dictionary = args.at<NameDictionary>(0);
return *NameDictionary::Shrink(isolate, dictionary);
}
@@ -941,7 +983,7 @@ RUNTIME_FUNCTION(Runtime_ShrinkNameDictionary) {
RUNTIME_FUNCTION(Runtime_ShrinkSwissNameDictionary) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, dictionary, 0);
+ Handle<SwissNameDictionary> dictionary = args.at<SwissNameDictionary>(0);
return *SwissNameDictionary::Shrink(isolate, dictionary);
}
@@ -950,8 +992,8 @@ RUNTIME_FUNCTION(Runtime_ShrinkSwissNameDictionary) {
RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
// Check that {object} is actually a receiver.
if (!object->IsJSReceiver()) {
@@ -967,7 +1009,7 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
Object::ToName(isolate, key));
// Lookup the {name} on {receiver}.
- Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
+ Maybe<bool> maybe = JSReceiver::HasProperty(isolate, receiver, name);
if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(maybe.FromJust());
}
@@ -975,8 +1017,8 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_SMI_ARG_CHECKED(filter_value, 1);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ int filter_value = args.smi_value_at(1);
PropertyFilter filter = static_cast<PropertyFilter>(filter_value);
Handle<FixedArray> keys;
@@ -991,7 +1033,7 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
if (object->IsJSObject() && !object->IsJSGlobalObject()) {
JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
"RuntimeToFastProperties");
@@ -1008,8 +1050,8 @@ RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
RUNTIME_FUNCTION(Runtime_NewObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
+ Handle<JSFunction> target = args.at<JSFunction>(0);
+ Handle<JSReceiver> new_target = args.at<JSReceiver>(1);
RETURN_RESULT_OR_FAILURE(
isolate,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
@@ -1018,9 +1060,9 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
RUNTIME_FUNCTION(Runtime_GetDerivedMap) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, rab_gsab, 2);
+ Handle<JSFunction> target = args.at<JSFunction>(0);
+ Handle<JSReceiver> new_target = args.at<JSReceiver>(1);
+ Handle<Object> rab_gsab = args.at(2);
if (rab_gsab->IsTrue()) {
return *JSFunction::GetDerivedRabGsabMap(isolate, target, new_target);
} else {
@@ -1034,7 +1076,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
+ Handle<Map> initial_map = args.at<Map>(0);
MapUpdater::CompleteInobjectSlackTracking(isolate, *initial_map);
return ReadOnlyRoots(isolate).undefined_value();
@@ -1043,7 +1085,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
+ Handle<JSObject> js_object = args.at<JSObject>(0);
// It could have been a DCHECK but we call this function directly from tests.
if (!js_object->map().is_deprecated()) return Smi::zero();
// This call must not cause lazy deopts, because it's called from deferred
@@ -1067,29 +1109,29 @@ static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<JSObject> obj = args.at<JSObject>(0);
CHECK(!obj->IsNull(isolate));
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> getter = args.at(2);
CHECK(IsValidAccessor(isolate, getter));
- CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+ Handle<Object> setter = args.at(3);
CHECK(IsValidAccessor(isolate, setter));
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 4);
+ auto attrs = PropertyAttributesFromInt(args.smi_value_at(4));
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::DefineAccessor(obj, name, getter, setter, attrs));
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
+RUNTIME_FUNCTION(Runtime_DefineKeyedOwnPropertyInLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_SMI_ARG_CHECKED(flag, 3);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 4);
- CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 5);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> value = args.at(2);
+ int flag = args.smi_value_at(3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(4);
+ int index = args.tagged_index_value_at(5);
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
@@ -1109,12 +1151,13 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
}
}
- DataPropertyInLiteralFlags flags(flag);
- PropertyAttributes attrs = (flags & DataPropertyInLiteralFlag::kDontEnum)
- ? PropertyAttributes::DONT_ENUM
- : PropertyAttributes::NONE;
+ DefineKeyedOwnPropertyInLiteralFlags flags(flag);
+ PropertyAttributes attrs =
+ (flags & DefineKeyedOwnPropertyInLiteralFlag::kDontEnum)
+ ? PropertyAttributes::DONT_ENUM
+ : PropertyAttributes::NONE;
- if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
+ if (flags & DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName) {
DCHECK(value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(value);
DCHECK(!function->shared().HasSharedName());
@@ -1136,27 +1179,28 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
Just(kDontThrow))
.IsJust());
- // Return the value so that BaselineCompiler::VisitStaDataPropertyInLiteral
- // doesn't have to save the accumulator.
+ // Return the value so that
+ // BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral doesn't have to
+ // save the accumulator.
return *value;
}
RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Smi, position, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 2);
+ int position = args.smi_value_at(0);
+ Handle<Object> value = args.at(1);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
if (maybe_vector->IsUndefined()) {
return ReadOnlyRoots(isolate).undefined_value();
}
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 2);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<String> type = Object::TypeOf(isolate, value);
if (value->IsJSReceiver()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
- type = JSReceiver::GetConstructorName(object);
+ type = JSReceiver::GetConstructorName(isolate, object);
} else if (value->IsNull(isolate)) {
// typeof(null) is object. But it's more user-friendly to annotate
// null as type "null".
@@ -1165,7 +1209,7 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
DCHECK(vector->metadata().HasTypeProfileSlot());
FeedbackNexus nexus(vector, vector->GetTypeProfileSlot());
- nexus.Collect(type, position->value());
+ nexus.Collect(type, position);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1173,7 +1217,7 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ auto obj = HeapObject::cast(args[0]);
return isolate->heap()->ToBoolean(
IsFastPackedElementsKind(obj.map().elements_kind()));
}
@@ -1181,24 +1225,24 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(obj.IsJSReceiver());
}
RUNTIME_FUNCTION(Runtime_GetFunctionName) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
return *JSFunction::GetName(isolate, function);
}
RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<JSFunction> getter = args.at<JSFunction>(2);
+ auto attrs = PropertyAttributesFromInt(args.smi_value_at(3));
if (String::cast(getter->shared().Name()).length() == 0) {
Handle<Map> getter_map(getter->map(), isolate);
@@ -1218,8 +1262,8 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
RUNTIME_FUNCTION(Runtime_SetDataProperties) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
+ Handle<JSReceiver> target = args.at<JSReceiver>(0);
+ Handle<Object> source = args.at(1);
// 2. If source is undefined or null, let keys be an empty List.
if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
@@ -1236,8 +1280,8 @@ RUNTIME_FUNCTION(Runtime_SetDataProperties) {
RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
+ Handle<JSObject> target = args.at<JSObject>(0);
+ Handle<Object> source = args.at(1);
// 2. If source is undefined or null, let keys be an empty List.
if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
@@ -1252,10 +1296,55 @@ RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
+namespace {
+
+// Check that the excluded properties are within the stack range of the top of
+// the stack, and the start of the JS frame.
+void CheckExcludedPropertiesAreOnCallerStack(Isolate* isolate, Address base,
+ int count) {
+#ifdef DEBUG
+ StackFrameIterator it(isolate);
+
+ // Don't need to check when there's no excluded properties.
+ if (count == 0) return;
+
+ DCHECK(!it.done());
+
+ // Properties are pass in order on the stack, which means that their addresses
+ // are in reverse order in memory (because stacks grow backwards). So, we
+ // need to check if the _last_ property address is before the stack end...
+ Address last_property = base - (count - 1) * kSystemPointerSize;
+ DCHECK_GE(last_property, it.frame()->sp());
+
+ // ... and for the first JS frame, make sure the _first_ property address is
+ // after that stack frame's start.
+ for (; !it.done(); it.Advance()) {
+ if (it.frame()->is_java_script()) {
+ DCHECK_LT(base, it.frame()->fp());
+ return;
+ }
+ }
+
+ // We should always find a JS frame.
+ UNREACHABLE();
+#endif
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedPropertiesOnStack) {
HandleScope scope(isolate);
- DCHECK_LE(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 0);
+ DCHECK_LE(3, args.length());
+ Handle<Object> source = args.at(0);
+ int excluded_property_count = args.smi_value_at(1);
+ // The excluded_property_base is passed as a raw stack pointer. This is safe
+ // because the stack pointer is aligned, so it looks like a Smi to the GC.
+ Address* excluded_property_base = reinterpret_cast<Address*>(args[2].ptr());
+ DCHECK(HAS_SMI_TAG(reinterpret_cast<intptr_t>(excluded_property_base)));
+ // Also make sure that the given base pointer points to to on-stack values.
+ CheckExcludedPropertiesAreOnCallerStack(
+ isolate, reinterpret_cast<Address>(excluded_property_base),
+ excluded_property_count);
// If source is undefined or null, throw a non-coercible error.
if (source->IsNullOrUndefined(isolate)) {
@@ -1263,9 +1352,12 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
MaybeHandle<Object>());
}
- base::ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
- for (int i = 1; i < args.length(); i++) {
- Handle<Object> property = args.at(i);
+ base::ScopedVector<Handle<Object>> excluded_properties(
+ excluded_property_count);
+ for (int i = 0; i < excluded_property_count; i++) {
+ // Because the excluded properties on stack is from high address
+ // to low address, so we need to use sub
+ Handle<Object> property(excluded_property_base - i);
uint32_t property_num;
// We convert string to number if possible, in cases of computed
// properties resolving to numbers, which would've been strings
@@ -1276,7 +1368,7 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
property = isolate->factory()->NewNumberFromUint(property_num);
}
- excluded_properties[i - 1] = property;
+ excluded_properties[i] = property;
}
Handle<JSObject> target =
@@ -1292,10 +1384,10 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<JSFunction> setter = args.at<JSFunction>(2);
+ auto attrs = PropertyAttributesFromInt(args.smi_value_at(3));
if (String::cast(setter->shared().Name()).length() == 0) {
Handle<Map> setter_map(setter->map(), isolate);
@@ -1321,43 +1413,43 @@ RUNTIME_FUNCTION(Runtime_ToObject) {
RUNTIME_FUNCTION(Runtime_ToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> input = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToNumeric) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> input = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToLength) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> input = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToLength(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> input = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToString(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToName) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> input = args.at(0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToName(isolate, input));
}
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> prototype = args.at(1);
if (!object->IsJSReceiver()) return ReadOnlyRoots(isolate).false_value();
Maybe<bool> result = JSReceiver::HasInPrototypeChain(
isolate, Handle<JSReceiver>::cast(object), prototype);
@@ -1369,8 +1461,8 @@ RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
+ Handle<Object> value = args.at(0);
+ Handle<Object> done = args.at(1);
return *isolate->factory()->NewJSIteratorResult(value,
done->BooleanValue(isolate));
}
@@ -1378,9 +1470,9 @@ RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<JSReceiver> o = args.at<JSReceiver>(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
bool success;
PropertyKey lookup_key(isolate, key, &success);
if (!success) return ReadOnlyRoots(isolate).exception();
@@ -1393,22 +1485,22 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
RUNTIME_FUNCTION(Runtime_SetOwnPropertyIgnoreAttributes) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, o, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(Smi, attributes, 3);
+ Handle<JSObject> o = args.at<JSObject>(0);
+ Handle<String> key = args.at<String>(1);
+ Handle<Object> value = args.at(2);
+ int attributes = args.smi_value_at(3);
- RETURN_RESULT_OR_FAILURE(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- o, key, value, PropertyAttributes(attributes->value())));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ o, key, value, PropertyAttributes(attributes)));
}
RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Name> name = args.at<Name>(1);
PropertyDescriptor desc;
Maybe<bool> found =
@@ -1422,7 +1514,7 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
RUNTIME_FUNCTION(Runtime_LoadPrivateSetter) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(AccessorPair, pair, 0);
+ Handle<AccessorPair> pair = args.at<AccessorPair>(0);
DCHECK(pair->setter().IsJSFunction());
return pair->setter();
}
@@ -1430,7 +1522,7 @@ RUNTIME_FUNCTION(Runtime_LoadPrivateSetter) {
RUNTIME_FUNCTION(Runtime_LoadPrivateGetter) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(AccessorPair, pair, 0);
+ Handle<AccessorPair> pair = args.at<AccessorPair>(0);
DCHECK(pair->getter().IsJSFunction());
return pair->getter();
}
@@ -1449,35 +1541,34 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateAccessors) {
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableAllocate) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(Smi, at_least_space_for, 0);
+ int at_least_space_for = args.smi_value_at(0);
- return *isolate->factory()->NewSwissNameDictionary(
- at_least_space_for->value(), AllocationType::kYoung);
+ return *isolate->factory()->NewSwissNameDictionary(at_least_space_for,
+ AllocationType::kYoung);
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableAdd) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(Smi, details_smi, 3);
+ Handle<SwissNameDictionary> table = args.at<SwissNameDictionary>(0);
+ Handle<Name> key = args.at<Name>(1);
+ Handle<Object> value = args.at(2);
+ PropertyDetails details(Smi::cast(args[3]));
DCHECK(key->IsUniqueName());
- return *SwissNameDictionary::Add(isolate, table, key, value,
- PropertyDetails{*details_smi});
+ return *SwissNameDictionary::Add(isolate, table, key, value, details);
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableFindEntry) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
-
- InternalIndex index = table->FindEntry(isolate, *key);
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ Name key = Name::cast(args[1]);
+ InternalIndex index = table.FindEntry(isolate, key);
return Smi::FromInt(index.is_found()
? index.as_int()
: SwissNameDictionary::kNotFoundSentinel);
@@ -1487,15 +1578,14 @@ RUNTIME_FUNCTION(Runtime_SwissTableFindEntry) {
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableUpdate) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(Smi, details_smi, 3);
-
- InternalIndex i(Smi::ToInt(*index));
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ InternalIndex index(args.smi_value_at(1));
+ Object value = args[2];
+ table.ValueAtPut(index, value);
- table->ValueAtPut(i, *value);
- table->DetailsAtPut(i, PropertyDetails{*details_smi});
+ PropertyDetails details(Smi::cast(args[3]));
+ table.DetailsAtPut(index, details);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1504,61 +1594,59 @@ RUNTIME_FUNCTION(Runtime_SwissTableUpdate) {
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableDelete) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
-
- InternalIndex i(Smi::ToInt(*entry));
+ Handle<SwissNameDictionary> table = args.at<SwissNameDictionary>(0);
+ InternalIndex index(args.smi_value_at(1));
- return *SwissNameDictionary::DeleteEntry(isolate, table, i);
+ return *SwissNameDictionary::DeleteEntry(isolate, table, index);
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableEquals) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, other, 1);
-
- return Smi::FromInt(table->EqualsForTesting(*other));
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ auto other = SwissNameDictionary::cast(args[0]);
+ return Smi::FromInt(table.EqualsForTesting(other));
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableElementsCount) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
-
- return Smi::FromInt(table->NumberOfElements());
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ return Smi::FromInt(table.NumberOfElements());
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableKeyAt) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
-
- return table->KeyAt(InternalIndex(Smi::ToInt(*entry)));
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ InternalIndex index(args.smi_value_at(1));
+ return table.KeyAt(index);
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableValueAt) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
-
- return table->ValueAt(InternalIndex(Smi::ToInt(*entry)));
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ InternalIndex index(args.smi_value_at(1));
+ return table.ValueAt(index);
}
// TODO(v8:11330) This is only here while the CSA/Torque implementaton of
// SwissNameDictionary is work in progress.
RUNTIME_FUNCTION(Runtime_SwissTableDetailsAt) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
-
- PropertyDetails d = table->DetailsAt(InternalIndex(Smi::ToInt(*entry)));
+ DisallowGarbageCollection no_gc;
+ auto table = SwissNameDictionary::cast(args[0]);
+ InternalIndex index(args.smi_value_at(1));
+ PropertyDetails d = table.DetailsAt(index);
return d.AsSmi();
}
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index fd5298077e..e6dc17b80c 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -14,8 +14,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_Add) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> lhs = args.at(0);
+ Handle<Object> rhs = args.at(1);
RETURN_RESULT_OR_FAILURE(isolate, Object::Add(isolate, lhs, rhs));
}
@@ -23,8 +23,8 @@ RUNTIME_FUNCTION(Runtime_Add) {
RUNTIME_FUNCTION(Runtime_Equal) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::Equals(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
@@ -33,8 +33,8 @@ RUNTIME_FUNCTION(Runtime_Equal) {
RUNTIME_FUNCTION(Runtime_NotEqual) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::Equals(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(!result.FromJust());
@@ -43,32 +43,32 @@ RUNTIME_FUNCTION(Runtime_NotEqual) {
RUNTIME_FUNCTION(Runtime_StrictEqual) {
SealHandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- CONVERT_ARG_CHECKED(Object, y, 1);
+ Object x = args[0];
+ Object y = args[1];
return isolate->heap()->ToBoolean(x.StrictEquals(y));
}
RUNTIME_FUNCTION(Runtime_StrictNotEqual) {
SealHandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- CONVERT_ARG_CHECKED(Object, y, 1);
+ Object x = args[0];
+ Object y = args[1];
return isolate->heap()->ToBoolean(!x.StrictEquals(y));
}
RUNTIME_FUNCTION(Runtime_ReferenceEqual) {
SealHandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- CONVERT_ARG_CHECKED(Object, y, 1);
+ Object x = args[0];
+ Object y = args[1];
return isolate->heap()->ToBoolean(x == y);
}
RUNTIME_FUNCTION(Runtime_LessThan) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::LessThan(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
@@ -77,8 +77,8 @@ RUNTIME_FUNCTION(Runtime_LessThan) {
RUNTIME_FUNCTION(Runtime_GreaterThan) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::GreaterThan(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
@@ -87,8 +87,8 @@ RUNTIME_FUNCTION(Runtime_GreaterThan) {
RUNTIME_FUNCTION(Runtime_LessThanOrEqual) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::LessThanOrEqual(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
@@ -97,8 +97,8 @@ RUNTIME_FUNCTION(Runtime_LessThanOrEqual) {
RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Handle<Object> x = args.at(0);
+ Handle<Object> y = args.at(1);
Maybe<bool> result = Object::GreaterThanOrEqual(isolate, x, y);
if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 516a597e56..352879a593 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -20,8 +20,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> value = args.at(1);
Handle<Object> rejected_promise = promise;
if (isolate->debug()->is_active()) {
@@ -44,8 +44,8 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
RUNTIME_FUNCTION(Runtime_PromiseRejectAfterResolved) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> reason = args.at(1);
isolate->ReportPromiseReject(promise, reason,
v8::kPromiseRejectAfterResolved);
return ReadOnlyRoots(isolate).undefined_value();
@@ -54,8 +54,8 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectAfterResolved) {
RUNTIME_FUNCTION(Runtime_PromiseResolveAfterResolved) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, resolution, 1);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> resolution = args.at(1);
isolate->ReportPromiseReject(promise, resolution,
v8::kPromiseResolveAfterResolved);
return ReadOnlyRoots(isolate).undefined_value();
@@ -64,7 +64,7 @@ RUNTIME_FUNCTION(Runtime_PromiseResolveAfterResolved) {
RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
// At this point, no revocation has been issued before
CHECK(!promise->has_handler());
isolate->ReportPromiseReject(promise, Handle<Object>(),
@@ -75,7 +75,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
function, handle(function->native_context(), isolate));
@@ -95,8 +95,8 @@ RUNTIME_FUNCTION(Runtime_PerformMicrotaskCheckpoint) {
RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, microtask_callback, 0);
- CONVERT_ARG_CHECKED(Object, microtask_data, 1);
+ Object microtask_callback = args[0];
+ Object microtask_data = args[1];
MicrotaskCallback callback = ToCData<MicrotaskCallback>(microtask_callback);
void* data = ToCData<void*>(microtask_data);
callback(data);
@@ -107,7 +107,7 @@ RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
RUNTIME_FUNCTION(Runtime_PromiseStatus) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
return Smi::FromInt(promise->status());
}
@@ -115,123 +115,38 @@ RUNTIME_FUNCTION(Runtime_PromiseStatus) {
RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, parent, 1);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> parent = args.at(1);
isolate->RunPromiseHook(PromiseHookType::kInit, promise, parent);
return ReadOnlyRoots(isolate).undefined_value();
}
-namespace {
-
-Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
- Handle<Object> value,
- Handle<JSPromise> promise,
- Handle<JSPromise> outer_promise,
- Handle<JSFunction> reject_handler,
- bool is_predicted_as_caught) {
- // Allocate the throwaway promise and fire the appropriate init
- // hook for the throwaway promise (passing the {promise} as its
- // parent).
- Handle<JSPromise> throwaway = isolate->factory()->NewJSPromiseWithoutHook();
- isolate->RunAllPromiseHooks(PromiseHookType::kInit, throwaway, promise);
-
- // On inspector side we capture async stack trace and store it by
- // outer_promise->async_task_id when async function is suspended first time.
- // To use captured stack trace later throwaway promise should have the same
- // async_task_id as outer_promise since we generate WillHandle and DidHandle
- // events using throwaway promise.
- throwaway->set_async_task_id(outer_promise->async_task_id());
-
- // The Promise will be thrown away and not handled, but it
- // shouldn't trigger unhandled reject events as its work is done
- throwaway->set_has_handler(true);
-
- // Enable proper debug support for promises.
- if (isolate->debug()->is_active()) {
- if (value->IsJSPromise()) {
- Object::SetProperty(
- isolate, reject_handler,
- isolate->factory()->promise_forwarding_handler_symbol(),
- isolate->factory()->true_value(), StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError))
- .Check();
- Handle<JSPromise>::cast(value)->set_handled_hint(is_predicted_as_caught);
- }
-
- // Mark the dependency to {outer_promise} in case the {throwaway}
- // Promise is found on the Promise stack
- Object::SetProperty(isolate, throwaway,
- isolate->factory()->promise_handled_by_symbol(),
- outer_promise, StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError))
- .Check();
- }
-
- return throwaway;
-}
-
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_AwaitPromisesInit) {
- DCHECK_EQ(5, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject_handler, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(is_predicted_as_caught, 4);
- return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
- reject_handler, is_predicted_as_caught);
-}
-
-RUNTIME_FUNCTION(Runtime_AwaitPromisesInitOld) {
- DCHECK_EQ(5, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject_handler, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(is_predicted_as_caught, 4);
-
- // Fire the init hook for the wrapper promise (that we created for the
- // {value} previously).
- isolate->RunAllPromiseHooks(PromiseHookType::kInit, promise, outer_promise);
- return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
- reject_handler, is_predicted_as_caught);
-}
-
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, maybe_promise, 0);
- if (!maybe_promise->IsJSPromise())
- return ReadOnlyRoots(isolate).undefined_value();
- Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
- if (isolate->debug()->is_active()) isolate->PushPromise(promise);
- isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
- isolate->factory()->undefined_value());
+ Handle<JSReceiver> promise = args.at<JSReceiver>(0);
+ if (promise->IsJSPromise()) {
+ isolate->OnPromiseBefore(Handle<JSPromise>::cast(promise));
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, maybe_promise, 0);
- if (!maybe_promise->IsJSPromise())
- return ReadOnlyRoots(isolate).undefined_value();
- Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
- if (isolate->debug()->is_active()) isolate->PopPromise();
- isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
- isolate->factory()->undefined_value());
+ Handle<JSReceiver> promise = args.at<JSReceiver>(0);
+ if (promise->IsJSPromise()) {
+ isolate->OnPromiseAfter(Handle<JSPromise>::cast(promise));
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_RejectPromise) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
- CONVERT_ARG_HANDLE_CHECKED(Oddball, debug_event, 2);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> reason = args.at(1);
+ Handle<Oddball> debug_event = args.at<Oddball>(2);
return *JSPromise::Reject(promise, reason,
debug_event->BooleanValue(isolate));
}
@@ -239,8 +154,8 @@ RUNTIME_FUNCTION(Runtime_RejectPromise) {
RUNTIME_FUNCTION(Runtime_ResolvePromise) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, resolution, 1);
+ Handle<JSPromise> promise = args.at<JSPromise>(0);
+ Handle<Object> resolution = args.at(1);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSPromise::Resolve(promise, resolution));
@@ -252,10 +167,10 @@ RUNTIME_FUNCTION(Runtime_ResolvePromise) {
RUNTIME_FUNCTION(Runtime_ConstructAggregateErrorHelper) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, message, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, options, 3);
+ Handle<JSFunction> target = args.at<JSFunction>(0);
+ Handle<Object> new_target = args.at(1);
+ Handle<Object> message = args.at(2);
+ Handle<Object> options = args.at(3);
DCHECK_EQ(*target, *isolate->aggregate_error_function());
@@ -271,36 +186,32 @@ RUNTIME_FUNCTION(Runtime_ConstructAggregateErrorHelper) {
RUNTIME_FUNCTION(Runtime_ConstructInternalAggregateErrorHelper) {
HandleScope scope(isolate);
DCHECK_GE(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, message, 0);
+ int message_template_index = args.smi_value_at(0);
Handle<Object> arg0;
if (args.length() >= 2) {
- DCHECK(args[1].IsObject());
arg0 = args.at<Object>(1);
}
Handle<Object> arg1;
if (args.length() >= 3) {
- DCHECK(args[2].IsObject());
arg1 = args.at<Object>(2);
}
Handle<Object> arg2;
if (args.length() >= 4) {
- CHECK(args[3].IsObject());
arg2 = args.at<Object>(3);
}
Handle<Object> options;
if (args.length() >= 5) {
- CHECK(args[4].IsObject());
options = args.at<Object>(4);
} else {
options = isolate->factory()->undefined_value();
}
Handle<Object> message_string = MessageFormatter::Format(
- isolate, MessageTemplate(message->value()), arg0, arg1, arg2);
+ isolate, MessageTemplate(message_template_index), arg0, arg1, arg2);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 9249affb9f..88b6fb2297 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -18,21 +18,21 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_IsJSProxy) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(obj.IsJSProxy());
}
RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ auto proxy = JSProxy::cast(args[0]);
return proxy.handler();
}
RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ auto proxy = JSProxy::cast(args[0]);
return proxy.target();
}
@@ -40,15 +40,15 @@ RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 2);
+ Handle<JSReceiver> holder = args.at<JSReceiver>(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> receiver = args.at(2);
// TODO(mythria): Remove the on_non_existent parameter to this function. This
// should only be called when getting named properties on receiver. This
// doesn't handle the global variable loads.
#ifdef DEBUG
- CONVERT_ARG_HANDLE_CHECKED(Smi, on_non_existent, 3);
- DCHECK_NE(static_cast<OnNonExistent>(on_non_existent->value()),
+ int on_non_existent = args.smi_value_at(3);
+ DCHECK_NE(static_cast<OnNonExistent>(on_non_existent),
OnNonExistent::kThrowReferenceError);
#endif
@@ -67,10 +67,10 @@ RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 3);
+ Handle<JSReceiver> holder = args.at<JSReceiver>(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
+ Handle<Object> receiver = args.at(3);
bool success = false;
PropertyKey lookup_key(isolate, key, &success);
@@ -89,10 +89,10 @@ RUNTIME_FUNCTION(Runtime_CheckProxyGetSetTrapResult) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, trap_result, 2);
- CONVERT_NUMBER_CHECKED(int64_t, access_kind, Int64, args[3]);
+ Handle<Name> name = args.at<Name>(0);
+ Handle<JSReceiver> target = args.at<JSReceiver>(1);
+ Handle<Object> trap_result = args.at(2);
+ int64_t access_kind = NumberToInt64(args[3]);
RETURN_RESULT_OR_FAILURE(isolate, JSProxy::CheckGetSetTrapResult(
isolate, name, target, trap_result,
@@ -103,8 +103,8 @@ RUNTIME_FUNCTION(Runtime_CheckProxyHasTrapResult) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
+ Handle<Name> name = args.at<Name>(0);
+ Handle<JSReceiver> target = args.at<JSReceiver>(1);
Maybe<bool> result = JSProxy::CheckHasTrap(isolate, name, target);
if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
@@ -115,8 +115,8 @@ RUNTIME_FUNCTION(Runtime_CheckProxyDeleteTrapResult) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
+ Handle<Name> name = args.at<Name>(0);
+ Handle<JSReceiver> target = args.at<JSReceiver>(1);
Maybe<bool> result = JSProxy::CheckDeleteTrap(isolate, name, target);
if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index eb16e9c24f..1f870561b2 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -800,9 +800,9 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
+ Handle<String> subject = args.at<String>(0);
+ Handle<String> pattern = args.at<String>(1);
+ uint32_t limit = NumberToUint32(args[2]);
CHECK_LT(0, limit);
int subject_length = subject->length();
@@ -911,10 +911,11 @@ MaybeHandle<Object> ExperimentalOneshotExec(
RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> subject = args.at<String>(1);
+ int32_t index = 0;
+ CHECK(args[2].ToInt32(&index));
+ Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(3);
RETURN_RESULT_OR_FAILURE(
isolate, RegExpExec(isolate, regexp, subject, index, last_match_info,
RegExp::ExecQuirks::kNone));
@@ -923,10 +924,11 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
RUNTIME_FUNCTION(Runtime_RegExpExecTreatMatchAtEndAsFailure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> subject = args.at<String>(1);
+ int32_t index = 0;
+ CHECK(args[2].ToInt32(&index));
+ Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(3);
RETURN_RESULT_OR_FAILURE(
isolate, RegExpExec(isolate, regexp, subject, index, last_match_info,
RegExp::ExecQuirks::kTreatMatchAtEndAsFailure));
@@ -935,10 +937,11 @@ RUNTIME_FUNCTION(Runtime_RegExpExecTreatMatchAtEndAsFailure) {
RUNTIME_FUNCTION(Runtime_RegExpExperimentalOneshotExec) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> subject = args.at<String>(1);
+ int32_t index = 0;
+ CHECK(args[2].ToInt32(&index));
+ Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(3);
RETURN_RESULT_OR_FAILURE(
isolate,
ExperimentalOneshotExec(isolate, regexp, subject, index, last_match_info,
@@ -949,10 +952,11 @@ RUNTIME_FUNCTION(
Runtime_RegExpExperimentalOneshotExecTreatMatchAtEndAsFailure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> subject = args.at<String>(1);
+ int32_t index = 0;
+ CHECK(args[2].ToInt32(&index));
+ Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(3);
RETURN_RESULT_OR_FAILURE(
isolate,
ExperimentalOneshotExec(isolate, regexp, subject, index, last_match_info,
@@ -962,10 +966,10 @@ RUNTIME_FUNCTION(
RUNTIME_FUNCTION(Runtime_RegExpBuildIndices) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, match_info, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, maybe_names, 2);
+ Handle<RegExpMatchInfo> match_info = args.at<RegExpMatchInfo>(1);
+ Handle<Object> maybe_names = args.at(2);
#ifdef DEBUG
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
DCHECK(regexp->flags() & JSRegExp::kHasIndices);
#endif
@@ -1462,10 +1466,10 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> subject = args.at<String>(1);
+ Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(2);
+ Handle<JSArray> result_array = args.at<JSArray>(3);
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
CHECK(result_array->HasObjectElements());
@@ -1488,9 +1492,9 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, replace_obj, 2);
+ Handle<String> subject = args.at<String>(0);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(1);
+ Handle<JSReceiver> replace_obj = args.at<JSReceiver>(2);
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
DCHECK(replace_obj->map().is_callable());
@@ -1639,9 +1643,9 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, limit_obj, 2);
+ Handle<JSReceiver> recv = args.at<JSReceiver>(0);
+ Handle<String> string = args.at<String>(1);
+ Handle<Object> limit_obj = args.at(2);
Factory* factory = isolate->factory();
@@ -1793,8 +1797,8 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
+ Handle<JSReceiver> recv = args.at<JSReceiver>(0);
+ Handle<String> string = args.at<String>(1);
Handle<Object> replace_obj = args.at(2);
Factory* factory = isolate->factory();
@@ -1993,9 +1997,9 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
// TODO(pwong): To follow the spec more closely and simplify calling code,
// this could handle the canonicalization of pattern and flags. See
// https://tc39.github.io/ecma262/#sec-regexpinitialize
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
+ Handle<String> source = args.at<String>(1);
+ Handle<String> flags = args.at<String>(2);
RETURN_FAILURE_ON_EXCEPTION(isolate,
JSRegExp::Initialize(regexp, source, flags));
@@ -2006,14 +2010,14 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
RUNTIME_FUNCTION(Runtime_IsRegExp) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(obj.IsJSRegExp());
}
RUNTIME_FUNCTION(Runtime_RegExpStringFromFlags) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ auto regexp = JSRegExp::cast(args[0]);
Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.flags());
return *flags;
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index cb88bec373..9c71a4f20d 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -54,7 +54,7 @@ Object DeclareGlobal(Isolate* isolate, Handle<JSGlobalObject> global,
Handle<ScriptContextTable> script_contexts(
global->native_context().script_context_table(), isolate);
VariableLookupResult lookup;
- if (ScriptContextTable::Lookup(isolate, *script_contexts, *name, &lookup) &&
+ if (script_contexts->Lookup(name, &lookup) &&
IsLexicalVariableMode(lookup.mode)) {
// ES#sec-globaldeclarationinstantiation 6.a:
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
@@ -125,8 +125,8 @@ RUNTIME_FUNCTION(Runtime_DeclareModuleExports) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 1);
+ Handle<FixedArray> declarations = args.at<FixedArray>(0);
+ Handle<JSFunction> closure = args.at<JSFunction>(1);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
Handle<ClosureFeedbackCellArray>::null();
@@ -173,8 +173,8 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 1);
+ Handle<FixedArray> declarations = args.at<FixedArray>(0);
+ Handle<JSFunction> closure = args.at<JSFunction>(1);
Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context(), isolate);
@@ -324,15 +324,15 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
RUNTIME_FUNCTION(Runtime_DeclareEvalFunction) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value = args.at(1);
return DeclareEvalHelper(isolate, name, value);
}
RUNTIME_FUNCTION(Runtime_DeclareEvalVar) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
return DeclareEvalHelper(isolate, name,
isolate->factory()->undefined_value());
}
@@ -493,7 +493,7 @@ class ParameterArguments {
RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
@@ -506,7 +506,7 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
@@ -531,7 +531,7 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
int start_index =
callee->shared().internal_formal_parameter_count_without_receiver();
// This generic runtime function can also be used when the caller has been
@@ -557,8 +557,8 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
+ Handle<SharedFunctionInfo> shared = args.at<SharedFunctionInfo>(0);
+ Handle<FeedbackCell> feedback_cell = args.at<FeedbackCell>(1);
Handle<Context> context(isolate->context(), isolate);
return *Factory::JSFunctionBuilder{isolate, shared, context}
.set_feedback_cell(feedback_cell)
@@ -569,8 +569,8 @@ RUNTIME_FUNCTION(Runtime_NewClosure) {
RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
+ Handle<SharedFunctionInfo> shared = args.at<SharedFunctionInfo>(0);
+ Handle<FeedbackCell> feedback_cell = args.at<FeedbackCell>(1);
Handle<Context> context(isolate->context(), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
@@ -584,7 +584,7 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
+ Handle<ScopeInfo> scope_info = args.at<ScopeInfo>(0);
Handle<Context> outer(isolate->context(), isolate);
return *isolate->factory()->NewFunctionContext(outer, scope_info);
@@ -594,8 +594,8 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+ Handle<JSReceiver> extension_object = args.at<JSReceiver>(0);
+ Handle<ScopeInfo> scope_info = args.at<ScopeInfo>(1);
Handle<Context> current(isolate->context(), isolate);
return *isolate->factory()->NewWithContext(current, scope_info,
extension_object);
@@ -605,8 +605,8 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+ Handle<Object> thrown_object = args.at(0);
+ Handle<ScopeInfo> scope_info = args.at<ScopeInfo>(1);
Handle<Context> current(isolate->context(), isolate);
return *isolate->factory()->NewCatchContext(current, scope_info,
thrown_object);
@@ -616,7 +616,7 @@ RUNTIME_FUNCTION(Runtime_PushCatchContext) {
RUNTIME_FUNCTION(Runtime_PushBlockContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
+ Handle<ScopeInfo> scope_info = args.at<ScopeInfo>(0);
Handle<Context> current(isolate->context(), isolate);
return *isolate->factory()->NewBlockContext(current, scope_info);
}
@@ -625,7 +625,7 @@ RUNTIME_FUNCTION(Runtime_PushBlockContext) {
RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
int index;
PropertyAttributes attributes;
@@ -731,7 +731,7 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
RETURN_RESULT_OR_FAILURE(isolate,
LoadLookupSlot(isolate, name, kThrowOnError));
}
@@ -740,7 +740,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(isolate, name, kDontThrow));
}
@@ -748,7 +748,6 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- DCHECK(args[0].IsString());
Handle<String> name = args.at<String>(0);
Handle<Object> value;
Handle<Object> receiver;
@@ -831,8 +830,8 @@ MaybeHandle<Object> StoreLookupSlot(
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value = args.at(1);
Handle<Context> context(isolate->context(), isolate);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -842,8 +841,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value = args.at(1);
Handle<Context> context(isolate->context(), isolate);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -855,8 +854,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value = args.at(1);
const ContextLookupFlags lookup_flags =
static_cast<ContextLookupFlags>(DONT_FOLLOW_CHAINS);
Handle<Context> declaration_context(isolate->context().declaration_context(),
@@ -869,16 +868,15 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
RUNTIME_FUNCTION(Runtime_StoreGlobalNoHoleCheckForReplLetOrConst) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value = args.at(1);
Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
native_context->script_context_table(), isolate);
VariableLookupResult lookup_result;
- bool found = ScriptContextTable::Lookup(isolate, *script_contexts, *name,
- &lookup_result);
+ bool found = script_contexts->Lookup(name, &lookup_result);
CHECK(found);
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 5e5aae89fc..e8ea9f1e89 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -21,11 +21,11 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_GetSubstitution) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, matched, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_SMI_ARG_CHECKED(position, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 3);
- CONVERT_SMI_ARG_CHECKED(start_index, 4);
+ Handle<String> matched = args.at<String>(0);
+ Handle<String> subject = args.at<String>(1);
+ int position = args.smi_value_at(2);
+ Handle<String> replacement = args.at<String>(3);
+ int start_index = args.smi_value_at(4);
// A simple match without captures.
class SimpleMatch : public String::Match {
@@ -112,9 +112,9 @@ MaybeHandle<String> StringReplaceOneCharWithString(
RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
+ Handle<String> subject = args.at<String>(0);
+ Handle<String> search = args.at<String>(1);
+ Handle<String> replace = args.at<String>(2);
// If the cons string tree is too deep, we simply abort the recursion and
// retry with a flattened subject string.
@@ -148,9 +148,9 @@ RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
RUNTIME_FUNCTION(Runtime_StringSubstring) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- CONVERT_INT32_ARG_CHECKED(start, 1);
- CONVERT_INT32_ARG_CHECKED(end, 2);
+ Handle<String> string = args.at<String>(0);
+ int start = args.smi_value_at(1);
+ int end = args.smi_value_at(2);
DCHECK_LE(0, start);
DCHECK_LE(start, end);
DCHECK_LE(end, string->length());
@@ -161,8 +161,8 @@ RUNTIME_FUNCTION(Runtime_StringSubstring) {
RUNTIME_FUNCTION(Runtime_StringAdd) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
+ Handle<String> str1 = args.at<String>(0);
+ Handle<String> str2 = args.at<String>(1);
isolate->counters()->string_add_runtime()->Increment();
RETURN_RESULT_OR_FAILURE(isolate,
isolate->factory()->NewConsString(str1, str2));
@@ -172,7 +172,7 @@ RUNTIME_FUNCTION(Runtime_StringAdd) {
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ Handle<String> string = args.at<String>(0);
return *isolate->factory()->InternalizeString(string);
}
@@ -180,8 +180,8 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
+ Handle<String> subject = args.at<String>(0);
+ uint32_t i = NumberToUint32(args[1]);
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
@@ -198,12 +198,12 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ Handle<JSArray> array = args.at<JSArray>(0);
int32_t array_length;
if (!args[1].ToInt32(&array_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
}
- CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
+ Handle<String> special = args.at<String>(2);
size_t actual_array_length = 0;
CHECK(TryNumberToSize(array->length(), &actual_array_length));
@@ -304,8 +304,8 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
RUNTIME_FUNCTION(Runtime_StringToArray) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
+ Handle<String> s = args.at<String>(0);
+ uint32_t limit = NumberToUint32(args[1]);
s = String::Flatten(isolate, s);
const int length =
@@ -350,8 +350,8 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
RUNTIME_FUNCTION(Runtime_StringLessThan) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ Handle<String> x = args.at<String>(0);
+ Handle<String> y = args.at<String>(1);
ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
@@ -361,8 +361,8 @@ RUNTIME_FUNCTION(Runtime_StringLessThan) {
RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ Handle<String> x = args.at<String>(0);
+ Handle<String> y = args.at<String>(1);
ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
@@ -372,8 +372,8 @@ RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ Handle<String> x = args.at<String>(0);
+ Handle<String> y = args.at<String>(1);
ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
@@ -383,8 +383,8 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ Handle<String> x = args.at<String>(0);
+ Handle<String> y = args.at<String>(1);
ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
@@ -394,15 +394,15 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
RUNTIME_FUNCTION(Runtime_StringEqual) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ Handle<String> x = args.at<String>(0);
+ Handle<String> y = args.at<String>(1);
return isolate->heap()->ToBoolean(String::Equals(isolate, x, y));
}
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+ Handle<String> str = args.at<String>(0);
return *String::Flatten(isolate, str);
}
@@ -414,7 +414,7 @@ RUNTIME_FUNCTION(Runtime_StringMaxLength) {
RUNTIME_FUNCTION(Runtime_StringEscapeQuotes) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ Handle<String> string = args.at<String>(0);
// Equivalent to global replacement `string.replace(/"/g, "&quot")`, but this
// does not modify any global state (e.g. the regexp match info).
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index c5f9218911..3cc6b1977d 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -18,7 +18,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
DCHECK_GE(1, args.length());
Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
if (args.length() == 1) {
- CONVERT_ARG_HANDLE_CHECKED(Object, description, 0);
+ Handle<Object> description = args.at(0);
CHECK(description->IsString() || description->IsUndefined(isolate));
if (description->IsString())
symbol->set_description(String::cast(*description));
@@ -29,7 +29,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
RUNTIME_FUNCTION(Runtime_CreatePrivateBrandSymbol) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
Handle<Symbol> symbol = isolate->factory()->NewPrivateNameSymbol(name);
symbol->set_is_private_brand();
return *symbol;
@@ -38,7 +38,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateBrandSymbol) {
RUNTIME_FUNCTION(Runtime_CreatePrivateNameSymbol) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<String> name = args.at<String>(0);
Handle<Symbol> symbol = isolate->factory()->NewPrivateNameSymbol(name);
return *symbol;
}
@@ -46,7 +46,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateNameSymbol) {
RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
+ Handle<Symbol> symbol = args.at<Symbol>(0);
IncrementalStringBuilder builder(isolate);
builder.AppendCStringLiteral("Symbol(");
if (symbol->description().IsString()) {
@@ -60,7 +60,7 @@ RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ auto symbol = Symbol::cast(args[0]);
return isolate->heap()->ToBoolean(symbol.is_private());
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
index 2eec868ab9..b0a809f434 100644
--- a/deps/v8/src/runtime/runtime-test-wasm.cc
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -101,12 +101,12 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
CHECK_EQ(args.length(), 2);
- CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
+ int block_size = args.smi_value_at(0);
+ bool allow_async = Oddball::cast(args[1]).ToBool(isolate);
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
WasmCompileControls& ctrl = (*GetPerIsolateWasmControls())[v8_isolate];
ctrl.AllowAnySizeForAsync = allow_async;
- ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
+ ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size);
v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -179,7 +179,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Smi, value_addr_smi, 0);
+ auto value_addr_smi = Smi::cast(args[0]);
PrintIndentation(WasmStackSize(isolate));
PrintF("}");
@@ -233,7 +233,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ auto function = JSFunction::cast(args[0]);
if (!function.shared().HasAsmWasmData()) {
return ReadOnlyRoots(isolate).false_value();
}
@@ -257,7 +257,7 @@ bool DisallowWasmCodegenFromStringsCallback(v8::Local<v8::Context> context,
RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
+ bool flag = Oddball::cast(args[0]).ToBool(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8_isolate->SetAllowWasmCodeGenerationCallback(
flag ? DisallowWasmCodegenFromStringsCallback : nullptr);
@@ -267,8 +267,8 @@ RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
RUNTIME_FUNCTION(Runtime_IsWasmCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- Code code = function.code();
+ auto function = JSFunction::cast(args[0]);
+ CodeT code = function.code();
bool is_js_to_wasm = code.kind() == CodeKind::JS_TO_WASM_FUNCTION ||
(code.builtin_id() == Builtin::kGenericJSToWasmWrapper);
return isolate->heap()->ToBoolean(is_js_to_wasm);
@@ -296,8 +296,8 @@ RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
RUNTIME_FUNCTION(Runtime_GetWasmExceptionTagId) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
+ Handle<WasmExceptionPackage> exception = args.at<WasmExceptionPackage>(0);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(1);
Handle<Object> tag =
WasmExceptionPackage::GetExceptionTag(isolate, exception);
CHECK(tag->IsWasmExceptionTag());
@@ -311,7 +311,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionTagId) {
RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
+ Handle<WasmExceptionPackage> exception = args.at<WasmExceptionPackage>(0);
Handle<Object> values_obj =
WasmExceptionPackage::GetExceptionValues(isolate, exception);
CHECK(values_obj->IsFixedArray()); // Only called with correct input.
@@ -319,15 +319,12 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
return *isolate->factory()->NewJSArrayWithElements(values);
}
-// Wait until the given module is fully tiered up, then serialize it into an
-// array buffer.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
+ Handle<WasmModuleObject> module_obj = args.at<WasmModuleObject>(0);
wasm::NativeModule* native_module = module_obj->native_module();
- native_module->compilation_state()->WaitForTopTierFinished();
DCHECK(!native_module->compilation_state()->failed());
wasm::WasmSerializer wasm_serializer(native_module);
@@ -349,8 +346,8 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, wire_bytes, 1);
+ Handle<JSArrayBuffer> buffer = args.at<JSArrayBuffer>(0);
+ Handle<JSTypedArray> wire_bytes = args.at<JSTypedArray>(1);
CHECK(!buffer->was_detached());
CHECK(!wire_bytes->WasDetached());
@@ -377,7 +374,7 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
+ Handle<WasmModuleObject> module_obj = args.at<WasmModuleObject>(0);
int instance_count = 0;
WeakArrayList weak_instance_list =
module_obj->script().wasm_weak_instance_list();
@@ -390,7 +387,7 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, argument, 0);
+ Handle<JSObject> argument = args.at<JSObject>(0);
Handle<WasmModuleObject> module;
if (argument->IsWasmInstanceObject()) {
module = handle(Handle<WasmInstanceObject>::cast(argument)->module_object(),
@@ -406,7 +403,7 @@ RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Smi, info_addr, 0);
+ auto info_addr = Smi::cast(args[0]);
wasm::MemoryTracingInfo* info =
reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr.ptr());
@@ -432,8 +429,8 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_SMI_ARG_CHECKED(function_index, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ int function_index = args.smi_value_at(1);
auto* native_module = instance->module_object().native_module();
wasm::GetWasmEngine()->CompileFunction(isolate, native_module, function_index,
wasm::ExecutionTier::kTurbofan);
@@ -458,7 +455,7 @@ RUNTIME_FUNCTION(Runtime_WasmTierUp) {
RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
Handle<WasmExportedFunction> exp_fun =
Handle<WasmExportedFunction>::cast(function);
@@ -470,10 +467,25 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
return isolate->heap()->ToBoolean(code && code->is_liftoff());
}
+RUNTIME_FUNCTION(Runtime_IsTurboFanFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ wasm::NativeModule* native_module =
+ exp_fun->instance().module_object().native_module();
+ uint32_t func_index = exp_fun->function_index();
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCode* code = native_module->GetCode(func_index);
+ return isolate->heap()->ToBoolean(code && code->is_turbofan());
+}
+
RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
DCHECK_EQ(1, args.length());
DisallowGarbageCollection no_gc;
- CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
+ auto instance = WasmInstanceObject::cast(args[0]);
instance.module_object().native_module()->set_lazy_compile_frozen(true);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 54b53b719e..a351e85e93 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -20,7 +20,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
-#include "src/execution/runtime-profiler.h"
+#include "src/execution/tiering-manager.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/heap-write-barrier-inl.h"
#include "src/ic/stub-cache.h"
@@ -29,11 +29,17 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/smi.h"
#include "src/profiler/heap-snapshot-generator.h"
#include "src/regexp/regexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
+#include "src/web-snapshot/web-snapshot.h"
+
+#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev.h"
+#endif // V8_ENABLE_MAGLEV
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
@@ -101,8 +107,8 @@ RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
RUNTIME_FUNCTION(Runtime_ConstructDouble) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+ uint32_t hi = NumberToUint32(args[0]);
+ uint32_t lo = NumberToUint32(args[1]);
uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
return *isolate->factory()->NewNumber(base::uint64_to_double(result));
}
@@ -110,8 +116,8 @@ RUNTIME_FUNCTION(Runtime_ConstructDouble) {
RUNTIME_FUNCTION(Runtime_ConstructConsString) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, left, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, right, 1);
+ Handle<String> left = args.at<String>(0);
+ Handle<String> right = args.at<String>(1);
CHECK(left->IsOneByteRepresentation());
CHECK(right->IsOneByteRepresentation());
@@ -124,14 +130,14 @@ RUNTIME_FUNCTION(Runtime_ConstructConsString) {
RUNTIME_FUNCTION(Runtime_ConstructSlicedString) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
+ Handle<String> string = args.at<String>(0);
+ int index = args.smi_value_at(1);
CHECK(string->IsOneByteRepresentation());
- CHECK_LT(index->value(), string->length());
+ CHECK_LT(index, string->length());
- Handle<String> sliced_string = isolate->factory()->NewSubString(
- string, index->value(), string->length());
+ Handle<String> sliced_string =
+ isolate->factory()->NewSubString(string, index, string->length());
CHECK(sliced_string->IsSlicedString());
return *sliced_string;
}
@@ -140,7 +146,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ Handle<Object> function_object = args.at(0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
@@ -182,7 +188,7 @@ RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
RUNTIME_FUNCTION(Runtime_RuntimeEvaluateREPL) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> source = args.at<String>(0);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -206,25 +212,6 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
-RUNTIME_FUNCTION(Runtime_DynamicCheckMapsEnabled) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(FLAG_turbo_dynamic_map_checks);
-}
-
-RUNTIME_FUNCTION(Runtime_IsTopTierTurboprop) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(FLAG_turboprop_as_toptier);
-}
-
-RUNTIME_FUNCTION(Runtime_IsMidTierTurboprop) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(FLAG_turboprop &&
- !FLAG_turboprop_as_toptier);
-}
-
RUNTIME_FUNCTION(Runtime_IsAtomicsWaitAllowed) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -233,11 +220,14 @@ RUNTIME_FUNCTION(Runtime_IsAtomicsWaitAllowed) {
namespace {
-enum class TierupKind { kTierupBytecode, kTierupBytecodeOrMidTier };
-
+template <CodeKind code_kind>
bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
- TierupKind tierup_kind,
- IsCompiledScope* is_compiled_scope) {
+ IsCompiledScope* is_compiled_scope);
+
+template <>
+bool CanOptimizeFunction<CodeKind::TURBOFAN>(
+ Handle<JSFunction> function, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
// The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
@@ -269,8 +259,7 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
}
CodeKind kind = CodeKindForTopTier();
- if ((tierup_kind == TierupKind::kTierupBytecode &&
- function->HasAvailableOptimizedCode()) ||
+ if (function->HasAvailableOptimizedCode() ||
function->HasAvailableCodeKind(kind)) {
DCHECK(function->HasAttachedOptimizedCode() ||
function->ChecksOptimizationMarker());
@@ -283,26 +272,42 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
return true;
}
-Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
- TierupKind tierup_kind) {
+#ifdef V8_ENABLE_MAGLEV
+template <>
+bool CanOptimizeFunction<CodeKind::MAGLEV>(Handle<JSFunction> function,
+ Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
+ if (!FLAG_maglev) return false;
+
+ CHECK(!IsAsmWasmFunction(isolate, *function));
+
+ // TODO(v8:7700): Disabled optimization due to deopts?
+ // TODO(v8:7700): Already cached?
+
+ return function->GetActiveTier() < CodeKind::MAGLEV;
+}
+#endif // V8_ENABLE_MAGLEV
+
+Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate) {
if (args.length() != 1 && args.length() != 2) {
return CrashUnlessFuzzing(isolate);
}
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ Handle<Object> function_object = args.at(0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+ static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
+
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
- if (!CanOptimizeFunction(function, isolate, tierup_kind,
- &is_compiled_scope)) {
+ if (!CanOptimizeFunction<kCodeKind>(function, isolate, &is_compiled_scope)) {
return ReadOnlyRoots(isolate).undefined_value();
}
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(Object, type, 1);
+ Handle<Object> type = args.at(1);
if (!type->IsString()) return CrashUnlessFuzzing(isolate);
if (Handle<String>::cast(type)->IsOneByteEqualTo(
base::StaticCharVector("concurrent")) &&
@@ -310,23 +315,21 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
concurrency_mode = ConcurrencyMode::kConcurrent;
}
}
- if (FLAG_trace_opt) {
- PrintF("[manually marking ");
- function->ShortPrint();
- PrintF(" for %s optimization]\n",
- concurrency_mode == ConcurrencyMode::kConcurrent ? "concurrent"
- : "non-concurrent");
- }
// This function may not have been lazily compiled yet, even though its shared
// function has.
if (!function->is_compiled()) {
- DCHECK(function->shared().IsInterpreted());
- function->set_code(*BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
+ DCHECK(function->shared().HasBytecodeArray());
+ CodeT codet = *BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+ if (function->shared().HasBaselineCode()) {
+ codet = function->shared().baseline_code(kAcquireLoad);
+ }
+ function->set_code(codet);
}
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- function->MarkForOptimization(concurrency_mode);
+ TraceManualRecompile(*function, kCodeKind, concurrency_mode);
+ JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
+ function->MarkForOptimization(isolate, CodeKind::TURBOFAN, concurrency_mode);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -354,7 +357,7 @@ bool EnsureFeedbackVector(Isolate* isolate, Handle<JSFunction> function) {
// Ensure function has a feedback vector to hold type feedback for
// optimization.
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
return true;
}
@@ -365,7 +368,7 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
if (args.length() != 1) {
return CrashUnlessFuzzing(isolate);
}
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ Handle<Object> function_object = args.at(0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
@@ -391,21 +394,87 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
return *function;
}
-RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
+// TODO(v8:7700): Remove this function once we no longer need it to measure
+// maglev compile times. For normal tierup, OptimizeMaglevOnNextCall should be
+// used instead.
+#ifdef V8_ENABLE_MAGLEV
+RUNTIME_FUNCTION(Runtime_BenchMaglev) {
HandleScope scope(isolate);
- return OptimizeFunctionOnNextCall(args, isolate, TierupKind::kTierupBytecode);
+ DCHECK_EQ(args.length(), 2);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ int count = args.smi_value_at(1);
+
+ Handle<CodeT> codet;
+ base::ElapsedTimer timer;
+ timer.Start();
+ codet = Maglev::Compile(isolate, function).ToHandleChecked();
+ for (int i = 1; i < count; ++i) {
+ HandleScope handle_scope(isolate);
+ Maglev::Compile(isolate, function);
+ }
+ PrintF("Maglev compile time: %g ms!\n",
+ timer.Elapsed().InMillisecondsF() / count);
+
+ function->set_code(*codet);
+
+ return ReadOnlyRoots(isolate).undefined_value();
}
+#else
+RUNTIME_FUNCTION(Runtime_BenchMaglev) {
+ PrintF("Maglev is not enabled.\n");
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+#endif // V8_ENABLE_MAGLEV
-RUNTIME_FUNCTION(Runtime_TierupFunctionOnNextCall) {
+RUNTIME_FUNCTION(Runtime_ActiveTierIsMaglev) {
HandleScope scope(isolate);
- return OptimizeFunctionOnNextCall(args, isolate,
- TierupKind::kTierupBytecodeOrMidTier);
+ DCHECK_EQ(args.length(), 1);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ return isolate->heap()->ToBoolean(function->ActiveTierIsMaglev());
+}
+
+#ifdef V8_ENABLE_MAGLEV
+RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 1);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+
+ static constexpr CodeKind kCodeKind = CodeKind::MAGLEV;
+
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate));
+ if (!CanOptimizeFunction<kCodeKind>(function, isolate, &is_compiled_scope)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ DCHECK(is_compiled_scope.is_compiled());
+ DCHECK(function->is_compiled());
+
+ // TODO(v8:7700): Support concurrent compiles.
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+
+ TraceManualRecompile(*function, kCodeKind, concurrency_mode);
+ JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
+ function->MarkForOptimization(isolate, kCodeKind, concurrency_mode);
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+#else
+RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
+ PrintF("Maglev is not enabled.\n");
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+#endif // V8_ENABLE_MAGLEV
+
+// TODO(jgruber): Rename to OptimizeTurbofanOnNextCall.
+RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ return OptimizeFunctionOnNextCall(args, isolate);
}
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
EnsureFeedbackVector(isolate, function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -415,11 +484,11 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
if ((args.length() != 1 && args.length() != 2) || !args[0].IsJSFunction()) {
return CrashUnlessFuzzing(isolate);
}
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
bool allow_heuristic_optimization = false;
if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
+ Handle<Object> sync_object = args.at(1);
if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
Handle<String> sync = Handle<String>::cast(sync_object);
if (sync->IsOneByteEqualTo(
@@ -452,32 +521,6 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_OptimizeFunctionForTopTier) {
- // TODO(rmcilroy): Ideally this should be rolled into
- // OptimizeFunctionOnNextCall, but there is no way to mark the tier to be
- // optimized using the regular optimization marking system.
- HandleScope scope(isolate);
- if (args.length() != 1) {
- return CrashUnlessFuzzing(isolate);
- }
-
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-
- IsCompiledScope is_compiled_scope(
- function->shared().is_compiled_scope(isolate));
- if (!CanOptimizeFunction(function, isolate,
- TierupKind::kTierupBytecodeOrMidTier,
- &is_compiled_scope)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
-
- Compiler::CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent,
- CodeKindForTopTier());
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 0 || args.length() == 1);
@@ -488,7 +531,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
int stack_depth = 0;
if (args.length() == 1) {
if (!args[0].IsSmi()) return CrashUnlessFuzzing(isolate);
- stack_depth = args.smi_at(0);
+ stack_depth = args.smi_value_at(0);
}
// Find the JavaScript function on the top of the stack.
@@ -534,12 +577,13 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
}
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
+ function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
+ ConcurrencyMode::kNotConcurrent);
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->is_unoptimized()) {
- isolate->runtime_profiler()->AttemptOnStackReplacement(
+ isolate->tiering_manager()->AttemptOnStackReplacement(
UnoptimizedFrame::cast(it.frame()),
AbstractCode::kMaxLoopNestingMarker);
}
@@ -573,7 +617,7 @@ RUNTIME_FUNCTION(Runtime_BaselineOsr) {
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ Handle<Object> function_object = args.at(0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
@@ -612,7 +656,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
}
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ Handle<Object> function_object = args.at(0);
if (function_object->IsUndefined()) return Smi::FromInt(status);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
@@ -628,7 +672,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
}
if (function->HasAttachedOptimizedCode()) {
- Code code = function->code();
+ CodeT code = function->code();
if (code.marked_for_deoptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForDeoptimization);
} else {
@@ -746,7 +790,7 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
function->ClearTypeFeedbackInfo();
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -795,7 +839,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
+ DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());
@@ -867,7 +911,7 @@ RUNTIME_FUNCTION(Runtime_TakeHeapSnapshot) {
if (args.length() >= 1) {
HandleScope hs(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, filename_as_js_string, 0);
+ Handle<String> filename_as_js_string = args.at<String>(0);
std::unique_ptr<char[]> buffer = filename_as_js_string->ToCString();
filename = std::string(buffer.get());
}
@@ -940,7 +984,7 @@ RUNTIME_FUNCTION(Runtime_PrintWithNameForAssert) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(String, name, 0);
+ auto name = String::cast(args[0]);
PrintF(" * ");
StringCharacterStream stream(name);
@@ -967,10 +1011,10 @@ RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
DCHECK_LE(1, args.length());
DCHECK_GE(2, args.length());
CHECK(FLAG_track_retaining_path);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
+ Handle<HeapObject> object = args.at<HeapObject>(0);
RetainingPathOption option = RetainingPathOption::kDefault;
if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
+ Handle<String> str = args.at<String>(1);
const char track_ephemeron_path[] = "track-ephemeron-path";
if (str->IsOneByteEqualTo(base::StaticCharVector(track_ephemeron_path))) {
option = RetainingPathOption::kTrackEphemeronPath;
@@ -988,7 +1032,7 @@ RUNTIME_FUNCTION(Runtime_GlobalPrint) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(String, string, 0);
+ auto string = String::cast(args[0]);
StringCharacterStream stream(string);
while (stream.HasMore()) {
uint16_t character = stream.GetNext();
@@ -1009,7 +1053,7 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, arg, 0);
+ Object arg = args[0];
if (arg.IsTrue(isolate)) {
isolate->set_force_slow_path(true);
} else {
@@ -1022,7 +1066,7 @@ RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ int message_id = args.smi_value_at(0);
const char* message = GetAbortReason(static_cast<AbortReason>(message_id));
base::OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
@@ -1033,7 +1077,7 @@ RUNTIME_FUNCTION(Runtime_Abort) {
RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ Handle<String> message = args.at<String>(0);
if (FLAG_disable_abortjs) {
base::OS::PrintError("[disabled] abort: %s\n", message->ToCString().get());
return Object();
@@ -1047,7 +1091,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_AbortCSADcheck) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ Handle<String> message = args.at<String>(0);
base::OS::PrintError("abort: CSA_DCHECK failed: %s\n",
message->ToCString().get());
isolate->PrintStack(stderr);
@@ -1060,7 +1104,7 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
#ifdef DEBUG
DCHECK_EQ(1, args.length());
// Get the function and make sure it is compiled.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+ Handle<JSFunction> func = args.at<JSFunction>(0);
IsCompiledScope is_compiled_scope;
CHECK(func->is_compiled() ||
Compiler::Compile(isolate, func, Compiler::KEEP_EXCEPTION,
@@ -1103,7 +1147,7 @@ RUNTIME_FUNCTION(Runtime_TraceEnter) {
RUNTIME_FUNCTION(Runtime_TraceExit) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
PrintIndentation(StackSize(isolate));
PrintF("} -> ");
obj.ShortPrint();
@@ -1114,15 +1158,15 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSObject, obj1, 0);
- CONVERT_ARG_CHECKED(JSObject, obj2, 1);
+ auto obj1 = JSObject::cast(args[0]);
+ auto obj2 = JSObject::cast(args[1]);
return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
}
RUNTIME_FUNCTION(Runtime_InLargeObjectSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ auto obj = HeapObject::cast(args[0]);
return isolate->heap()->ToBoolean(
isolate->heap()->new_lo_space()->Contains(obj) ||
isolate->heap()->code_lo_space()->Contains(obj) ||
@@ -1132,7 +1176,7 @@ RUNTIME_FUNCTION(Runtime_InLargeObjectSpace) {
RUNTIME_FUNCTION(Runtime_HasElementsInALargeObjectSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSArray, array, 0);
+ auto array = JSArray::cast(args[0]);
FixedArrayBase elements = array.elements();
return isolate->heap()->ToBoolean(
isolate->heap()->new_lo_space()->Contains(elements) ||
@@ -1142,7 +1186,7 @@ RUNTIME_FUNCTION(Runtime_HasElementsInALargeObjectSpace) {
RUNTIME_FUNCTION(Runtime_InYoungGeneration) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ Object obj = args[0];
return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj));
}
@@ -1151,7 +1195,7 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
DisallowGarbageCollection no_gc;
if (args.length() != 1) return CrashUnlessFuzzing(isolate);
- CONVERT_ARG_CHECKED(Object, arg, 0);
+ Object arg = args[0];
if (!arg.IsJSObject()) return CrashUnlessFuzzing(isolate);
JSObject object = JSObject::cast(arg);
@@ -1183,7 +1227,7 @@ v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
+ bool flag = Oddball::cast(args[0]).ToBool(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8_isolate->SetModifyCodeGenerationFromStringsCallback(
flag ? DisallowCodegenFromStringsCallback : nullptr);
@@ -1193,8 +1237,8 @@ RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
+ auto regexp = JSRegExp::cast(args[0]);
+ bool is_latin1 = Oddball::cast(args[1]).ToBool(isolate);
bool result;
if (regexp.type_tag() == JSRegExp::IRREGEXP) {
result = regexp.bytecode(is_latin1).IsByteArray();
@@ -1207,8 +1251,8 @@ RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
+ auto regexp = JSRegExp::cast(args[0]);
+ bool is_latin1 = Oddball::cast(args[1]).ToBool(isolate);
bool result;
if (regexp.type_tag() == JSRegExp::IRREGEXP) {
result = regexp.code(is_latin1).IsCodeT();
@@ -1221,7 +1265,7 @@ RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ auto regexp = JSRegExp::cast(args[0]);
const char* type_str;
switch (regexp.type_tag()) {
case JSRegExp::NOT_COMPILED:
@@ -1243,34 +1287,34 @@ RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
RUNTIME_FUNCTION(Runtime_RegexpIsUnmodified) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
return isolate->heap()->ToBoolean(
RegExp::IsUnmodifiedRegExp(isolate, regexp));
}
-#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
- RUNTIME_FUNCTION(Runtime_Has##Name) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj.Has##Name()); \
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION(Runtime_##Name) { \
+ auto obj = JSObject::cast(args[0]); \
+ return isolate->heap()->ToBoolean(obj.Name()); \
}
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiOrObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DoubleElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HoleyElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(PackedElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasFastElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasSmiElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasSmiOrObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasHoleyElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasDictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasPackedElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasSloppyArgumentsElements)
// Properties test sitting with elements tests - not fooling anyone.
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HasFastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype) \
RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ auto obj = JSObject::cast(args[0]); \
return isolate->heap()->ToBoolean(obj.HasFixed##Type##Elements()); \
}
@@ -1359,7 +1403,7 @@ RUNTIME_FUNCTION(Runtime_SerializeDeserializeNow) {
RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<Object> object = args.at(0);
#ifdef VERIFY_HEAP
object->ObjectVerify(isolate);
#else
@@ -1389,7 +1433,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<JSObject> object = args.at<JSObject>(0);
MapUpdater::CompleteInobjectSlackTracking(isolate, object->map());
return ReadOnlyRoots(isolate).undefined_value();
@@ -1442,7 +1486,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) final {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta, bool reuse_code) final {}
+ int fp_to_sp_delta) final {}
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> shared,
const char* reason) final {}
@@ -1462,9 +1506,9 @@ RUNTIME_FUNCTION(Runtime_NewRegExpWithBacktrackLimit) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 1);
- CONVERT_UINT32_ARG_CHECKED(backtrack_limit, 2);
+ Handle<String> pattern = args.at<String>(0);
+ Handle<String> flags_string = args.at<String>(1);
+ uint32_t backtrack_limit = args.positive_smi_value_at(2);
JSRegExp::Flags flags =
JSRegExp::FlagsFromString(isolate, flags_string).value();
@@ -1485,5 +1529,115 @@ RUNTIME_FUNCTION(Runtime_BigIntMaxLengthBits) {
return *isolate->factory()->NewNumber(BigInt::kMaxLengthBits);
}
+RUNTIME_FUNCTION(Runtime_IsSameHeapObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<HeapObject> obj1 = args.at<HeapObject>(0);
+ Handle<HeapObject> obj2 = args.at<HeapObject>(1);
+ return isolate->heap()->ToBoolean(obj1->address() == obj2->address());
+}
+
+RUNTIME_FUNCTION(Runtime_IsSharedString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<HeapObject> obj = args.at<HeapObject>(0);
+ return isolate->heap()->ToBoolean(obj->IsString() &&
+ Handle<String>::cast(obj)->IsShared());
+}
+
+RUNTIME_FUNCTION(Runtime_WebSnapshotSerialize) {
+ if (!FLAG_allow_natives_syntax) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ HandleScope scope(isolate);
+ if (args.length() < 1 || args.length() > 2) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRuntimeWrongNumArgs));
+ }
+ Handle<Object> object = args.at(0);
+ Handle<FixedArray> block_list = isolate->factory()->empty_fixed_array();
+ Handle<JSArray> block_list_js_array;
+ if (args.length() == 2) {
+ if (!args[1].IsJSArray()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidArgument));
+ }
+ block_list_js_array = args.at<JSArray>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, block_list,
+ JSReceiver::GetOwnValues(block_list_js_array,
+ PropertyFilter::ENUMERABLE_STRINGS));
+ }
+
+ auto snapshot_data = std::make_shared<WebSnapshotData>();
+ WebSnapshotSerializer serializer(isolate);
+ if (!serializer.TakeSnapshot(object, block_list, *snapshot_data)) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
+ if (!block_list_js_array.is_null() &&
+ static_cast<uint32_t>(block_list->length()) <
+ serializer.external_objects_count()) {
+ Handle<FixedArray> externals = serializer.GetExternals();
+ Handle<Map> map = JSObject::GetElementsTransitionMap(block_list_js_array,
+ PACKED_ELEMENTS);
+ block_list_js_array->set_elements(*externals);
+ block_list_js_array->set_length(Smi::FromInt(externals->length()));
+ block_list_js_array->set_map(*map);
+ }
+ i::Handle<i::Object> managed_object = Managed<WebSnapshotData>::FromSharedPtr(
+ isolate, snapshot_data->buffer_size, snapshot_data);
+ return *managed_object;
+}
+
+RUNTIME_FUNCTION(Runtime_WebSnapshotDeserialize) {
+ if (!FLAG_allow_natives_syntax) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ HandleScope scope(isolate);
+ if (args.length() == 0 || args.length() > 2) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRuntimeWrongNumArgs));
+ }
+ if (!args[0].IsForeign()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidArgument));
+ }
+ Handle<Foreign> foreign_data = args.at<Foreign>(0);
+ Handle<FixedArray> injected_references =
+ isolate->factory()->empty_fixed_array();
+ if (args.length() == 2) {
+ if (!args[1].IsJSArray()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidArgument));
+ }
+ auto js_array = args.at<JSArray>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, injected_references,
+ JSReceiver::GetOwnValues(js_array, PropertyFilter::ENUMERABLE_STRINGS));
+ }
+
+ auto data = Managed<WebSnapshotData>::cast(*foreign_data).get();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ WebSnapshotDeserializer deserializer(v8_isolate, data->buffer,
+ data->buffer_size);
+ if (!deserializer.Deserialize(injected_references)) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
+ Handle<Object> object;
+ if (!deserializer.value().ToHandle(&object)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kWebSnapshotError));
+ }
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_SharedGC) {
+ SealHandleScope scope(isolate);
+ isolate->heap()->CollectSharedGarbage(GarbageCollectionReason::kTesting);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-trace.cc b/deps/v8/src/runtime/runtime-trace.cc
index 06cfd73548..536d0eed11 100644
--- a/deps/v8/src/runtime/runtime-trace.cc
+++ b/deps/v8/src/runtime/runtime-trace.cc
@@ -46,9 +46,7 @@ void PrintRegisterRange(UnoptimizedFrame* frame, std::ostream& os,
reg_index++) {
Object reg_object = frame->ReadInterpreterRegister(reg_index);
os << " [ " << std::setw(reg_field_width)
- << interpreter::Register(reg_index).ToString(
- bytecode_iterator.bytecode_array()->parameter_count())
- << arrow_direction;
+ << interpreter::Register(reg_index).ToString() << arrow_direction;
reg_object.ShortPrint(os);
os << " ]" << std::endl;
}
@@ -124,9 +122,9 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
- CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+ Handle<BytecodeArray> bytecode_array = args.at<BytecodeArray>(0);
+ int bytecode_offset = args.smi_value_at(1);
+ Handle<Object> accumulator = args.at(2);
int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
@@ -146,8 +144,7 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
}
os << static_cast<const void*>(bytecode_address) << " @ " << std::setw(4)
<< offset << " : ";
- interpreter::BytecodeDecoder::Decode(os, bytecode_address,
- bytecode_array->parameter_count());
+ interpreter::BytecodeDecoder::Decode(os, bytecode_address);
os << std::endl;
// Print all input registers and accumulator.
PrintRegisters(frame, os, true, bytecode_iterator, accumulator);
@@ -175,9 +172,9 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
- CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+ Handle<BytecodeArray> bytecode_array = args.at<BytecodeArray>(0);
+ int bytecode_offset = args.smi_value_at(1);
+ Handle<Object> accumulator = args.at(2);
int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
@@ -208,9 +205,9 @@ RUNTIME_FUNCTION(Runtime_TraceUpdateFeedback) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_ARG_CHECKED(slot, 1);
- CONVERT_ARG_CHECKED(String, reason, 2);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ int slot = args.smi_value_at(1);
+ auto reason = String::cast(args[2]);
int slot_count = function->feedback_vector().metadata().slot_count();
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index ca3a50ee76..4ce669b1f7 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -35,13 +35,10 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) {
RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
-
+ Handle<JSTypedArray> target = args.at<JSTypedArray>(0);
+ Handle<Object> source = args.at(1);
size_t length;
- CHECK(TryNumberToSize(*length_obj, &length));
-
+ CHECK(TryNumberToSize(args[2], &length));
ElementsAccessor* accessor = target->GetElementsAccessor();
return accessor->CopyElements(source, target, length, 0);
}
@@ -49,14 +46,14 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ Handle<JSTypedArray> holder = args.at<JSTypedArray>(0);
return *holder->GetBuffer();
}
RUNTIME_FUNCTION(Runtime_GrowableSharedArrayBufferByteLength) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ Handle<JSArrayBuffer> array_buffer = args.at<JSArrayBuffer>(0);
CHECK_EQ(0, array_buffer->byte_length());
size_t byte_length = array_buffer->GetBackingStore()->byte_length();
@@ -91,8 +88,9 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
DCHECK_EQ(1, args.length());
// Validation is handled in the Torque builtin.
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
+ Handle<JSTypedArray> array = args.at<JSTypedArray>(0);
DCHECK(!array->WasDetached());
+ DCHECK(!array->IsOutOfBounds());
#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
if (FLAG_multi_mapped_mock_allocator) {
@@ -102,7 +100,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
}
#endif
- size_t length = array->length();
+ size_t length = array->GetLength();
DCHECK_LT(1, length);
// In case of a SAB, the data is copied into temporary memory, as
@@ -116,7 +114,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
std::vector<uint8_t> offheap_copy;
void* data_copy_ptr = nullptr;
if (copy_data) {
- const size_t bytes = array->byte_length();
+ const size_t bytes = array->GetByteLength();
if (bytes <= static_cast<unsigned>(
ByteArray::LengthFor(kMaxRegularHeapObjectSize))) {
array_copy = isolate->factory()->NewByteArray(static_cast<int>(bytes));
@@ -165,7 +163,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
if (copy_data) {
DCHECK_NOT_NULL(data_copy_ptr);
DCHECK_NE(array_copy.is_null(), offheap_copy.empty());
- const size_t bytes = array->byte_length();
+ const size_t bytes = array->GetByteLength();
base::Relaxed_Memcpy(static_cast<base::Atomic8*>(array->DataPtr()),
static_cast<base::Atomic8*>(data_copy_ptr), bytes);
}
@@ -176,17 +174,12 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
RUNTIME_FUNCTION(Runtime_TypedArraySet) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 3);
-
+ Handle<JSTypedArray> target = args.at<JSTypedArray>(0);
+ Handle<Object> source = args.at(1);
size_t length;
- CHECK(TryNumberToSize(*length_obj, &length));
-
+ CHECK(TryNumberToSize(args[2], &length));
size_t offset;
- CHECK(TryNumberToSize(*offset_obj, &offset));
-
+ CHECK(TryNumberToSize(args[3], &offset));
ElementsAccessor* accessor = target->GetElementsAccessor();
return accessor->CopyElements(source, target, length, offset);
}
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 170c0bcdbc..30437b9ba9 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -13,103 +13,6 @@
namespace v8 {
namespace internal {
-// Cast the given object to a value of the specified type and store
-// it in a variable with the given name. If the object is not of the
-// expected type we crash safely.
-#define CONVERT_ARG_CHECKED(Type, name, index) \
- CHECK(args[index].Is##Type()); \
- Type name = Type::cast(args[index]);
-
-#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
- CHECK(args[index].Is##Type()); \
- Handle<Type> name = args.at<Type>(index);
-
-#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
- CHECK(args[index].IsNumber()); \
- Handle<Object> name = args.at(index);
-
-// Cast the given object to a boolean and store it in a variable with
-// the given name. If the object is not a boolean we crash safely.
-#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
- CHECK(args[index].IsBoolean()); \
- bool name = args[index].IsTrue(isolate);
-
-// Cast the given argument to a Smi and store its value in an int variable
-// with the given name. If the argument is not a Smi we crash safely.
-#define CONVERT_SMI_ARG_CHECKED(name, index) \
- CHECK(args[index].IsSmi()); \
- int name = args.smi_at(index); \
- /* Ensure we have a Smi and not a TaggedIndex */ \
- DCHECK_IMPLIES(args[index].IsTaggedIndex(), \
- name == TaggedIndex(args[index].ptr()).value());
-
-// Cast the given argument to a TaggedIndex and store its value in an int
-// variable with the given name. If the argument is not a TaggedIndex we crash
-// safely.
-#define CONVERT_TAGGED_INDEX_ARG_CHECKED(name, index) \
- CHECK(args[index].IsTaggedIndex()); \
- int name = args.tagged_index_at(index);
-
-// Cast the given argument to a double and store it in a variable with
-// the given name. If the argument is not a number (as opposed to
-// the number not-a-number) we crash safely.
-#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
- CHECK(args[index].IsNumber()); \
- double name = args.number_at(index);
-
-// Cast the given argument to a size_t and store its value in a variable with
-// the given name. If the argument is not a size_t we crash safely.
-#define CONVERT_SIZE_ARG_CHECKED(name, index) \
- CHECK(args[index].IsNumber()); \
- Handle<Object> name##_object = args.at(index); \
- size_t name = 0; \
- CHECK(TryNumberToSize(*name##_object, &name));
-
-// Call the specified converter on the object *comand store the result in
-// a variable of the specified type with the given name. If the
-// object is not a Number we crash safely.
-#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- CHECK(obj.IsNumber()); \
- type name = NumberTo##Type(obj);
-
-// Cast the given argument to PropertyDetails and store its value in a
-// variable with the given name. If the argument is not a Smi we crash safely.
-#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \
- CHECK(args[index]->IsSmi()); \
- PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
- int32_t __tmp_##name = 0; \
- CHECK(args[index]->ToInt32(&__tmp_##name)); \
- CHECK(is_valid_language_mode(__tmp_##name)); \
- LanguageMode name = static_cast<LanguageMode>(__tmp_##name);
-
-// Assert that the given argument is a number within the Int32 range
-// and convert it to int32_t. If the argument is not an Int32 we crash safely.
-#define CONVERT_INT32_ARG_CHECKED(name, index) \
- CHECK(args[index].IsNumber()); \
- int32_t name = 0; \
- CHECK(args[index].ToInt32(&name));
-
-// Assert that the given argument is a number within the Uint32 range
-// and convert it to uint32_t. If the argument is not an Uint32 call
-// IllegalOperation and return.
-#define CONVERT_UINT32_ARG_CHECKED(name, index) \
- CHECK(args[index].IsNumber()); \
- uint32_t name = 0; \
- CHECK(args[index].ToUint32(&name));
-
-// Cast the given argument to PropertyAttributes and store its value in a
-// variable with the given name. If the argument is not a Smi or the
-// enum value is out of range, we crash safely.
-#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
- CHECK(args[index].IsSmi()); \
- CHECK_EQ(args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE), 0); \
- PropertyAttributes name = static_cast<PropertyAttributes>(args.smi_at(index));
-
// A mechanism to return a pair of Object pointers in registers (if possible).
// How this is achieved is calling convention-dependent.
// All currently supported x86 compiles uses calling conventions that are cdecl
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index c6e39c4c01..e7c695947e 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -57,8 +57,13 @@ class FrameFinder {
StackFrameIterator frame_iterator_;
};
-WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
- return FrameFinder<WasmFrame>(isolate).frame()->wasm_instance();
+WasmInstanceObject GetWasmInstanceOnStackTop(
+ Isolate* isolate,
+ std::initializer_list<StackFrame::Type> skipped_frame_types = {
+ StackFrame::EXIT}) {
+ return FrameFinder<WasmFrame>(isolate, skipped_frame_types)
+ .frame()
+ ->wasm_instance();
}
Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
@@ -102,11 +107,11 @@ RUNTIME_FUNCTION(Runtime_WasmIsValidRefValue) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
// 'raw_instance' can be either a WasmInstanceObject or undefined.
- CONVERT_ARG_HANDLE_CHECKED(Object, raw_instance, 0)
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<Object> raw_instance = args.at(0);
+ Handle<Object> value = args.at(1);
// Make sure ValueType fits properly in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- CONVERT_SMI_ARG_CHECKED(raw_type, 2);
+ int raw_type = args.smi_value_at(2);
const wasm::WasmModule* module =
raw_instance->IsWasmInstanceObject()
@@ -125,10 +130,10 @@ RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
// {delta_pages} is checked to be a positive smi in the WasmMemoryGrow builtin
// which calls this runtime function.
- CONVERT_UINT32_ARG_CHECKED(delta_pages, 1);
+ uint32_t delta_pages = args.positive_smi_value_at(1);
int ret = WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), delta_pages);
@@ -142,7 +147,7 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ int message_id = args.smi_value_at(0);
return ThrowWasmError(isolate, MessageTemplateFromInt(message_id));
}
@@ -171,8 +176,8 @@ RUNTIME_FUNCTION(Runtime_WasmThrow) {
DCHECK_EQ(2, args.length());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- CONVERT_ARG_CHECKED(WasmExceptionTag, tag_raw, 0);
- CONVERT_ARG_CHECKED(FixedArray, values_raw, 1);
+ auto tag_raw = WasmExceptionTag::cast(args[0]);
+ auto values_raw = FixedArray::cast(args[1]);
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<WasmExceptionTag> tag(tag_raw, isolate);
Handle<FixedArray> values(values_raw, isolate);
@@ -206,8 +211,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
ClearThreadInWasmScope wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_SMI_ARG_CHECKED(func_index, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ int func_index = args.smi_value_at(1);
#ifdef DEBUG
FrameFinder<WasmCompileLazyFrame> frame_finder(isolate);
@@ -231,7 +236,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
namespace {
void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
- int function_index, Handle<Code> wrapper_code) {
+ int function_index, Handle<CodeT> wrapper_code) {
Handle<WasmInternalFunction> internal =
WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
function_index)
@@ -248,8 +253,9 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_ARG_HANDLE_CHECKED(WasmExportedFunctionData, function_data, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ Handle<WasmExportedFunctionData> function_data =
+ args.at<WasmExportedFunctionData>(1);
DCHECK(isolate->context().is_null());
isolate->set_context(instance->native_context());
@@ -269,9 +275,10 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
return ReadOnlyRoots(isolate).undefined_value();
}
- Handle<Code> wrapper_code =
+ Handle<CodeT> wrapper_code = ToCodeT(
wasm::JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
- isolate, sig, module);
+ isolate, sig, module),
+ isolate);
// Replace the wrapper for the function that triggered the tier-up.
// This is to verify that the wrapper is replaced, even if the function
@@ -298,7 +305,7 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
// We're reusing this interrupt mechanism to interrupt long-running loops.
StackLimitCheck check(isolate);
@@ -321,10 +328,10 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ double offset_double = args.number_value_at(1);
uintptr_t offset = static_cast<uintptr_t>(offset_double);
- CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
+ uint32_t count = NumberToUint32(args[2]);
Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
isolate};
// Should have trapped if address was OOB.
@@ -337,11 +344,11 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ double offset_double = args.number_value_at(1);
uintptr_t offset = static_cast<uintptr_t>(offset_double);
- CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
+ int32_t expected_value = NumberToInt32(args[2]);
+ Handle<BigInt> timeout_ns = args.at<BigInt>(3);
Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
isolate};
@@ -360,11 +367,11 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ double offset_double = args.number_value_at(1);
uintptr_t offset = static_cast<uintptr_t>(offset_double);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
- CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
+ Handle<BigInt> expected_value = args.at<BigInt>(2);
+ Handle<BigInt> timeout_ns = args.at<BigInt>(3);
Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
isolate};
@@ -396,8 +403,8 @@ RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(function_index, 1);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t function_index = args.positive_smi_value_at(1);
return *WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
function_index);
@@ -407,9 +414,9 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_index, 1);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 2);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_index = args.positive_smi_value_at(1);
+ uint32_t entry_index = args.positive_smi_value_at(2);
DCHECK_LT(table_index, instance->tables().length());
auto table = handle(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
@@ -431,10 +438,10 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_index, 1);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 2);
- CONVERT_ARG_CHECKED(Object, element_raw, 3);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_index = args.positive_smi_value_at(1);
+ uint32_t entry_index = args.positive_smi_value_at(2);
+ Object element_raw = args[3];
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> element(element_raw, isolate);
DCHECK_LT(table_index, instance->tables().length());
@@ -458,15 +465,15 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_index, 1);
- CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 2);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_index = args.positive_smi_value_at(1);
+ uint32_t elem_segment_index = args.positive_smi_value_at(2);
static_assert(
wasm::kV8MaxWasmTableSize < kSmiMaxValue,
"Make sure clamping to Smi range doesn't make an invalid call valid");
- CONVERT_UINT32_ARG_CHECKED(dst, 3);
- CONVERT_UINT32_ARG_CHECKED(src, 4);
- CONVERT_UINT32_ARG_CHECKED(count, 5);
+ uint32_t dst = args.positive_smi_value_at(3);
+ uint32_t src = args.positive_smi_value_at(4);
+ uint32_t count = args.positive_smi_value_at(5);
DCHECK(!isolate->context().is_null());
@@ -480,15 +487,15 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
- CONVERT_UINT32_ARG_CHECKED(table_src_index, 2);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_dst_index = args.positive_smi_value_at(1);
+ uint32_t table_src_index = args.positive_smi_value_at(2);
static_assert(
wasm::kV8MaxWasmTableSize < kSmiMaxValue,
"Make sure clamping to Smi range doesn't make an invalid call valid");
- CONVERT_UINT32_ARG_CHECKED(dst, 3);
- CONVERT_UINT32_ARG_CHECKED(src, 4);
- CONVERT_UINT32_ARG_CHECKED(count, 5);
+ uint32_t dst = args.positive_smi_value_at(3);
+ uint32_t src = args.positive_smi_value_at(4);
+ uint32_t count = args.positive_smi_value_at(5);
DCHECK(!isolate->context().is_null());
@@ -502,12 +509,12 @@ RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_index, 1);
- CONVERT_ARG_CHECKED(Object, value_raw, 2);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_index = args.positive_smi_value_at(1);
+ Object value_raw = args[2];
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> value(value_raw, isolate);
- CONVERT_UINT32_ARG_CHECKED(delta, 3);
+ uint32_t delta = args.positive_smi_value_at(3);
Handle<WasmTableObject> table(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
@@ -520,13 +527,13 @@ RUNTIME_FUNCTION(Runtime_WasmTableFill) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_UINT32_ARG_CHECKED(table_index, 1);
- CONVERT_UINT32_ARG_CHECKED(start, 2);
- CONVERT_ARG_CHECKED(Object, value_raw, 3);
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t table_index = args.positive_smi_value_at(1);
+ uint32_t start = args.positive_smi_value_at(2);
+ Object value_raw = args[3];
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> value(value_raw, isolate);
- CONVERT_UINT32_ARG_CHECKED(count, 4);
+ uint32_t count = args.positive_smi_value_at(4);
Handle<WasmTableObject> table(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
@@ -573,7 +580,7 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
// Enter the debugger.
DebugScope debug_scope(isolate->debug());
-
+ bool paused_on_instrumentation = false;
// Check for instrumentation breakpoint.
DCHECK_EQ(script->break_on_entry(), !!instance->break_on_entry());
if (script->break_on_entry()) {
@@ -590,14 +597,9 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
.set_break_on_entry(false);
}
DCHECK(!instance->break_on_entry());
- Handle<FixedArray> on_entry_breakpoints;
- if (maybe_on_entry_breakpoints.ToHandle(&on_entry_breakpoints)) {
- debug_info->ClearStepping(isolate);
- StepAction step_action = isolate->debug()->last_step_action();
- isolate->debug()->ClearStepping();
- isolate->debug()->OnDebugBreak(on_entry_breakpoints, step_action);
- // Don't process regular breakpoints.
- return ReadOnlyRoots(isolate).undefined_value();
+ if (!maybe_on_entry_breakpoints.is_null()) {
+ isolate->debug()->OnInstrumentationBreak();
+ paused_on_instrumentation = true;
}
}
@@ -625,6 +627,12 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
return ReadOnlyRoots(isolate).undefined_value();
}
+ // We only hit the instrumentation breakpoint, and there is no other reason to
+ // break.
+ if (paused_on_instrumentation) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
// We did not hit a breakpoint. If we are in stepping code, but the user did
// not request stepping, clear this (to save further calls into this runtime
// function).
@@ -633,19 +641,6 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmAllocateRtt) {
- ClearThreadInWasmScope flag_scope(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_UINT32_ARG_CHECKED(type_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Map, parent, 1);
- CONVERT_SMI_ARG_CHECKED(raw_mode, 2);
- Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
- isolate);
- return *wasm::AllocateSubRtt(isolate, instance, type_index, parent,
- static_cast<WasmRttSubMode>(raw_mode));
-}
-
namespace {
inline void* ArrayElementAddress(Handle<WasmArray> array, uint32_t index,
int element_size_bytes) {
@@ -659,11 +654,11 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmArray, dst_array, 0);
- CONVERT_UINT32_ARG_CHECKED(dst_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(WasmArray, src_array, 2);
- CONVERT_UINT32_ARG_CHECKED(src_index, 3);
- CONVERT_UINT32_ARG_CHECKED(length, 4);
+ Handle<WasmArray> dst_array = args.at<WasmArray>(0);
+ uint32_t dst_index = args.positive_smi_value_at(1);
+ Handle<WasmArray> src_array = args.at<WasmArray>(2);
+ uint32_t src_index = args.positive_smi_value_at(3);
+ uint32_t length = args.positive_smi_value_at(4);
DCHECK_GT(length, 0);
bool overlapping_ranges =
dst_array->ptr() == src_array->ptr() &&
@@ -694,6 +689,37 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
return ReadOnlyRoots(isolate).undefined_value();
}
+// Returns
+// - the new array if the operation succeeds,
+// - Smi(0) if the requested array length is too large,
+// - Smi(1) if the data segment ran out-of-bounds.
+RUNTIME_FUNCTION(Runtime_WasmArrayInitFromData) {
+ ClearThreadInWasmScope flag_scope(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
+ uint32_t data_segment = args.positive_smi_value_at(1);
+ uint32_t offset = args.positive_smi_value_at(2);
+ uint32_t length = args.positive_smi_value_at(3);
+ Handle<Map> rtt = args.at<Map>(4);
+ uint32_t element_size = WasmArray::DecodeElementSizeFromMap(*rtt);
+ uint32_t length_in_bytes = length * element_size;
+
+ if (length > static_cast<uint32_t>(WasmArray::MaxLength(element_size))) {
+ return Smi::FromInt(wasm::kArrayInitFromDataArrayTooLargeErrorCode);
+ }
+ // The check above implies no overflow.
+ DCHECK_EQ(length_in_bytes / element_size, length);
+ if (!base::IsInBounds<uint32_t>(
+ offset, length_in_bytes,
+ instance->data_segment_sizes()[data_segment])) {
+ return Smi::FromInt(wasm::kArrayInitFromDataSegmentOutOfBoundsErrorCode);
+ }
+
+ Address source = instance->data_segment_starts()[data_segment] + offset;
+ return *isolate->factory()->NewWasmArrayFromMemory(length, rtt, source);
+}
+
namespace {
// Synchronize the stack limit with the active continuation for stack-switching.
// This can be done before or after changing the stack pointer itself, as long
@@ -707,25 +733,45 @@ void SyncStackLimit(Isolate* isolate) {
auto continuation = WasmContinuationObject::cast(
*isolate->roots_table().slot(RootIndex::kActiveContinuation));
auto stack = Managed<wasm::StackMemory>::cast(continuation.stack()).get();
+ if (FLAG_trace_wasm_stack_switching) {
+ PrintF("Switch to stack #%d\n", stack->id());
+ }
uintptr_t limit = reinterpret_cast<uintptr_t>(stack->jmpbuf()->stack_limit);
isolate->stack_guard()->SetStackLimit(limit);
}
} // namespace
// Allocate a new continuation, and prepare for stack switching by updating the
-// active continuation and setting the stack limit.
+// active continuation, active suspender and stack limit.
RUNTIME_FUNCTION(Runtime_WasmAllocateContinuation) {
CHECK(FLAG_experimental_wasm_stack_switching);
HandleScope scope(isolate);
+ Handle<WasmSuspenderObject> suspender = args.at<WasmSuspenderObject>(0);
+
+ // Update the continuation state.
auto parent =
handle(WasmContinuationObject::cast(
*isolate->roots_table().slot(RootIndex::kActiveContinuation)),
isolate);
- auto target = WasmContinuationObject::New(isolate, *parent);
+ Handle<WasmContinuationObject> target =
+ WasmContinuationObject::New(isolate, parent);
auto target_stack =
Managed<wasm::StackMemory>::cast(target->stack()).get().get();
isolate->wasm_stacks()->Add(target_stack);
isolate->roots_table().slot(RootIndex::kActiveContinuation).store(*target);
+
+ // Update the suspender state.
+ FullObjectSlot active_suspender_slot =
+ isolate->roots_table().slot(RootIndex::kActiveSuspender);
+ suspender->set_parent(HeapObject::cast(*active_suspender_slot));
+ if (!(*active_suspender_slot).IsUndefined()) {
+ WasmSuspenderObject::cast(*active_suspender_slot)
+ .set_state(WasmSuspenderObject::Inactive);
+ }
+ suspender->set_state(WasmSuspenderObject::State::Active);
+ suspender->set_continuation(*target);
+ active_suspender_slot.store(*suspender);
+
SyncStackLimit(isolate);
return *target;
}
@@ -737,5 +783,42 @@ RUNTIME_FUNCTION(Runtime_WasmSyncStackLimit) {
return ReadOnlyRoots(isolate).undefined_value();
}
+// Takes a promise and a suspender, and returns promise.then(onFulfilled), where
+// onFulfilled resumes the suspender.
+RUNTIME_FUNCTION(Runtime_WasmCreateResumePromise) {
+ CHECK(FLAG_experimental_wasm_stack_switching);
+ HandleScope scope(isolate);
+ Handle<Object> promise = args.at(0);
+ Handle<WasmSuspenderObject> suspender = args.at<WasmSuspenderObject>(1);
+
+ // Instantiate onFulfilled callback.
+ Handle<WasmOnFulfilledData> function_data =
+ isolate->factory()->NewWasmOnFulfilledData(suspender);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForWasmOnFulfilled(
+ function_data);
+ Handle<WasmInstanceObject> instance(
+ GetWasmInstanceOnStackTop(isolate,
+ {StackFrame::EXIT, StackFrame::WASM_TO_JS}),
+ isolate);
+ isolate->set_context(instance->native_context());
+ Handle<Context> context(isolate->native_context());
+ Handle<Map> function_map = isolate->strict_function_map();
+ Handle<JSObject> on_fulfilled =
+ Factory::JSFunctionBuilder{isolate, shared, context}
+ .set_map(function_map)
+ .Build();
+
+ i::Handle<i::Object> argv[] = {on_fulfilled};
+ i::Handle<i::Object> result;
+ bool has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->promise_then(), promise,
+ arraysize(argv), argv)
+ .ToHandle(&result);
+ // TODO(thibaudm): Propagate exception.
+ CHECK(!has_pending_exception);
+ return *result;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
index e20c9aa27a..f3c6f63ebc 100644
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -13,7 +13,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_ShrinkFinalizationRegistryUnregisterTokenMap) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFinalizationRegistry, finalization_registry, 0);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ args.at<JSFinalizationRegistry>(0);
if (!finalization_registry->key_map().IsUndefined(isolate)) {
Handle<SimpleNumberDictionary> key_map =
@@ -30,8 +31,9 @@ RUNTIME_FUNCTION(
Runtime_JSFinalizationRegistryRegisterWeakCellWithUnregisterToken) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFinalizationRegistry, finalization_registry, 0);
- CONVERT_ARG_HANDLE_CHECKED(WeakCell, weak_cell, 1);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ args.at<JSFinalizationRegistry>(0);
+ Handle<WeakCell> weak_cell = args.at<WeakCell>(1);
JSFinalizationRegistry::RegisterWeakCellWithUnregisterToken(
finalization_registry, weak_cell, isolate);
@@ -42,7 +44,7 @@ RUNTIME_FUNCTION(
RUNTIME_FUNCTION(Runtime_JSWeakRefAddToKeptObjects) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
isolate->heap()->KeepDuringJob(object);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 41bc3256a7..9554a167d8 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -65,7 +65,9 @@ namespace internal {
F(AtomicsOr, 3, 1) \
F(AtomicsSub, 3, 1) \
F(AtomicsXor, 3, 1) \
- F(SetAllowAtomicsWait, 1, 1)
+ F(SetAllowAtomicsWait, 1, 1) \
+ F(AtomicsLoadSharedStructField, 2, 1) \
+ F(AtomicsStoreSharedStructField, 3, 1)
#define FOR_EACH_INTRINSIC_BIGINT(F, I) \
F(BigIntBinaryOp, 3, 1) \
@@ -107,11 +109,12 @@ namespace internal {
F(CompileForOnStackReplacement, 0, 1) \
F(CompileLazy, 1, 1) \
F(CompileBaseline, 1, 1) \
- F(CompileOptimized_Concurrent, 1, 1) \
- F(CompileOptimized_NotConcurrent, 1, 1) \
+ F(CompileMaglev_Concurrent, 1, 1) \
+ F(CompileMaglev_NotConcurrent, 1, 1) \
+ F(CompileTurbofan_Concurrent, 1, 1) \
+ F(CompileTurbofan_NotConcurrent, 1, 1) \
F(InstallBaselineCode, 1, 1) \
F(HealOptimizedCodeSlot, 1, 1) \
- F(FunctionFirstExecution, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \
F(ObserveNode, 1, 1) \
@@ -123,16 +126,14 @@ namespace internal {
#define FOR_EACH_INTRINSIC_DEBUG(F, I) \
F(ClearStepping, 0, 1) \
F(CollectGarbage, 1, 1) \
- F(DebugAsyncFunctionEntered, 1, 1) \
- F(DebugAsyncFunctionSuspended, 1, 1) \
- F(DebugAsyncFunctionResumed, 1, 1) \
- F(DebugAsyncFunctionFinished, 2, 1) \
+ F(DebugAsyncFunctionSuspended, 4, 1) \
F(DebugBreakAtEntry, 1, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugGetLoadedScriptIds, 0, 1) \
F(DebugOnFunctionCall, 2, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
+ F(DebugPromiseThen, 1, 1) \
F(DebugPushPromise, 1, 1) \
F(DebugToggleBlockCoverage, 1, 1) \
F(DebugTogglePreciseCoverage, 1, 1) \
@@ -184,8 +185,8 @@ namespace internal {
I(AsyncFunctionAwaitCaught, 2, 1) \
I(AsyncFunctionAwaitUncaught, 2, 1) \
I(AsyncFunctionEnter, 2, 1) \
- I(AsyncFunctionReject, 3, 1) \
- I(AsyncFunctionResolve, 3, 1) \
+ I(AsyncFunctionReject, 2, 1) \
+ I(AsyncFunctionResolve, 2, 1) \
I(AsyncGeneratorAwaitCaught, 2, 1) \
I(AsyncGeneratorAwaitUncaught, 2, 1) \
F(AsyncGeneratorHasCatchHandlerForPC, 1, 1) \
@@ -207,60 +208,61 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTL(F, I)
#endif // V8_INTL_SUPPORT
-#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
- F(AccessCheck, 1, 1) \
- F(AllocateByteArray, 1, 1) \
- F(AllocateInYoungGeneration, 2, 1) \
- F(AllocateInOldGeneration, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(AllowDynamicFunction, 1, 1) \
- I(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(DoubleToStringWithRadix, 2, 1) \
- F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
- F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(GetTemplateObject, 3, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(BytecodeBudgetInterruptFromBytecode, 1, 1) \
- F(BytecodeBudgetInterruptWithStackCheckFromBytecode, 1, 1) \
- F(BytecodeBudgetInterruptFromCode, 1, 1) \
- F(NewError, 2, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, -1 /* [1, 4] */, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReportMessageFromMicrotask, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ReThrowWithMessage, 2, 1) \
- F(RunMicrotaskCallback, 2, 1) \
- F(PerformMicrotaskCheckpoint, 0, 1) \
- F(StackGuard, 0, 1) \
- F(StackGuardWithGap, 1, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorError, 1, 1) \
- F(ThrowSpreadArgError, 2, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowPatternAssignmentNonCoercible, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowAccessedUninitializedVariable, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowThrowMethodMissing, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
- F(Typeof, 1, 1) \
+#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ F(AccessCheck, 1, 1) \
+ F(AllocateByteArray, 1, 1) \
+ F(AllocateInYoungGeneration, 2, 1) \
+ F(AllocateInOldGeneration, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(AllowDynamicFunction, 1, 1) \
+ I(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(DoubleToStringWithRadix, 2, 1) \
+ F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
+ F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(GetTemplateObject, 3, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(BytecodeBudgetInterrupt, 1, 1) \
+ F(BytecodeBudgetInterruptWithStackCheck, 1, 1) \
+ F(NewError, 2, 1) \
+ F(NewForeign, 0, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, -1 /* [1, 4] */, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReportMessageFromMicrotask, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ReThrowWithMessage, 2, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
+ F(PerformMicrotaskCheckpoint, 0, 1) \
+ F(SharedValueBarrierSlow, 1, 1) \
+ F(StackGuard, 0, 1) \
+ F(StackGuardWithGap, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorError, 1, 1) \
+ F(ThrowSpreadArgError, 2, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowPatternAssignmentNonCoercible, 1, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowAccessedUninitializedVariable, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
+ F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F, I) \
@@ -287,75 +289,76 @@ namespace internal {
F(StringToNumber, 1, 1) \
F(TypedArrayMaxLength, 0, 1)
-#define FOR_EACH_INTRINSIC_OBJECT(F, I) \
- F(AddDictionaryProperty, 3, 1) \
- F(AllocateHeapNumber, 0, 1) \
- F(CollectTypeProfile, 3, 1) \
- F(CompleteInobjectSlackTrackingForMap, 1, 1) \
- I(CopyDataProperties, 2, 1) \
- F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
- I(CreateDataProperty, 3, 1) \
- I(CreateIterResultObject, 2, 1) \
- F(CreatePrivateAccessors, 2, 1) \
- F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(DefineDataPropertyInLiteral, 6, 1) \
- F(DefineGetterPropertyUnchecked, 4, 1) \
- F(DefineSetterPropertyUnchecked, 4, 1) \
- F(DeleteProperty, 3, 1) \
- F(GetDerivedMap, 2, 1) \
- F(GetFunctionName, 1, 1) \
- F(GetOwnPropertyDescriptor, 2, 1) \
- F(GetOwnPropertyKeys, 2, 1) \
- F(GetProperty, -1 /* [2, 3] */, 1) \
- F(HasFastPackedElements, 1, 1) \
- F(HasInPrototypeChain, 2, 1) \
- F(HasProperty, 2, 1) \
- F(InternalSetPrototype, 2, 1) \
- F(IsJSReceiver, 1, 1) \
- F(JSReceiverPreventExtensionsDontThrow, 1, 1) \
- F(JSReceiverPreventExtensionsThrow, 1, 1) \
- F(JSReceiverGetPrototypeOf, 1, 1) \
- F(JSReceiverSetPrototypeOfDontThrow, 2, 1) \
- F(JSReceiverSetPrototypeOfThrow, 2, 1) \
- F(LoadPrivateGetter, 1, 1) \
- F(LoadPrivateSetter, 1, 1) \
- F(NewObject, 2, 1) \
- F(ObjectCreate, 2, 1) \
- F(ObjectEntries, 1, 1) \
- F(ObjectEntriesSkipFastPath, 1, 1) \
- F(ObjectGetOwnPropertyNames, 1, 1) \
- F(ObjectGetOwnPropertyNamesTryFast, 1, 1) \
- F(ObjectHasOwnProperty, 2, 1) \
- F(ObjectIsExtensible, 1, 1) \
- F(ObjectKeys, 1, 1) \
- F(ObjectValues, 1, 1) \
- F(ObjectValuesSkipFastPath, 1, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(SetDataProperties, 2, 1) \
- F(SetKeyedProperty, 3, 1) \
- F(DefineObjectOwnProperty, 3, 1) \
- F(SetNamedProperty, 3, 1) \
- F(SetOwnPropertyIgnoreAttributes, 4, 1) \
- F(StoreDataPropertyInLiteral, 3, 1) \
- F(ShrinkNameDictionary, 1, 1) \
- F(ShrinkSwissNameDictionary, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(ToLength, 1, 1) \
- F(ToName, 1, 1) \
- F(ToNumber, 1, 1) \
- F(ToNumeric, 1, 1) \
- F(ToObject, 1, 1) \
- F(ToString, 1, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(SwissTableAdd, 4, 1) \
- F(SwissTableAllocate, 1, 1) \
- F(SwissTableDelete, 2, 1) \
- F(SwissTableDetailsAt, 2, 1) \
- F(SwissTableElementsCount, 1, 1) \
- F(SwissTableEquals, 2, 1) \
- F(SwissTableFindEntry, 2, 1) \
- F(SwissTableUpdate, 4, 1) \
- F(SwissTableValueAt, 2, 1) \
+#define FOR_EACH_INTRINSIC_OBJECT(F, I) \
+ F(AddDictionaryProperty, 3, 1) \
+ F(AddPrivateBrand, 4, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(CollectTypeProfile, 3, 1) \
+ F(CompleteInobjectSlackTrackingForMap, 1, 1) \
+ I(CopyDataProperties, 2, 1) \
+ I(CopyDataPropertiesWithExcludedPropertiesOnStack, -1 /* >= 1 */, 1) \
+ I(CreateDataProperty, 3, 1) \
+ I(CreateIterResultObject, 2, 1) \
+ F(CreatePrivateAccessors, 2, 1) \
+ F(DefineAccessorPropertyUnchecked, 5, 1) \
+ F(DefineKeyedOwnPropertyInLiteral, 6, 1) \
+ F(DefineGetterPropertyUnchecked, 4, 1) \
+ F(DefineSetterPropertyUnchecked, 4, 1) \
+ F(DeleteProperty, 3, 1) \
+ F(GetDerivedMap, 2, 1) \
+ F(GetFunctionName, 1, 1) \
+ F(GetOwnPropertyDescriptor, 2, 1) \
+ F(GetOwnPropertyKeys, 2, 1) \
+ F(GetProperty, -1 /* [2, 3] */, 1) \
+ F(HasFastPackedElements, 1, 1) \
+ F(HasInPrototypeChain, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(InternalSetPrototype, 2, 1) \
+ F(IsJSReceiver, 1, 1) \
+ F(JSReceiverPreventExtensionsDontThrow, 1, 1) \
+ F(JSReceiverPreventExtensionsThrow, 1, 1) \
+ F(JSReceiverGetPrototypeOf, 1, 1) \
+ F(JSReceiverSetPrototypeOfDontThrow, 2, 1) \
+ F(JSReceiverSetPrototypeOfThrow, 2, 1) \
+ F(LoadPrivateGetter, 1, 1) \
+ F(LoadPrivateSetter, 1, 1) \
+ F(NewObject, 2, 1) \
+ F(ObjectCreate, 2, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
+ F(ObjectGetOwnPropertyNames, 1, 1) \
+ F(ObjectGetOwnPropertyNamesTryFast, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectIsExtensible, 1, 1) \
+ F(ObjectKeys, 1, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(SetDataProperties, 2, 1) \
+ F(SetKeyedProperty, 3, 1) \
+ F(DefineObjectOwnProperty, 3, 1) \
+ F(SetNamedProperty, 3, 1) \
+ F(SetOwnPropertyIgnoreAttributes, 4, 1) \
+ F(DefineKeyedOwnPropertyInLiteral_Simple, 3, 1) \
+ F(ShrinkNameDictionary, 1, 1) \
+ F(ShrinkSwissNameDictionary, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(ToLength, 1, 1) \
+ F(ToName, 1, 1) \
+ F(ToNumber, 1, 1) \
+ F(ToNumeric, 1, 1) \
+ F(ToObject, 1, 1) \
+ F(ToString, 1, 1) \
+ F(TryMigrateInstance, 1, 1) \
+ F(SwissTableAdd, 4, 1) \
+ F(SwissTableAllocate, 1, 1) \
+ F(SwissTableDelete, 2, 1) \
+ F(SwissTableDetailsAt, 2, 1) \
+ F(SwissTableElementsCount, 1, 1) \
+ F(SwissTableEquals, 2, 1) \
+ F(SwissTableFindEntry, 2, 1) \
+ F(SwissTableUpdate, 4, 1) \
+ F(SwissTableValueAt, 2, 1) \
F(SwissTableKeyAt, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F, I) \
@@ -375,8 +378,6 @@ namespace internal {
F(PromiseHookAfter, 1, 1) \
F(PromiseHookBefore, 1, 1) \
F(PromiseHookInit, 2, 1) \
- F(AwaitPromisesInit, 5, 1) \
- F(AwaitPromisesInitOld, 5, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
F(PromiseRevokeReject, 1, 1) \
F(PromiseStatus, 1, 1) \
@@ -466,9 +467,11 @@ namespace internal {
F(Abort, 1, 1) \
F(AbortCSADcheck, 1, 1) \
F(AbortJS, 1, 1) \
+ F(ActiveTierIsMaglev, 1, 1) \
F(ArrayIteratorProtector, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
F(BaselineOsr, -1, 1) \
+ F(BenchMaglev, 2, 1) \
F(ClearFunctionFeedback, 1, 1) \
F(ClearMegamorphicStubCache, 0, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
@@ -480,12 +483,11 @@ namespace internal {
F(DebugTrace, 0, 1) \
F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
+ F(DisableOptimizationFinalization, 0, 1) \
F(DisallowCodegenFromStrings, 1, 1) \
F(DisassembleFunction, 1, 1) \
- F(DynamicCheckMapsEnabled, 0, 1) \
F(EnableCodeLoggingForTesting, 0, 1) \
F(EnsureFeedbackVectorForFunction, 1, 1) \
- F(DisableOptimizationFinalization, 0, 1) \
F(FinalizeOptimization, 0, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
@@ -526,23 +528,23 @@ namespace internal {
F(IsConcatSpreadableProtector, 0, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
F(IsDictPropertyConstTrackingEnabled, 0, 1) \
- F(IsMidTierTurboprop, 0, 1) \
- F(IsTopTierTurboprop, 0, 1) \
+ F(IsSameHeapObject, 2, 1) \
+ F(IsSharedString, 1, 1) \
F(MapIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NewRegExpWithBacktrackLimit, 3, 1) \
F(NotifyContextDisposed, 0, 1) \
- F(OptimizeFunctionForTopTier, 1, 1) \
+ F(OptimizeMaglevOnNextCall, 1, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
F(PrepareFunctionForOptimization, -1, 1) \
F(PretenureAllocationSite, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \
F(PromiseSpeciesProtector, 0, 1) \
+ F(RegExpSpeciesProtector, 0, 1) \
F(RegexpHasBytecode, 2, 1) \
F(RegexpHasNativeCode, 2, 1) \
F(RegexpIsUnmodified, 1, 1) \
- F(RegExpSpeciesProtector, 0, 1) \
F(RegexpTypeTag, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(RuntimeEvaluateREPL, 1, 1) \
@@ -551,16 +553,18 @@ namespace internal {
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetForceSlowPath, 1, 1) \
F(SetIteratorProtector, 0, 1) \
+ F(SharedGC, 0, 1) \
F(SimulateNewspaceFull, 0, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TakeHeapSnapshot, -1, 1) \
- F(TierupFunctionOnNextCall, -1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(TurbofanStaticAssert, 1, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
F(WaitForBackgroundOptimization, 0, 1) \
+ F(WebSnapshotDeserialize, -1, 1) \
+ F(WebSnapshotSerialize, -1, 1) \
I(DeoptimizeNow, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
@@ -594,10 +598,11 @@ namespace internal {
F(WasmCompileWrapper, 2, 1) \
F(WasmTriggerTierUp, 1, 1) \
F(WasmDebugBreak, 0, 1) \
- F(WasmAllocateRtt, 3, 1) \
F(WasmArrayCopy, 5, 1) \
- F(WasmAllocateContinuation, 0, 1) \
- F(WasmSyncStackLimit, 0, 1)
+ F(WasmArrayInitFromData, 5, 1) \
+ F(WasmAllocateContinuation, 1, 1) \
+ F(WasmSyncStackLimit, 0, 1) \
+ F(WasmCreateResumePromise, 2, 1)
#define FOR_EACH_INTRINSIC_WASM_TEST(F, I) \
F(DeserializeWasmModule, 2, 1) \
@@ -608,6 +613,7 @@ namespace internal {
F(GetWasmRecoveredTrapCount, 0, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsLiftoffFunction, 1, 1) \
+ F(IsTurboFanFunction, 1, 1) \
F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
@@ -638,11 +644,11 @@ namespace internal {
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
- F(KeyedDefineOwnIC_Miss, 5, 1) \
+ F(DefineKeyedOwnIC_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Miss, 5, 1) \
- F(StoreOwnIC_Slow, 3, 1) \
+ F(DefineNamedOwnIC_Slow, 3, 1) \
F(KeyedStoreIC_Slow, 3, 1) \
- F(KeyedDefineOwnIC_Slow, 3, 1) \
+ F(DefineKeyedOwnIC_Slow, 3, 1) \
F(LoadElementWithInterceptor, 2, 1) \
F(LoadGlobalIC_Miss, 4, 1) \
F(LoadGlobalIC_Slow, 3, 1) \
@@ -656,7 +662,7 @@ namespace internal {
F(StoreGlobalICNoFeedback_Miss, 2, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
- F(StoreOwnIC_Miss, 5, 1) \
+ F(DefineNamedOwnIC_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Slow, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
F(CloneObjectIC_Miss, 4, 1) \
diff --git a/deps/v8/src/security/OWNERS b/deps/v8/src/sandbox/OWNERS
index 9b6691b0cd..9b6691b0cd 100644
--- a/deps/v8/src/security/OWNERS
+++ b/deps/v8/src/sandbox/OWNERS
diff --git a/deps/v8/src/security/external-pointer-inl.h b/deps/v8/src/sandbox/external-pointer-inl.h
index c25543d757..61a6cb1a4b 100644
--- a/deps/v8/src/security/external-pointer-inl.h
+++ b/deps/v8/src/sandbox/external-pointer-inl.h
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SECURITY_EXTERNAL_POINTER_INL_H_
-#define V8_SECURITY_EXTERNAL_POINTER_INL_H_
+#ifndef V8_SANDBOX_EXTERNAL_POINTER_INL_H_
+#define V8_SANDBOX_EXTERNAL_POINTER_INL_H_
#include "include/v8-internal.h"
#include "src/execution/isolate.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer-table-inl.h"
+#include "src/sandbox/external-pointer.h"
namespace v8 {
namespace internal {
@@ -15,39 +16,28 @@ namespace internal {
V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
ExternalPointer_t encoded_pointer,
ExternalPointerTag tag) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
-#ifdef V8_HEAP_SANDBOX
- uint32_t index = static_cast<uint32_t>(encoded_pointer);
- return isolate->external_pointer_table().get(index) & ~tag;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ STATIC_ASSERT(kExternalPointerSize == kInt32Size);
+ uint32_t index = encoded_pointer >> kExternalPointerIndexShift;
+ return isolate->external_pointer_table().Get(index, tag);
#else
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
return encoded_pointer;
#endif
}
-V8_INLINE void InitExternalPointerField(Address field_address,
- Isolate* isolate) {
-#ifdef V8_HEAP_SANDBOX
- static_assert(kExternalPointerSize == kSystemPointerSize,
- "Review the code below, once kExternalPointerSize is 4-byte "
- "the address of the field will always be aligned");
- ExternalPointer_t index = isolate->external_pointer_table().allocate();
- base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
-#else
- // Nothing to do.
-#endif // V8_HEAP_SANDBOX
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ ExternalPointerTag tag) {
+ InitExternalPointerField(field_address, isolate, kNullExternalPointer, tag);
}
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
Address value, ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
- DCHECK_EQ(value & kExternalPointerTagMask, 0);
- ExternalPointer_t index = isolate->external_pointer_table().allocate();
- isolate->external_pointer_table().set(static_cast<uint32_t>(index),
- value | tag);
- static_assert(kExternalPointerSize == kSystemPointerSize,
- "Review the code below, once kExternalPointerSize is 4-byte "
- "the address of the field will always be aligned");
- base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ExternalPointer_t index = isolate->external_pointer_table().Allocate();
+ isolate->external_pointer_table().Set(index, value, tag);
+ index <<= kExternalPointerIndexShift;
+ base::Memory<ExternalPointer_t>(field_address) = index;
#else
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
@@ -58,37 +48,34 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
} else {
base::Memory<ExternalPointer_t>(field_address) = encoded_value;
}
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
-V8_INLINE Address ReadExternalPointerField(Address field_address,
- const Isolate* isolate,
- ExternalPointerTag tag) {
+V8_INLINE ExternalPointer_t ReadRawExternalPointerField(Address field_address) {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
kExternalPointerSize > kTaggedSize;
- ExternalPointer_t encoded_value;
if (v8_pointer_compression_unaligned) {
- encoded_value = base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+ return base::ReadUnalignedValue<ExternalPointer_t>(field_address);
} else {
- encoded_value = base::Memory<ExternalPointer_t>(field_address);
+ return base::Memory<ExternalPointer_t>(field_address);
}
+}
+
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+ const Isolate* isolate,
+ ExternalPointerTag tag) {
+ ExternalPointer_t encoded_value = ReadRawExternalPointerField(field_address);
return DecodeExternalPointer(isolate, encoded_value, tag);
}
V8_INLINE void WriteExternalPointerField(Address field_address,
Isolate* isolate, Address value,
ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
- static_assert(kExternalPointerSize == kSystemPointerSize,
- "Review the code below, once kExternalPointerSize is 4-byte "
- "the address of the field will always be aligned");
- DCHECK_EQ(value & kExternalPointerTagMask, 0);
-
- ExternalPointer_t index =
- base::ReadUnalignedValue<ExternalPointer_t>(field_address);
- isolate->external_pointer_table().set(static_cast<uint32_t>(index),
- value | tag);
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ExternalPointer_t index = base::Memory<ExternalPointer_t>(field_address);
+ index >>= kExternalPointerIndexShift;
+ isolate->external_pointer_table().Set(index, value, tag);
#else
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
@@ -99,10 +86,10 @@ V8_INLINE void WriteExternalPointerField(Address field_address,
} else {
base::Memory<ExternalPointer_t>(field_address) = encoded_value;
}
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
} // namespace internal
} // namespace v8
-#endif // V8_SECURITY_EXTERNAL_POINTER_INL_H_
+#endif // V8_SANDBOX_EXTERNAL_POINTER_INL_H_
diff --git a/deps/v8/src/sandbox/external-pointer-table-inl.h b/deps/v8/src/sandbox/external-pointer-table-inl.h
new file mode 100644
index 0000000000..cda26fa07e
--- /dev/null
+++ b/deps/v8/src/sandbox/external-pointer-table-inl.h
@@ -0,0 +1,149 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_
+#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_
+
+#include "src/base/atomicops.h"
+#include "src/sandbox/external-pointer-table.h"
+#include "src/sandbox/external-pointer.h"
+#include "src/utils/allocation.h"
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+namespace v8 {
+namespace internal {
+
+void ExternalPointerTable::Init(Isolate* isolate) {
+ DCHECK(!is_initialized());
+
+ VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
+ DCHECK(IsAligned(kExternalPointerTableReservationSize,
+ root_space->allocation_granularity()));
+ buffer_ = root_space->AllocatePages(
+ VirtualAddressSpace::kNoHint, kExternalPointerTableReservationSize,
+ root_space->allocation_granularity(), PagePermissions::kNoAccess);
+ if (!buffer_) {
+ V8::FatalProcessOutOfMemory(
+ isolate,
+ "Failed to reserve memory for ExternalPointerTable backing buffer");
+ }
+
+ mutex_ = new base::Mutex;
+ if (!mutex_) {
+ V8::FatalProcessOutOfMemory(
+ isolate, "Failed to allocate mutex for ExternalPointerTable");
+ }
+
+ // Allocate the initial block. Mutex must be held for that.
+ base::MutexGuard guard(mutex_);
+ Grow();
+
+ // Set up the special null entry. This entry must contain nullptr so that
+ // empty EmbedderDataSlots represent nullptr.
+ STATIC_ASSERT(kNullExternalPointer == 0);
+ store(kNullExternalPointer, kNullAddress);
+}
+
+void ExternalPointerTable::TearDown() {
+ DCHECK(is_initialized());
+
+ GetPlatformVirtualAddressSpace()->FreePages(
+ buffer_, kExternalPointerTableReservationSize);
+ delete mutex_;
+
+ buffer_ = kNullAddress;
+ capacity_ = 0;
+ freelist_head_ = 0;
+ mutex_ = nullptr;
+}
+
+Address ExternalPointerTable::Get(uint32_t index,
+ ExternalPointerTag tag) const {
+ DCHECK_LT(index, capacity_);
+
+ Address entry = load_atomic(index);
+ DCHECK(!is_free(entry));
+
+ return entry & ~tag;
+}
+
+void ExternalPointerTable::Set(uint32_t index, Address value,
+ ExternalPointerTag tag) {
+ DCHECK_LT(index, capacity_);
+ DCHECK_NE(kNullExternalPointer, index);
+ DCHECK_EQ(0, value & kExternalPointerTagMask);
+ DCHECK(is_marked(tag));
+
+ store_atomic(index, value | tag);
+}
+
+uint32_t ExternalPointerTable::Allocate() {
+ DCHECK(is_initialized());
+
+ base::Atomic32* freelist_head_ptr =
+ reinterpret_cast<base::Atomic32*>(&freelist_head_);
+
+ uint32_t index;
+ bool success = false;
+ while (!success) {
+ // This is essentially DCLP (see
+ // https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/)
+ // and so requires an acquire load as well as a release store in Grow() to
+ // prevent reordering of memory accesses, which could for example cause one
+ // thread to read a freelist entry before it has been properly initialized.
+ uint32_t freelist_head = base::Acquire_Load(freelist_head_ptr);
+ if (!freelist_head) {
+ // Freelist is empty. Need to take the lock, then attempt to grow the
+ // table if no other thread has done it in the meantime.
+ base::MutexGuard guard(mutex_);
+
+ // Reload freelist head in case another thread already grew the table.
+ freelist_head = base::Relaxed_Load(freelist_head_ptr);
+
+ if (!freelist_head) {
+ // Freelist is (still) empty so grow the table.
+ freelist_head = Grow();
+ }
+ }
+
+ DCHECK(freelist_head);
+ DCHECK_LT(freelist_head, capacity_);
+ index = freelist_head;
+
+ // The next free element is stored in the lower 32 bits of the entry.
+ uint32_t new_freelist_head = static_cast<uint32_t>(load_atomic(index));
+
+ uint32_t old_val = base::Relaxed_CompareAndSwap(
+ freelist_head_ptr, freelist_head, new_freelist_head);
+ success = old_val == freelist_head;
+ }
+
+ return index;
+}
+
+void ExternalPointerTable::Mark(uint32_t index) {
+ DCHECK_LT(index, capacity_);
+ STATIC_ASSERT(sizeof(base::Atomic64) == sizeof(Address));
+
+ base::Atomic64 old_val = load_atomic(index);
+ DCHECK(!is_free(old_val));
+ base::Atomic64 new_val = set_mark_bit(old_val);
+
+ // We don't need to perform the CAS in a loop: if the new value is not equal
+ // to the old value, then the mutator must've just written a new value into
+ // the entry. This in turn must've set the marking bit already (see
+ // ExternalPointerTable::Set), so we don't need to do it again.
+ base::Atomic64* ptr = reinterpret_cast<base::Atomic64*>(entry_address(index));
+ base::Atomic64 val = base::Relaxed_CompareAndSwap(ptr, old_val, new_val);
+ DCHECK((val == old_val) || is_marked(val));
+ USE(val);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_IS_AVAILABLE
+
+#endif // V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_
diff --git a/deps/v8/src/sandbox/external-pointer-table.cc b/deps/v8/src/sandbox/external-pointer-table.cc
new file mode 100644
index 0000000000..5f6c3e2df6
--- /dev/null
+++ b/deps/v8/src/sandbox/external-pointer-table.cc
@@ -0,0 +1,97 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/sandbox/external-pointer-table.h"
+
+#include <algorithm>
+
+#include "src/execution/isolate.h"
+#include "src/logging/counters.h"
+#include "src/sandbox/external-pointer-table-inl.h"
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+namespace v8 {
+namespace internal {
+
+STATIC_ASSERT(sizeof(ExternalPointerTable) == ExternalPointerTable::kSize);
+
+// static
+uint32_t ExternalPointerTable::AllocateEntry(ExternalPointerTable* table) {
+ return table->Allocate();
+}
+
+uint32_t ExternalPointerTable::Sweep(Isolate* isolate) {
+ // Sweep top to bottom and rebuild the freelist from newly dead and
+ // previously freed entries. This way, the freelist ends up sorted by index,
+ // which helps defragment the table. This method must run either on the
+ // mutator thread or while the mutator is stopped. Also clear marking bits on
+ // live entries.
+ // TODO(v8:10391, saelo) could also shrink the table using DecommitPages() if
+ // elements at the end are free. This might require some form of compaction.
+ uint32_t freelist_size = 0;
+ uint32_t current_freelist_head = 0;
+
+ // Skip the special null entry.
+ DCHECK_GE(capacity_, 1);
+ for (uint32_t i = capacity_ - 1; i > 0; i--) {
+ // No other threads are active during sweep, so there is no need to use
+ // atomic operations here.
+ Address entry = load(i);
+ if (!is_marked(entry)) {
+ store(i, make_freelist_entry(current_freelist_head));
+ current_freelist_head = i;
+ freelist_size++;
+ } else {
+ store(i, clear_mark_bit(entry));
+ }
+ }
+
+ freelist_head_ = current_freelist_head;
+
+ uint32_t num_active_entries = capacity_ - freelist_size;
+ isolate->counters()->sandboxed_external_pointers_count()->AddSample(
+ num_active_entries);
+ return num_active_entries;
+}
+
+uint32_t ExternalPointerTable::Grow() {
+ // Freelist should be empty.
+ DCHECK_EQ(0, freelist_head_);
+ // Mutex must be held when calling this method.
+ mutex_->AssertHeld();
+
+ // Grow the table by one block.
+ uint32_t old_capacity = capacity_;
+ uint32_t new_capacity = old_capacity + kEntriesPerBlock;
+ CHECK_LE(new_capacity, kMaxSandboxedExternalPointers);
+
+ // Failure likely means OOM. TODO(saelo) handle this.
+ VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
+ DCHECK(IsAligned(kBlockSize, root_space->page_size()));
+ CHECK(root_space->SetPagePermissions(buffer_ + old_capacity * sizeof(Address),
+ kBlockSize,
+ PagePermissions::kReadWrite));
+ capacity_ = new_capacity;
+
+ // Build freelist bottom to top, which might be more cache friendly.
+ uint32_t start = std::max<uint32_t>(old_capacity, 1); // Skip entry zero
+ uint32_t last = new_capacity - 1;
+ for (uint32_t i = start; i < last; i++) {
+ store(i, make_freelist_entry(i + 1));
+ }
+ store(last, make_freelist_entry(0));
+
+ // This must be a release store to prevent reordering of the preceeding
+ // stores to the freelist from being reordered past this store. See
+ // Allocate() for more details.
+ base::Release_Store(reinterpret_cast<base::Atomic32*>(&freelist_head_),
+ start);
+ return start;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_IS_AVAILABLE
diff --git a/deps/v8/src/sandbox/external-pointer-table.h b/deps/v8/src/sandbox/external-pointer-table.h
new file mode 100644
index 0000000000..87937db817
--- /dev/null
+++ b/deps/v8/src/sandbox/external-pointer-table.h
@@ -0,0 +1,205 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
+#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
+
+#include "include/v8config.h"
+#include "src/base/atomicops.h"
+#include "src/base/memory.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+/**
+ * A table storing pointers to objects outside the sandbox.
+ *
+ * An external pointer table provides the basic mechanisms to ensure
+ * memory-safe access to objects located outside the sandbox, but referenced
+ * from within it. When an external pointer table is used, objects located
+ * inside the sandbox reference outside objects through indices into the table.
+ *
+ * Type safety can be ensured by using type-specific tags for the external
+ * pointers. These tags will be ORed into the unused top bits of the pointer
+ * when storing them and will be ANDed away when loading the pointer later
+ * again. If a pointer of the wrong type is accessed, some of the top bits will
+ * remain in place, rendering the pointer inaccessible.
+ *
+ * Temporal memory safety is achieved through garbage collection of the table,
+ * which ensures that every entry is either an invalid pointer or a valid
+ * pointer pointing to a live object.
+ *
+ * Spatial memory safety can, if necessary, be ensured by storing the size of a
+ * referenced object together with the object itself outside the sandbox, and
+ * referencing both through a single entry in the table.
+ *
+ * The garbage collection algorithm for the table works as follows:
+ * - The top bit of every entry is reserved for the marking bit.
+ * - Every store to an entry automatically sets the marking bit when ORing
+ * with the tag. This avoids the need for write barriers.
+ * - Every load of an entry automatically removes the marking bit when ANDing
+ * with the inverted tag.
+ * - When the GC marking visitor finds a live object with an external pointer,
+ * it marks the corresponding entry as alive through Mark(), which sets the
+ * marking bit using an atomic CAS operation.
+ * - When marking is finished, Sweep() iterates of the table once while the
+ * mutator is stopped and builds a freelist from all dead entries while also
+ * removing the marking bit from any live entry.
+ *
+ * The freelist is a singly-linked list, using the lower 32 bits of each entry
+ * to store the index of the next free entry. When the freelist is empty and a
+ * new entry is allocated, the table grows in place and the freelist is
+ * re-populated from the newly added entries.
+ */
+class V8_EXPORT_PRIVATE ExternalPointerTable {
+ public:
+ // Size of an ExternalPointerTable, for layout computation in IsolateData.
+ // Asserted to be equal to the actual size in external-pointer-table.cc.
+ static int constexpr kSize = 3 * kSystemPointerSize;
+
+ ExternalPointerTable() = default;
+
+ // Initializes this external pointer table by reserving the backing memory
+ // and initializing the freelist.
+ inline void Init(Isolate* isolate);
+
+ // Resets this external pointer table and deletes all associated memory.
+ inline void TearDown();
+
+ // Retrieves the entry at the given index.
+ //
+ // This method is atomic and can be called from background threads.
+ inline Address Get(uint32_t index, ExternalPointerTag tag) const;
+
+ // Sets the entry at the given index to the given value.
+ //
+ // This method is atomic and can be called from background threads.
+ inline void Set(uint32_t index, Address value, ExternalPointerTag tag);
+
+ // Allocates a new entry in the external pointer table. The caller must
+ // initialize the entry afterwards through set(). In particular, the caller is
+ // responsible for setting the mark bit of the new entry.
+ // TODO(saelo) this can fail, in which case we should probably do GC + retry.
+ //
+ // This method is atomic and can be called from background threads.
+ inline uint32_t Allocate();
+
+ // Runtime function called from CSA. Internally just calls Allocate().
+ static uint32_t AllocateEntry(ExternalPointerTable* table);
+
+ // Marks the specified entry as alive.
+ //
+ // This method is atomic and can be called from background threads.
+ inline void Mark(uint32_t index);
+
+ // Frees unmarked entries.
+ //
+ // This method must be called on the mutator thread or while that thread is
+ // stopped.
+ //
+ // Returns the number of live entries after sweeping.
+ uint32_t Sweep(Isolate* isolate);
+
+ private:
+ // Required for Isolate::CheckIsolateLayout().
+ friend class Isolate;
+
+ // An external pointer table grows in blocks of this size. This is also the
+ // initial size of the table.
+ static const size_t kBlockSize = 64 * KB;
+ static const size_t kEntriesPerBlock = kBlockSize / kSystemPointerSize;
+
+ static const Address kExternalPointerMarkBit = 1ULL << 63;
+
+ // Returns true if this external pointer table has been initialized.
+ bool is_initialized() { return buffer_ != kNullAddress; }
+
+ // Extends the table and adds newly created entries to the freelist. Returns
+ // the new freelist head. When calling this method, mutex_ must be locked.
+ //
+ // TODO(saelo) this can fail, deal with that appropriately.
+ uint32_t Grow();
+
+ // Computes the address of the specified entry.
+ inline Address entry_address(uint32_t index) const {
+ return buffer_ + index * sizeof(Address);
+ }
+
+ // Loads the value at the given index. This method is non-atomic, only use it
+ // when no other threads can currently access the table.
+ inline Address load(uint32_t index) const {
+ return base::Memory<Address>(entry_address(index));
+ }
+
+ // Stores the provided value at the given index. This method is non-atomic,
+ // only use it when no other threads can currently access the table.
+ inline void store(uint32_t index, Address value) {
+ base::Memory<Address>(entry_address(index)) = value;
+ }
+
+ // Atomically loads the value at the given index.
+ inline Address load_atomic(uint32_t index) const {
+ auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
+ return base::Relaxed_Load(addr);
+ }
+
+ // Atomically stores the provided value at the given index.
+ inline void store_atomic(uint32_t index, Address value) {
+ auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
+ base::Relaxed_Store(addr, value);
+ }
+
+ static bool is_marked(Address entry) {
+ return (entry & kExternalPointerMarkBit) == kExternalPointerMarkBit;
+ }
+
+ static Address set_mark_bit(Address entry) {
+ return entry | kExternalPointerMarkBit;
+ }
+
+ static Address clear_mark_bit(Address entry) {
+ return entry & ~kExternalPointerMarkBit;
+ }
+
+ static bool is_free(Address entry) {
+ return (entry & kExternalPointerFreeEntryTag) ==
+ kExternalPointerFreeEntryTag;
+ }
+
+ static Address make_freelist_entry(uint32_t current_freelist_head) {
+ // The next freelist entry is stored in the lower 32 bits of the entry.
+ Address entry = current_freelist_head;
+ return entry | kExternalPointerFreeEntryTag;
+ }
+
+ // The buffer backing this table. This is const after initialization. Should
+ // only be accessed using the load_x() and store_x() methods, which take care
+ // of atomicicy if necessary.
+ Address buffer_ = kNullAddress;
+
+ // The current capacity of this table, which is the number of usable entries.
+ uint32_t capacity_ = 0;
+
+ // The index of the first entry on the freelist or zero if the list is empty.
+ uint32_t freelist_head_ = 0;
+
+ // Lock protecting the slow path for entry allocation, in particular Grow().
+ // As the size of this structure must be predictable (it's part of
+ // IsolateData), it cannot directly contain a Mutex and so instead contains a
+ // pointer to one.
+ base::Mutex* mutex_ = nullptr;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_IS_AVAILABLE
+
+#endif // V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
diff --git a/deps/v8/src/security/external-pointer.h b/deps/v8/src/sandbox/external-pointer.h
index 1c29a46b60..cc81df3995 100644
--- a/deps/v8/src/security/external-pointer.h
+++ b/deps/v8/src/sandbox/external-pointer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SECURITY_EXTERNAL_POINTER_H_
-#define V8_SECURITY_EXTERNAL_POINTER_H_
+#ifndef V8_SANDBOX_EXTERNAL_POINTER_H_
+#define V8_SANDBOX_EXTERNAL_POINTER_H_
#include "src/common/globals.h"
@@ -18,11 +18,10 @@ V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
constexpr ExternalPointer_t kNullExternalPointer = 0;
-// Creates uninitialized entry in external pointer table and writes the entry id
-// to the field.
-// When sandbox is not enabled, it's a no-op.
-V8_INLINE void InitExternalPointerField(Address field_address,
- Isolate* isolate);
+// Creates zero-initialized entry in external pointer table and writes the entry
+// id to the field. When sandbox is not enabled, it's a no-op.
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ ExternalPointerTag tag);
// Creates and initializes entry in external pointer table and writes the entry
// id to the field.
@@ -31,6 +30,9 @@ V8_INLINE void InitExternalPointerField(Address field_address,
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
Address value, ExternalPointerTag tag);
+// Reads and returns a raw external pointer value.
+V8_INLINE ExternalPointer_t ReadRawExternalPointerField(Address field_address);
+
// Reads external pointer for the field, and decodes it if the sandbox is
// enabled.
V8_INLINE Address ReadExternalPointerField(Address field_address,
@@ -45,4 +47,4 @@ V8_INLINE void WriteExternalPointerField(Address field_address,
} // namespace internal
} // namespace v8
-#endif // V8_SECURITY_EXTERNAL_POINTER_H_
+#endif // V8_SANDBOX_EXTERNAL_POINTER_H_
diff --git a/deps/v8/src/sandbox/sandbox.cc b/deps/v8/src/sandbox/sandbox.cc
new file mode 100644
index 0000000000..aaeabc2c8c
--- /dev/null
+++ b/deps/v8/src/sandbox/sandbox.cc
@@ -0,0 +1,332 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/sandbox/sandbox.h"
+
+#include "include/v8-internal.h"
+#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/cpu.h"
+#include "src/base/emulated-virtual-address-subspace.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/base/virtual-address-space-page-allocator.h"
+#include "src/base/virtual-address-space.h"
+#include "src/flags/flags.h"
+#include "src/sandbox/sandboxed-pointer.h"
+#include "src/utils/allocation.h"
+
+#if defined(V8_OS_WIN)
+#include <windows.h>
+// This has to come after windows.h.
+#include <versionhelpers.h> // For IsWindows8Point1OrGreater().
+#endif
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+// Best-effort helper function to determine the size of the userspace virtual
+// address space. Used to determine appropriate sandbox size and placement.
+static Address DetermineAddressSpaceLimit() {
+#ifndef V8_TARGET_ARCH_64_BIT
+#error Unsupported target architecture.
+#endif
+
+ // Assume 48 bits by default, which seems to be the most common configuration.
+ constexpr unsigned kDefaultVirtualAddressBits = 48;
+ // 36 bits should realistically be the lowest value we could ever see.
+ constexpr unsigned kMinVirtualAddressBits = 36;
+ constexpr unsigned kMaxVirtualAddressBits = 64;
+
+ constexpr size_t kMinVirtualAddressSpaceSize = 1ULL << kMinVirtualAddressBits;
+ static_assert(kMinVirtualAddressSpaceSize >= kSandboxMinimumSize,
+ "The minimum sandbox size should be smaller or equal to the "
+ "smallest possible userspace address space. Otherwise, large "
+ "parts of the sandbox will not be usable on those platforms.");
+
+#ifdef V8_TARGET_ARCH_X64
+ base::CPU cpu;
+ Address virtual_address_bits = kDefaultVirtualAddressBits;
+ if (cpu.exposes_num_virtual_address_bits()) {
+ virtual_address_bits = cpu.num_virtual_address_bits();
+ }
+#else
+ // TODO(saelo) support ARM and possibly other CPUs as well.
+ Address virtual_address_bits = kDefaultVirtualAddressBits;
+#endif
+
+ // Guard against nonsensical values.
+ if (virtual_address_bits < kMinVirtualAddressBits ||
+ virtual_address_bits > kMaxVirtualAddressBits) {
+ virtual_address_bits = kDefaultVirtualAddressBits;
+ }
+
+ // Assume virtual address space is split 50/50 between userspace and kernel.
+ Address userspace_virtual_address_bits = virtual_address_bits - 1;
+ Address address_space_limit = 1ULL << userspace_virtual_address_bits;
+
+#if defined(V8_OS_WIN_X64)
+ if (!IsWindows8Point1OrGreater()) {
+ // On Windows pre 8.1 userspace is limited to 8TB on X64. See
+ // https://docs.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases
+ address_space_limit = 8ULL * TB;
+ }
+#endif // V8_OS_WIN_X64
+
+ // TODO(saelo) we could try allocating memory in the upper half of the address
+ // space to see if it is really usable.
+ return address_space_limit;
+}
+
+bool Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
+ // Take the number of virtual address bits into account when determining the
+ // size of the sandbox. For example, if there are only 39 bits available,
+ // split evenly between userspace and kernel, then userspace can only address
+ // 256GB and so we use a quarter of that, 64GB, as maximum size.
+ Address address_space_limit = DetermineAddressSpaceLimit();
+ size_t max_sandbox_size = address_space_limit / 4;
+ size_t sandbox_size = std::min(kSandboxSize, max_sandbox_size);
+ size_t size_to_reserve = sandbox_size;
+
+ // If the size is less than the minimum sandbox size though, we fall back to
+ // creating a partially reserved sandbox, as that allows covering more virtual
+ // address space. This happens for CPUs with only 36 virtual address bits, in
+ // which case the sandbox size would end up being only 8GB.
+ bool partially_reserve = false;
+ if (sandbox_size < kSandboxMinimumSize) {
+ static_assert(
+ (8ULL * GB) >= kSandboxMinimumReservationSize,
+ "Minimum reservation size for a partially reserved sandbox must be at "
+ "most 8GB to support CPUs with only 36 virtual address bits");
+ size_to_reserve = sandbox_size;
+ sandbox_size = kSandboxMinimumSize;
+ partially_reserve = true;
+ }
+
+#if defined(V8_OS_WIN)
+ if (!IsWindows8Point1OrGreater()) {
+ // On Windows pre 8.1, reserving virtual memory is an expensive operation,
+ // apparently because the OS already charges for the memory required for
+ // all page table entries. For example, a 1TB reservation increases private
+ // memory usage by 2GB. As such, it is not possible to create a proper
+ // sandbox there and so a partially reserved sandbox is created which
+ // doesn't reserve most of the virtual memory, and so doesn't incur the
+ // cost, but also doesn't provide the desired security benefits.
+ size_to_reserve = kSandboxMinimumReservationSize;
+ partially_reserve = true;
+ }
+#endif // V8_OS_WIN
+
+ if (!vas->CanAllocateSubspaces()) {
+ // If we cannot create virtual memory subspaces, we also need to fall back
+ // to creating a partially reserved sandbox. In practice, this should only
+ // happen on Windows version before Windows 10, maybe including early
+ // Windows 10 releases, where the necessary memory management APIs, in
+ // particular, VirtualAlloc2, are not available. This check should also in
+ // practice subsume the preceeding one for Windows 8 and earlier, but we'll
+ // keep both just to be sure since there the partially reserved sandbox is
+ // technically required for a different reason (large virtual memory
+ // reservations being too expensive).
+ size_to_reserve = kSandboxMinimumReservationSize;
+ partially_reserve = true;
+ }
+
+ // In any case, the sandbox must be at most as large as our address space.
+ DCHECK_LE(sandbox_size, address_space_limit);
+
+ if (partially_reserve) {
+ return InitializeAsPartiallyReservedSandbox(vas, sandbox_size,
+ size_to_reserve);
+ } else {
+ const bool use_guard_regions = true;
+ bool success = Initialize(vas, sandbox_size, use_guard_regions);
+#ifdef V8_SANDBOXED_POINTERS
+ // If sandboxed pointers are enabled, we need the sandbox to be initialized,
+ // so fall back to creating a partially reserved sandbox.
+ if (!success) {
+ // Instead of going for the minimum reservation size directly, we could
+ // also first try a couple of larger reservation sizes if that is deemed
+ // sensible in the future.
+ success = InitializeAsPartiallyReservedSandbox(
+ vas, sandbox_size, kSandboxMinimumReservationSize);
+ }
+#endif // V8_SANDBOXED_POINTERS
+ return success;
+ }
+}
+
+bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
+ bool use_guard_regions) {
+ CHECK(!initialized_);
+ CHECK(!disabled_);
+ CHECK(base::bits::IsPowerOfTwo(size));
+ CHECK_GE(size, kSandboxMinimumSize);
+ CHECK(vas->CanAllocateSubspaces());
+
+ // Currently, we allow the sandbox to be smaller than the requested size.
+ // This way, we can gracefully handle address space reservation failures
+ // during the initial rollout and can collect data on how often these occur.
+ // In the future, we will likely either require the sandbox to always have a
+ // fixed size or will design SandboxedPointers (pointers that are guaranteed
+ // to point into the sandbox) in a way that doesn't reduce the sandbox's
+ // security properties if it has a smaller size. Which of these options is
+ // ultimately taken likey depends on how frequently sandbox reservation
+ // failures occur in practice.
+ size_t reservation_size;
+ while (!address_space_ && size >= kSandboxMinimumSize) {
+ reservation_size = size;
+ if (use_guard_regions) {
+ reservation_size += 2 * kSandboxGuardRegionSize;
+ }
+
+ Address hint = RoundDown(vas->RandomPageAddress(), kSandboxAlignment);
+
+ // There should be no executable pages mapped inside the sandbox since
+ // those could be corrupted by an attacker and therefore pose a security
+ // risk. Furthermore, allowing executable mappings in the sandbox requires
+ // MAP_JIT on macOS, which causes fork() to become excessively slow
+ // (multiple seconds or even minutes for a 1TB sandbox on macOS 12.X), in
+ // turn causing tests to time out. As such, the maximum page permission
+ // inside the sandbox should be read + write.
+ address_space_ = vas->AllocateSubspace(
+ hint, reservation_size, kSandboxAlignment, PagePermissions::kReadWrite);
+ if (!address_space_) {
+ size /= 2;
+ }
+ }
+
+ if (!address_space_) return false;
+
+ reservation_base_ = address_space_->base();
+ base_ = reservation_base_;
+ if (use_guard_regions) {
+ base_ += kSandboxGuardRegionSize;
+ }
+
+ size_ = size;
+ end_ = base_ + size_;
+ reservation_size_ = reservation_size;
+
+ if (use_guard_regions) {
+ Address front = reservation_base_;
+ Address back = end_;
+ // These must succeed since nothing was allocated in the subspace yet.
+ CHECK(address_space_->AllocateGuardRegion(front, kSandboxGuardRegionSize));
+ CHECK(address_space_->AllocateGuardRegion(back, kSandboxGuardRegionSize));
+ }
+
+ sandbox_page_allocator_ =
+ std::make_unique<base::VirtualAddressSpacePageAllocator>(
+ address_space_.get());
+
+ initialized_ = true;
+ is_partially_reserved_ = false;
+
+ InitializeConstants();
+
+ return true;
+}
+
+bool Sandbox::InitializeAsPartiallyReservedSandbox(v8::VirtualAddressSpace* vas,
+ size_t size,
+ size_t size_to_reserve) {
+ CHECK(!initialized_);
+ CHECK(!disabled_);
+ CHECK(base::bits::IsPowerOfTwo(size));
+ CHECK(base::bits::IsPowerOfTwo(size_to_reserve));
+ CHECK_GE(size, kSandboxMinimumSize);
+ CHECK_LT(size_to_reserve, size);
+
+ // Use a custom random number generator here to ensure that we get uniformly
+ // distributed random numbers. We figure out the available address space
+ // ourselves, and so are potentially better positioned to determine a good
+ // base address for the sandbox than the embedder.
+ base::RandomNumberGenerator rng;
+ if (FLAG_random_seed != 0) {
+ rng.SetSeed(FLAG_random_seed);
+ }
+
+ // We try to ensure that base + size is still (mostly) within the process'
+ // address space, even though we only reserve a fraction of the memory. For
+ // that, we attempt to map the sandbox into the first half of the usable
+ // address space. This keeps the implementation simple and should, In any
+ // realistic scenario, leave plenty of space after the actual reservation.
+ Address address_space_end = DetermineAddressSpaceLimit();
+ Address highest_allowed_address = address_space_end / 2;
+ DCHECK(base::bits::IsPowerOfTwo(highest_allowed_address));
+ constexpr int kMaxAttempts = 10;
+ for (int i = 1; i <= kMaxAttempts; i++) {
+ Address hint = rng.NextInt64() % highest_allowed_address;
+ hint = RoundDown(hint, kSandboxAlignment);
+
+ reservation_base_ = vas->AllocatePages(
+ hint, size_to_reserve, kSandboxAlignment, PagePermissions::kNoAccess);
+
+ if (!reservation_base_) return false;
+
+ // Take this base if it meets the requirements or if this is the last
+ // attempt.
+ if (reservation_base_ <= highest_allowed_address || i == kMaxAttempts)
+ break;
+
+ // Can't use this base, so free the reservation and try again
+ vas->FreePages(reservation_base_, size_to_reserve);
+ reservation_base_ = kNullAddress;
+ }
+ DCHECK(reservation_base_);
+
+ base_ = reservation_base_;
+ size_ = size;
+ end_ = base_ + size_;
+ reservation_size_ = size_to_reserve;
+ initialized_ = true;
+ is_partially_reserved_ = true;
+ address_space_ = std::make_unique<base::EmulatedVirtualAddressSubspace>(
+ vas, reservation_base_, reservation_size_, size_);
+ sandbox_page_allocator_ =
+ std::make_unique<base::VirtualAddressSpacePageAllocator>(
+ address_space_.get());
+
+ InitializeConstants();
+
+ return true;
+}
+
+void Sandbox::InitializeConstants() {
+#ifdef V8_SANDBOXED_POINTERS
+ // Place the empty backing store buffer at the end of the sandbox, so that any
+ // accidental access to it will most likely hit a guard page.
+ constants_.set_empty_backing_store_buffer(base_ + size_ - 1);
+#endif
+}
+
+void Sandbox::TearDown() {
+ if (initialized_) {
+ // This destroys the sub space and frees the underlying reservation.
+ address_space_.reset();
+ sandbox_page_allocator_.reset();
+ base_ = kNullAddress;
+ end_ = kNullAddress;
+ size_ = 0;
+ reservation_base_ = kNullAddress;
+ reservation_size_ = 0;
+ initialized_ = false;
+ is_partially_reserved_ = false;
+#ifdef V8_SANDBOXED_POINTERS
+ constants_.Reset();
+#endif
+ }
+ disabled_ = false;
+}
+
+#endif // V8_SANDBOX_IS_AVAILABLE
+
+#ifdef V8_SANDBOX
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Sandbox, GetProcessWideSandbox)
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/sandbox/sandbox.h b/deps/v8/src/sandbox/sandbox.h
new file mode 100644
index 0000000000..20f2343db5
--- /dev/null
+++ b/deps/v8/src/sandbox/sandbox.h
@@ -0,0 +1,195 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SANDBOX_SANDBOX_H_
+#define V8_SANDBOX_SANDBOX_H_
+
+#include "include/v8-internal.h"
+#include "include/v8-platform.h"
+#include "include/v8config.h"
+#include "src/common/globals.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+
+namespace internal {
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+/**
+ * The V8 Sandbox.
+ *
+ * When enabled, V8 reserves a large region of virtual address space - the
+ * sandbox - and places most of its objects inside of it. It is then assumed
+ * that an attacker can, by exploiting a vulnerability in V8, corrupt memory
+ * inside the sandbox arbitrarily and from different threads. The sandbox
+ * attempts to stop an attacker from corrupting other memory in the process.
+ *
+ * The sandbox relies on a number of different mechanisms to achieve its goal.
+ * For example, objects inside the sandbox can reference each other through
+ * offsets from the start of the sandbox ("sandboxed pointers") instead of raw
+ * pointers, and external objects can be referenced through indices into a
+ * per-Isolate table of external pointers ("sandboxed external pointers").
+ *
+ * The pointer compression region, which contains most V8 objects, and inside
+ * of which compressed (32-bit) pointers are used, is located at the start of
+ * the sandbox. The remainder of the sandbox is mostly used for memory
+ * buffers, in particular ArrayBuffer backing stores and WASM memory cages.
+ *
+ * As the embedder is responsible for providing ArrayBuffer allocators, V8
+ * exposes the virtual address space backing the sandbox to the embedder.
+ */
+class V8_EXPORT_PRIVATE Sandbox {
+ public:
+ // +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
+ // | 32 GB | (Ideally) 1 TB | 32 GB |
+ // | | | |
+ // | Guard | 4 GB : ArrayBuffer backing stores, | Guard |
+ // | Region | V8 Heap : WASM memory buffers, and | Region |
+ // | (front) | Region : any other sandboxed objects. | (back) |
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // ^ ^
+ // base base + size
+
+ Sandbox() = default;
+
+ Sandbox(const Sandbox&) = delete;
+ Sandbox& operator=(Sandbox&) = delete;
+
+ bool Initialize(v8::VirtualAddressSpace* vas);
+ void Disable() {
+ CHECK(!initialized_);
+ disabled_ = true;
+ }
+
+ void TearDown();
+
+ bool is_initialized() const { return initialized_; }
+ bool is_disabled() const { return disabled_; }
+ bool is_enabled() const { return !disabled_; }
+ bool is_partially_reserved() const { return is_partially_reserved_; }
+
+ Address base() const { return base_; }
+ Address end() const { return end_; }
+ size_t size() const { return size_; }
+
+ Address base_address() const { return reinterpret_cast<Address>(&base_); }
+ Address end_address() const { return reinterpret_cast<Address>(&end_); }
+ Address size_address() const { return reinterpret_cast<Address>(&size_); }
+
+ v8::PageAllocator* page_allocator() const {
+ return sandbox_page_allocator_.get();
+ }
+
+ v8::VirtualAddressSpace* address_space() const {
+ return address_space_.get();
+ }
+
+ bool Contains(Address addr) const {
+ return addr >= base_ && addr < base_ + size_;
+ }
+
+ bool Contains(void* ptr) const {
+ return Contains(reinterpret_cast<Address>(ptr));
+ }
+
+#ifdef V8_SANDBOXED_POINTERS
+ class SandboxedPointerConstants final {
+ public:
+ Address empty_backing_store_buffer() const {
+ return empty_backing_store_buffer_;
+ }
+ Address empty_backing_store_buffer_address() const {
+ return reinterpret_cast<Address>(&empty_backing_store_buffer_);
+ }
+ void set_empty_backing_store_buffer(Address value) {
+ empty_backing_store_buffer_ = value;
+ }
+
+ void Reset() { empty_backing_store_buffer_ = 0; }
+
+ private:
+ Address empty_backing_store_buffer_ = 0;
+ };
+ const SandboxedPointerConstants& constants() const { return constants_; }
+#endif
+
+ private:
+ // The SequentialUnmapperTest calls the private Initialize method to create a
+ // sandbox without guard regions, which would consume too much memory.
+ friend class SequentialUnmapperTest;
+
+ // These tests call the private Initialize methods below.
+ FRIEND_TEST(SandboxTest, InitializationWithSize);
+ FRIEND_TEST(SandboxTest, PartiallyReservedSandboxInitialization);
+ FRIEND_TEST(SandboxTest, PartiallyReservedSandboxPageAllocation);
+
+ // We allow tests to disable the guard regions around the sandbox. This is
+ // useful for example for tests like the SequentialUnmapperTest which track
+ // page allocations and so would incur a large overhead from the guard
+ // regions. The provided virtual address space must be able to allocate
+ // subspaces. The size must be a multiple of the allocation granularity of the
+ // virtual memory space.
+ bool Initialize(v8::VirtualAddressSpace* vas, size_t size,
+ bool use_guard_regions);
+
+ // Used when reserving virtual memory is too expensive. A partially reserved
+ // sandbox does not reserve all of its virtual memory and so doesn't have the
+ // desired security properties as unrelated mappings could end up inside of
+ // it and be corrupted. The size and size_to_reserve parameters must be
+ // multiples of the allocation granularity of the virtual address space.
+ bool InitializeAsPartiallyReservedSandbox(v8::VirtualAddressSpace* vas,
+ size_t size,
+ size_t size_to_reserve);
+
+ // Initialize the constant objects for this sandbox. Called by the Initialize
+ // methods above.
+ void InitializeConstants();
+
+ Address base_ = kNullAddress;
+ Address end_ = kNullAddress;
+ size_t size_ = 0;
+
+ // Base and size of the virtual memory reservation backing this sandbox.
+ // These can be different from the sandbox base and size due to guard regions
+ // or when a fake sandbox is used.
+ Address reservation_base_ = kNullAddress;
+ size_t reservation_size_ = 0;
+
+ bool initialized_ = false;
+ bool disabled_ = false;
+ bool is_partially_reserved_ = false;
+
+ // The virtual address subspace backing the sandbox.
+ std::unique_ptr<v8::VirtualAddressSpace> address_space_;
+
+ // The page allocator instance for this sandbox.
+ std::unique_ptr<v8::PageAllocator> sandbox_page_allocator_;
+
+#ifdef V8_SANDBOXED_POINTERS
+ // Constant objects inside this sandbox.
+ SandboxedPointerConstants constants_;
+#endif
+};
+
+#endif // V8_SANDBOX_IS_AVAILABLE
+
+#ifdef V8_SANDBOX
+// This function is only available when the sandbox is actually used.
+V8_EXPORT_PRIVATE Sandbox* GetProcessWideSandbox();
+#endif
+
+V8_INLINE void* EmptyBackingStoreBuffer() {
+#ifdef V8_SANDBOXED_POINTERS
+ return reinterpret_cast<void*>(
+ GetProcessWideSandbox()->constants().empty_backing_store_buffer());
+#else
+ return nullptr;
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_SANDBOX_H_
diff --git a/deps/v8/src/sandbox/sandboxed-pointer-inl.h b/deps/v8/src/sandbox/sandboxed-pointer-inl.h
new file mode 100644
index 0000000000..3682be6974
--- /dev/null
+++ b/deps/v8/src/sandbox/sandboxed-pointer-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SANDBOX_SANDBOXED_POINTER_INL_H_
+#define V8_SANDBOX_SANDBOXED_POINTER_INL_H_
+
+#include "include/v8-internal.h"
+#include "src/common/ptr-compr.h"
+#include "src/execution/isolate.h"
+#include "src/sandbox/sandboxed-pointer.h"
+
+namespace v8 {
+namespace internal {
+
+V8_INLINE Address ReadSandboxedPointerField(Address field_address,
+ PtrComprCageBase cage_base) {
+#ifdef V8_SANDBOXED_POINTERS
+ SandboxedPointer_t sandboxed_pointer =
+ base::ReadUnalignedValue<SandboxedPointer_t>(field_address);
+
+ Address offset = sandboxed_pointer >> kSandboxedPointerShift;
+ Address pointer = cage_base.address() + offset;
+ return pointer;
+#else
+ return ReadMaybeUnalignedValue<Address>(field_address);
+#endif
+}
+
+V8_INLINE void WriteSandboxedPointerField(Address field_address,
+ PtrComprCageBase cage_base,
+ Address pointer) {
+#ifdef V8_SANDBOXED_POINTERS
+ // The pointer must point into the sandbox.
+ CHECK(GetProcessWideSandbox()->Contains(pointer));
+
+ Address offset = pointer - cage_base.address();
+ SandboxedPointer_t sandboxed_pointer = offset << kSandboxedPointerShift;
+ base::WriteUnalignedValue<SandboxedPointer_t>(field_address,
+ sandboxed_pointer);
+#else
+ WriteMaybeUnalignedValue<Address>(field_address, pointer);
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_SANDBOXED_POINTER_INL_H_
diff --git a/deps/v8/src/sandbox/sandboxed-pointer.h b/deps/v8/src/sandbox/sandboxed-pointer.h
new file mode 100644
index 0000000000..8490d49815
--- /dev/null
+++ b/deps/v8/src/sandbox/sandboxed-pointer.h
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SANDBOX_SANDBOXED_POINTER_H_
+#define V8_SANDBOX_SANDBOXED_POINTER_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+V8_INLINE Address ReadSandboxedPointerField(Address field_address,
+ PtrComprCageBase cage_base);
+
+V8_INLINE void WriteSandboxedPointerField(Address field_address,
+ PtrComprCageBase cage_base,
+ Address value);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_SANDBOXED_POINTER_H_
diff --git a/deps/v8/src/security/caged-pointer-inl.h b/deps/v8/src/security/caged-pointer-inl.h
deleted file mode 100644
index 93cd95a6bf..0000000000
--- a/deps/v8/src/security/caged-pointer-inl.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SECURITY_CAGED_POINTER_INL_H_
-#define V8_SECURITY_CAGED_POINTER_INL_H_
-
-#include "include/v8-internal.h"
-#include "src/execution/isolate.h"
-#include "src/security/caged-pointer.h"
-
-namespace v8 {
-namespace internal {
-
-V8_INLINE Address ReadCagedPointerField(Address field_address,
- PtrComprCageBase cage_base) {
-#ifdef V8_CAGED_POINTERS
- // Caged pointers are currently only used if the sandbox is enabled.
- DCHECK(V8_HEAP_SANDBOX_BOOL);
-
- CagedPointer_t caged_pointer =
- base::ReadUnalignedValue<CagedPointer_t>(field_address);
-
- Address offset = caged_pointer >> kCagedPointerShift;
- Address pointer = cage_base.address() + offset;
- return pointer;
-#else
- return base::ReadUnalignedValue<Address>(field_address);
-#endif
-}
-
-V8_INLINE void WriteCagedPointerField(Address field_address,
- PtrComprCageBase cage_base,
- Address pointer) {
-#ifdef V8_CAGED_POINTERS
- // Caged pointers are currently only used if the sandbox is enabled.
- DCHECK(V8_HEAP_SANDBOX_BOOL);
-
- // The pointer must point into the virtual memory cage.
- DCHECK(GetProcessWideVirtualMemoryCage()->Contains(pointer));
-
- Address offset = pointer - cage_base.address();
- CagedPointer_t caged_pointer = offset << kCagedPointerShift;
- base::WriteUnalignedValue<CagedPointer_t>(field_address, caged_pointer);
-#else
- base::WriteUnalignedValue<Address>(field_address, pointer);
-#endif
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SECURITY_CAGED_POINTER_INL_H_
diff --git a/deps/v8/src/security/caged-pointer.h b/deps/v8/src/security/caged-pointer.h
deleted file mode 100644
index 30c3b40db8..0000000000
--- a/deps/v8/src/security/caged-pointer.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SECURITY_CAGED_POINTER_H_
-#define V8_SECURITY_CAGED_POINTER_H_
-
-#include "src/common/globals.h"
-
-namespace v8 {
-namespace internal {
-
-V8_INLINE Address ReadCagedPointerField(Address field_address,
- PtrComprCageBase cage_base);
-
-V8_INLINE void WriteCagedPointerField(Address field_address,
- PtrComprCageBase cage_base,
- Address value);
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SECURITY_CAGED_POINTER_H_
diff --git a/deps/v8/src/security/external-pointer-table.cc b/deps/v8/src/security/external-pointer-table.cc
deleted file mode 100644
index 90bd49e7a0..0000000000
--- a/deps/v8/src/security/external-pointer-table.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/security/external-pointer-table.h"
-
-#include "src/base/platform/wrappers.h"
-
-namespace v8 {
-namespace internal {
-
-void ExternalPointerTable::GrowTable(ExternalPointerTable* table) {
- // TODO(v8:10391, saelo): overflow check here and in the multiplication below
- uint32_t new_capacity = table->capacity_ + table->capacity_ / 2;
- table->buffer_ = reinterpret_cast<Address*>(
- base::Realloc(table->buffer_, new_capacity * sizeof(Address)));
- CHECK(table->buffer_);
- memset(&table->buffer_[table->capacity_], 0,
- (new_capacity - table->capacity_) * sizeof(Address));
- table->capacity_ = new_capacity;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/security/external-pointer-table.h b/deps/v8/src/security/external-pointer-table.h
deleted file mode 100644
index 6a96bab13c..0000000000
--- a/deps/v8/src/security/external-pointer-table.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SECURITY_EXTERNAL_POINTER_TABLE_H_
-#define V8_SECURITY_EXTERNAL_POINTER_TABLE_H_
-
-#include "src/base/platform/wrappers.h"
-#include "src/security/external-pointer.h"
-#include "src/utils/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class V8_EXPORT_PRIVATE ExternalPointerTable {
- public:
- static const int kExternalPointerTableInitialCapacity = 1024;
-
- ExternalPointerTable()
- : buffer_(reinterpret_cast<Address*>(base::Calloc(
- kExternalPointerTableInitialCapacity, sizeof(Address)))),
- length_(1),
- capacity_(kExternalPointerTableInitialCapacity),
- freelist_head_(0) {
- // Explicitly setup the invalid nullptr entry.
- STATIC_ASSERT(kNullExternalPointer == 0);
- buffer_[kNullExternalPointer] = kNullAddress;
- }
-
- ~ExternalPointerTable() { base::Free(buffer_); }
-
- Address get(uint32_t index) const {
- CHECK_LT(index, length_);
- return buffer_[index];
- }
-
- void set(uint32_t index, Address value) {
- DCHECK_NE(kNullExternalPointer, index);
- CHECK_LT(index, length_);
- buffer_[index] = value;
- }
-
- uint32_t allocate() {
- uint32_t index = length_++;
- if (index >= capacity_) {
- GrowTable(this);
- }
- DCHECK_NE(kNullExternalPointer, index);
- return index;
- }
-
- // Returns true if the entry exists in the table and therefore it can be read.
- bool is_valid_index(uint32_t index) const {
- // TODO(v8:10391, saelo): also check here if entry is free
- return index < length_;
- }
-
- uint32_t size() const { return length_; }
-
- static void GrowTable(ExternalPointerTable* table);
-
- private:
- friend class Isolate;
-
- Address* buffer_;
- uint32_t length_;
- uint32_t capacity_;
- uint32_t freelist_head_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SECURITY_EXTERNAL_POINTER_TABLE_H_
diff --git a/deps/v8/src/security/vm-cage.cc b/deps/v8/src/security/vm-cage.cc
deleted file mode 100644
index acd2d7c625..0000000000
--- a/deps/v8/src/security/vm-cage.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/security/vm-cage.h"
-
-#include "include/v8-internal.h"
-#include "src/base/bits.h"
-#include "src/base/bounded-page-allocator.h"
-#include "src/base/cpu.h"
-#include "src/base/emulated-virtual-address-subspace.h"
-#include "src/base/lazy-instance.h"
-#include "src/base/utils/random-number-generator.h"
-#include "src/base/virtual-address-space-page-allocator.h"
-#include "src/base/virtual-address-space.h"
-#include "src/flags/flags.h"
-#include "src/security/caged-pointer.h"
-#include "src/utils/allocation.h"
-
-#if defined(V8_OS_WIN)
-#include <windows.h>
-// This has to come after windows.h.
-#include <versionhelpers.h> // For IsWindows8Point1OrGreater().
-#endif
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-// Best-effort helper function to determine the size of the userspace virtual
-// address space. Used to determine appropriate cage size and placement.
-static Address DetermineAddressSpaceLimit() {
-#ifndef V8_TARGET_ARCH_64_BIT
-#error Unsupported target architecture.
-#endif
-
- // Assume 48 bits by default, which seems to be the most common configuration.
- constexpr unsigned kDefaultVirtualAddressBits = 48;
- // 36 bits should realistically be the lowest value we could ever see.
- constexpr unsigned kMinVirtualAddressBits = 36;
- constexpr unsigned kMaxVirtualAddressBits = 64;
-
- constexpr size_t kMinVirtualAddressSpaceSize = 1ULL << kMinVirtualAddressBits;
- static_assert(kMinVirtualAddressSpaceSize >= kVirtualMemoryCageMinimumSize,
- "The minimum cage size should be smaller or equal to the "
- "smallest possible userspace address space. Otherwise, larger "
- "parts of the cage will not be usable on those platforms.");
-
-#ifdef V8_TARGET_ARCH_X64
- base::CPU cpu;
- Address virtual_address_bits = kDefaultVirtualAddressBits;
- if (cpu.exposes_num_virtual_address_bits()) {
- virtual_address_bits = cpu.num_virtual_address_bits();
- }
-#else
- // TODO(saelo) support ARM and possibly other CPUs as well.
- Address virtual_address_bits = kDefaultVirtualAddressBits;
-#endif
-
- // Guard against nonsensical values.
- if (virtual_address_bits < kMinVirtualAddressBits ||
- virtual_address_bits > kMaxVirtualAddressBits) {
- virtual_address_bits = kDefaultVirtualAddressBits;
- }
-
- // Assume virtual address space is split 50/50 between userspace and kernel.
- Address userspace_virtual_address_bits = virtual_address_bits - 1;
- Address address_space_limit = 1ULL << userspace_virtual_address_bits;
-
-#if defined(V8_OS_WIN_X64)
- if (!IsWindows8Point1OrGreater()) {
- // On Windows pre 8.1 userspace is limited to 8TB on X64. See
- // https://docs.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases
- address_space_limit = 8ULL * TB;
- }
-#endif // V8_OS_WIN_X64
-
- // TODO(saelo) we could try allocating memory in the upper half of the address
- // space to see if it is really usable.
- return address_space_limit;
-}
-
-bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas) {
- // Take the number of virtual address bits into account when determining the
- // size of the cage. For example, if there are only 39 bits available, split
- // evenly between userspace and kernel, then userspace can only address 256GB
- // and so we use a quarter of that, 64GB, as maximum cage size.
- Address address_space_limit = DetermineAddressSpaceLimit();
- size_t max_cage_size = address_space_limit / 4;
- size_t cage_size = std::min(kVirtualMemoryCageSize, max_cage_size);
- size_t size_to_reserve = cage_size;
-
- // If the size is less than the minimum cage size though, we fall back to
- // creating a fake cage. This happens for CPUs with only 36 virtual address
- // bits, in which case the cage size would end up being only 8GB.
- bool create_fake_cage = false;
- if (cage_size < kVirtualMemoryCageMinimumSize) {
- static_assert((8ULL * GB) >= kFakeVirtualMemoryCageMinReservationSize,
- "Minimum reservation size for a fake cage must be at most "
- "8GB to support CPUs with only 36 virtual address bits");
- size_to_reserve = cage_size;
- cage_size = kVirtualMemoryCageMinimumSize;
- create_fake_cage = true;
- }
-
-#if defined(V8_OS_WIN)
- if (!IsWindows8Point1OrGreater()) {
- // On Windows pre 8.1, reserving virtual memory is an expensive operation,
- // apparently because the OS already charges for the memory required for
- // all page table entries. For example, a 1TB reservation increases private
- // memory usage by 2GB. As such, it is not possible to create a proper
- // virtual memory cage there and so a fake cage is created which doesn't
- // reserve most of the virtual memory, and so doesn't incur the cost, but
- // also doesn't provide the desired security benefits.
- size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
- create_fake_cage = true;
- }
-#endif // V8_OS_WIN
-
- if (!vas->CanAllocateSubspaces()) {
- // If we cannot create virtual memory subspaces, we also need to fall back
- // to creating a fake cage. In practice, this should only happen on Windows
- // version before Windows 10, maybe including early Windows 10 releases,
- // where the necessary memory management APIs, in particular, VirtualAlloc2,
- // are not available. This check should also in practice subsume the
- // preceeding one for Windows 8 and earlier, but we'll keep both just to be
- // sure since there the fake cage is technically required for a different
- // reason (large virtual memory reservations being too expensive).
- size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
- create_fake_cage = true;
- }
-
- // In any case, the (fake) cage must be at most as large as our address space.
- DCHECK_LE(cage_size, address_space_limit);
-
- if (create_fake_cage) {
- return InitializeAsFakeCage(vas, cage_size, size_to_reserve);
- } else {
- // TODO(saelo) if this fails, we could still fall back to creating a fake
- // cage. Decide if that would make sense.
- const bool use_guard_regions = true;
- return Initialize(vas, cage_size, use_guard_regions);
- }
-}
-
-bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas, size_t size,
- bool use_guard_regions) {
- CHECK(!initialized_);
- CHECK(!disabled_);
- CHECK(base::bits::IsPowerOfTwo(size));
- CHECK_GE(size, kVirtualMemoryCageMinimumSize);
- CHECK(vas->CanAllocateSubspaces());
-
- // Currently, we allow the cage to be smaller than the requested size. This
- // way, we can gracefully handle cage reservation failures during the initial
- // rollout and can collect data on how often these occur. In the future, we
- // will likely either require the cage to always have a fixed size or will
- // design CagedPointers (pointers that are guaranteed to point into the cage,
- // e.g. because they are stored as offsets from the cage base) in a way that
- // doesn't reduce the cage's security properties if it has a smaller size.
- // Which of these options is ultimately taken likey depends on how frequently
- // cage reservation failures occur in practice.
- size_t reservation_size;
- while (!virtual_address_space_ && size >= kVirtualMemoryCageMinimumSize) {
- reservation_size = size;
- if (use_guard_regions) {
- reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
- }
-
- Address hint =
- RoundDown(vas->RandomPageAddress(), kVirtualMemoryCageAlignment);
-
- // Currently, executable memory is still allocated inside the cage. In the
- // future, we should drop that and use kReadWrite as max_permissions.
- virtual_address_space_ = vas->AllocateSubspace(
- hint, reservation_size, kVirtualMemoryCageAlignment,
- PagePermissions::kReadWriteExecute);
- if (!virtual_address_space_) {
- size /= 2;
- }
- }
-
- if (!virtual_address_space_) return false;
-
- reservation_base_ = virtual_address_space_->base();
- base_ = reservation_base_;
- if (use_guard_regions) {
- base_ += kVirtualMemoryCageGuardRegionSize;
- }
-
- size_ = size;
- end_ = base_ + size_;
- reservation_size_ = reservation_size;
-
- if (use_guard_regions) {
- // These must succeed since nothing was allocated in the subspace yet.
- CHECK_EQ(reservation_base_,
- virtual_address_space_->AllocatePages(
- reservation_base_, kVirtualMemoryCageGuardRegionSize,
- vas->allocation_granularity(), PagePermissions::kNoAccess));
- CHECK_EQ(end_,
- virtual_address_space_->AllocatePages(
- end_, kVirtualMemoryCageGuardRegionSize,
- vas->allocation_granularity(), PagePermissions::kNoAccess));
- }
-
- cage_page_allocator_ =
- std::make_unique<base::VirtualAddressSpacePageAllocator>(
- virtual_address_space_.get());
-
- initialized_ = true;
- is_fake_cage_ = false;
-
- InitializeConstants();
-
- return true;
-}
-
-bool V8VirtualMemoryCage::InitializeAsFakeCage(v8::VirtualAddressSpace* vas,
- size_t size,
- size_t size_to_reserve) {
- CHECK(!initialized_);
- CHECK(!disabled_);
- CHECK(base::bits::IsPowerOfTwo(size));
- CHECK(base::bits::IsPowerOfTwo(size_to_reserve));
- CHECK_GE(size, kVirtualMemoryCageMinimumSize);
- CHECK_LT(size_to_reserve, size);
-
- // Use a custom random number generator here to ensure that we get uniformly
- // distributed random numbers. We figure out the available address space
- // ourselves, and so are potentially better positioned to determine a good
- // base address for the cage than the embedder.
- base::RandomNumberGenerator rng;
- if (FLAG_random_seed != 0) {
- rng.SetSeed(FLAG_random_seed);
- }
-
- // We try to ensure that base + size is still (mostly) within the process'
- // address space, even though we only reserve a fraction of the memory. For
- // that, we attempt to map the cage into the first half of the usable address
- // space. This keeps the implementation simple and should, In any realistic
- // scenario, leave plenty of space after the cage reservation.
- Address address_space_end = DetermineAddressSpaceLimit();
- Address highest_allowed_address = address_space_end / 2;
- DCHECK(base::bits::IsPowerOfTwo(highest_allowed_address));
- constexpr int kMaxAttempts = 10;
- for (int i = 1; i <= kMaxAttempts; i++) {
- Address hint = rng.NextInt64() % highest_allowed_address;
- hint = RoundDown(hint, kVirtualMemoryCageAlignment);
-
- reservation_base_ =
- vas->AllocatePages(hint, size_to_reserve, kVirtualMemoryCageAlignment,
- PagePermissions::kNoAccess);
-
- if (!reservation_base_) return false;
-
- // Take this base if it meets the requirements or if this is the last
- // attempt.
- if (reservation_base_ <= highest_allowed_address || i == kMaxAttempts)
- break;
-
- // Can't use this base, so free the reservation and try again
- CHECK(vas->FreePages(reservation_base_, size_to_reserve));
- reservation_base_ = kNullAddress;
- }
- DCHECK(reservation_base_);
-
- base_ = reservation_base_;
- size_ = size;
- end_ = base_ + size_;
- reservation_size_ = size_to_reserve;
- initialized_ = true;
- is_fake_cage_ = true;
- virtual_address_space_ =
- std::make_unique<base::EmulatedVirtualAddressSubspace>(
- vas, reservation_base_, reservation_size_, size_);
- cage_page_allocator_ =
- std::make_unique<base::VirtualAddressSpacePageAllocator>(
- virtual_address_space_.get());
-
- InitializeConstants();
-
- return true;
-}
-
-void V8VirtualMemoryCage::InitializeConstants() {
-#ifdef V8_CAGED_POINTERS
- // Place the empty backing store buffer at the end of the cage, so that any
- // accidental access to it will most likely hit a guard page.
- constants_.set_empty_backing_store_buffer(base_ + size_ - 1);
-#endif
-}
-
-void V8VirtualMemoryCage::TearDown() {
- if (initialized_) {
- // This destroys the sub space and frees the underlying reservation.
- virtual_address_space_.reset();
- cage_page_allocator_.reset();
- base_ = kNullAddress;
- end_ = kNullAddress;
- size_ = 0;
- reservation_base_ = kNullAddress;
- reservation_size_ = 0;
- initialized_ = false;
- is_fake_cage_ = false;
-#ifdef V8_CAGED_POINTERS
- constants_.Reset();
-#endif
- }
- disabled_ = false;
-}
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
- GetProcessWideVirtualMemoryCage)
-#endif
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/security/vm-cage.h b/deps/v8/src/security/vm-cage.h
deleted file mode 100644
index b3f54d9bd1..0000000000
--- a/deps/v8/src/security/vm-cage.h
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SECURITY_VM_CAGE_H_
-#define V8_SECURITY_VM_CAGE_H_
-
-#include "include/v8-internal.h"
-#include "src/base/bounded-page-allocator.h"
-#include "src/common/globals.h"
-
-namespace v8 {
-
-namespace internal {
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-/**
- * V8 Virtual Memory Cage.
- *
- * When the virtual memory cage is enabled, V8 will reserve a large region of
- * virtual address space - the cage - and place most of its objects inside of
- * it. This allows these objects to reference each other through offsets rather
- * than raw pointers, which in turn makes it harder for an attacker to abuse
- * them in an exploit.
- *
- * The pointer compression region, which contains most V8 objects, and inside
- * of which compressed (32-bit) pointers are used, is located at the start of
- * the virtual memory cage. The remainder of the cage is mostly used for memory
- * buffers, in particular ArrayBuffer backing stores and WASM memory cages.
- *
- * It should be assumed that an attacker is able to corrupt data arbitrarily
- * and concurrently inside the virtual memory cage. The heap sandbox, of which
- * the virtual memory cage is one building block, attempts to then stop an
- * attacker from corrupting data outside of the cage.
- *
- * As the embedder is responsible for providing ArrayBuffer allocators, v8
- * exposes a page allocator for the virtual memory cage to the embedder.
- *
- * TODO(chromium:1218005) come up with a coherent naming scheme for this class
- * and the other "cages" in v8.
- */
-class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
- public:
- // +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
- // | 32 GB | (Ideally) 1 TB | 32 GB |
- // | | | |
- // | Guard | 4 GB : ArrayBuffer backing stores, | Guard |
- // | Region | V8 Heap : WASM memory buffers, and | Region |
- // | (front) | Region : any other caged objects. | (back) |
- // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
- // ^ ^
- // base base + size
-
- V8VirtualMemoryCage() = default;
-
- V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
- V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
-
- bool Initialize(v8::VirtualAddressSpace* vas);
- void Disable() {
- CHECK(!initialized_);
- disabled_ = true;
- }
-
- void TearDown();
-
- bool is_initialized() const { return initialized_; }
- bool is_disabled() const { return disabled_; }
- bool is_enabled() const { return !disabled_; }
- bool is_fake_cage() const { return is_fake_cage_; }
-
- Address base() const { return base_; }
- Address end() const { return end_; }
- size_t size() const { return size_; }
-
- Address base_address() const { return reinterpret_cast<Address>(&base_); }
- Address end_address() const { return reinterpret_cast<Address>(&end_); }
- Address size_address() const { return reinterpret_cast<Address>(&size_); }
-
- v8::PageAllocator* page_allocator() const {
- return cage_page_allocator_.get();
- }
-
- v8::VirtualAddressSpace* virtual_address_space() const {
- return virtual_address_space_.get();
- }
-
- bool Contains(Address addr) const {
- return addr >= base_ && addr < base_ + size_;
- }
-
- bool Contains(void* ptr) const {
- return Contains(reinterpret_cast<Address>(ptr));
- }
-
-#ifdef V8_CAGED_POINTERS
- class CagedPointerConstants final {
- public:
- Address empty_backing_store_buffer() const {
- return empty_backing_store_buffer_;
- }
- Address empty_backing_store_buffer_address() const {
- return reinterpret_cast<Address>(&empty_backing_store_buffer_);
- }
- void set_empty_backing_store_buffer(Address value) {
- empty_backing_store_buffer_ = value;
- }
-
- void Reset() { empty_backing_store_buffer_ = 0; }
-
- private:
- Address empty_backing_store_buffer_ = 0;
- };
- const CagedPointerConstants& constants() const { return constants_; }
-#endif
-
- private:
- // The SequentialUnmapperTest calls the private Initialize method to create a
- // cage without guard regions, which would otherwise consume too much memory.
- friend class SequentialUnmapperTest;
-
- // These tests call the private Initialize methods below.
- FRIEND_TEST(VirtualMemoryCageTest, InitializationWithSize);
- FRIEND_TEST(VirtualMemoryCageTest, InitializationAsFakeCage);
- FRIEND_TEST(VirtualMemoryCageTest, FakeCagePageAllocation);
-
- // We allow tests to disable the guard regions around the cage. This is useful
- // for example for tests like the SequentialUnmapperTest which track page
- // allocations and so would incur a large overhead from the guard regions.
- // The provided virtual address space must be able to allocate subspaces.
- // The size must be a multiple of the allocation granularity of the virtual
- // memory space.
- bool Initialize(v8::VirtualAddressSpace* vas, size_t size,
- bool use_guard_regions);
-
- // Used on OSes where reserving virtual memory is too expensive. A fake cage
- // does not reserve all of the virtual memory and so doesn't have the desired
- // security properties.
- // The size and size_to_reserve parameters must be multiples of the
- // allocation granularity of the virtual address space.
- bool InitializeAsFakeCage(v8::VirtualAddressSpace* vas, size_t size,
- size_t size_to_reserve);
-
- // Initialize the caged pointer constants for this cage. Called by the
- // Initialize methods above.
- void InitializeConstants();
-
- Address base_ = kNullAddress;
- Address end_ = kNullAddress;
- size_t size_ = 0;
-
- // Base and size of the virtual memory reservation backing this cage. These
- // can be different from the cage base and size due to guard regions or when a
- // fake cage is used.
- Address reservation_base_ = kNullAddress;
- size_t reservation_size_ = 0;
-
- bool initialized_ = false;
- bool disabled_ = false;
- bool is_fake_cage_ = false;
-
- // The virtual address subspace backing the cage.
- std::unique_ptr<v8::VirtualAddressSpace> virtual_address_space_;
-
- // The page allocator instance for this cage.
- std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
-
-#ifdef V8_CAGED_POINTERS
- // CagedPointer constants inside this cage.
- CagedPointerConstants constants_;
-#endif
-};
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-// This function is only available when the cage is actually used.
-V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
-#endif
-
-V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- Address addr = reinterpret_cast<Address>(ptr);
- return kAllowBackingStoresOutsideCage || addr == kNullAddress ||
- GetProcessWideVirtualMemoryCage()->Contains(addr);
-#else
- return true;
-#endif
-}
-
-V8_INLINE void* EmptyBackingStoreBuffer() {
-#ifdef V8_CAGED_POINTERS
- return reinterpret_cast<void*>(GetProcessWideVirtualMemoryCage()
- ->constants()
- .empty_backing_store_buffer());
-#else
- return nullptr;
-#endif
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SECURITY_VM_CAGE_H_
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 0cf6544300..4924fb5e81 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,4 +1,3 @@
-delphick@chromium.org
jgruber@chromium.org
leszeks@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index fb643ba014..524911893b 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -51,7 +51,7 @@ MaybeHandle<Object> ContextDeserializer::Deserialize(
WeakenDescriptorArrays();
}
- if (FLAG_rehash_snapshot && can_rehash()) Rehash();
+ if (should_rehash()) Rehash();
SetupOffHeapArrayBufferBackingStores();
return result;
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index fb3c41888e..40f1cbdefc 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -32,7 +32,7 @@
#include "src/objects/slots.h"
#include "src/objects/string.h"
#include "src/roots/roots.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/references.h"
#include "src/snapshot/serializer-deserializer.h"
@@ -208,8 +208,8 @@ template <typename TSlot>
int Deserializer<IsolateT>::WriteExternalPointer(TSlot dest, Address value,
ExternalPointerTag tag) {
DCHECK(!next_reference_is_weak_);
+ DCHECK(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
InitExternalPointerField(dest.address(), main_thread_isolate(), value, tag);
- STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
return (kExternalPointerSize / TSlot::kSlotDataSize);
}
@@ -244,7 +244,8 @@ Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
source_(payload),
magic_number_(magic_number),
deserializing_user_code_(deserializing_user_code),
- can_rehash_(can_rehash) {
+ should_rehash_((FLAG_rehash_snapshot && can_rehash) ||
+ deserializing_user_code) {
DCHECK_NOT_NULL(isolate);
isolate->RegisterDeserializerStarted();
@@ -262,7 +263,7 @@ Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
template <typename IsolateT>
void Deserializer<IsolateT>::Rehash() {
- DCHECK(can_rehash() || deserializing_user_code());
+ DCHECK(should_rehash());
for (Handle<HeapObject> item : to_rehash_) {
item->RehashBasedOnMap(isolate());
}
@@ -308,6 +309,7 @@ void Deserializer<IsolateT>::DeserializeDeferredObjects() {
template <typename IsolateT>
void Deserializer<IsolateT>::LogNewMapEvents() {
+ if (V8_LIKELY(!FLAG_log_maps)) return;
DisallowGarbageCollection no_gc;
for (Handle<Map> map : new_maps_) {
DCHECK(FLAG_log_maps);
@@ -319,11 +321,12 @@ void Deserializer<IsolateT>::LogNewMapEvents() {
template <typename IsolateT>
void Deserializer<IsolateT>::WeakenDescriptorArrays() {
DisallowGarbageCollection no_gc;
+ Map descriptor_array_map = ReadOnlyRoots(isolate()).descriptor_array_map();
for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
- DCHECK(descriptor_array->IsStrongDescriptorArray());
- descriptor_array->set_map(ReadOnlyRoots(isolate()).descriptor_array_map());
- WriteBarrier::Marking(*descriptor_array,
- descriptor_array->number_of_descriptors());
+ DescriptorArray raw = *descriptor_array;
+ DCHECK(raw.IsStrongDescriptorArray());
+ raw.set_map_safe_transition(descriptor_array_map);
+ WriteBarrier::Marking(raw, raw.number_of_descriptors());
}
}
@@ -377,83 +380,149 @@ template bool StringTableInsertionKey::IsMatch(LocalIsolate* isolate,
namespace {
-void PostProcessExternalString(Handle<ExternalString> string,
- Isolate* isolate) {
- uint32_t index = string->GetResourceRefForDeserialization();
+void NoExternalReferencesCallback() {
+ // The following check will trigger if a function or object template
+ // with references to native functions have been deserialized from
+ // snapshot, but no actual external references were provided when the
+ // isolate was created.
+ FATAL("No external references provided via API");
+}
+
+void PostProcessExternalString(ExternalString string, Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
+ uint32_t index = string.GetResourceRefForDeserialization();
Address address =
static_cast<Address>(isolate->api_external_references()[index]);
- string->AllocateExternalPointerEntries(isolate);
- string->set_address_as_resource(isolate, address);
- isolate->heap()->UpdateExternalString(*string, 0,
- string->ExternalPayloadSize());
- isolate->heap()->RegisterExternalString(*string);
+ string.AllocateExternalPointerEntries(isolate);
+ string.set_address_as_resource(isolate, address);
+ isolate->heap()->UpdateExternalString(string, 0,
+ string.ExternalPayloadSize());
+ isolate->heap()->RegisterExternalString(string);
}
} // namespace
template <typename IsolateT>
+void Deserializer<IsolateT>::PostProcessNewJSReceiver(
+ Map map, Handle<JSReceiver> obj, JSReceiver raw_obj,
+ InstanceType instance_type, SnapshotSpace space) {
+ DisallowGarbageCollection no_gc;
+ DCHECK_EQ(*obj, raw_obj);
+ DCHECK_EQ(raw_obj.map(), map);
+ DCHECK_EQ(map.instance_type(), instance_type);
+
+ if (InstanceTypeChecker::IsJSDataView(instance_type)) {
+ auto data_view = JSDataView::cast(raw_obj);
+ auto buffer = JSArrayBuffer::cast(data_view.buffer());
+ void* backing_store = EmptyBackingStoreBuffer();
+ uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
+ if (store_index != kEmptyBackingStoreRefSentinel) {
+ // The backing store of the JSArrayBuffer has not been correctly restored
+ // yet, as that may trigger GC. The backing_store field currently contains
+ // a numbered reference to an already deserialized backing store.
+ backing_store = backing_stores_[store_index]->buffer_start();
+ }
+ data_view.set_data_pointer(
+ main_thread_isolate(),
+ reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
+ } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
+ auto typed_array = JSTypedArray::cast(raw_obj);
+ // Fixup typed array pointers.
+ if (typed_array.is_on_heap()) {
+ typed_array.AddExternalPointerCompensationForDeserialization(
+ main_thread_isolate());
+ } else {
+ // Serializer writes backing store ref as a DataPtr() value.
+ uint32_t store_index =
+ typed_array.GetExternalBackingStoreRefForDeserialization();
+ auto backing_store = backing_stores_[store_index];
+ void* start = backing_store ? backing_store->buffer_start()
+ : EmptyBackingStoreBuffer();
+ typed_array.SetOffHeapDataPtr(main_thread_isolate(), start,
+ typed_array.byte_offset());
+ }
+ } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
+ auto buffer = JSArrayBuffer::cast(raw_obj);
+ // Postpone allocation of backing store to avoid triggering the GC.
+ if (buffer.GetBackingStoreRefForDeserialization() !=
+ kEmptyBackingStoreRefSentinel) {
+ new_off_heap_array_buffers_.push_back(Handle<JSArrayBuffer>::cast(obj));
+ } else {
+ buffer.set_backing_store(main_thread_isolate(),
+ EmptyBackingStoreBuffer());
+ }
+ }
+
+ // Check alignment.
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(map)));
+}
+
+template <typename IsolateT>
void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
Handle<HeapObject> obj,
SnapshotSpace space) {
- DCHECK_EQ(*map, obj->map(isolate_));
DisallowGarbageCollection no_gc;
- InstanceType instance_type = map->instance_type();
+ Map raw_map = *map;
+ DCHECK_EQ(raw_map, obj->map(isolate_));
+ InstanceType instance_type = raw_map.instance_type();
- if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
+ // Check alignment.
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(raw_map)));
+ HeapObject raw_obj = *obj;
+ DCHECK_IMPLIES(deserializing_user_code(), should_rehash());
+ if (should_rehash()) {
if (InstanceTypeChecker::IsString(instance_type)) {
// Uninitialize hash field as we need to recompute the hash.
- Handle<String> string = Handle<String>::cast(obj);
- string->set_raw_hash_field(String::kEmptyHashField);
+ String string = String::cast(raw_obj);
+ string.set_raw_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
if (space == SnapshotSpace::kReadOnlyHeap) {
to_rehash_.push_back(obj);
}
- } else if (obj->NeedsRehashing(instance_type)) {
+ } else if (raw_obj.NeedsRehashing(instance_type)) {
to_rehash_.push_back(obj);
}
- }
- if (deserializing_user_code()) {
- if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
- // Canonicalize the internalized string. If it already exists in the
- // string table, set the string to point to the existing one and patch the
- // deserialized string handle to point to the existing one.
- // TODO(leszeks): This handle patching is ugly, consider adding an
- // explicit internalized string bytecode. Also, the new thin string should
- // be dead, try immediately freeing it.
- Handle<String> string = Handle<String>::cast(obj);
-
- StringTableInsertionKey key(
- isolate(), string,
- DeserializingUserCodeOption::kIsDeserializingUserCode);
- Handle<String> result =
- isolate()->string_table()->LookupKey(isolate(), &key);
-
- if (*result != *string) {
- DCHECK(!string->IsShared());
- string->MakeThin(isolate(), *result);
- // Mutate the given object handle so that the backreference entry is
- // also updated.
- obj.PatchValue(*result);
+ if (deserializing_user_code()) {
+ if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
+ // Canonicalize the internalized string. If it already exists in the
+ // string table, set the string to point to the existing one and patch
+ // the deserialized string handle to point to the existing one.
+ // TODO(leszeks): This handle patching is ugly, consider adding an
+ // explicit internalized string bytecode. Also, the new thin string
+ // should be dead, try immediately freeing it.
+ Handle<String> string = Handle<String>::cast(obj);
+
+ StringTableInsertionKey key(
+ isolate(), string,
+ DeserializingUserCodeOption::kIsDeserializingUserCode);
+ String result = *isolate()->string_table()->LookupKey(isolate(), &key);
+
+ if (result != raw_obj) {
+ String::cast(raw_obj).MakeThin(isolate(), result);
+ // Mutate the given object handle so that the backreference entry is
+ // also updated.
+ obj.PatchValue(result);
+ }
+ return;
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
+ new_scripts_.push_back(Handle<Script>::cast(obj));
+ } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
+ // We should link new allocation sites, but we can't do this immediately
+ // because |AllocationSite::HasWeakNext()| internally accesses
+ // |Heap::roots_| that may not have been initialized yet. So defer this
+ // to |ObjectDeserializer::CommitPostProcessedObjects()|.
+ new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
+ } else {
+ DCHECK(CanBeDeferred(*obj));
}
- return;
- } else if (InstanceTypeChecker::IsScript(instance_type)) {
- new_scripts_.push_back(Handle<Script>::cast(obj));
- } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
- // We should link new allocation sites, but we can't do this immediately
- // because |AllocationSite::HasWeakNext()| internally accesses
- // |Heap::roots_| that may not have been initialized yet. So defer this to
- // |ObjectDeserializer::CommitPostProcessedObjects()|.
- new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
- } else {
- DCHECK(CanBeDeferred(*obj));
}
}
- if (InstanceTypeChecker::IsScript(instance_type)) {
- LogScriptEvents(Script::cast(*obj));
- } else if (InstanceTypeChecker::IsCode(instance_type)) {
+ if (InstanceTypeChecker::IsCode(instance_type)) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
@@ -462,11 +531,11 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
}
} else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- auto code_data_container = Handle<CodeDataContainer>::cast(obj);
- code_data_container->set_code_cage_base(isolate()->code_cage_base());
- code_data_container->AllocateExternalPointerEntries(main_thread_isolate());
- code_data_container->UpdateCodeEntryPoint(main_thread_isolate(),
- code_data_container->code());
+ auto code_data_container = CodeDataContainer::cast(raw_obj);
+ code_data_container.set_code_cage_base(isolate()->code_cage_base());
+ code_data_container.AllocateExternalPointerEntries(main_thread_isolate());
+ code_data_container.UpdateCodeEntryPoint(main_thread_isolate(),
+ code_data_container.code());
} else if (InstanceTypeChecker::IsMap(instance_type)) {
if (FLAG_log_maps) {
// Keep track of all seen Maps to log them later since they might be only
@@ -482,65 +551,26 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
#endif
} else if (InstanceTypeChecker::IsExternalString(instance_type)) {
- PostProcessExternalString(Handle<ExternalString>::cast(obj),
+ PostProcessExternalString(ExternalString::cast(raw_obj),
main_thread_isolate());
- } else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
- Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
- JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
- void* backing_store = EmptyBackingStoreBuffer();
- uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
- if (store_index != kEmptyBackingStoreRefSentinel) {
- // The backing store of the JSArrayBuffer has not been correctly restored
- // yet, as that may trigger GC. The backing_store field currently contains
- // a numbered reference to an already deserialized backing store.
- backing_store = backing_stores_[store_index]->buffer_start();
- }
- data_view->set_data_pointer(
- main_thread_isolate(),
- reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
- } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
- // Fixup typed array pointers.
- if (typed_array->is_on_heap()) {
- typed_array->AddExternalPointerCompensationForDeserialization(
- main_thread_isolate());
- } else {
- // Serializer writes backing store ref as a DataPtr() value.
- uint32_t store_index =
- typed_array->GetExternalBackingStoreRefForDeserialization();
- auto backing_store = backing_stores_[store_index];
- void* start = backing_store ? backing_store->buffer_start()
- : EmptyBackingStoreBuffer();
- typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
- typed_array->byte_offset());
- }
- } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
- Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(obj);
- // Postpone allocation of backing store to avoid triggering the GC.
- if (buffer->GetBackingStoreRefForDeserialization() !=
- kEmptyBackingStoreRefSentinel) {
- new_off_heap_array_buffers_.push_back(buffer);
- } else {
- buffer->set_backing_store(main_thread_isolate(),
- EmptyBackingStoreBuffer());
- }
+ } else if (InstanceTypeChecker::IsJSReceiver(instance_type)) {
+ return PostProcessNewJSReceiver(raw_map, Handle<JSReceiver>::cast(obj),
+ JSReceiver::cast(raw_obj), instance_type,
+ space);
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
- Handle<BytecodeArray> bytecode_array = Handle<BytecodeArray>::cast(obj);
- bytecode_array->set_osr_loop_nesting_level(0);
+ BytecodeArray::cast(raw_obj).set_osr_loop_nesting_level(0);
} else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
new_descriptor_arrays_.push_back(descriptors);
} else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
- Handle<NativeContext> context = Handle<NativeContext>::cast(obj);
- context->AllocateExternalPointerEntries(main_thread_isolate());
+ NativeContext::cast(raw_obj).AllocateExternalPointerEntries(
+ main_thread_isolate());
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
+ LogScriptEvents(Script::cast(*obj));
}
-
- // Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
- HeapObject::RequiredAlignment(*map)));
}
template <typename IsolateT>
@@ -652,12 +682,13 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
#ifdef DEBUG
PtrComprCageBase cage_base(isolate());
// We want to make sure that all embedder pointers are initialized to null.
- if (raw_obj.IsJSObject(cage_base) && JSObject::cast(raw_obj).IsApiWrapper()) {
+ if (raw_obj.IsJSObject(cage_base) &&
+ JSObject::cast(raw_obj).MayHaveEmbedderFields()) {
JSObject js_obj = JSObject::cast(raw_obj);
for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
void* pointer;
- CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(
- main_thread_isolate(), &pointer));
+ CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointer(main_thread_isolate(),
+ &pointer));
CHECK_NULL(pointer);
}
} else if (raw_obj.IsEmbedderDataArray(cage_base)) {
@@ -666,7 +697,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
EmbedderDataSlot end(array, array.length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
void* pointer;
- CHECK(slot.ToAlignedPointerSafe(main_thread_isolate(), &pointer));
+ CHECK(slot.ToAlignedPointer(main_thread_isolate(), &pointer));
CHECK_NULL(pointer);
}
}
@@ -837,14 +868,6 @@ int Deserializer<IsolateT>::ReadRepeatedObject(SlotAccessor slot_accessor,
namespace {
-void NoExternalReferencesCallback() {
- // The following check will trigger if a function or object template
- // with references to native functions have been deserialized from
- // snapshot, but no actual external references were provided when the
- // isolate was created.
- FATAL("No external references provided via API");
-}
-
// Template used by the below CASE_RANGE macro to statically verify that the
// given number of cases matches the number of expected cases for that bytecode.
template <int byte_code_count, int expected>
@@ -996,11 +1019,12 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
case kSandboxedExternalReference:
case kExternalReference: {
Address address = ReadExternalReferenceCase();
- if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedExternalReference) {
- return WriteExternalPointer(slot_accessor.slot(), address,
- kForeignForeignAddressTag);
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
+ data == kSandboxedExternalReference) {
+ ExternalPointerTag tag = ReadExternalPointerTag();
+ return WriteExternalPointer(slot_accessor.slot(), address, tag);
} else {
- DCHECK(!V8_HEAP_SANDBOX_BOOL);
+ DCHECK(!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
return WriteAddress(slot_accessor.slot(), address);
}
}
@@ -1117,7 +1141,7 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
Code code = Code::cast(*slot_accessor.object());
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- code.set_main_cage_base(isolate()->cage_base());
+ code.set_main_cage_base(isolate()->cage_base(), kRelaxedStore);
}
DeserializerRelocInfoVisitor visitor(this, &preserialized_objects);
for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
@@ -1159,11 +1183,12 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
- if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedApiReference) {
- return WriteExternalPointer(slot_accessor.slot(), address,
- kForeignForeignAddressTag);
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
+ data == kSandboxedApiReference) {
+ ExternalPointerTag tag = ReadExternalPointerTag();
+ return WriteExternalPointer(slot_accessor.slot(), address, tag);
} else {
- DCHECK(!V8_HEAP_SANDBOX_BOOL);
+ DCHECK(!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
return WriteAddress(slot_accessor.slot(), address);
}
}
@@ -1254,6 +1279,13 @@ Address Deserializer<IsolateT>::ReadExternalReferenceCase() {
}
template <typename IsolateT>
+ExternalPointerTag Deserializer<IsolateT>::ReadExternalPointerTag() {
+ uint64_t shifted_tag = static_cast<uint64_t>(source_.GetInt());
+ return static_cast<ExternalPointerTag>(shifted_tag
+ << kExternalPointerTagShift);
+}
+
+template <typename IsolateT>
HeapObject Deserializer<IsolateT>::Allocate(AllocationType allocation, int size,
AllocationAlignment alignment) {
#ifdef DEBUG
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 9498925f17..24e79ed084 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -117,7 +117,7 @@ class Deserializer : public SerializerDeserializer {
}
bool deserializing_user_code() const { return deserializing_user_code_; }
- bool can_rehash() const { return can_rehash_; }
+ bool should_rehash() const { return should_rehash_; }
void Rehash();
@@ -184,6 +184,9 @@ class Deserializer : public SerializerDeserializer {
// A helper function for ReadData for reading external references.
inline Address ReadExternalReferenceCase();
+ // A helper function for reading external pointer tags.
+ ExternalPointerTag ReadExternalPointerTag();
+
Handle<HeapObject> ReadObject(SnapshotSpace space_number);
Handle<HeapObject> ReadMetaMap();
@@ -195,6 +198,9 @@ class Deserializer : public SerializerDeserializer {
// Special handling for serialized code like hooking up internalized strings.
void PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
SnapshotSpace space);
+ void PostProcessNewJSReceiver(Map map, Handle<JSReceiver> obj,
+ JSReceiver raw_obj, InstanceType instance_type,
+ SnapshotSpace space);
HeapObject Allocate(AllocationType allocation, int size,
AllocationAlignment alignment);
@@ -245,7 +251,7 @@ class Deserializer : public SerializerDeserializer {
bool next_reference_is_weak_ = false;
// TODO(6593): generalize rehashing, and remove this flag.
- bool can_rehash_;
+ const bool should_rehash_;
std::vector<Handle<HeapObject>> to_rehash_;
#ifdef DEBUG
@@ -280,15 +286,15 @@ class StringTableInsertionKey final : public StringTableKey {
template <typename IsolateT>
bool IsMatch(IsolateT* isolate, String string);
- V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) {
+ void PrepareForInsertion(Isolate* isolate) {
// When sharing the string table, all string table lookups during snapshot
// deserialization are hits.
DCHECK(isolate->OwnsStringTable() ||
deserializing_user_code_ ==
DeserializingUserCodeOption::kIsDeserializingUserCode);
- return string_;
}
- V8_WARN_UNUSED_RESULT Handle<String> AsHandle(LocalIsolate* isolate) {
+ void PrepareForInsertion(LocalIsolate* isolate) {}
+ V8_WARN_UNUSED_RESULT Handle<String> GetHandleForInsertion() {
return string_;
}
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 3b3653676a..6d67c9d311 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -175,8 +175,8 @@ void OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
- CHECK(FreePages(page_allocator, code, RoundUp(code_size, page_size)));
- CHECK(FreePages(page_allocator, data, RoundUp(data_size, page_size)));
+ FreePages(page_allocator, code, RoundUp(code_size, page_size));
+ FreePages(page_allocator, data, RoundUp(data_size, page_size));
}
namespace {
@@ -221,7 +221,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = isolate->builtins()->code(builtin);
+ Code code = FromCodeT(isolate->builtins()->code(builtin));
RelocIterator on_heap_it(code, kRelocMask);
RelocIterator off_heap_it(blob, code, kRelocMask);
@@ -275,7 +275,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
// Sanity-check that the given builtin is isolate-independent and does not
// use the trampoline register in its calling convention.
@@ -343,7 +343,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
uint32_t offset =
layout_descriptions[static_cast<int>(builtin)].metadata_offset;
uint8_t* dst = raw_metadata_start + offset;
@@ -358,7 +358,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
uint32_t offset =
layout_descriptions[static_cast<int>(builtin)].instruction_offset;
uint8_t* dst = raw_code_start + offset;
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 5adc3e0115..1a906c5de7 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -291,7 +291,7 @@ void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
// Retrieve the SourcePositionTable and copy it.
- Code code = builtins->code(builtin);
+ Code code = FromCodeT(builtins->code(builtin));
// Verify that the code object is still the "real code" and not a
// trampoline (which wouldn't have source positions).
DCHECK(!code.is_off_heap_trampoline());
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
index 447d2e9bac..7e0ec70ac4 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
@@ -116,7 +116,7 @@ EmbeddedTargetArch ToEmbeddedTargetArch(const char* s) {
EmbeddedTargetOs DefaultEmbeddedTargetOs() {
#if defined(V8_OS_AIX)
return EmbeddedTargetOs::kAIX;
-#elif defined(V8_OS_MACOSX)
+#elif defined(V8_OS_DARWIN)
return EmbeddedTargetOs::kMac;
#elif defined(V8_OS_WIN)
return EmbeddedTargetOs::kWin;
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 5687172e60..6e2faff280 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -240,9 +240,9 @@ int main(int argc, char** argv) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
v8::V8::Initialize();
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index c8a6651eb7..9c2de5caca 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -49,7 +49,7 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
CheckNoArrayBufferBackingStores();
}
- if (FLAG_rehash_snapshot && can_rehash()) {
+ if (should_rehash()) {
isolate()->heap()->InitializeHashSeed();
Rehash();
}
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 029f24c300..35a62aa515 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -74,7 +74,7 @@ void ReadOnlySerializer::SerializeReadOnlyRoots() {
ReadOnlyRoots(isolate()).Iterate(this);
- if (reconstruct_read_only_object_cache_for_testing()) {
+ if (reconstruct_read_only_and_shared_object_caches_for_testing()) {
ReconstructReadOnlyObjectCacheForTesting();
}
}
diff --git a/deps/v8/src/snapshot/serializer-deserializer.cc b/deps/v8/src/snapshot/serializer-deserializer.cc
index d2562ca768..d32de12ec0 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.cc
+++ b/deps/v8/src/snapshot/serializer-deserializer.cc
@@ -58,16 +58,18 @@ bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
}
void SerializerDeserializer::RestoreExternalReferenceRedirector(
- Isolate* isolate, Handle<AccessorInfo> accessor_info) {
+ Isolate* isolate, AccessorInfo accessor_info) {
+ DisallowGarbageCollection no_gc;
// Restore wiped accessor infos.
- Foreign::cast(accessor_info->js_getter())
- .set_foreign_address(isolate, accessor_info->redirected_getter());
+ Foreign::cast(accessor_info.js_getter())
+ .set_foreign_address(isolate, accessor_info.redirected_getter());
}
void SerializerDeserializer::RestoreExternalReferenceRedirector(
- Isolate* isolate, Handle<CallHandlerInfo> call_handler_info) {
- Foreign::cast(call_handler_info->js_callback())
- .set_foreign_address(isolate, call_handler_info->redirected_callback());
+ Isolate* isolate, CallHandlerInfo call_handler_info) {
+ DisallowGarbageCollection no_gc;
+ Foreign::cast(call_handler_info.js_callback())
+ .set_foreign_address(isolate, call_handler_info.redirected_callback());
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index 5bc23cc7bf..626a102704 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -29,9 +29,9 @@ class SerializerDeserializer : public RootVisitor {
static bool CanBeDeferred(HeapObject o);
void RestoreExternalReferenceRedirector(Isolate* isolate,
- Handle<AccessorInfo> accessor_info);
- void RestoreExternalReferenceRedirector(
- Isolate* isolate, Handle<CallHandlerInfo> call_handler_info);
+ AccessorInfo accessor_info);
+ void RestoreExternalReferenceRedirector(Isolate* isolate,
+ CallHandlerInfo call_handler_info);
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 2ae6fc17b1..dbc8be27b1 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -559,13 +559,13 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
&reference)) {
DCHECK(reference.is_from_api());
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
uint32_t external_pointer_entry =
string->GetResourceRefForDeserialization();
#endif
string->SetResourceRefForSerialization(reference.index());
SerializeObject();
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
string->SetResourceRefForSerialization(external_pointer_entry);
#else
string->set_address_as_resource(isolate(), resource);
@@ -919,10 +919,10 @@ void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
bytes_processed_so_far_ += kTaggedSize;
}
-void Serializer::ObjectSerializer::OutputExternalReference(Address target,
- int target_size,
- bool sandboxify) {
+void Serializer::ObjectSerializer::OutputExternalReference(
+ Address target, int target_size, bool sandboxify, ExternalPointerTag tag) {
DCHECK_LE(target_size, sizeof(target)); // Must fit in Address.
+ DCHECK_IMPLIES(sandboxify, tag != kExternalPointerNullTag);
ExternalReferenceEncoder::Value encoded_reference;
bool encoded_successfully;
@@ -946,26 +946,31 @@ void Serializer::ObjectSerializer::OutputExternalReference(Address target,
sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
sink_->PutRaw(reinterpret_cast<byte*>(&target), target_size, "Bytes");
} else if (encoded_reference.is_from_api()) {
- if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && sandboxify) {
sink_->Put(kSandboxedApiReference, "SandboxedApiRef");
} else {
sink_->Put(kApiReference, "ApiRef");
}
sink_->PutInt(encoded_reference.index(), "reference index");
} else {
- if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && sandboxify) {
sink_->Put(kSandboxedExternalReference, "SandboxedExternalRef");
} else {
sink_->Put(kExternalReference, "ExternalRef");
}
sink_->PutInt(encoded_reference.index(), "reference index");
}
+ if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && sandboxify) {
+ sink_->PutInt(static_cast<uint32_t>(tag >> kExternalPointerTagShift),
+ "external pointer tag");
+ }
}
void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
Address* p) {
// "Sandboxify" external reference.
- OutputExternalReference(host.foreign_address(), kExternalPointerSize, true);
+ OutputExternalReference(host.foreign_address(), kSystemPointerSize, true,
+ kForeignForeignAddressTag);
bytes_processed_so_far_ += kExternalPointerSize;
}
@@ -1019,7 +1024,33 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
DCHECK_IMPLIES(serializer_->EncodeExternalReference(target).is_from_api(),
!rinfo->IsCodedSpecially());
// Don't "sandboxify" external references embedded in the code.
- OutputExternalReference(target, rinfo->target_address_size(), false);
+ OutputExternalReference(target, rinfo->target_address_size(), false,
+ kExternalPointerNullTag);
+}
+
+void Serializer::ObjectSerializer::VisitExternalPointer(HeapObject host,
+ ExternalPointer_t ptr) {
+ // TODO(v8:12700) handle other external references here as well. This should
+ // allow removing some of the other Visit* methods, should unify the sandbox
+ // vs no-sandbox implementation, and should allow removing various
+ // XYZForSerialization methods throughout the codebase.
+ if (host.IsJSExternalObject()) {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ // TODO(saelo) maybe add a helper method for this conversion if also needed
+ // in other places? This might require a ExternalPointerTable::Get variant
+ // that drops the pointer tag completely.
+ uint32_t index = ptr >> kExternalPointerIndexShift;
+ Address value =
+ isolate()->external_pointer_table().Get(index, kExternalObjectValueTag);
+#else
+ Address value = ptr;
+#endif
+ // TODO(v8:12700) should we specify here whether we expect the references to
+ // be internal or external (or either)?
+ OutputExternalReference(value, kSystemPointerSize, true,
+ kExternalObjectValueTag);
+ bytes_processed_so_far_ += kExternalPointerSize;
+ }
}
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
@@ -1133,13 +1164,16 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
sizeof(field_value), field_value);
} else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
object_->IsCodeDataContainer(cage_base)) {
- // The CodeEntryPoint field is just a cached value which will be
- // recomputed after deserialization, so write zeros to keep the snapshot
- // deterministic.
- static byte field_value[kExternalPointerSize] = {0};
- OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
- CodeDataContainer::kCodeEntryPointOffset,
- sizeof(field_value), field_value);
+ // code_cage_base and code_entry_point fields contain raw values that
+ // will be recomputed after deserialization, so write zeros to keep the
+ // snapshot deterministic.
+ CHECK_EQ(CodeDataContainer::kCodeCageBaseUpper32BitsOffset + kTaggedSize,
+ CodeDataContainer::kCodeEntryPointOffset);
+ static byte field_value[kTaggedSize + kExternalPointerSize] = {0};
+ OutputRawWithCustomField(
+ sink_, object_start, base, bytes_to_output,
+ CodeDataContainer::kCodeCageBaseUpper32BitsOffset,
+ sizeof(field_value), field_value);
} else {
sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
bytes_to_output, "Bytes");
@@ -1252,5 +1286,21 @@ Serializer::HotObjectsList::~HotObjectsList() {
heap_->UnregisterStrongRoots(strong_roots_entry_);
}
+Handle<FixedArray> ObjectCacheIndexMap::Values(Isolate* isolate) {
+ if (size() == 0) {
+ return isolate->factory()->empty_fixed_array();
+ }
+ Handle<FixedArray> externals = isolate->factory()->NewFixedArray(size());
+ DisallowGarbageCollection no_gc;
+ FixedArray raw = *externals;
+ IdentityMap<int, base::DefaultAllocationPolicy>::IteratableScope it_scope(
+ &map_);
+ for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
+ raw.set(*it.entry(), it.key());
+ }
+
+ return externals;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index b049af5776..8aab2028cc 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -5,8 +5,6 @@
#ifndef V8_SNAPSHOT_SERIALIZER_H_
#define V8_SNAPSHOT_SERIALIZER_H_
-#include <map>
-
#include "src/codegen/external-reference-encoder.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
@@ -143,7 +141,7 @@ class ObjectCacheIndexMap {
// If |obj| is in the map, immediately return true. Otherwise add it to the
// map and return false. In either case set |*index_out| to the index
// associated with the map.
- bool LookupOrInsert(Handle<HeapObject> obj, int* index_out) {
+ bool LookupOrInsert(HeapObject obj, int* index_out) {
auto find_result = map_.FindOrInsert(obj);
if (!find_result.already_exists) {
*find_result.entry = next_index_++;
@@ -151,6 +149,9 @@ class ObjectCacheIndexMap {
*index_out = *find_result.entry;
return find_result.already_exists;
}
+ bool LookupOrInsert(Handle<HeapObject> obj, int* index_out) {
+ return LookupOrInsert(*obj, index_out);
+ }
bool Lookup(HeapObject obj, int* index_out) const {
int* index = map_.Find(obj);
@@ -161,6 +162,8 @@ class ObjectCacheIndexMap {
return true;
}
+ Handle<FixedArray> Values(Isolate* isolate);
+
int size() const { return next_index_; }
private:
@@ -311,8 +314,9 @@ class Serializer : public SerializerDeserializer {
return (flags_ & Snapshot::kAllowActiveIsolateForTesting) != 0;
}
- bool reconstruct_read_only_object_cache_for_testing() const {
- return (flags_ & Snapshot::kReconstructReadOnlyObjectCacheForTesting) != 0;
+ bool reconstruct_read_only_and_shared_object_caches_for_testing() const {
+ return (flags_ &
+ Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting) != 0;
}
private:
@@ -446,6 +450,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* target) override;
void VisitExternalReference(Foreign host, Address* p) override;
void VisitExternalReference(Code host, RelocInfo* rinfo) override;
+ void VisitExternalPointer(HeapObject host, ExternalPointer_t ptr) override;
void VisitInternalReference(Code host, RelocInfo* rinfo) override;
void VisitCodeTarget(Code host, RelocInfo* target) override;
void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
@@ -461,8 +466,8 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
// This function outputs or skips the raw data between the last pointer and
// up to the current position.
void SerializeContent(Map map, int size);
- void OutputExternalReference(Address target, int target_size,
- bool sandboxify);
+ void OutputExternalReference(Address target, int target_size, bool sandboxify,
+ ExternalPointerTag tag);
void OutputRawData(Address up_to);
void SerializeCode(Map map, int size);
uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
diff --git a/deps/v8/src/snapshot/shared-heap-deserializer.cc b/deps/v8/src/snapshot/shared-heap-deserializer.cc
index 22697ea9e8..811fde348c 100644
--- a/deps/v8/src/snapshot/shared-heap-deserializer.cc
+++ b/deps/v8/src/snapshot/shared-heap-deserializer.cc
@@ -23,7 +23,7 @@ void SharedHeapDeserializer::DeserializeIntoIsolate() {
DeserializeStringTable();
DeserializeDeferredObjects();
- if (FLAG_rehash_snapshot && can_rehash()) {
+ if (should_rehash()) {
// Hash seed was initialized in ReadOnlyDeserializer.
Rehash();
}
diff --git a/deps/v8/src/snapshot/shared-heap-serializer.cc b/deps/v8/src/snapshot/shared-heap-serializer.cc
index 512a11d34e..daacbc5e99 100644
--- a/deps/v8/src/snapshot/shared-heap-serializer.cc
+++ b/deps/v8/src/snapshot/shared-heap-serializer.cc
@@ -44,6 +44,9 @@ SharedHeapSerializer::SharedHeapSerializer(
serialized_objects_(isolate->heap())
#endif
{
+ if (ShouldReconstructSharedHeapObjectCacheForTesting()) {
+ ReconstructSharedHeapObjectCacheForTesting();
+ }
}
SharedHeapSerializer::~SharedHeapSerializer() {
@@ -65,10 +68,6 @@ void SharedHeapSerializer::FinalizeSerialization() {
Pad();
#ifdef DEBUG
- // During snapshotting there is no shared heap.
- CHECK(!isolate()->is_shared());
- CHECK_NULL(isolate()->shared_isolate());
-
// Check that all serialized object are in shared heap and not RO. RO objects
// should be in the RO snapshot.
IdentityMap<int, base::DefaultAllocationPolicy>::IteratableScope it_scope(
@@ -90,6 +89,26 @@ bool SharedHeapSerializer::SerializeUsingSharedHeapObjectCache(
SnapshotByteSink* sink, Handle<HeapObject> obj) {
if (!ShouldBeInSharedHeapObjectCache(*obj)) return false;
int cache_index = SerializeInObjectCache(obj);
+
+ // When testing deserialization of a snapshot from a live Isolate where there
+ // is also a shared Isolate, the shared object cache needs to be extended
+ // because the live isolate may have had new internalized strings that were
+ // not present in the startup snapshot to be serialized.
+ if (ShouldReconstructSharedHeapObjectCacheForTesting()) {
+ std::vector<Object>* existing_cache =
+ isolate()->shared_isolate()->shared_heap_object_cache();
+ const size_t existing_cache_size = existing_cache->size();
+ // This is strictly < because the existing cache contains the terminating
+ // undefined value, which the reconstructed cache does not.
+ DCHECK_LT(base::checked_cast<size_t>(cache_index), existing_cache_size);
+ if (base::checked_cast<size_t>(cache_index) == existing_cache_size - 1) {
+ ReadOnlyRoots roots(isolate());
+ DCHECK(existing_cache->back().IsUndefined(roots));
+ existing_cache->back() = *obj;
+ existing_cache->push_back(roots.undefined_value());
+ }
+ }
+
sink->Put(kSharedHeapObjectCache, "SharedHeapObjectCache");
sink->PutInt(cache_index, "shared_heap_object_cache_index");
return true;
@@ -170,5 +189,30 @@ void SharedHeapSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
#endif
}
+bool SharedHeapSerializer::ShouldReconstructSharedHeapObjectCacheForTesting()
+ const {
+ // When the live Isolate being serialized is not a client Isolate, there's no
+ // need to reconstruct the shared heap object cache because it is not actually
+ // shared.
+ return reconstruct_read_only_and_shared_object_caches_for_testing() &&
+ isolate()->shared_isolate() != nullptr;
+}
+
+void SharedHeapSerializer::ReconstructSharedHeapObjectCacheForTesting() {
+ std::vector<Object>* cache =
+ isolate()->shared_isolate()->shared_heap_object_cache();
+ // Don't reconstruct the final element, which is always undefined and marks
+ // the end of the cache, since serializing the live Isolate may extend the
+ // shared object cache.
+ for (size_t i = 0, size = cache->size(); i < size - 1; i++) {
+ Handle<HeapObject> obj(HeapObject::cast(cache->at(i)), isolate());
+ DCHECK(ShouldBeInSharedHeapObjectCache(*obj));
+ int cache_index = SerializeInObjectCache(obj);
+ USE(cache_index);
+ DCHECK_EQ(cache_index, i);
+ }
+ DCHECK(cache->back().IsUndefined(isolate()));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/shared-heap-serializer.h b/deps/v8/src/snapshot/shared-heap-serializer.h
index 8f4c46c29c..a9553a4060 100644
--- a/deps/v8/src/snapshot/shared-heap-serializer.h
+++ b/deps/v8/src/snapshot/shared-heap-serializer.h
@@ -48,6 +48,10 @@ class V8_EXPORT_PRIVATE SharedHeapSerializer : public RootsSerializer {
static bool ShouldBeInSharedHeapObjectCache(HeapObject obj);
private:
+ bool ShouldReconstructSharedHeapObjectCacheForTesting() const;
+
+ void ReconstructSharedHeapObjectCacheForTesting();
+
void SerializeStringTable(StringTable* string_table);
void SerializeObjectImpl(Handle<HeapObject> obj) override;
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index db1cb36087..4350d13777 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -11,7 +11,6 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/safepoint.h"
#include "src/init/bootstrapper.h"
-#include "src/logging/counters-scopes.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/code-kind.h"
#include "src/objects/js-regexp-inl.h"
@@ -72,7 +71,7 @@ class SnapshotImpl : public AllStatic {
// [0] number of contexts N
// [1] rehashability
// [2] checksum
- // [3] (128 bytes) version string
+ // [3] (64 bytes) version string
// [4] offset to readonly
// [5] offset to shared heap
// [6] offset to context 0
@@ -176,20 +175,11 @@ bool Snapshot::Initialize(Isolate* isolate) {
base::Vector<const byte> shared_heap_data =
SnapshotImpl::ExtractSharedHeapData(blob);
-#ifdef V8_SNAPSHOT_COMPRESSION
- base::Optional<NestedTimedHistogramScope> decompress_histogram;
- if (base::TimeTicks::IsHighResolution()) {
- decompress_histogram.emplace(isolate->counters()->snapshot_decompress());
- }
-#endif
SnapshotData startup_snapshot_data(MaybeDecompress(isolate, startup_data));
SnapshotData read_only_snapshot_data(
MaybeDecompress(isolate, read_only_data));
SnapshotData shared_heap_snapshot_data(
MaybeDecompress(isolate, shared_heap_data));
-#ifdef V8_SNAPSHOT_COMPRESSION
- decompress_histogram.reset();
-#endif
bool success = isolate->InitWithSnapshot(
&startup_snapshot_data, &read_only_snapshot_data,
@@ -215,20 +205,10 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
bool can_rehash = ExtractRehashability(blob);
base::Vector<const byte> context_data = SnapshotImpl::ExtractContextData(
blob, static_cast<uint32_t>(context_index));
- base::Optional<SnapshotData> snapshot_data;
- {
-#ifdef V8_SNAPSHOT_COMPRESSION
- base::Optional<NestedTimedHistogramScope> decompress_histogram;
- if (base::TimeTicks::IsHighResolution()) {
- decompress_histogram.emplace(
- isolate->counters()->context_snapshot_decompress());
- }
-#endif
- snapshot_data.emplace(MaybeDecompress(isolate, context_data));
- }
+ SnapshotData snapshot_data(MaybeDecompress(isolate, context_data));
MaybeHandle<Context> maybe_result = ContextDeserializer::DeserializeContext(
- isolate, &(*snapshot_data), can_rehash, global_proxy,
+ isolate, &snapshot_data, can_rehash, global_proxy,
embedder_fields_deserializer);
Handle<Context> result;
@@ -333,13 +313,17 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
// Test serialization.
{
GlobalSafepointScope global_safepoint(isolate);
+ base::Optional<SafepointScope> shared_isolate_safepoint_scope;
+ if (Isolate* shared_isolate = isolate->shared_isolate()) {
+ shared_isolate_safepoint_scope.emplace(shared_isolate->heap());
+ }
DisallowGarbageCollection no_gc;
Snapshot::SerializerFlags flags(
Snapshot::kAllowUnknownExternalReferencesForTesting |
Snapshot::kAllowActiveIsolateForTesting |
- (ReadOnlyHeap::IsReadOnlySpaceShared()
- ? Snapshot::kReconstructReadOnlyObjectCacheForTesting
+ ((isolate->shared_isolate() || ReadOnlyHeap::IsReadOnlySpaceShared())
+ ? Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting
: 0));
serialized_data = Snapshot::Create(isolate, *default_context,
global_safepoint, no_gc, flags);
@@ -357,6 +341,9 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
new_isolate->set_snapshot_blob(&serialized_data);
new_isolate->set_array_buffer_allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+ if (Isolate* shared_isolate = isolate->shared_isolate()) {
+ new_isolate->set_shared_isolate(shared_isolate);
+ }
CHECK(Snapshot::Initialize(new_isolate));
HandleScope scope(new_isolate);
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index bfa03fd478..b25d6d227a 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -5,6 +5,8 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
+#include <vector>
+
#include "include/v8-snapshot.h" // For StartupData.
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
@@ -37,15 +39,17 @@ class Snapshot : public AllStatic {
// after deserialization.
// If unset, we assert that these previously mentioned areas are empty.
kAllowActiveIsolateForTesting = 1 << 1,
- // If set, the ReadOnlySerializer reconstructs the read-only object cache
- // from the existing ReadOnlyHeap's read-only object cache so the same
+ // If set, the ReadOnlySerializer and the SharedHeapSerializer reconstructs
+ // their respective object caches from the existing ReadOnlyHeap's read-only
+ // object cache or the existing shared heap's object cache so the same
// mapping is used. This mode is used for testing deserialization of a
- // snapshot from a live isolate that's using a shared
- // ReadOnlyHeap. Otherwise during deserialization the indices will mismatch,
- // causing deserialization crashes when e.g. types mismatch.
- // If unset, the read-only object cache is populated as read-only objects
- // are serialized.
- kReconstructReadOnlyObjectCacheForTesting = 1 << 2,
+ // snapshot from a live isolate that's using a shared ReadOnlyHeap or is
+ // attached to a shared isolate. Otherwise during deserialization the
+ // indices will mismatch, causing deserialization crashes when e.g. types
+ // mismatch. If unset, the read-only object cache is populated as read-only
+ // objects are serialized, and the shared heap object cache is populated as
+ // shared heap objects are serialized.
+ kReconstructReadOnlyAndSharedObjectCachesForTesting = 1 << 2,
};
using SerializerFlags = base::Flags<SerializerFlag>;
V8_EXPORT_PRIVATE static constexpr SerializerFlags kDefaultSerializerFlags =
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 82f2ef51cf..ddd3aa3871 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -37,10 +37,10 @@ void StartupDeserializer::DeserializeIntoIsolate() {
this, base::EnumSet<SkipRoot>{SkipRoot::kUnserializable});
DeserializeDeferredObjects();
for (Handle<AccessorInfo> info : accessor_infos()) {
- RestoreExternalReferenceRedirector(isolate(), info);
+ RestoreExternalReferenceRedirector(isolate(), *info);
}
for (Handle<CallHandlerInfo> info : call_handler_infos()) {
- RestoreExternalReferenceRedirector(isolate(), info);
+ RestoreExternalReferenceRedirector(isolate(), *info);
}
// Flush the instruction cache for the entire code-space. Must happen after
@@ -68,7 +68,7 @@ void StartupDeserializer::DeserializeIntoIsolate() {
LogNewMapEvents();
WeakenDescriptorArrays();
- if (FLAG_rehash_snapshot && can_rehash()) {
+ if (should_rehash()) {
// Hash seed was initialized in ReadOnlyDeserializer.
Rehash();
}
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index bb09d21208..ad80d45fe3 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -77,10 +77,10 @@ StartupSerializer::StartupSerializer(
StartupSerializer::~StartupSerializer() {
for (Handle<AccessorInfo> info : accessor_infos_) {
- RestoreExternalReferenceRedirector(isolate(), info);
+ RestoreExternalReferenceRedirector(isolate(), *info);
}
for (Handle<CallHandlerInfo> info : call_handler_infos_) {
- RestoreExternalReferenceRedirector(isolate(), info);
+ RestoreExternalReferenceRedirector(isolate(), *info);
}
OutputStatistics("StartupSerializer");
}
@@ -132,7 +132,6 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
} // namespace
#endif // DEBUG
-
void StartupSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
PtrComprCageBase cage_base(isolate());
#ifdef DEBUG
diff --git a/deps/v8/src/strings/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index 5194b4513c..60cb3333ec 100644
--- a/deps/v8/src/strings/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -297,6 +297,8 @@ class IncrementalStringBuilder {
IncrementalStringBuilder* builder_;
};
+ Isolate* isolate() { return isolate_; }
+
private:
Factory* factory() { return isolate_->factory(); }
diff --git a/deps/v8/src/strings/string-hasher-inl.h b/deps/v8/src/strings/string-hasher-inl.h
index 0d943b783d..07fbac7be6 100644
--- a/deps/v8/src/strings/string-hasher-inl.h
+++ b/deps/v8/src/strings/string-hasher-inl.h
@@ -30,20 +30,21 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
running_hash += (running_hash << 3);
running_hash ^= (running_hash >> 11);
running_hash += (running_hash << 15);
- int32_t hash = static_cast<int32_t>(running_hash & String::kHashBitMask);
+ int32_t hash = static_cast<int32_t>(running_hash & String::HashBits::kMax);
+ // Ensure that the hash is kZeroHash, if the computed value is 0.
int32_t mask = (hash - 1) >> 31;
- return running_hash | (kZeroHash & mask);
+ running_hash |= (kZeroHash & mask);
+ return running_hash;
}
uint32_t StringHasher::GetTrivialHash(int length) {
DCHECK_GT(length, String::kMaxHashCalcLength);
// The hash of a large string is simply computed from the length.
- // Ensure that the max length is small enough to be shifted without losing
+ // Ensure that the max length is small enough to be encoded without losing
// information.
- STATIC_ASSERT(base::bits::CountLeadingZeros32(String::kMaxLength) >=
- String::kHashShift);
+ STATIC_ASSERT(String::kMaxLength <= String::HashBits::kMax);
uint32_t hash = static_cast<uint32_t>(length);
- return (hash << String::kHashShift) | String::kIsNotIntegerIndexMask;
+ return String::CreateHashFieldValue(hash, String::HashFieldType::kHash);
}
template <typename char_t>
@@ -79,19 +80,19 @@ uint32_t StringHasher::HashSequentialString(const char_t* chars_raw, int length,
// Not an array index, but it could still be an integer index.
// Perform a regular hash computation, and additionally check
// if there are non-digit characters.
- uint32_t is_integer_index = 0;
+ String::HashFieldType type = String::HashFieldType::kIntegerIndex;
uint32_t running_hash = static_cast<uint32_t>(seed);
uint64_t index_big = 0;
const uchar* end = &chars[length];
while (chars != end) {
- if (is_integer_index == 0 &&
+ if (type == String::HashFieldType::kIntegerIndex &&
!TryAddIntegerIndexChar(&index_big, *chars)) {
- is_integer_index = String::kIsNotIntegerIndexMask;
+ type = String::HashFieldType::kHash;
}
running_hash = AddCharacterCore(running_hash, *chars++);
}
- uint32_t hash = (GetHashCore(running_hash) << String::kHashShift) |
- is_integer_index;
+ uint32_t hash =
+ String::CreateHashFieldValue(GetHashCore(running_hash), type);
if (Name::ContainsCachedArrayIndex(hash)) {
// The hash accidentally looks like a cached index. Fix that by
// setting a bit that looks like a longer-than-cacheable string
@@ -118,8 +119,8 @@ uint32_t StringHasher::HashSequentialString(const char_t* chars_raw, int length,
running_hash = AddCharacterCore(running_hash, *chars++);
}
- return (GetHashCore(running_hash) << String::kHashShift) |
- String::kIsNotIntegerIndexMask;
+ return String::CreateHashFieldValue(GetHashCore(running_hash),
+ String::HashFieldType::kHash);
}
std::size_t SeededStringHasher::operator()(const char* name) const {
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index b918e3c36d..9e079dff67 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -408,7 +408,7 @@ void StringStream::PrintSecurityTokenIfChanged(JSFunction fun) {
void StringStream::PrintFunction(JSFunction fun, Object receiver, Code* code) {
PrintPrototype(fun, receiver);
- *code = fun.code();
+ *code = FromCodeT(fun.code());
}
void StringStream::PrintPrototype(JSFunction fun, Object receiver) {
diff --git a/deps/v8/src/strings/uri.cc b/deps/v8/src/strings/uri.cc
index 460ca586b9..b1602bdc91 100644
--- a/deps/v8/src/strings/uri.cc
+++ b/deps/v8/src/strings/uri.cc
@@ -285,6 +285,7 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
std::vector<uint8_t> buffer;
buffer.reserve(uri_length);
+ bool throw_error = false;
{
DisallowGarbageCollection no_gc;
String::FlatContent uri_content = uri->GetFlatContent(no_gc);
@@ -310,11 +311,15 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
continue;
}
- AllowGarbageCollection allocate_error_and_return;
- THROW_NEW_ERROR(isolate, NewURIError(), String);
+ // String::FlatContent DCHECKs its contents did not change during its
+ // lifetime. Throwing the error inside the loop may cause GC and move the
+ // string contents.
+ throw_error = true;
+ break;
}
}
+ if (throw_error) THROW_NEW_ERROR(isolate, NewURIError(), String);
return isolate->factory()->NewStringFromOneByte(base::VectorOf(buffer));
}
diff --git a/deps/v8/src/temporal/temporal-parser.cc b/deps/v8/src/temporal/temporal-parser.cc
index a4468b05d4..0675b7721a 100644
--- a/deps/v8/src/temporal/temporal-parser.cc
+++ b/deps/v8/src/temporal/temporal-parser.cc
@@ -1187,20 +1187,22 @@ SATISIFY(TemporalDurationString, ParsedISO8601Duration)
} // namespace
#define IMPL_PARSE_METHOD(R, NAME) \
- Maybe<R> TemporalParser::Parse##NAME( \
- Isolate* isolate, Handle<String> iso_string, bool* valid) { \
+ Maybe<R> TemporalParser::Parse##NAME(Isolate* isolate, \
+ Handle<String> iso_string) { \
+ bool valid; \
R parsed; \
iso_string = String::Flatten(isolate, iso_string); \
{ \
DisallowGarbageCollection no_gc; \
String::FlatContent str_content = iso_string->GetFlatContent(no_gc); \
if (str_content.IsOneByte()) { \
- *valid = Satisfy##NAME(str_content.ToOneByteVector(), &parsed); \
+ valid = Satisfy##NAME(str_content.ToOneByteVector(), &parsed); \
} else { \
- *valid = Satisfy##NAME(str_content.ToUC16Vector(), &parsed); \
+ valid = Satisfy##NAME(str_content.ToUC16Vector(), &parsed); \
} \
} \
- return Just(parsed); \
+ if (valid) return Just(parsed); \
+ return Nothing<R>(); \
}
IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalDateTimeString)
diff --git a/deps/v8/src/temporal/temporal-parser.h b/deps/v8/src/temporal/temporal-parser.h
index d3bc43a5a7..ad8254cc52 100644
--- a/deps/v8/src/temporal/temporal-parser.h
+++ b/deps/v8/src/temporal/temporal-parser.h
@@ -123,9 +123,9 @@ struct ParsedISO8601Duration {
*/
class V8_EXPORT_PRIVATE TemporalParser {
public:
-#define DEFINE_PARSE_METHOD(R, NAME) \
- V8_WARN_UNUSED_RESULT static Maybe<R> Parse##NAME( \
- Isolate* isolate, Handle<String> iso_string, bool* satisfy)
+#define DEFINE_PARSE_METHOD(R, NAME) \
+ V8_WARN_UNUSED_RESULT static Maybe<R> Parse##NAME(Isolate* isolate, \
+ Handle<String> iso_string)
DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalDateString);
DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalDateTimeString);
DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalTimeString);
diff --git a/deps/v8/src/torque/OWNERS b/deps/v8/src/torque/OWNERS
index 11e743943f..bc5786b075 100644
--- a/deps/v8/src/torque/OWNERS
+++ b/deps/v8/src/torque/OWNERS
@@ -1,2 +1,2 @@
-mvstanton@chromium.org
nicohartmann@chromium.org
+tebbi@chromium.org
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index a4ccefb304..d80993e38f 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -14,6 +14,7 @@
#include <vector>
#include "src/base/optional.h"
+#include "src/numbers/integer-literal.h"
#include "src/torque/constants.h"
#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
@@ -33,7 +34,8 @@ namespace torque {
V(ConditionalExpression) \
V(IdentifierExpression) \
V(StringLiteralExpression) \
- V(NumberLiteralExpression) \
+ V(IntegerLiteralExpression) \
+ V(FloatingPointLiteralExpression) \
V(FieldAccessExpression) \
V(ElementAccessExpression) \
V(DereferenceExpression) \
@@ -459,16 +461,28 @@ struct StringLiteralExpression : Expression {
std::string literal;
};
-struct NumberLiteralExpression : Expression {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
- NumberLiteralExpression(SourcePosition pos, double number)
- : Expression(kKind, pos), number(number) {}
+struct IntegerLiteralExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(IntegerLiteralExpression)
+ IntegerLiteralExpression(SourcePosition pos, IntegerLiteral value)
+ : Expression(kKind, pos), value(std::move(value)) {}
void VisitAllSubExpressions(VisitCallback callback) override {
callback(this);
}
- double number;
+ IntegerLiteral value;
+};
+
+struct FloatingPointLiteralExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(FloatingPointLiteralExpression)
+ FloatingPointLiteralExpression(SourcePosition pos, double value)
+ : Expression(kKind, pos), value(value) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
+ double value;
};
struct ElementAccessExpression : LocationExpression {
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 63cddf6e0a..325c6dea8f 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -67,6 +67,8 @@ static const char* const FLOAT64_OR_HOLE_TYPE_STRING = "float64_or_hole";
static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
+static const char* const INTEGER_LITERAL_TYPE_STRING =
+ "constexpr IntegerLiteral";
static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
static const char* const MUTABLE_REFERENCE_TYPE_STRING = "MutableReference";
static const char* const CONST_REFERENCE_TYPE_STRING = "ConstReference";
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index c91cbc0f18..b03d49fb34 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -5,12 +5,12 @@
#ifndef V8_TORQUE_DECLARATION_VISITOR_H_
#define V8_TORQUE_DECLARATION_VISITOR_H_
-#include <set>
#include <string>
#include "src/base/macros.h"
#include "src/torque/declarations.h"
#include "src/torque/global-context.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -35,10 +35,22 @@ class PredeclarationVisitor {
for (Declaration* child : decl->declarations) Predeclare(child);
}
static void Predeclare(TypeDeclaration* decl) {
- Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ TypeAlias* alias =
+ Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ alias->SetPosition(decl->pos);
+ alias->SetIdentifierPosition(decl->name->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddTypeDefinition(alias);
+ }
}
static void Predeclare(StructDeclaration* decl) {
- Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ TypeAlias* alias =
+ Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ alias->SetPosition(decl->pos);
+ alias->SetIdentifierPosition(decl->name->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddTypeDefinition(alias);
+ }
}
static void Predeclare(GenericTypeDeclaration* generic_decl) {
Declarations::DeclareGenericType(generic_decl->declaration->name->value,
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index ed92004c92..6d349cb8a2 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -168,9 +168,9 @@ TypeAlias* Declarations::DeclareType(const Identifier* name, const Type* type) {
new TypeAlias(type, true, name->pos)));
}
-const TypeAlias* Declarations::PredeclareTypeAlias(const Identifier* name,
- TypeDeclaration* type,
- bool redeclaration) {
+TypeAlias* Declarations::PredeclareTypeAlias(const Identifier* name,
+ TypeDeclaration* type,
+ bool redeclaration) {
CheckAlreadyDeclared<TypeAlias>(name->value, "type");
std::unique_ptr<TypeAlias> alias_ptr(
new TypeAlias(type, redeclaration, name->pos));
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 739c021fec..3bd50f6e48 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -93,9 +93,9 @@ class Declarations {
static Namespace* DeclareNamespace(const std::string& name);
static TypeAlias* DeclareType(const Identifier* name, const Type* type);
- static const TypeAlias* PredeclareTypeAlias(const Identifier* name,
- TypeDeclaration* type,
- bool redeclaration);
+ static TypeAlias* PredeclareTypeAlias(const Identifier* name,
+ TypeDeclaration* type,
+ bool redeclaration);
static TorqueMacro* CreateTorqueMacro(std::string external_name,
std::string readable_name,
bool exported_to_csa,
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index f99424b1a6..adb7e77153 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -127,6 +127,7 @@ LexerResult Lexer::RunLexer(const std::string& input) {
while (pos != end) {
token_start = pos;
Symbol* symbol = MatchToken(&pos, end);
+ DCHECK_IMPLIES(symbol != nullptr, pos != token_start);
InputPosition token_end = pos;
line_column_tracker.Advance(token_start, token_end);
if (!symbol) {
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index bca3cf5fb1..6be44a619b 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -44,6 +44,8 @@ enum class ParseResultHolderBase::TypeId {
kStdString,
kBool,
kInt32,
+ kDouble,
+ kIntegerLiteral,
kStdVectorOfString,
kExpressionPtr,
kIdentifierPtr,
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index b8ffb2905e..e882dd0f2c 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/numbers/integer-literal-inl.h"
#include "src/torque/cc-generator.h"
#include "src/torque/cfg.h"
#include "src/torque/constants.h"
@@ -182,6 +183,7 @@ void ImplementationVisitor::BeginDebugMacrosFile() {
header << "#ifndef " << kHeaderDefine << "\n";
header << "#define " << kHeaderDefine << "\n\n";
header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
+ header << "#include \"src/numbers/integer-literal.h\"\n";
header << "\n";
header << "namespace v8 {\n"
@@ -605,12 +607,11 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
"Int32T>(argc)));\n";
csa_ccfile() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- csa_ccfile() << " TorqueStructArguments "
- "torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length, (kJSArgcIncludesReceiver ? "
- "FrameArgumentsArgcType::kCountIncludesReceiver : "
- "FrameArgumentsArgcType::kCountExcludesReceiver)"
- << "));\n";
+ csa_ccfile()
+ << " TorqueStructArguments "
+ "torque_arguments(GetFrameArguments(arguments_frame, "
+ "arguments_length, FrameArgumentsArgcType::kCountIncludesReceiver"
+ << "));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
@@ -944,22 +945,20 @@ VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
return scope.Yield(assignment_value);
}
-VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
+VisitResult ImplementationVisitor::Visit(FloatingPointLiteralExpression* expr) {
const Type* result_type = TypeOracle::GetConstFloat64Type();
- if (expr->number >= std::numeric_limits<int32_t>::min() &&
- expr->number <= std::numeric_limits<int32_t>::max()) {
- int32_t i = static_cast<int32_t>(expr->number);
- if (i == expr->number) {
- if ((i >> 30) == (i >> 31)) {
- result_type = TypeOracle::GetConstInt31Type();
- } else {
- result_type = TypeOracle::GetConstInt32Type();
- }
- }
- }
std::stringstream str;
str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
- << expr->number;
+ << expr->value;
+ return VisitResult{result_type, str.str()};
+}
+
+VisitResult ImplementationVisitor::Visit(IntegerLiteralExpression* expr) {
+ const Type* result_type = TypeOracle::GetIntegerLiteralType();
+ std::stringstream str;
+ str << "IntegerLiteral("
+ << (expr->value.is_negative() ? "true, 0x" : "false, 0x") << std::hex
+ << expr->value.absolute_value() << std::dec << "ull)";
return VisitResult{result_type, str.str()};
}
@@ -2849,7 +2848,9 @@ VisitResult ImplementationVisitor::GenerateCall(
// If we're currently generating a C++ macro and it's calling another macro,
// then we need to make sure that we also generate C++ code for the called
// macro within the same -inl.inc file.
- if (output_type_ == OutputType::kCC && !inline_macro) {
+ if ((output_type_ == OutputType::kCC ||
+ output_type_ == OutputType::kCCDebug) &&
+ !inline_macro) {
if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
auto* streams = CurrentFileStreams::Get();
SourceId file = streams ? streams->file : SourceId::Invalid();
@@ -2863,14 +2864,32 @@ VisitResult ImplementationVisitor::GenerateCall(
std::stringstream result;
result << "(";
bool first = true;
- if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
- result << extern_macro->external_assembler_name() << "(state_)."
- << extern_macro->ExternalName() << "(";
- } else {
- result << macro->ExternalName() << "(state_";
- first = false;
+ switch (output_type_) {
+ case OutputType::kCSA: {
+ if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
+ result << extern_macro->external_assembler_name() << "(state_)."
+ << extern_macro->ExternalName() << "(";
+ } else {
+ result << macro->ExternalName() << "(state_";
+ first = false;
+ }
+ break;
+ }
+ case OutputType::kCC: {
+ auto* extern_macro = ExternMacro::DynamicCast(macro);
+ CHECK_NOT_NULL(extern_macro);
+ result << extern_macro->CCName() << "(";
+ break;
+ }
+ case OutputType::kCCDebug: {
+ auto* extern_macro = ExternMacro::DynamicCast(macro);
+ CHECK_NOT_NULL(extern_macro);
+ result << extern_macro->CCDebugName() << "(accessor";
+ first = false;
+ break;
+ }
}
- for (VisitResult arg : arguments.parameters) {
+ for (VisitResult arg : converted_arguments) {
DCHECK(!arg.IsOnStack());
if (!first) {
result << ", ";
@@ -4748,21 +4767,22 @@ void ImplementationVisitor::GenerateClassDefinitions(
factory_impl << ");\n";
factory_impl << " Map map = factory()->read_only_roots()."
<< SnakeifyString(type->name()) << "_map();";
- factory_impl << " HeapObject result =\n";
+ factory_impl << " HeapObject raw_object =\n";
factory_impl << " factory()->AllocateRawWithImmortalMap(size, "
"allocation_type, map);\n";
+ factory_impl << " " << type->UnhandlifiedCppTypeName()
+ << " result = " << type->UnhandlifiedCppTypeName()
+ << "::cast(raw_object);\n";
+ factory_impl << " DisallowGarbageCollection no_gc;";
factory_impl << " WriteBarrierMode write_barrier_mode =\n"
<< " allocation_type == AllocationType::kYoung\n"
<< " ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n"
<< " USE(write_barrier_mode);\n";
- factory_impl << " " << type->HandlifiedCppTypeName()
- << " result_handle(" << type->name()
- << "::cast(result), factory()->isolate());\n";
for (const Field& f : type->ComputeAllFields()) {
if (f.name_and_type.name == "map") continue;
if (!f.index) {
- factory_impl << " result_handle->TorqueGeneratedClass::set_"
+ factory_impl << " result.TorqueGeneratedClass::set_"
<< SnakeifyString(f.name_and_type.name) << "(";
if (f.name_and_type.type->IsSubtypeOf(
TypeOracle::GetTaggedType()) &&
@@ -4776,7 +4796,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
}
- factory_impl << " return result_handle;\n";
+ factory_impl << " return handle(result, factory()->isolate());\n";
factory_impl << "}\n\n";
factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 8ebb72cc2e..36c9ca452e 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -555,7 +555,8 @@ class ImplementationVisitor {
VisitResult Visit(IncrementDecrementExpression* expr);
VisitResult Visit(AssignmentExpression* expr);
VisitResult Visit(StringLiteralExpression* expr);
- VisitResult Visit(NumberLiteralExpression* expr);
+ VisitResult Visit(FloatingPointLiteralExpression* expr);
+ VisitResult Visit(IntegerLiteralExpression* expr);
VisitResult Visit(AssumeTypeImpossibleExpression* expr);
VisitResult Visit(TryLabelExpression* expr);
VisitResult Visit(StatementExpression* expr);
diff --git a/deps/v8/src/torque/kythe-data.h b/deps/v8/src/torque/kythe-data.h
index ba18841949..c335d484ed 100644
--- a/deps/v8/src/torque/kythe-data.h
+++ b/deps/v8/src/torque/kythe-data.h
@@ -5,8 +5,6 @@
#ifndef V8_TORQUE_KYTHE_DATA_H_
#define V8_TORQUE_KYTHE_DATA_H_
-#include <map>
-
#include "src/torque/ast.h"
#include "src/torque/contextual.h"
#include "src/torque/global-context.h"
diff --git a/deps/v8/src/torque/runtime-macro-shims.h b/deps/v8/src/torque/runtime-macro-shims.h
index 04b09a7334..fe20a4052d 100644
--- a/deps/v8/src/torque/runtime-macro-shims.h
+++ b/deps/v8/src/torque/runtime-macro-shims.h
@@ -10,6 +10,8 @@
#include <cstdint>
+#include "src/numbers/integer-literal.h"
+
namespace v8 {
namespace internal {
@@ -21,6 +23,8 @@ inline intptr_t ChangeInt32ToIntPtr(int32_t i) { return i; }
inline uintptr_t ChangeUint32ToWord(uint32_t u) { return u; }
inline intptr_t IntPtrAdd(intptr_t a, intptr_t b) { return a + b; }
inline intptr_t IntPtrMul(intptr_t a, intptr_t b) { return a * b; }
+inline bool IntPtrLessThan(intptr_t a, intptr_t b) { return a < b; }
+inline bool IntPtrLessThanOrEqual(intptr_t a, intptr_t b) { return a <= b; }
inline intptr_t Signed(uintptr_t u) { return static_cast<intptr_t>(u); }
template <typename Smi>
inline int32_t SmiUntag(Smi s) {
@@ -33,6 +37,15 @@ inline uintptr_t Unsigned(intptr_t s) { return static_cast<uintptr_t>(s); }
#endif
inline bool Word32Equal(uint32_t a, uint32_t b) { return a == b; }
inline bool Word32NotEqual(uint32_t a, uint32_t b) { return a != b; }
+inline int32_t ConstexprIntegerLiteralToInt32(const IntegerLiteral& i) {
+ return i.To<int32_t>();
+}
+inline int31_t ConstexprIntegerLiteralToInt31(const IntegerLiteral& i) {
+ return int31_t(ConstexprIntegerLiteralToInt32(i));
+}
+inline intptr_t ConstexprIntegerLiteralToIntptr(const IntegerLiteral& i) {
+ return i.To<intptr_t>();
+}
} // namespace CodeStubAssembler
} // namespace TorqueRuntimeMacroShims
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 7578fe7fff..5cbe07309b 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -48,6 +48,13 @@ class BuildFlags : public ContextualClass<BuildFlags> {
build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
build_flags_["V8_EXTERNAL_CODE_SPACE"] = V8_EXTERNAL_CODE_SPACE_BOOL;
build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
+ build_flags_["V8_ENABLE_SWISS_NAME_DICTIONARY"] =
+ V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL;
+#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
+ build_flags_["V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS"] = true;
+#else
+ build_flags_["V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS"] = false;
+#endif
build_flags_["TRUE_FOR_TESTING"] = true;
build_flags_["FALSE_FOR_TESTING"] = false;
#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
@@ -60,6 +67,7 @@ class BuildFlags : public ContextualClass<BuildFlags> {
#else
build_flags_["V8_ENABLE_WEBASSEMBLY"] = false;
#endif
+ build_flags_["DEBUG"] = DEBUG_BOOL;
}
static bool GetFlag(const std::string& name, const char* production) {
auto it = Get().build_flags_.find(name);
@@ -85,6 +93,12 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<int32_t>::id =
ParseResultTypeId::kInt32;
template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<double>::id =
+ ParseResultTypeId::kDouble;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<IntegerLiteral>::id = ParseResultTypeId::kIntegerLiteral;
+template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<std::string>>::id =
ParseResultTypeId::kStdVectorOfString;
@@ -268,6 +282,8 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
namespace {
+bool ProcessIfAnnotation(ParseResultIterator* child_results);
+
base::Optional<ParseResult> AddGlobalDeclarations(
ParseResultIterator* child_results) {
auto declarations = child_results->NextAs<std::vector<Declaration*>>();
@@ -695,9 +711,11 @@ base::Optional<ParseResult> MakeExternConstDeclaration(
base::Optional<ParseResult> MakeTypeAliasDeclaration(
ParseResultIterator* child_results) {
+ bool enabled = ProcessIfAnnotation(child_results);
auto name = child_results->NextAs<Identifier*>();
auto type = child_results->NextAs<TypeExpression*>();
- Declaration* result = MakeNode<TypeAliasDeclaration>(name, type);
+ std::vector<Declaration*> result = {};
+ if (enabled) result = {MakeNode<TypeAliasDeclaration>(name, type)};
return ParseResult{result};
}
@@ -840,8 +858,22 @@ class AnnotationSet {
std::map<std::string, std::pair<AnnotationParameter, SourcePosition>> map_;
};
-base::Optional<ParseResult> MakeInt32(ParseResultIterator* child_results) {
- std::string value = child_results->NextAs<std::string>();
+bool ProcessIfAnnotation(ParseResultIterator* child_results) {
+ AnnotationSet annotations(child_results, {},
+ {ANNOTATION_IF, ANNOTATION_IFNOT});
+ if (base::Optional<std::string> condition =
+ annotations.GetStringParam(ANNOTATION_IF)) {
+ if (!BuildFlags::GetFlag(*condition, ANNOTATION_IF)) return false;
+ }
+ if (base::Optional<std::string> condition =
+ annotations.GetStringParam(ANNOTATION_IFNOT)) {
+ if (BuildFlags::GetFlag(*condition, ANNOTATION_IFNOT)) return false;
+ }
+ return true;
+}
+
+base::Optional<ParseResult> YieldInt32(ParseResultIterator* child_results) {
+ std::string value = child_results->matched_input().ToString();
size_t num_chars_converted = 0;
int result = 0;
try {
@@ -858,6 +890,43 @@ base::Optional<ParseResult> MakeInt32(ParseResultIterator* child_results) {
return ParseResult{result};
}
+base::Optional<ParseResult> YieldDouble(ParseResultIterator* child_results) {
+ std::string value = child_results->matched_input().ToString();
+ size_t num_chars_converted = 0;
+ double result = 0;
+ try {
+ result = std::stod(value, &num_chars_converted);
+ } catch (const std::out_of_range&) {
+ Error("double literal out-of-range");
+ return ParseResult{result};
+ }
+ // Tokenizer shouldn't have included extra trailing characters.
+ DCHECK_EQ(num_chars_converted, value.size());
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> YieldIntegerLiteral(
+ ParseResultIterator* child_results) {
+ std::string value = child_results->matched_input().ToString();
+ // Consume a leading minus.
+ bool negative = false;
+ if (value.size() > 0 && value[0] == '-') {
+ negative = true;
+ value = value.substr(1);
+ }
+ uint64_t absolute_value;
+ try {
+ size_t parsed = 0;
+ absolute_value = std::stoull(value, &parsed, 0);
+ DCHECK_EQ(parsed, value.size());
+ } catch (const std::invalid_argument&) {
+ Error("integer literal could not be parsed").Throw();
+ } catch (const std::out_of_range&) {
+ Error("integer literal value out of range").Throw();
+ }
+ return ParseResult(IntegerLiteral(negative, absolute_value));
+}
+
base::Optional<ParseResult> MakeStringAnnotationParameter(
ParseResultIterator* child_results) {
std::string value = child_results->NextAs<std::string>();
@@ -1878,29 +1947,17 @@ base::Optional<ParseResult> MakeAssignmentExpression(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeNumberLiteralExpression(
+base::Optional<ParseResult> MakeFloatingPointLiteralExpression(
ParseResultIterator* child_results) {
- auto number = child_results->NextAs<std::string>();
- // TODO(turbofan): Support 64bit literals.
- // Meanwhile, we type it as constexpr float64 when out of int32 range.
- double value = 0;
- try {
-#if defined(V8_OS_SOLARIS)
- // stod() on Solaris does not currently support hex strings. Use strtol()
- // specifically for hex literals until stod() support is available.
- if (number.find("0x") == std::string::npos &&
- number.find("0X") == std::string::npos) {
- value = std::stod(number);
- } else {
- value = static_cast<double>(strtol(number.c_str(), nullptr, 0));
- }
-#else
- value = std::stod(number);
-#endif // !defined(V8_OS_SOLARIS)
- } catch (const std::out_of_range&) {
- Error("double literal out-of-range").Throw();
- }
- Expression* result = MakeNode<NumberLiteralExpression>(value);
+ auto value = child_results->NextAs<double>();
+ Expression* result = MakeNode<FloatingPointLiteralExpression>(value);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeIntegerLiteralExpression(
+ ParseResultIterator* child_results) {
+ auto value = child_results->NextAs<IntegerLiteral>();
+ Expression* result = MakeNode<IntegerLiteralExpression>(std::move(value));
return ParseResult{result};
}
@@ -2060,8 +2117,19 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
// Internally, an optional field is just an indexed field where the count
// is zero or one.
index = MakeNode<ConditionalExpression>(
- *index, MakeNode<NumberLiteralExpression>(1),
- MakeNode<NumberLiteralExpression>(0));
+ *index,
+ MakeCall(
+ MakeNode<Identifier>("FromConstexpr"),
+ {MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>("intptr"),
+ std::vector<TypeExpression*>{})},
+ {MakeNode<IntegerLiteralExpression>(IntegerLiteral(1))}, {}),
+ MakeCall(
+ MakeNode<Identifier>("FromConstexpr"),
+ {MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>("intptr"),
+ std::vector<TypeExpression*>{})},
+ {MakeNode<IntegerLiteralExpression>(IntegerLiteral(0))}, {}));
}
index_info = ClassFieldIndexInfo{*index, optional};
}
@@ -2180,12 +2248,24 @@ struct TorqueGrammar : Grammar {
return false;
}
- static bool MatchDecimalLiteral(InputPosition* pos) {
+ static bool MatchIntegerLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ bool found_digit = false;
+ MatchString("-", &current);
+ while (MatchChar(std::isdigit, &current)) found_digit = true;
+ if (found_digit) {
+ *pos = current;
+ return true;
+ }
+ return false;
+ }
+
+ static bool MatchFloatingPointLiteral(InputPosition* pos) {
InputPosition current = *pos;
bool found_digit = false;
MatchString("-", &current);
while (MatchChar(std::isdigit, &current)) found_digit = true;
- MatchString(".", &current);
+ if (!MatchString(".", &current)) return false;
while (MatchChar(std::isdigit, &current)) found_digit = true;
if (!found_digit) return false;
*pos = current;
@@ -2205,21 +2285,10 @@ struct TorqueGrammar : Grammar {
ParseResultIterator* child_results) {
std::vector<T> l = {};
if (!first) l = child_results->NextAs<std::vector<T>>();
- AnnotationSet annotations(child_results, {},
- {ANNOTATION_IF, ANNOTATION_IFNOT});
- bool skipped = false;
- if (base::Optional<std::string> condition =
- annotations.GetStringParam(ANNOTATION_IF)) {
- if (!BuildFlags::GetFlag(*condition, ANNOTATION_IF)) skipped = true;
- }
- if (base::Optional<std::string> condition =
- annotations.GetStringParam(ANNOTATION_IFNOT)) {
- if (BuildFlags::GetFlag(*condition, ANNOTATION_IFNOT)) skipped = true;
- }
+ bool enabled = ProcessIfAnnotation(child_results);
T x = child_results->NextAs<T>();
- if (skipped) return ParseResult{std::move(l)};
- l.push_back(std::move(x));
+ if (enabled) l.push_back(std::move(x));
return ParseResult{std::move(l)};
}
@@ -2270,13 +2339,18 @@ struct TorqueGrammar : Grammar {
// Result: std::string
Symbol externalString = {Rule({&stringLiteral}, StringLiteralUnquoteAction)};
- // Result: std::string
- Symbol decimalLiteral = {
- Rule({Pattern(MatchDecimalLiteral)}, YieldMatchedInput),
- Rule({Pattern(MatchHexLiteral)}, YieldMatchedInput)};
+ // Result: IntegerLiteral
+ Symbol integerLiteral = {
+ Rule({Pattern(MatchIntegerLiteral)}, YieldIntegerLiteral),
+ Rule({Pattern(MatchHexLiteral)}, YieldIntegerLiteral)};
+
+ // Result: double
+ Symbol floatingPointLiteral = {
+ Rule({Pattern(MatchFloatingPointLiteral)}, YieldDouble)};
// Result: int32_t
- Symbol int32Literal = {Rule({&decimalLiteral}, MakeInt32)};
+ Symbol int32Literal = {Rule({Pattern(MatchIntegerLiteral)}, YieldInt32),
+ Rule({Pattern(MatchHexLiteral)}, YieldInt32)};
// Result: AnnotationParameter
Symbol annotationParameter = {
@@ -2495,7 +2569,8 @@ struct TorqueGrammar : Grammar {
MakeReferenceFieldAccessExpression),
Rule({&primaryExpression, Token("["), expression, Token("]")},
MakeElementAccessExpression),
- Rule({&decimalLiteral}, MakeNumberLiteralExpression),
+ Rule({&integerLiteral}, MakeIntegerLiteralExpression),
+ Rule({&floatingPointLiteral}, MakeFloatingPointLiteralExpression),
Rule({&stringLiteral}, MakeStringLiteralExpression),
Rule({&simpleType, &initializerList}, MakeStructExpression),
Rule({&newExpression}),
@@ -2721,8 +2796,8 @@ struct TorqueGrammar : Grammar {
Sequence({Token("constexpr"), &externalString})),
Token(";")},
MakeAbstractTypeDeclaration),
- Rule({Token("type"), &name, Token("="), &type, Token(";")},
- AsSingletonVector<Declaration*, MakeTypeAliasDeclaration>()),
+ Rule({annotations, Token("type"), &name, Token("="), &type, Token(";")},
+ MakeTypeAliasDeclaration),
Rule({Token("intrinsic"), &intrinsicName,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListNoVararg, &returnType, &optionalBody},
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index e184bc0f72..7bcbd6a77b 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -315,6 +315,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONST_FLOAT64_TYPE_STRING);
}
+ static const Type* GetIntegerLiteralType() {
+ return Get().GetBuiltinType(INTEGER_LITERAL_TYPE_STRING);
+ }
+
static const Type* GetNeverType() {
return Get().GetBuiltinType(NEVER_TYPE_STRING);
}
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index aaae9e559c..7ca916f4ff 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -459,12 +459,12 @@ void TypeVisitor::VisitClassFieldsAndMethods(
field_size * ResidueClass::Unknown());
if (auto literal =
- NumberLiteralExpression::DynamicCast(field.index->expr)) {
- size_t value = static_cast<size_t>(literal->number);
- if (value != literal->number) {
- Error("non-integral array length").Position(field.pos);
+ IntegerLiteralExpression::DynamicCast(field.index->expr)) {
+ if (auto value = literal->value.TryTo<size_t>()) {
+ field_size *= *value;
+ } else {
+ Error("Not a valid field index").Position(field.pos);
}
- field_size *= value;
} else {
field_size *= ResidueClass::Unknown();
}
diff --git a/deps/v8/src/torque/type-visitor.h b/deps/v8/src/torque/type-visitor.h
index 205e842cc7..f183be3a7a 100644
--- a/deps/v8/src/torque/type-visitor.h
+++ b/deps/v8/src/torque/type-visitor.h
@@ -5,8 +5,6 @@
#ifndef V8_TORQUE_TYPE_VISITOR_H_
#define V8_TORQUE_TYPE_VISITOR_H_
-#include <string>
-
#include "src/torque/ast.h"
#include "src/torque/types.h"
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index c69986e407..9fa92d8723 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -850,7 +850,7 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
if (field.offset.has_value()) {
offset_expression =
- MakeNode<NumberLiteralExpression>(static_cast<double>(*field.offset));
+ MakeNode<IntegerLiteralExpression>(IntegerLiteral(*field.offset));
} else {
const Field* previous = GetFieldPreceding(field_index);
DCHECK_NOT_NULL(previous);
@@ -879,8 +879,8 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
std::tie(previous_element_size, std::ignore) =
*SizeOf(previous->name_and_type.type);
Expression* previous_element_size_expression =
- MakeNode<NumberLiteralExpression>(
- static_cast<double>(previous_element_size));
+ MakeNode<IntegerLiteralExpression>(
+ IntegerLiteral(previous_element_size));
// previous.length
Expression* previous_length_expression = MakeFieldAccessExpression(
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 45b7390f2b..c49753be21 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -6,7 +6,6 @@
#define V8_TORQUE_TYPES_H_
#include <algorithm>
-#include <map>
#include <set>
#include <string>
#include <vector>
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
index e2f03c201d..0150963a88 100644
--- a/deps/v8/src/tracing/trace-categories.h
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -42,6 +42,7 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.inspector")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")),
@@ -53,7 +54,9 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats")),
perfetto::Category::Group("v8,devtools.timeline"),
perfetto::Category::Group(TRACE_DISABLED_BY_DEFAULT("v8.turbofan") ","
- TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan")));
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan")),
+ perfetto::Category::Group(TRACE_DISABLED_BY_DEFAULT("v8.inspector") ","
+ TRACE_DISABLED_BY_DEFAULT("v8.stack_trace")));
// clang-format on
#endif // defined(V8_USE_PERFETTO)
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index 86e2fb8b8e..e4454c378f 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -29,7 +29,7 @@
#if defined(V8_OS_LINUX) || defined(V8_OS_FREEBSD)
#include <ucontext.h>
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
#include <sys/ucontext.h>
#endif
@@ -49,7 +49,7 @@ namespace trap_handler {
#if V8_OS_LINUX
#define CONTEXT_REG(reg, REG) &uc->uc_mcontext.gregs[REG_##REG]
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
#define CONTEXT_REG(reg, REG) &uc->uc_mcontext->__ss.__##reg
#elif V8_OS_FREEBSD
#define CONTEXT_REG(reg, REG) &uc->uc_mcontext.mc_##reg
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.h b/deps/v8/src/trap-handler/handler-inside-posix.h
index 27e46773bb..745e92d501 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.h
+++ b/deps/v8/src/trap-handler/handler-inside-posix.h
@@ -15,7 +15,7 @@ namespace trap_handler {
#if V8_OS_LINUX || V8_OS_FREEBSD
constexpr int kOobSignal = SIGSEGV;
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
constexpr int kOobSignal = SIGBUS;
#else
#error Posix trap handlers are only supported on Linux, MacOSX and FreeBSD.
diff --git a/deps/v8/src/trap-handler/handler-outside-simulator.cc b/deps/v8/src/trap-handler/handler-outside-simulator.cc
index d59debe625..179eab0659 100644
--- a/deps/v8/src/trap-handler/handler-outside-simulator.cc
+++ b/deps/v8/src/trap-handler/handler-outside-simulator.cc
@@ -5,11 +5,11 @@
#include "include/v8config.h"
#include "src/trap-handler/trap-handler-simulator.h"
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#define SYMBOL(name) "_" #name
-#else // !V8_OS_MACOSX
+#else // !V8_OS_DARWIN
#define SYMBOL(name) #name
-#endif // !V8_OS_MACOSX
+#endif // !V8_OS_DARWIN
// Define the ProbeMemory function declared in trap-handler-simulators.h.
asm(
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index 977d28daee..3d9e4c5cd2 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -43,7 +43,7 @@ MetadataLock::MetadataLock() {
abort();
}
- while (spinlock_.test_and_set(std::memory_order::memory_order_acquire)) {
+ while (spinlock_.test_and_set(std::memory_order_acquire)) {
}
}
@@ -52,7 +52,7 @@ MetadataLock::~MetadataLock() {
abort();
}
- spinlock_.clear(std::memory_order::memory_order_release);
+ spinlock_.clear(std::memory_order_release);
}
} // namespace trap_handler
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index d83f88ebb9..84ffdbd056 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -19,15 +19,15 @@ namespace trap_handler {
// X64 on Linux, Windows, MacOS, FreeBSD.
#if V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64 && \
- ((V8_OS_LINUX && !V8_OS_ANDROID) || V8_OS_WIN || V8_OS_MACOSX || \
+ ((V8_OS_LINUX && !V8_OS_ANDROID) || V8_OS_WIN || V8_OS_DARWIN || \
V8_OS_FREEBSD)
#define V8_TRAP_HANDLER_SUPPORTED true
// Arm64 (non-simulator) on Mac.
-#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_ARM64 && V8_OS_MACOSX
+#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_ARM64 && V8_OS_DARWIN
#define V8_TRAP_HANDLER_SUPPORTED true
// Arm64 simulator on x64 on Linux, Mac, or Windows.
#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && \
- (V8_OS_LINUX || V8_OS_MACOSX)
+ (V8_OS_LINUX || V8_OS_DARWIN || V8_OS_WIN)
#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
// Everything else is unsupported.
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 569c67fd25..41c0ac5dbe 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -19,7 +19,7 @@
#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
-#include "src/security/vm-cage.h"
+#include "src/sandbox/sandbox.h"
#include "src/utils/memcopy.h"
#if V8_LIBC_BIONIC
@@ -96,15 +96,15 @@ v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
return vas.get();
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
+#ifdef V8_SANDBOX
+v8::PageAllocator* GetSandboxPageAllocator() {
// TODO(chromium:1218005) remove this code once the cage is no longer
// optional.
- if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ if (GetProcessWideSandbox()->is_disabled()) {
return GetPlatformPageAllocator();
} else {
- CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
- return GetProcessWideVirtualMemoryCage()->page_allocator();
+ CHECK(GetProcessWideSandbox()->is_initialized());
+ return GetProcessWideSandbox()->page_allocator();
}
}
#endif
@@ -213,19 +213,19 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
return result;
}
-bool FreePages(v8::PageAllocator* page_allocator, void* address,
+void FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
- return page_allocator->FreePages(address, size);
+ CHECK(page_allocator->FreePages(address, size));
}
-bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
+void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
- return page_allocator->ReleasePages(address, size, new_size);
+ CHECK(page_allocator->ReleasePages(address, size, new_size));
}
bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
@@ -293,8 +293,8 @@ size_t VirtualMemory::Release(Address free_start) {
const size_t free_size = old_size - (free_start - region_.begin());
CHECK(InVM(free_start, free_size));
region_.set_size(old_size - free_size);
- CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
- old_size, region_.size()));
+ ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
+ old_size, region_.size());
return free_size;
}
@@ -307,8 +307,8 @@ void VirtualMemory::Free() {
Reset();
// FreePages expects size to be aligned to allocation granularity however
// ReleasePages may leave size at only commit granularity. Align it here.
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
- RoundUp(region.size(), page_allocator->AllocatePageSize())));
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ RoundUp(region.size(), page_allocator->AllocatePageSize()));
}
void VirtualMemory::FreeReadOnly() {
@@ -320,8 +320,8 @@ void VirtualMemory::FreeReadOnly() {
// FreePages expects size to be aligned to allocation granularity however
// ReleasePages may leave size at only commit granularity. Align it here.
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
- RoundUp(region.size(), page_allocator->AllocatePageSize())));
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ RoundUp(region.size(), page_allocator->AllocatePageSize()));
}
VirtualMemoryCage::VirtualMemoryCage() = default;
@@ -368,14 +368,17 @@ bool VirtualMemoryCage::InitReservation(
VirtualMemory(params.page_allocator, existing_reservation.begin(),
existing_reservation.size());
base_ = reservation_.address() + params.base_bias_size;
- } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
- // When the base doesn't need to be aligned, the virtual memory reservation
- // fails only due to OOM.
+ } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+ params.base_bias_size == 0) {
+ // When the base doesn't need to be aligned or when the requested
+ // base_bias_size is zero, the virtual memory reservation fails only
+ // due to OOM.
Address hint =
RoundDown(params.requested_start_hint,
RoundUp(params.base_alignment, allocate_page_size));
VirtualMemory reservation(params.page_allocator, params.reservation_size,
- reinterpret_cast<void*>(hint));
+ reinterpret_cast<void*>(hint),
+ params.base_alignment);
if (!reservation.IsReserved()) return false;
reservation_ = std::move(reservation);
@@ -455,6 +458,7 @@ bool VirtualMemoryCage::InitReservation(
RoundDown(params.reservation_size - (allocatable_base - base_) -
params.base_bias_size,
params.page_size);
+ size_ = allocatable_base + allocatable_size - base_;
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
params.page_allocator, allocatable_base, allocatable_size,
params.page_size,
@@ -465,6 +469,7 @@ bool VirtualMemoryCage::InitReservation(
void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
+ size_ = 0;
page_allocator_.reset();
reservation_.Free();
}
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 623214db7b..f9d940b10a 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -106,19 +106,19 @@ V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
// pointer.
V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-// Returns the virtual memory cage page allocator instance for allocating pages
-// inside the virtual memory cage. Guaranteed to be a valid pointer.
-V8_EXPORT_PRIVATE v8::PageAllocator* GetVirtualMemoryCagePageAllocator();
+#ifdef V8_SANDBOX
+// Returns the page allocator instance for allocating pages inside the sandbox.
+// Guaranteed to be a valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetSandboxPageAllocator();
#endif
-// Returns the appropriate page allocator to use for ArrayBuffer backing stores.
-// If the virtual memory cage is enabled, these must be allocated inside the
-// cage and so this will be the CagePageAllocator. Otherwise it will be the
-// PlatformPageAllocator.
+// Returns the appropriate page allocator to use for ArrayBuffer backing
+// stores. If the sandbox is enabled, these must be allocated inside the
+// sandbox and so this will be the SandboxPageAllocator. Otherwise it will be
+// the PlatformPageAllocator.
inline v8::PageAllocator* GetArrayBufferPageAllocator() {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- return GetVirtualMemoryCagePageAllocator();
+#ifdef V8_SANDBOX
+ return GetSandboxPageAllocator();
#else
return GetPlatformPageAllocator();
#endif
@@ -156,20 +156,18 @@ V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
PageAllocator::Permission access);
// Frees memory allocated by a call to AllocatePages. |address| and |size| must
-// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
+// be multiples of AllocatePageSize().
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
- void* address, const size_t size);
+void FreePages(v8::PageAllocator* page_allocator, void* address,
+ const size_t size);
// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
// Released memory is left in an undefined state, so it should not be accessed.
-// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
- void* address, size_t size,
- size_t new_size);
+void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
+ size_t new_size);
// Sets permissions according to |access|. |address| and |size| must be
// multiples of CommitPageSize(). Setting permission to kNoAccess may
@@ -296,16 +294,18 @@ class VirtualMemory final {
// ranges (on platforms that require code ranges) and are configurable via
// ReservationParams.
//
-// +------------+-----------+----------- ~~~ -+
-// | ... | ... | ... |
-// +------------+-----------+------------ ~~~ -+
+// +------------+-----------+------------ ~~~ --+- ~~~ -+
+// | ... | ... | ... | ... |
+// +------------+-----------+------------ ~~~ --+- ~~~ -+
// ^ ^ ^
// start cage base allocatable base
//
// <------------> <------------------->
// base bias size allocatable size
-// <-------------------------------------------->
-// reservation size
+// <------------------------------->
+// cage size
+// <---------------------------------------------------->
+// reservation size
//
// - The reservation is made using ReservationParams::page_allocator.
// - start is the start of the virtual memory reservation.
@@ -313,9 +313,13 @@ class VirtualMemory final {
// - allocatable base is the cage base rounded up to the nearest
// ReservationParams::page_size, and is the start of the allocatable area for
// the BoundedPageAllocator.
+// - cage size is the size of the area from cage base to the end of the
+// allocatable area.
//
// - The base bias is configured by ReservationParams::base_bias_size.
-// - The reservation size is configured by ReservationParams::reservation_size.
+// - The reservation size is configured by ReservationParams::reservation_size
+// but it might be actually bigger if we end up over-reserving the virtual
+// address space.
//
// Additionally,
// - The alignment of the cage base is configured by
@@ -334,9 +338,6 @@ class VirtualMemory final {
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
-//
-// TODO(chromium:1218005) can we either combine this class and
-// v8::VirtualMemoryCage in v8-platform.h or rename one of the two?
class VirtualMemoryCage {
public:
VirtualMemoryCage();
@@ -349,6 +350,7 @@ class VirtualMemoryCage {
VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;
Address base() const { return base_; }
+ size_t size() const { return size_; }
base::BoundedPageAllocator* page_allocator() const {
return page_allocator_.get();
@@ -359,6 +361,7 @@ class VirtualMemoryCage {
bool IsReserved() const {
DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
+ DCHECK_EQ(base_ != kNullAddress, size_ != 0);
return reservation_.IsReserved();
}
@@ -389,6 +392,7 @@ class VirtualMemoryCage {
protected:
Address base_ = kNullAddress;
+ size_t size_ = 0;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
VirtualMemory reservation_;
};
diff --git a/deps/v8/src/utils/bit-vector.h b/deps/v8/src/utils/bit-vector.h
index c171f51160..fa06f9d080 100644
--- a/deps/v8/src/utils/bit-vector.h
+++ b/deps/v8/src/utils/bit-vector.h
@@ -18,25 +18,13 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
uintptr_t* ptr_; // valid if data_length_ > 1
uintptr_t inline_; // valid if data_length_ == 1
- DataStorage(uintptr_t value) : inline_(value) {}
+ explicit DataStorage(uintptr_t value) : inline_(value) {}
};
// Iterator for the elements of this BitVector.
class Iterator {
public:
- explicit Iterator(BitVector* target)
- : target_(target),
- current_index_(0),
- current_value_(target->is_inline() ? target->data_.inline_
- : target->data_.ptr_[0]),
- current_(-1) {
- Advance();
- }
- ~Iterator() = default;
-
- bool Done() const { return current_index_ >= target_->data_length_; }
-
- V8_EXPORT_PRIVATE inline void Advance() {
+ V8_EXPORT_PRIVATE inline void operator++() {
current_++;
// Skip zeroed words.
@@ -57,15 +45,46 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
current_value_ >>= 1;
}
- int Current() const {
+ int operator*() const {
DCHECK(!Done());
return current_;
}
+ bool operator!=(const Iterator& other) const {
+ // "other" is required to be the end sentinel value, to avoid us needing
+ // to compare exact "current" values.
+ DCHECK(other.Done());
+ DCHECK_EQ(target_, other.target_);
+ return current_index_ != other.current_index_;
+ }
+
private:
- BitVector* target_;
- int current_index_;
+ static constexpr struct StartTag {
+ } kStartTag = {};
+ static constexpr struct EndTag {
+ } kEndTag = {};
+
+ explicit Iterator(const BitVector* target, StartTag)
+ : target_(target),
+ current_value_(target->is_inline() ? target->data_.inline_
+ : target->data_.ptr_[0]),
+ current_index_(0),
+ current_(-1) {
+ ++(*this);
+ }
+ explicit Iterator(const BitVector* target, EndTag)
+ : target_(target),
+ current_value_(0),
+ current_index_(target->data_length_),
+ current_(-1) {
+ DCHECK(Done());
+ }
+
+ bool Done() const { return current_index_ >= target_->data_length_; }
+
+ const BitVector* target_;
uintptr_t current_value_;
+ int current_index_;
int current_;
friend class BitVector;
@@ -111,8 +130,15 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
}
void CopyFrom(const BitVector& other) {
- DCHECK_LE(other.length(), length());
- CopyFrom(other.data_, other.data_length_);
+ DCHECK_EQ(other.length(), length());
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ data_.inline_ = other.data_.inline_;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ data_.ptr_[i] = other.data_.ptr_[i];
+ }
+ }
}
void Resize(int new_length, Zone* zone) {
@@ -126,7 +152,19 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
DCHECK_GT(new_data_length, kDataLengthForInline);
data_.ptr_ = zone->NewArray<uintptr_t>(new_data_length);
data_length_ = new_data_length;
- CopyFrom(old_data, old_data_length);
+
+ // Copy over the data.
+ if (old_data_length == kDataLengthForInline) {
+ data_.ptr_[0] = old_data.inline_;
+ } else {
+ for (int i = 0; i < old_data_length; i++) {
+ data_.ptr_[i] = old_data.ptr_[i];
+ }
+ }
+ // Zero out the rest of the data.
+ for (int i = old_data_length; i < data_length_; i++) {
+ data_.ptr_[i] = 0;
+ }
}
length_ = new_length;
}
@@ -276,6 +314,10 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
int length() const { return length_; }
+ Iterator begin() const { return Iterator(this, Iterator::kStartTag); }
+
+ Iterator end() const { return Iterator(this, Iterator::kEndTag); }
+
#ifdef DEBUG
void Print() const;
#endif
@@ -288,88 +330,50 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
DataStorage data_;
bool is_inline() const { return data_length_ == kDataLengthForInline; }
-
- void CopyFrom(DataStorage other_data, int other_data_length) {
- DCHECK_LE(other_data_length, data_length_);
-
- if (is_inline()) {
- DCHECK_EQ(other_data_length, kDataLengthForInline);
- data_.inline_ = other_data.inline_;
- } else if (other_data_length == kDataLengthForInline) {
- data_.ptr_[0] = other_data.inline_;
- for (int i = 1; i < data_length_; i++) {
- data_.ptr_[i] = 0;
- }
- } else {
- for (int i = 0; i < other_data_length; i++) {
- data_.ptr_[i] = other_data.ptr_[i];
- }
- for (int i = other_data_length; i < data_length_; i++) {
- data_.ptr_[i] = 0;
- }
- }
- }
};
class GrowableBitVector {
public:
- class Iterator {
- public:
- Iterator(const GrowableBitVector* target, Zone* zone)
- : it_(target->bits_ == nullptr ? zone->New<BitVector>(1, zone)
- : target->bits_) {}
- bool Done() const { return it_.Done(); }
- void Advance() { it_.Advance(); }
- int Current() const { return it_.Current(); }
-
- private:
- BitVector::Iterator it_;
- };
-
- GrowableBitVector() : bits_(nullptr) {}
- GrowableBitVector(int length, Zone* zone)
- : bits_(zone->New<BitVector>(length, zone)) {}
+ GrowableBitVector() : bits_() {}
+ GrowableBitVector(int length, Zone* zone) : bits_(length, zone) {}
bool Contains(int value) const {
if (!InBitsRange(value)) return false;
- return bits_->Contains(value);
+ return bits_.Contains(value);
}
void Add(int value, Zone* zone) {
EnsureCapacity(value, zone);
- bits_->Add(value);
+ bits_.Add(value);
}
- void Union(const GrowableBitVector& other, Zone* zone) {
- for (Iterator it(&other, zone); !it.Done(); it.Advance()) {
- Add(it.Current(), zone);
- }
- }
+ void Clear() { bits_.Clear(); }
- void Clear() {
- if (bits_ != nullptr) bits_->Clear();
+ int length() const { return bits_.length(); }
+
+ bool Equals(const GrowableBitVector& other) const {
+ return length() == other.length() && bits_.Equals(other.bits_);
}
+ BitVector::Iterator begin() const { return bits_.begin(); }
+
+ BitVector::Iterator end() const { return bits_.end(); }
+
private:
- static const int kInitialLength = 1024;
+ static constexpr int kInitialLength = 1024;
- bool InBitsRange(int value) const {
- return bits_ != nullptr && bits_->length() > value;
- }
+ bool InBitsRange(int value) const { return bits_.length() > value; }
void EnsureCapacity(int value, Zone* zone) {
if (InBitsRange(value)) return;
- int new_length = bits_ == nullptr ? kInitialLength : bits_->length();
+ int new_length =
+ base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(value));
+ new_length = std::min(new_length, kInitialLength);
while (new_length <= value) new_length *= 2;
-
- if (bits_ == nullptr) {
- bits_ = zone->New<BitVector>(new_length, zone);
- } else {
- bits_->Resize(new_length, zone);
- }
+ bits_.Resize(new_length, zone);
}
- BitVector* bits_;
+ BitVector bits_;
};
} // namespace internal
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 516dd84d6e..3488a1115c 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,8 +1,6 @@
ahaas@chromium.org
-bbudge@chromium.org
clemensb@chromium.org
gdeepti@chromium.org
jkummerow@chromium.org
manoskouk@chromium.org
thibaudm@chromium.org
-zhin@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index daf4ff9488..fd67a671ec 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -55,7 +55,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
+// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector |
// -4 | tiering budget |
@@ -310,7 +310,6 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->str(src.gp(), dst);
break;
case kI64:
@@ -345,7 +344,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->ldr(dst.gp(), src);
break;
case kI64:
@@ -638,10 +636,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- ldr(dst, liftoff::GetInstanceOperand());
-}
-
namespace liftoff {
#define __ lasm->
inline void LoadInternal(LiftoffAssembler* lasm, LiftoffRegister dst,
@@ -901,12 +895,9 @@ inline void AtomicOp32(
// the same register.
Register temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- // Make sure that {result} is unique.
- Register result_reg = result.gp();
- if (result_reg == value.gp() || result_reg == dst_addr ||
- result_reg == offset_reg) {
- result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
- }
+ // {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
+ DCHECK(result.gp() != value.gp() && result.gp() != dst_addr &&
+ result.gp() != offset_reg);
UseScratchRegisterScope temps(lasm);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -915,15 +906,12 @@ inline void AtomicOp32(
__ dmb(ISH);
Label retry;
__ bind(&retry);
- (lasm->*load)(result_reg, actual_addr, al);
- op(lasm, temp, result_reg, value.gp());
+ (lasm->*load)(result.gp(), actual_addr, al);
+ op(lasm, temp, result.gp(), value.gp());
(lasm->*store)(store_result, temp, actual_addr, al);
__ cmp(store_result, Operand(0));
__ b(ne, &retry);
__ dmb(ISH);
- if (result_reg != result.gp()) {
- __ mov(result.gp(), result_reg);
- }
}
inline void Add(LiftoffAssembler* lasm, Register dst, Register lhs,
@@ -1976,10 +1964,6 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
liftoff::EmitFloatMinOrMax(this, dst, lhs, rhs, liftoff::MinOrMax::kMax);
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
- // This is a nop on arm.
-}
-
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
@@ -3468,8 +3452,13 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
// to q14 and q15, which will be unused since they are not allocatable in
// Liftoff. If the operands are the same, then we build a smaller list
// operand below.
- static_assert(!(kLiftoffAssemblerFpCacheRegs &
- (d28.bit() | d29.bit() | d30.bit() | d31.bit())),
+ static_assert(!kLiftoffAssemblerFpCacheRegs.has(d28),
+ "This only works if q14-q15 (d28-d31) are not used.");
+ static_assert(!kLiftoffAssemblerFpCacheRegs.has(d29),
+ "This only works if q14-q15 (d28-d31) are not used.");
+ static_assert(!kLiftoffAssemblerFpCacheRegs.has(d30),
+ "This only works if q14-q15 (d28-d31) are not used.");
+ static_assert(!kLiftoffAssemblerFpCacheRegs.has(d31),
"This only works if q14-q15 (d28-d31) are not used.");
vmov(q14, src1);
src1 = q14;
@@ -4081,7 +4070,7 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
RegList core_regs = regs.GetGpList();
- if (core_regs != 0) {
+ if (!core_regs.is_empty()) {
stm(db_w, sp, core_regs);
}
LiftoffRegList fp_regs = regs & kFpCacheRegList;
@@ -4120,20 +4109,19 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
vldm(ia_w, sp, first, last);
}
RegList core_regs = regs.GetGpList();
- if (core_regs != 0) {
+ if (!core_regs.is_empty()) {
ldm(ia_w, sp, core_regs);
}
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetLastRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 1c186f39f1..70ed5de8f6 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -55,7 +55,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
+// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
// -4 | tiering budget |
@@ -85,7 +85,6 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
return reg.gp().X();
case kF32:
return reg.fp().S();
@@ -99,13 +98,13 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
}
inline CPURegList PadRegList(RegList list) {
- if ((base::bits::CountPopulation(list) & 1) != 0) list |= padreg.bit();
- return CPURegList(CPURegister::kRegister, kXRegSizeInBits, list);
+ if ((list.Count() & 1) != 0) list.set(padreg);
+ return CPURegList(kXRegSizeInBits, list);
}
-inline CPURegList PadVRegList(RegList list) {
- if ((base::bits::CountPopulation(list) & 1) != 0) list |= fp_scratch.bit();
- return CPURegList(CPURegister::kVRegister, kQRegSizeInBits, list);
+inline CPURegList PadVRegList(DoubleRegList list) {
+ if ((list.Count() & 1) != 0) list.set(fp_scratch);
+ return CPURegList(kQRegSizeInBits, list);
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
@@ -469,10 +468,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- Ldr(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -645,12 +640,9 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- // Make sure that {result} is unique.
- Register result_reg = result.gp();
- if (result_reg == value.gp() || result_reg == dst_addr ||
- result_reg == offset_reg) {
- result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
- }
+ // {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
+ DCHECK(result.gp() != value.gp() && result.gp() != dst_addr &&
+ result.gp() != offset_reg);
UseScratchRegisterScope temps(lasm);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -666,18 +658,18 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- __ ldaxrb(result_reg.W(), actual_addr);
+ __ ldaxrb(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- __ ldaxrh(result_reg.W(), actual_addr);
+ __ ldaxrh(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- __ ldaxr(result_reg.W(), actual_addr);
+ __ ldaxr(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store:
- __ ldaxr(result_reg.X(), actual_addr);
+ __ ldaxr(result.gp().X(), actual_addr);
break;
default:
UNREACHABLE();
@@ -685,19 +677,19 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
switch (op) {
case Binop::kAdd:
- __ add(temp, result_reg, value.gp());
+ __ add(temp, result.gp(), value.gp());
break;
case Binop::kSub:
- __ sub(temp, result_reg, value.gp());
+ __ sub(temp, result.gp(), value.gp());
break;
case Binop::kAnd:
- __ and_(temp, result_reg, value.gp());
+ __ and_(temp, result.gp(), value.gp());
break;
case Binop::kOr:
- __ orr(temp, result_reg, value.gp());
+ __ orr(temp, result.gp(), value.gp());
break;
case Binop::kXor:
- __ eor(temp, result_reg, value.gp());
+ __ eor(temp, result.gp(), value.gp());
break;
case Binop::kExchange:
__ mov(temp, value.gp());
@@ -725,10 +717,6 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
}
__ Cbnz(store_result.W(), &retry);
-
- if (result_reg != result.gp()) {
- __ mov(result.gp(), result_reg);
- }
}
#undef __
@@ -1337,7 +1325,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
Uxtw(dst, src);
}
@@ -1567,7 +1555,6 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
DCHECK(rhs.is_valid());
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
@@ -3144,13 +3131,11 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
PopCPURegList(liftoff::PadRegList(regs.GetGpList()));
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
- bool needs_padding =
- (base::bits::CountPopulation(all_spills.GetGpList()) & 1) != 0;
+ bool needs_padding = (all_spills.GetGpList().Count() & 1) != 0;
if (needs_padding) {
spill_space_size += kSystemPointerSize;
++spill_offset;
@@ -3158,7 +3143,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetLastRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index f976e76c6d..4ff56c5ec5 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -66,7 +66,7 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
static constexpr LiftoffRegList kByteRegs =
- LiftoffRegList::FromBits<Register::ListOf(eax, ecx, edx)>();
+ LiftoffRegList::FromBits<RegList{eax, ecx, edx}.bits()>();
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
int32_t offset, ValueKind kind) {
@@ -76,7 +76,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->mov(dst.gp(), src);
break;
case kI64:
@@ -105,7 +104,6 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->mov(dst, src.gp());
break;
case kI64:
@@ -121,7 +119,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
case kS128:
assm->movdqu(dst, src.fp());
break;
- default:
+ case kVoid:
+ case kBottom:
+ case kI8:
+ case kI16:
UNREACHABLE();
}
}
@@ -132,6 +133,7 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind,
case kI32:
case kRef:
case kOptRef:
+ case kRtt:
assm->AllocateStackSpace(padding);
assm->push(reg.gp());
break;
@@ -152,7 +154,10 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind,
assm->AllocateStackSpace(sizeof(double) * 2 + padding);
assm->movdqu(Operand(esp, 0), reg.fp());
break;
- default:
+ case kVoid:
+ case kBottom:
+ case kI8:
+ case kI16:
UNREACHABLE();
}
}
@@ -378,10 +383,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- mov(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -755,7 +756,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Binop op, Register dst_addr,
if (is_byte_store) {
// The scratch register has to be a byte register. As we are already tight
// on registers, we just use the root register here.
- static_assert((kLiftoffAssemblerGpCacheRegs & kRootRegister.bit()) == 0,
+ static_assert(!kLiftoffAssemblerGpCacheRegs.has(kRootRegister),
"root register is not Liftoff cache register");
DCHECK(kRootRegister.is_byte_register());
__ push(kRootRegister);
@@ -1160,17 +1161,15 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueKind kind) {
- if (needs_gp_reg_pair(kind)) {
- liftoff::MoveStackValue(this,
- liftoff::GetHalfStackSlot(src_offset, kLowWord),
- liftoff::GetHalfStackSlot(dst_offset, kLowWord));
- liftoff::MoveStackValue(this,
- liftoff::GetHalfStackSlot(src_offset, kHighWord),
- liftoff::GetHalfStackSlot(dst_offset, kHighWord));
- } else {
+ DCHECK_EQ(0, element_size_bytes(kind) % kSystemPointerSize);
+ int words = element_size_bytes(kind) / kSystemPointerSize;
+ DCHECK_LE(1, words);
+ do {
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
liftoff::GetStackSlot(dst_offset));
- }
+ dst_offset -= kSystemPointerSize;
+ src_offset -= kSystemPointerSize;
+ } while (--words);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
@@ -1200,7 +1199,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
mov(dst, reg.gp());
break;
case kI64:
@@ -1864,10 +1862,6 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
- // This is a nop on ia32.
-}
-
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2464,7 +2458,6 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case kI32:
@@ -4511,15 +4504,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 5b43a2a41d..e4458b602b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -15,105 +15,105 @@ namespace wasm {
#if V8_TARGET_ARCH_IA32
// Omit ebx, which is the root register.
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(eax, ecx, edx, esi, edi);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {eax, ecx, edx, esi, edi};
// Omit xmm7, which is the kScratchDoubleReg.
-constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {xmm0, xmm1, xmm2, xmm3,
+ xmm4, xmm5, xmm6};
#elif V8_TARGET_ARCH_X64
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(rax, rcx, rdx, rbx, rsi, rdi, r9);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {rax, rcx, rdx, rbx,
+ rsi, rdi, r9};
-constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {xmm0, xmm1, xmm2, xmm3,
+ xmm4, xmm5, xmm6, xmm7};
#elif V8_TARGET_ARCH_MIPS
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7, v0, v1);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, t0, t1, t2,
+ t3, t4, t5, t6, s7, v0, v1};
-constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24};
#elif V8_TARGET_ARCH_MIPS64
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7, v0, v1);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5, a6,
+ a7, t0, t1, t2, s7, v0, v1};
-constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26};
#elif V8_TARGET_ARCH_LOONG64
// t6-t8 and s3-s4: scratch registers, s6: root
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
- s1, s2, s5, s7, s8);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5, a6,
+ a7, t0, t1, t2, t3, t4, t5,
+ s0, s1, s2, s5, s7, s8};
// f29: zero, f30-f31: macro-assembler scratch float Registers.
-constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
- f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
+ f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28};
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {r0, r1, r2, r3, r4,
+ r5, r6, r7, r8, r9};
// d13: zero, d14-d15: scratch
-constexpr RegList kLiftoffAssemblerFpCacheRegs = LowDwVfpRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
#elif V8_TARGET_ARCH_ARM64
// x16: ip0, x17: ip1, x18: platform register, x26: root, x28: base, x29: fp,
// x30: lr, x31: xzr.
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- CPURegister::ListOf(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12,
- x13, x14, x15, x19, x20, x21, x22, x23, x24, x25, x27);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {
+ x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x19, x20, x21, x22, x23, x24, x25, x27};
// d15: fp_zero, d30-d31: macro-assembler scratch V Registers.
-constexpr RegList kLiftoffAssemblerFpCacheRegs = CPURegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d16, d17,
- d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14,
+ d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29};
#elif V8_TARGET_ARCH_S390X
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r2, r3, r4, r5, r6, r7, r8, cp);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {r2, r3, r4, r5,
+ r6, r7, r8, cp};
-constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
#elif V8_TARGET_ARCH_PPC64
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11, cp);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {r3, r4, r5, r6, r7,
+ r8, r9, r10, r11, cp};
-constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
- d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
#elif V8_TARGET_ARCH_RISCV64
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedGpRegs in frame-constants-riscv64.h
-constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5,
+ a6, a7, t0, t1, t2, s7};
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedFpRegs in frame-constants-riscv64.h
-constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
- fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
+ ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11};
#else
-constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+constexpr RegList kLiftoffAssemblerGpCacheRegs = RegList::FromBits(0xff);
-constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
+constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegList::FromBits(0xff);
#endif
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 71ef80d241..6691c7dd32 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -549,28 +549,29 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
}
}
-void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
+void LiftoffAssembler::CacheState::DefineSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint) {
for (const auto& slot : stack_state) {
if (is_reference(slot.kind())) {
DCHECK(slot.is_stack());
- safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
+ safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(slot));
}
}
}
void LiftoffAssembler::CacheState::DefineSafepointWithCalleeSavedRegisters(
- Safepoint& safepoint) {
+ SafepointTableBuilder::Safepoint& safepoint) {
for (const auto& slot : stack_state) {
if (!is_reference(slot.kind())) continue;
if (slot.is_stack()) {
- safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
+ safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(slot));
} else {
DCHECK(slot.is_reg());
- safepoint.DefineRegister(slot.reg().gp().code());
+ safepoint.DefineTaggedRegister(slot.reg().gp().code());
}
}
if (cached_instance != no_reg) {
- safepoint.DefineRegister(cached_instance.code());
+ safepoint.DefineTaggedRegister(cached_instance.code());
}
}
@@ -840,6 +841,9 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
target.cached_mem_start, instance,
ObjectAccess::ToTagged(WasmInstanceObject::kMemoryStartOffset),
sizeof(size_t));
+#ifdef V8_SANDBOXED_POINTERS
+ DecodeSandboxedPointer(target.cached_mem_start);
+#endif
}
}
@@ -1056,7 +1060,7 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
// Reload the instance from the stack.
if (!target_instance) {
- FillInstanceInto(instance_reg);
+ LoadInstanceFromFrame(instance_reg);
}
}
@@ -1157,6 +1161,12 @@ void LiftoffAssembler::MoveToReturnLocations(
}
// Slow path for multi-return.
+ // We sometimes allocate a register to perform stack-to-stack moves, which can
+ // cause a spill in the cache state. Conservatively save and restore the
+ // original state in case it is needed after the current instruction
+ // (conditional branch).
+ CacheState saved_state;
+ saved_state.Split(*cache_state());
int call_desc_return_idx = 0;
DCHECK_LE(sig->return_count(), cache_state_.stack_height());
VarState* slots = cache_state_.stack_state.end() - sig->return_count();
@@ -1207,6 +1217,7 @@ void LiftoffAssembler::MoveToReturnLocations(
}
}
}
+ cache_state()->Steal(saved_state);
}
#ifdef ENABLE_SLOW_DCHECKS
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index a1a08d9a29..b3cb11a61e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -219,9 +219,10 @@ class LiftoffAssembler : public TurboAssembler {
/*out*/ LiftoffRegList* spills,
SpillLocation spill_location);
- void DefineSafepoint(Safepoint& safepoint);
+ void DefineSafepoint(SafepointTableBuilder::Safepoint& safepoint);
- void DefineSafepointWithCalleeSavedRegisters(Safepoint& safepoint);
+ void DefineSafepointWithCalleeSavedRegisters(
+ SafepointTableBuilder::Safepoint& safepoint);
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
@@ -705,7 +706,6 @@ class LiftoffAssembler : public TurboAssembler {
Register isolate_root);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
- inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
@@ -886,7 +886,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src);
inline bool emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_u32_to_intptr(Register dst, Register src);
+ inline void emit_u32_to_uintptr(Register dst, Register src);
void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
if (kSystemPointerSize == 8) {
@@ -1461,10 +1461,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
- inline void RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset);
+ inline void RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset);
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
@@ -1664,6 +1663,10 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, imm);
}
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
+ // This is a no-op on 32-bit systems.
+}
+
#endif // V8_TARGET_ARCH_32_BIT
// End of the partially platform-independent implementations of the
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 1fcc2f7b86..8c8a247837 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -590,10 +590,9 @@ class LiftoffCompiler {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
case kI8:
case kI16:
- bailout_reason = kRefTypes;
+ bailout_reason = kGC;
break;
default:
UNREACHABLE();
@@ -765,7 +764,7 @@ class LiftoffCompiler {
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
int actual_locals = __ num_locals() - num_params;
DCHECK_LE(0, actual_locals);
- constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs);
+ constexpr int kNumCacheRegisters = kLiftoffAssemblerGpCacheRegs.Count();
// If we have many locals, we put them on the stack initially. This avoids
// having to spill them on merge points. Use of these initial values should
// be rare anyway.
@@ -827,12 +826,13 @@ class LiftoffCompiler {
.AsRegister()));
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
// Load the feedback vector and cache it in a stack slot.
- constexpr LiftoffRegList parameter_registers = GetGpParamRegisters();
+ constexpr LiftoffRegList kGpParamRegisters = GetGpParamRegisters();
if (FLAG_wasm_speculative_inlining) {
+ CODE_COMMENT("load feedback vector");
int declared_func_index =
func_index_ - env_->module->num_imported_functions;
DCHECK_GE(declared_func_index, 0);
- LiftoffRegList pinned = parameter_registers;
+ LiftoffRegList pinned = kGpParamRegisters;
LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadTaggedPointerFromInstance(
tmp.gp(), kWasmInstanceRegister,
@@ -842,11 +842,10 @@ class LiftoffCompiler {
declared_func_index),
pinned);
__ Spill(liftoff::kFeedbackVectorOffset, tmp, kPointerKind);
- } else {
- __ Spill(liftoff::kFeedbackVectorOffset, WasmValue::ForUintPtr(0));
}
if (dynamic_tiering()) {
- LiftoffRegList pinned = parameter_registers;
+ CODE_COMMENT("load tier up budget");
+ LiftoffRegList pinned = kGpParamRegisters;
LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(tmp.gp(), TieringBudgetArray, kSystemPointerSize,
pinned);
@@ -854,8 +853,6 @@ class LiftoffCompiler {
kInt32Size * declared_function_index(env_->module, func_index_);
__ Load(tmp, tmp.gp(), no_reg, offset, LoadType::kI32Load, pinned);
__ Spill(liftoff::kTierupBudgetOffset, tmp, ValueKind::kI32);
- } else {
- __ Spill(liftoff::kTierupBudgetOffset, WasmValue::ForUintPtr(0));
}
if (for_debugging_) __ ResetOSRTarget();
@@ -964,11 +961,11 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
if (ool->safepoint_info) {
for (auto index : ool->safepoint_info->slots) {
- safepoint.DefinePointerSlot(index);
+ safepoint.DefineTaggedStackSlot(index);
}
int total_frame_size = __ GetTotalFrameSize();
@@ -2823,7 +2820,7 @@ class LiftoffCompiler {
// Convert the index to ptrsize, bounds-checking the high word on 32-bit
// systems for memory64.
if (!env_->module->is_memory64) {
- __ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
+ __ emit_u32_to_uintptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
__ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp());
@@ -2861,6 +2858,7 @@ class LiftoffCompiler {
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
uintptr_t offset, Register index,
LiftoffRegList pinned) {
+ CODE_COMMENT("alignment check");
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapUnalignedAccess, 0);
Register address = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -2967,6 +2965,9 @@ class LiftoffCompiler {
memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize,
pinned);
+#ifdef V8_SANDBOXED_POINTERS
+ __ DecodeSandboxedPointer(memory_start);
+#endif
__ cache_state()->SetMemStartCacheRegister(memory_start);
}
return memory_start;
@@ -3279,7 +3280,7 @@ class LiftoffCompiler {
ValueType type =
index < static_cast<int>(__ num_locals())
? decoder->local_type(index)
- : exception ? ValueType::Ref(HeapType::kExtern, kNonNullable)
+ : exception ? ValueType::Ref(HeapType::kAny, kNonNullable)
: decoder->stack_value(decoder_stack_index--)->type;
DCHECK(CheckCompatibleStackSlotTypes(slot.kind(), type.kind()));
value.type = type;
@@ -4259,8 +4260,7 @@ class LiftoffCompiler {
}
case wasm::kRef:
case wasm::kOptRef:
- case wasm::kRtt:
- case wasm::kRttWithDepth: {
+ case wasm::kRtt: {
--(*index_in_array);
__ StoreTaggedPointer(
values_array, no_reg,
@@ -4318,8 +4318,7 @@ class LiftoffCompiler {
}
case wasm::kRef:
case wasm::kOptRef:
- case wasm::kRtt:
- case wasm::kRttWithDepth: {
+ case wasm::kRtt: {
__ LoadTaggedPointer(
value.gp(), values_array.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index),
@@ -4498,9 +4497,9 @@ class LiftoffCompiler {
LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
// We have to reuse the value register as the result register so that we
- // don't run out of registers on ia32. For this we use the value register
- // as the result register if it has no other uses. Otherwise we allocate
- // a new register and let go of the value register to get spilled.
+ // don't run out of registers on ia32. For this we use the value register as
+ // the result register if it has no other uses. Otherwise we allocate a new
+ // register and let go of the value register to get spilled.
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
@@ -4520,6 +4519,7 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
+ CODE_COMMENT("atomic binop");
uintptr_t offset = imm.offset;
Register addr = pinned.set(GetMemoryStart(pinned));
@@ -4545,6 +4545,9 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+#ifdef V8_SANDBOXED_POINTERS
+ __ DecodeSandboxedPointer(addr);
+#endif
__ emit_i32_add(addr, addr, index);
pinned.clear(LiftoffRegister(index));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
@@ -4820,32 +4823,92 @@ class LiftoffCompiler {
void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); }
+ // Pop a memtype (i32 or i64 depending on {WasmModule::is_memory64}) to a
+ // register, updating {*high_word} to contain the ORed combination of all
+ // popped high words. Returns the ptrsized register holding the popped value.
+ LiftoffRegister PopMemTypeToRegister(FullDecoder* decoder,
+ Register* high_word,
+ LiftoffRegList* pinned) {
+ LiftoffRegister reg = __ PopToRegister(*pinned);
+ LiftoffRegister intptr_reg = reg;
+ // For memory32 on 64-bit hosts, zero-extend.
+ if (kSystemPointerSize == kInt64Size && !env_->module->is_memory64) {
+ // Only overwrite {reg} if it's not used otherwise.
+ if (pinned->has(reg) || __ cache_state()->is_used(reg)) {
+ intptr_reg = __ GetUnusedRegister(kGpReg, *pinned);
+ }
+ __ emit_u32_to_uintptr(intptr_reg.gp(), reg.gp());
+ }
+ // For memory32 or memory64 on 64-bit, we are done here.
+ if (kSystemPointerSize == kInt64Size || !env_->module->is_memory64) {
+ pinned->set(intptr_reg);
+ return intptr_reg;
+ }
+
+ // For memory64 on 32-bit systems, combine all high words for a zero-check
+ // and only use the low words afterwards. This keeps the register pressure
+ // managable.
+ DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+ pinned->set(reg.low());
+ if (*high_word == no_reg) {
+ // Choose a register to hold the (combination of) high word(s). It cannot
+ // be one of the pinned registers, and it cannot be used in the value
+ // stack.
+ *high_word =
+ pinned->has(reg.high())
+ ? __ GetUnusedRegister(kGpReg, *pinned).gp()
+ : __ GetUnusedRegister(kGpReg, {reg.high()}, *pinned).gp();
+ pinned->set(*high_word);
+ if (*high_word != reg.high_gp()) {
+ __ Move(*high_word, reg.high_gp(), kI32);
+ }
+ } else if (*high_word != reg.high_gp()) {
+ // Combine the new high word into existing high words.
+ __ emit_i32_or(*high_word, *high_word, reg.high_gp());
+ }
+ return reg.low();
+ }
+
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
+ Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(__ PopToRegister());
LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
- LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst =
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned);
- Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- __ FillInstanceInto(instance);
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ GetUnusedRegister(kGpReg, pinned).gp();
+ __ LoadInstanceFromFrame(instance);
+ }
+ pinned.set(instance);
+
+ // Only allocate the OOB code now, so the state of the stack is reflected
+ // correctly.
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ if (mem_offsets_high_word != no_reg) {
+ // If any high word has bits set, jump to the OOB trap.
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word);
+ pinned.clear(mem_offsets_high_word);
+ }
LiftoffRegister segment_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(segment_index, WasmValue(imm.data_segment.index));
- ExternalReference ext_ref = ExternalReference::wasm_memory_init();
- auto sig =
- MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32, kI32);
+ auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
+ kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kVoid, args, ext_ref);
- Label* trap_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args,
+ ExternalReference::wasm_memory_init());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
@@ -4872,42 +4935,75 @@ class LiftoffCompiler {
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
+ Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
- LiftoffRegister size = pinned.set(__ PopToRegister());
- LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
- LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
- Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- __ FillInstanceInto(instance);
- ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
- auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
+ LiftoffRegister size = pinned.set(
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned));
+ LiftoffRegister src = pinned.set(
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned));
+ LiftoffRegister dst = pinned.set(
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned));
+
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ GetUnusedRegister(kGpReg, pinned).gp();
+ __ LoadInstanceFromFrame(instance);
+ }
+
+ // Only allocate the OOB code now, so the state of the stack is reflected
+ // correctly.
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ if (mem_offsets_high_word != no_reg) {
+ // If any high word has bits set, jump to the OOB trap.
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word);
+ }
+
+ auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind,
+ kPointerKind, kPointerKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kVoid, args, ext_ref);
- Label* trap_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args,
+ ExternalReference::wasm_memory_copy());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
+ Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
- LiftoffRegister size = pinned.set(__ PopToRegister());
+ LiftoffRegister size = pinned.set(
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned));
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
- LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
- Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- __ FillInstanceInto(instance);
- ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
- auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
+ LiftoffRegister dst = pinned.set(
+ PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned));
+
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ GetUnusedRegister(kGpReg, pinned).gp();
+ __ LoadInstanceFromFrame(instance);
+ }
+
+ // Only allocate the OOB code now, so the state of the stack is reflected
+ // correctly.
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ if (mem_offsets_high_word != no_reg) {
+ // If any high word has bits set, jump to the OOB trap.
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word);
+ }
+
+ auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
+ kPointerKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kVoid, args, ext_ref);
- Label* trap_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args,
+ ExternalReference::wasm_memory_fill());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
@@ -5354,6 +5450,45 @@ class LiftoffCompiler {
__ PushRegister(kRef, array);
}
+ void ArrayInitFromData(FullDecoder* decoder,
+ const ArrayIndexImmediate<validate>& array_imm,
+ const IndexImmediate<validate>& data_segment,
+ const Value& /* offset */, const Value& /* length */,
+ const Value& /* rtt */, Value* /* result */) {
+ LiftoffRegList pinned;
+ LiftoffRegister data_segment_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(data_segment_reg,
+ WasmValue(static_cast<int32_t>(data_segment.index)));
+ LiftoffAssembler::VarState data_segment_var(kI32, data_segment_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmArrayInitFromData,
+ MakeSig::Returns(kRef).Params(kI32, kI32, kI32, kRtt),
+ {
+ data_segment_var,
+ __ cache_state()->stack_state.end()[-3], // offset
+ __ cache_state()->stack_state.end()[-2], // length
+ __ cache_state()->stack_state.end()[-1] // rtt
+ },
+ decoder->position());
+
+ LiftoffRegister result(kReturnRegister0);
+ // Reuse the data segment register for error handling.
+ LiftoffRegister error_smi = data_segment_reg;
+ LoadSmi(error_smi, kArrayInitFromDataArrayTooLargeErrorCode);
+ Label* trap_label_array_too_large =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
+ __ emit_cond_jump(kEqual, trap_label_array_too_large, kRef, result.gp(),
+ error_smi.gp());
+ LoadSmi(error_smi, kArrayInitFromDataSegmentOutOfBoundsErrorCode);
+ Label* trap_label_segment_out_of_bounds = AddOutOfLineTrap(
+ decoder, WasmCode::kThrowWasmTrapDataSegmentOutOfBounds);
+ __ emit_cond_jump(kEqual, trap_label_segment_out_of_bounds, kRef,
+ result.gp(), error_smi.gp());
+
+ __ PushRegister(kRef, result);
+ }
+
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
constexpr static int kI31To32BitSmiShift = 33;
@@ -5400,28 +5535,7 @@ class LiftoffCompiler {
__ LoadTaggedPointer(
rtt.gp(), rtt.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
- __ PushRegister(kRttWithDepth, rtt);
- }
-
- void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
- Value* result, WasmRttSubMode mode) {
- ValueKind parent_value_kind = parent.type.kind();
- ValueKind rtt_value_kind = kRttWithDepth;
- LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
- LiftoffAssembler::VarState parent_var =
- __ cache_state()->stack_state.end()[-1];
- __ LoadConstant(type_reg, WasmValue(type_index));
- LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
- WasmCode::RuntimeStubId target = mode == WasmRttSubMode::kCanonicalize
- ? WasmCode::kWasmAllocateRtt
- : WasmCode::kWasmAllocateFreshRtt;
- CallRuntimeStub(
- target,
- MakeSig::Returns(rtt_value_kind).Params(kI32, parent_value_kind),
- {type_var, parent_var}, decoder->position());
- // Drop the parent RTT.
- __ cache_state()->stack_state.pop_back(1);
- __ PushRegister(rtt_value_kind, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(kRtt, rtt);
}
enum NullSucceeds : bool { // --
@@ -5482,37 +5596,21 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
__ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
pinned);
- if (rtt.type.has_depth()) {
- // Step 3: check the list's length if needed.
- if (rtt.type.depth() >= kMinimumSupertypeArraySize) {
- LiftoffRegister list_length = tmp2;
- __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
- __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
- rtt.type.depth());
- }
- // Step 4: load the candidate list slot into {tmp1}, and compare it.
- __ LoadTaggedPointer(
- tmp1.gp(), tmp1.gp(), no_reg,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
- pinned);
- __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
- rtt_reg.gp());
- } else {
- // Step 3: if rtt's depth is unknown, we invoke a builtin to compute the
- // result, as we might not have enough available registers.
-
- // Preserve {obj_reg} across the call.
- LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
- __ PushRegisters(saved_regs);
- LiftoffAssembler::VarState rtt_state(kPointerKind, rtt_reg, 0);
- LiftoffAssembler::VarState tmp1_state(kPointerKind, tmp1, 0);
- CallRuntimeStub(WasmCode::kWasmSubtypeCheck,
- MakeSig::Returns(kI32).Params(kOptRef, rtt.type.kind()),
- {tmp1_state, rtt_state}, decoder->position());
- __ PopRegisters(saved_regs);
- __ Move(tmp1.gp(), kReturnRegister0, kI32);
- __ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
- }
+ // Step 3: check the list's length if needed.
+ uint32_t rtt_depth =
+ GetSubtypingDepth(decoder->module_, rtt.type.ref_index());
+ if (rtt_depth >= kMinimumSupertypeArraySize) {
+ LiftoffRegister list_length = tmp2;
+ __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
+ rtt_depth);
+ }
+ // Step 4: load the candidate list slot into {tmp1}, and compare it.
+ __ LoadTaggedPointer(
+ tmp1.gp(), tmp1.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt_depth), pinned);
+ __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
+ rtt_reg.gp());
// Fall through to {match}.
__ bind(&match);
@@ -5599,58 +5697,35 @@ class LiftoffCompiler {
// through to match.
LiftoffRegister DataCheck(const Value& obj, Label* no_match,
LiftoffRegList pinned, Register opt_scratch) {
- LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
-
- // Reserve all temporary registers up front, so that the cache state
- // tracking doesn't get confused by the following conditional jumps.
- LiftoffRegister tmp1 =
- opt_scratch != no_reg
- ? LiftoffRegister(opt_scratch)
- : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-
- if (obj.type.is_nullable()) {
- LoadNullValue(tmp1.gp(), pinned);
- __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
- }
-
- __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
-
- // Load the object's map and check if it is a struct/array map.
- __ LoadMap(tmp1.gp(), obj_reg.gp());
- EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
-
- return obj_reg;
+ TypeCheckRegisters registers =
+ TypeCheckPrelude(obj, no_match, pinned, opt_scratch);
+ EmitDataRefCheck(registers.map_reg.gp(), no_match, registers.tmp_reg,
+ pinned);
+ return registers.obj_reg;
+ }
+
+ LiftoffRegister ArrayCheck(const Value& obj, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ TypeCheckRegisters registers =
+ TypeCheckPrelude(obj, no_match, pinned, opt_scratch);
+ __ Load(registers.map_reg, registers.map_reg.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U, pinned);
+ __ emit_i32_cond_jumpi(kUnequal, no_match, registers.map_reg.gp(),
+ WASM_ARRAY_TYPE);
+ return registers.obj_reg;
}
LiftoffRegister FuncCheck(const Value& obj, Label* no_match,
LiftoffRegList pinned, Register opt_scratch) {
- LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
-
- // Reserve all temporary registers up front, so that the cache state
- // tracking doesn't get confused by the following conditional jumps.
- LiftoffRegister tmp1 =
- opt_scratch != no_reg
- ? LiftoffRegister(opt_scratch)
- : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-
- if (obj.type.is_nullable()) {
- LoadNullValue(tmp1.gp(), pinned);
- __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
- }
-
- __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
-
- // Load the object's map and check if its InstaceType field is that of a
- // function.
- __ LoadMap(tmp1.gp(), obj_reg.gp());
- __ Load(tmp1, tmp1.gp(), no_reg,
+ TypeCheckRegisters registers =
+ TypeCheckPrelude(obj, no_match, pinned, opt_scratch);
+ __ Load(registers.map_reg, registers.map_reg.gp(), no_reg,
wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
LoadType::kI32Load16U, pinned);
- __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(),
+ __ emit_i32_cond_jumpi(kUnequal, no_match, registers.map_reg.gp(),
WASM_INTERNAL_FUNCTION_TYPE);
-
- return obj_reg;
+ return registers.obj_reg;
}
LiftoffRegister I31Check(const Value& object, Label* no_match,
@@ -5695,6 +5770,11 @@ class LiftoffCompiler {
return AbstractTypeCheck<&LiftoffCompiler::FuncCheck>(object);
}
+ void RefIsArray(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */) {
+ return AbstractTypeCheck<&LiftoffCompiler::ArrayCheck>(object);
+ }
+
void RefIsI31(FullDecoder* decoder, const Value& object,
Value* /* result */) {
return AbstractTypeCheck<&LiftoffCompiler::I31Check>(object);
@@ -5726,6 +5806,11 @@ class LiftoffCompiler {
return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
}
+ void RefAsArray(FullDecoder* decoder, const Value& object, Value* result) {
+ return AbstractTypeCast<&LiftoffCompiler::ArrayCheck>(object, decoder,
+ kRef);
+ }
+
template <TypeChecker type_checker>
void BrOnAbstractType(const Value& object, FullDecoder* decoder,
uint32_t br_depth) {
@@ -5786,6 +5871,12 @@ class LiftoffCompiler {
br_depth);
}
+ void BrOnArray(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder,
+ br_depth);
+ }
+
void BrOnNonData(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnNonAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
@@ -5804,6 +5895,12 @@ class LiftoffCompiler {
br_depth);
}
+ void BrOnNonArray(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnNonAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder,
+ br_depth);
+ }
+
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
// Nothing to do here.
}
@@ -6093,7 +6190,7 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset),
pinned);
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
__ LoadExternalPointer(target.gp(), func_ref.gp(),
WasmInternalFunction::kForeignAddressOffset,
@@ -6234,7 +6331,6 @@ class LiftoffCompiler {
case kOptRef:
return LoadNullValue(reg.gp(), pinned);
case kRtt:
- case kRttWithDepth:
case kVoid:
case kBottom:
case kRef:
@@ -6242,6 +6338,35 @@ class LiftoffCompiler {
}
}
+ struct TypeCheckRegisters {
+ LiftoffRegister obj_reg, map_reg, tmp_reg;
+ };
+
+ TypeCheckRegisters TypeCheckPrelude(const Value& obj, Label* no_match,
+ LiftoffRegList pinned,
+ Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister map_reg =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ if (obj.type.is_nullable()) {
+ LoadNullValue(map_reg.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), map_reg.gp());
+ }
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+
+ __ LoadMap(map_reg.gp(), obj_reg.gp());
+
+ return {obj_reg, map_reg, tmp_reg};
+ }
+
void EmitDataRefCheck(Register map, Label* not_data_ref, LiftoffRegister tmp,
LiftoffRegList pinned) {
constexpr int kInstanceTypeOffset =
@@ -6321,12 +6446,12 @@ class LiftoffCompiler {
}
void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
__ cache_state()->DefineSafepoint(safepoint);
}
void DefineSafepointWithCalleeSavedRegisters() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
__ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
}
@@ -6346,7 +6471,7 @@ class LiftoffCompiler {
// MVP:
kI32, kI64, kF32, kF64,
// Extern ref:
- kRef, kOptRef, kRtt, kRttWithDepth, kI8, kI16};
+ kRef, kOptRef, kRtt, kI8, kI16};
LiftoffAssembler asm_;
@@ -6477,7 +6602,8 @@ WasmCompilationResult ExecuteLiftoffCompilation(
result.source_positions = compiler->GetSourcePositionTable();
result.protected_instructions_data = compiler->GetProtectedInstructionsData();
result.frame_slot_count = compiler->GetTotalFrameSlotCountForGC();
- result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
+ auto* lowered_call_desc = GetLoweredCallDescriptor(&zone, call_descriptor);
+ result.tagged_parameter_slots = lowered_call_desc->GetTaggedParameterSlots();
result.func_index = func_index;
result.result_tier = ExecutionTier::kLiftoff;
result.for_debugging = for_debugging;
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 7c6bcb04a0..69a7350fc5 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -17,7 +17,7 @@ namespace internal {
namespace wasm {
static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
-static constexpr bool kNeedS128RegPair = !kSimpleFPAliasing;
+static constexpr bool kNeedS128RegPair = kFPAliasing == AliasingKind::kCombine;
enum RegClass : uint8_t {
kGpReg,
@@ -69,7 +69,6 @@ static inline constexpr RegClass reg_class_for(ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
return kGpReg;
default:
return kNoReg; // unsupported kind
@@ -111,13 +110,9 @@ static inline constexpr RegClass reg_class_for(ValueKind kind) {
// (not sharing index space with gp), so in this example, it is fp register 2.
// Maximum code of a gp cache register.
-static constexpr int kMaxGpRegCode =
- 8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
- base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs) - 1;
+static constexpr int kMaxGpRegCode = kLiftoffAssemblerGpCacheRegs.last().code();
// Maximum code of an fp cache register.
-static constexpr int kMaxFpRegCode =
- 8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
- base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs) - 1;
+static constexpr int kMaxFpRegCode = kLiftoffAssemblerFpCacheRegs.last().code();
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
@@ -154,12 +149,12 @@ class LiftoffRegister {
public:
constexpr explicit LiftoffRegister(Register reg)
: LiftoffRegister(reg.code()) {
- DCHECK_NE(0, kLiftoffAssemblerGpCacheRegs & reg.bit());
+ DCHECK(kLiftoffAssemblerGpCacheRegs.has(reg));
DCHECK_EQ(reg, gp());
}
constexpr explicit LiftoffRegister(DoubleRegister reg)
: LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
- DCHECK_NE(0, kLiftoffAssemblerFpCacheRegs & reg.bit());
+ DCHECK(kLiftoffAssemblerFpCacheRegs.has(reg));
DCHECK_EQ(reg, fp());
}
@@ -191,7 +186,7 @@ class LiftoffRegister {
// LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) {
- if (!kSimpleFPAliasing && kind == kF32) {
+ if (kFPAliasing == AliasingKind::kCombine && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
@@ -341,9 +336,11 @@ class LiftoffRegList {
use_u16, uint16_t,
std::conditional<use_u32, uint32_t, uint64_t>::type>::type;
- static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
- static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
- << kAfterMaxLiftoffGpRegCode;
+ static constexpr storage_t kGpMask =
+ storage_t{kLiftoffAssemblerGpCacheRegs.bits()};
+ static constexpr storage_t kFpMask =
+ storage_t{kLiftoffAssemblerFpCacheRegs.bits()}
+ << kAfterMaxLiftoffGpRegCode;
// Sets all even numbered fp registers.
static constexpr uint64_t kEvenFpSetMask = uint64_t{0x5555555555555555}
<< kAfterMaxLiftoffGpRegCode;
@@ -444,8 +441,11 @@ class LiftoffRegList {
return FromBits(regs_ & ~mask.regs_);
}
- RegList GetGpList() { return regs_ & kGpMask; }
- RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }
+ RegList GetGpList() { return RegList::FromBits(regs_ & kGpMask); }
+ DoubleRegList GetFpList() {
+ return DoubleRegList::FromBits((regs_ & kFpMask) >>
+ kAfterMaxLiftoffGpRegCode);
+ }
inline Iterator begin() const;
inline Iterator end() const;
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index 4808e6aa98..b62fee4e04 100644
--- a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -56,7 +56,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
+// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
// -4 | tiering budget |
@@ -106,7 +106,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
assm->Ld_d(dst.gp(), src);
break;
case kF32:
@@ -134,7 +133,6 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->St_d(src.gp(), dst);
break;
case kF32:
@@ -366,10 +364,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- Ld_d(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -514,58 +508,323 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U: {
+ Ld_bu(dst.gp(), src_op);
+ dbar(0);
+ return;
+ }
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U: {
+ Ld_hu(dst.gp(), src_op);
+ dbar(0);
+ return;
+ }
+ case LoadType::kI32Load: {
+ Ld_w(dst.gp(), src_op);
+ dbar(0);
+ return;
+ }
+ case LoadType::kI64Load32U: {
+ Ld_wu(dst.gp(), src_op);
+ dbar(0);
+ return;
+ }
+ case LoadType::kI64Load: {
+ Ld_d(dst.gp(), src_op);
+ dbar(0);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
-}
-
-void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
-}
-
-void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8: {
+ dbar(0);
+ St_b(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16: {
+ dbar(0);
+ St_h(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store: {
+ dbar(0);
+ St_w(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store: {
+ dbar(0);
+ St_d(src.gp(), dst_op);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
-}
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
+ bin_instr, aligned) \
+ do { \
+ Label binop; \
+ andi(temp3, temp0, aligned); \
+ Sub_d(temp0, temp0, Operand(temp3)); \
+ slli_w(temp3, temp3, 3); \
+ dbar(0); \
+ bind(&binop); \
+ load_linked(temp1, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp1, temp3, size, false); \
+ bin_instr(temp2, result.gp(), Operand(value.gp())); \
+ InsertBits(temp1, temp2, temp3, size); \
+ store_conditional(temp1, MemOperand(temp0, 0)); \
+ BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
+ dbar(0); \
+ } while (0)
+
+#define ATOMIC_BINOP_CASE(name, inst32, inst64, opcode) \
+ void LiftoffAssembler::Atomic##name( \
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
+ LiftoffRegister value, LiftoffRegister result, StoreType type) { \
+ LiftoffRegList pinned = \
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ MemOperand dst_op = \
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
+ Add_d(temp0, dst_op.base(), dst_op.offset()); \
+ switch (type.value()) { \
+ case StoreType::kI64Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, inst64, 7); \
+ break; \
+ case StoreType::kI32Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, inst32, 3); \
+ break; \
+ case StoreType::kI64Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, inst64, 7); \
+ break; \
+ case StoreType::kI32Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, inst32, 3); \
+ break; \
+ case StoreType::kI64Store32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, inst64, 7); \
+ break; \
+ case StoreType::kI32Store: \
+ am##opcode##_db_w(result.gp(), value.gp(), temp0); \
+ break; \
+ case StoreType::kI64Store: \
+ am##opcode##_db_d(result.gp(), value.gp(), temp0); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
-void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
-}
+ATOMIC_BINOP_CASE(Add, Add_w, Add_d, add)
+ATOMIC_BINOP_CASE(And, And, And, and)
+ATOMIC_BINOP_CASE(Or, Or, Or, or)
+ATOMIC_BINOP_CASE(Xor, Xor, Xor, xor)
+
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ dbar(0); \
+ bind(&binop); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ bin_instr(temp1, result.gp(), Operand(value.gp())); \
+ store_conditional(temp1, MemOperand(temp0, 0)); \
+ BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
+ dbar(0); \
+ } while (0)
-void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Add_d(temp0, dst_op.base(), dst_op.offset());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, Sub_d, 7);
+ break;
+ case StoreType::kI32Store8:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, Sub_w, 3);
+ break;
+ case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, Sub_d, 7);
+ break;
+ case StoreType::kI32Store16:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, Sub_w, 3);
+ break;
+ case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, Sub_d, 7);
+ break;
+ case StoreType::kI32Store:
+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
+ break;
+ case StoreType::kI64Store:
+ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ATOMIC_BINOP_CASE
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
+ size, aligned) \
+ do { \
+ Label exchange; \
+ andi(temp1, temp0, aligned); \
+ Sub_d(temp0, temp0, Operand(temp1)); \
+ slli_w(temp1, temp1, 3); \
+ dbar(0); \
+ bind(&exchange); \
+ load_linked(temp2, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp2, temp1, size, false); \
+ InsertBits(temp2, value.gp(), temp1, size); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
+ dbar(0); \
+ } while (0)
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Add_d(temp0, dst_op.base(), dst_op.offset());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 8, 7);
+ break;
+ case StoreType::kI32Store8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 8, 3);
+ break;
+ case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 16, 7);
+ break;
+ case StoreType::kI32Store16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 16, 3);
+ break;
+ case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 32, 7);
+ break;
+ case StoreType::kI32Store:
+ amswap_db_w(result.gp(), value.gp(), temp0);
+ break;
+ case StoreType::kI64Store:
+ amswap_db_d(result.gp(), value.gp(), temp0);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ dbar(0); \
+ bind(&compareExchange); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
+ mov(temp2, new_value.gp()); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ dbar(0); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, size, aligned) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ andi(temp1, temp0, aligned); \
+ Sub_d(temp0, temp0, Operand(temp1)); \
+ slli_w(temp1, temp1, 3); \
+ dbar(0); \
+ bind(&compareExchange); \
+ load_linked(temp2, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp2, temp1, size, false); \
+ ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
+ BranchShort(&exit, ne, temp2, Operand(result.gp())); \
+ InsertBits(temp2, new_value.gp(), temp1, size); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ dbar(0); \
+ } while (0)
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ expected, new_value, result);
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Add_d(temp0, dst_op.base(), dst_op.offset());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 8, 7);
+ break;
+ case StoreType::kI32Store8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 8, 3);
+ break;
+ case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 16, 7);
+ break;
+ case StoreType::kI32Store16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 16, 3);
+ break;
+ case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 32, 7);
+ break;
+ case StoreType::kI32Store:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
+ break;
+ case StoreType::kI64Store:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
void LiftoffAssembler::AtomicFence() { dbar(0); }
@@ -623,7 +882,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
St_d(reg.gp(), dst);
break;
case kF32:
@@ -676,7 +934,6 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kOptRef:
// TODO(LOONG_dev): LOONG64 Check, MIPS64 dosn't need, ARM64/LOONG64 need?
case kRtt:
- case kRttWithDepth:
Ld_d(reg.gp(), src);
break;
case kF32:
@@ -949,7 +1206,7 @@ I64_SHIFTOP_I(shr, srl_d, srli_d)
#undef I64_SHIFTOP
#undef I64_SHIFTOP_I
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
bstrpick_d(dst, src, 31, 0);
}
@@ -985,7 +1242,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f32_copysign");
+ fcopysign_s(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -1012,7 +1269,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f64_copysign");
+ fcopysign_d(dst, lhs, rhs);
}
#define FP_BINOP(name, instruction) \
@@ -1248,55 +1505,87 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
movgr2fr_d(dst.fp(), src.gp());
return true;
case kExprI32SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ ftintrz_w_s(kScratchDoubleReg, src.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
return true;
- case kExprI32UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ case kExprI32UConvertSatF32: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ CompareF32(src.fp(), kScratchDoubleReg, CULE);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
+ }
case kExprI32SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ ftintrz_w_d(kScratchDoubleReg, src.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
return true;
- case kExprI32UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ case kExprI32UConvertSatF64: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ CompareF64(src.fp(), kScratchDoubleReg, CULE);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
+ }
case kExprI64SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ ftintrz_l_s(kScratchDoubleReg, src.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
return true;
- case kExprI64UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ case kExprI64UConvertSatF32: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ CompareF32(src.fp(), kScratchDoubleReg, CULE);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
+ }
case kExprI64SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ ftintrz_l_d(kScratchDoubleReg, src.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
return true;
- case kExprI64UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ case kExprI64UConvertSatF64: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ CompareF64(src.fp(), kScratchDoubleReg, CULE);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
+ }
default:
return false;
}
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- bailout(kComplexOperation, "i32_signextend_i8");
+ ext_w_b(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- bailout(kComplexOperation, "i32_signextend_i16");
+ ext_w_h(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i8");
+ ext_w_b(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i16");
+ ext_w_h(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i32");
+ slli_w(dst.gp(), src.gp(), 0);
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -2643,15 +2932,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
addi_d(sp, sp, gp_offset);
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 7dec2ea677..9c02cf3697 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -53,8 +53,8 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
-// -2 | instance |
+// -1 | StackFrame::WASM |
+// -2 | instance |
// -3 | feedback vector |
// -4 | tiering budget |
// -----+--------------------+---------------------------
@@ -95,7 +95,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
assm->lw(dst.gp(), src);
break;
case kI64:
@@ -123,7 +122,6 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->Usw(src.gp(), dst);
break;
case kI64:
@@ -497,10 +495,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- lw(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -819,7 +813,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
sw(reg.gp(), dst);
break;
case kI64:
@@ -1242,10 +1235,6 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
- // This is a nop on mips32.
-}
-
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
@@ -2967,15 +2956,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
addiu(sp, sp, gp_offset);
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 528078827a..c0f934c656 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -56,7 +56,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
+// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
// -4 | tiering budget |
@@ -106,7 +106,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
assm->Ld(dst.gp(), src);
break;
case kF32:
@@ -134,7 +133,6 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->Usd(src.gp(), dst);
break;
case kF32:
@@ -482,10 +480,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- Ld(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -641,58 +635,299 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U: {
+ Lbu(dst.gp(), src_op);
+ sync();
+ return;
+ }
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U: {
+ Lhu(dst.gp(), src_op);
+ sync();
+ return;
+ }
+ case LoadType::kI32Load: {
+ Lw(dst.gp(), src_op);
+ sync();
+ return;
+ }
+ case LoadType::kI64Load32U: {
+ Lwu(dst.gp(), src_op);
+ sync();
+ return;
+ }
+ case LoadType::kI64Load: {
+ Ld(dst.gp(), src_op);
+ sync();
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
-}
-
-void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
-}
-
-void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
-}
-
-void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
-}
-
-void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8: {
+ sync();
+ Sb(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16: {
+ sync();
+ Sh(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store: {
+ sync();
+ Sw(src.gp(), dst_op);
+ return;
+ }
+ case StoreType::kI64Store: {
+ sync();
+ Sd(src.gp(), dst_op);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
-}
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ sync(); \
+ bind(&binop); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ bin_instr(temp1, result.gp(), Operand(value.gp())); \
+ store_conditional(temp1, MemOperand(temp0, 0)); \
+ BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
+ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
+ bin_instr, aligned) \
+ do { \
+ Label binop; \
+ andi(temp3, temp0, aligned); \
+ Dsubu(temp0, temp0, Operand(temp3)); \
+ sll(temp3, temp3, 3); \
+ sync(); \
+ bind(&binop); \
+ load_linked(temp1, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp1, temp3, size, false); \
+ bin_instr(temp2, result.gp(), value.gp()); \
+ InsertBits(temp1, temp2, temp3, size); \
+ store_conditional(temp1, MemOperand(temp0, 0)); \
+ BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
+ sync(); \
+ } while (0)
+
+#define ATOMIC_BINOP_CASE(name, inst32, inst64) \
+ void LiftoffAssembler::Atomic##name( \
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
+ LiftoffRegister value, LiftoffRegister result, StoreType type) { \
+ LiftoffRegList pinned = \
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ MemOperand dst_op = \
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
+ Daddu(temp0, dst_op.rm(), dst_op.offset()); \
+ switch (type.value()) { \
+ case StoreType::kI64Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst64, 7); \
+ break; \
+ case StoreType::kI32Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3); \
+ break; \
+ case StoreType::kI64Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst64, 7); \
+ break; \
+ case StoreType::kI32Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3); \
+ break; \
+ case StoreType::kI64Store32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 32, inst64, 7); \
+ break; \
+ case StoreType::kI32Store: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case StoreType::kI64Store: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
+
+ATOMIC_BINOP_CASE(Add, Addu, Daddu)
+ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
+ATOMIC_BINOP_CASE(And, And, And)
+ATOMIC_BINOP_CASE(Or, Or, Or)
+ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ATOMIC_BINOP_CASE
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
+ do { \
+ Label exchange; \
+ sync(); \
+ bind(&exchange); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ mov(temp1, value.gp()); \
+ store_conditional(temp1, MemOperand(temp0, 0)); \
+ BranchShort(&exchange, eq, temp1, Operand(zero_reg)); \
+ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
+ size, aligned) \
+ do { \
+ Label exchange; \
+ andi(temp1, temp0, aligned); \
+ Dsubu(temp0, temp0, Operand(temp1)); \
+ sll(temp1, temp1, 3); \
+ sync(); \
+ bind(&exchange); \
+ load_linked(temp2, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp2, temp1, size, false); \
+ InsertBits(temp2, value.gp(), temp1, size); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
+ sync(); \
+ } while (0)
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Daddu(temp0, dst_op.rm(), dst_op.offset());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
+ break;
+ case StoreType::kI32Store8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
+ break;
+ case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
+ break;
+ case StoreType::kI32Store16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
+ break;
+ case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, 32, 7);
+ break;
+ case StoreType::kI32Store:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case StoreType::kI64Store:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ sync(); \
+ bind(&compareExchange); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
+ mov(temp2, new_value.gp()); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, size, aligned) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ andi(temp1, temp0, aligned); \
+ Dsubu(temp0, temp0, Operand(temp1)); \
+ sll(temp1, temp1, 3); \
+ sync(); \
+ bind(&compareExchange); \
+ load_linked(temp2, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp2, temp1, size, false); \
+ ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
+ BranchShort(&exit, ne, temp2, Operand(result.gp())); \
+ InsertBits(temp2, new_value.gp(), temp1, size); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ sync(); \
+ } while (0)
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ expected, new_value, result);
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Daddu(temp0, dst_op.rm(), dst_op.offset());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
+ break;
+ case StoreType::kI32Store8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
+ break;
+ case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
+ break;
+ case StoreType::kI32Store16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
+ break;
+ case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, 32, 7);
+ break;
+ case StoreType::kI32Store:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case StoreType::kI64Store:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
void LiftoffAssembler::AtomicFence() { sync(); }
@@ -750,7 +985,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
Sd(reg.gp(), dst);
break;
case kF32:
@@ -1077,7 +1311,7 @@ I64_SHIFTOP_I(shr, dsrl)
#undef I64_SHIFTOP
#undef I64_SHIFTOP_I
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
Dext(dst, src, 0, 32);
}
@@ -1113,7 +1347,26 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f32_copysign");
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ DoubleRegister scratch = rhs;
+ if (dst == rhs) {
+ scratch = kScratchDoubleReg;
+ Move_d(scratch, rhs);
+ }
+ if (dst != lhs) {
+ Move_d(dst, lhs);
+ }
+ binsli_w(dst.toW(), scratch.toW(), 0);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ mfc1(scratch1, lhs);
+ mfc1(scratch2, rhs);
+ srl(scratch2, scratch2, 31);
+ Ins(scratch1, scratch2, 31, 1);
+ mtc1(scratch1, dst);
+ }
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -1140,7 +1393,26 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f64_copysign");
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ DoubleRegister scratch = rhs;
+ if (dst == rhs) {
+ scratch = kScratchDoubleReg;
+ Move_d(scratch, rhs);
+ }
+ if (dst != lhs) {
+ Move_d(dst, lhs);
+ }
+ binsli_d(dst.toW(), scratch.toW(), 0);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ dmfc1(scratch1, lhs);
+ dmfc1(scratch2, rhs);
+ dsrl32(scratch2, scratch2, 31);
+ Dins(scratch1, scratch2, 63, 1);
+ dmtc1(scratch1, dst);
+ }
}
#define FP_BINOP(name, instruction) \
@@ -1375,56 +1647,157 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64ReinterpretI64:
dmtc1(src.gp(), dst.fp());
return true;
- case kExprI32SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ case kExprI32SConvertSatF32: {
+ // Other arches use round to zero here, so we follow.
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ trunc_w_s(kScratchDoubleReg, src.fp());
+ mfc1(dst.gp(), kScratchDoubleReg);
+ } else {
+ Label done;
+ mov(dst.gp(), zero_reg);
+ CompareIsNanF32(src.fp(), src.fp());
+ BranchTrueShortF(&done);
+ li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
+ TurboAssembler::Move(
+ kScratchDoubleReg,
+ static_cast<float>(std::numeric_limits<int32_t>::min()));
+ CompareF32(OLT, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&done);
+ trunc_w_s(kScratchDoubleReg, src.fp());
+ mfc1(dst.gp(), kScratchDoubleReg);
+ bind(&done);
+ }
return true;
- case kExprI32UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ }
+ case kExprI32UConvertSatF32: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ CompareF32(ULE, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
- case kExprI32SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ }
+ case kExprI32SConvertSatF64: {
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ trunc_w_d(kScratchDoubleReg, src.fp());
+ mfc1(dst.gp(), kScratchDoubleReg);
+ } else {
+ Label done;
+ mov(dst.gp(), zero_reg);
+ CompareIsNanF64(src.fp(), src.fp());
+ BranchTrueShortF(&done);
+ li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
+ TurboAssembler::Move(
+ kScratchDoubleReg,
+ static_cast<double>(std::numeric_limits<int32_t>::min()));
+ CompareF64(OLT, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&done);
+ trunc_w_d(kScratchDoubleReg, src.fp());
+ mfc1(dst.gp(), kScratchDoubleReg);
+ bind(&done);
+ }
return true;
- case kExprI32UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ }
+ case kExprI32UConvertSatF64: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ CompareF64(ULE, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
- case kExprI64SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ }
+ case kExprI64SConvertSatF32: {
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ trunc_l_s(kScratchDoubleReg, src.fp());
+ dmfc1(dst.gp(), kScratchDoubleReg);
+ } else {
+ Label done;
+ mov(dst.gp(), zero_reg);
+ CompareIsNanF32(src.fp(), src.fp());
+ BranchTrueShortF(&done);
+ li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
+ TurboAssembler::Move(
+ kScratchDoubleReg,
+ static_cast<float>(std::numeric_limits<int64_t>::min()));
+ CompareF32(OLT, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&done);
+ trunc_l_s(kScratchDoubleReg, src.fp());
+ dmfc1(dst.gp(), kScratchDoubleReg);
+ bind(&done);
+ }
return true;
- case kExprI64UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ }
+ case kExprI64UConvertSatF32: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ CompareF32(ULE, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
- case kExprI64SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ }
+ case kExprI64SConvertSatF64: {
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ trunc_l_d(kScratchDoubleReg, src.fp());
+ dmfc1(dst.gp(), kScratchDoubleReg);
+ } else {
+ Label done;
+ mov(dst.gp(), zero_reg);
+ CompareIsNanF64(src.fp(), src.fp());
+ BranchTrueShortF(&done);
+ li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
+ TurboAssembler::Move(
+ kScratchDoubleReg,
+ static_cast<double>(std::numeric_limits<int64_t>::min()));
+ CompareF64(OLT, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&done);
+ trunc_l_d(kScratchDoubleReg, src.fp());
+ dmfc1(dst.gp(), kScratchDoubleReg);
+ bind(&done);
+ }
return true;
- case kExprI64UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ }
+ case kExprI64UConvertSatF64: {
+ Label isnan_or_lessthan_or_equal_zero;
+ mov(dst.gp(), zero_reg);
+ TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ CompareF64(ULE, src.fp(), kScratchDoubleReg);
+ BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
+ Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
+ bind(&isnan_or_lessthan_or_equal_zero);
return true;
+ }
default:
return false;
}
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- bailout(kComplexOperation, "i32_signextend_i8");
+ seb(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- bailout(kComplexOperation, "i32_signextend_i16");
+ seh(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i8");
+ seb(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i16");
+ seh(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kComplexOperation, "i64_signextend_i32");
+ sll(dst.gp(), src.gp(), 0);
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -3129,15 +3502,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
daddiu(sp, sp, gp_offset);
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 1de2817563..cb4efb7dd7 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -31,8 +31,8 @@ namespace liftoff {
// 1 | previous frame (fp)|
// 0 | const pool (r28) | if const pool is enabled
// -----+--------------------+ <-- frame ptr (fp) or cp
-// -1 | 0xa: WASM |
-// -2 | instance |
+// -1 | StackFrame::WASM |
+// -2 | instance |
// -3 | feedback vector |
// -4 | tiering budget |
// -----+--------------------+---------------------------
@@ -300,10 +300,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- LoadU64(dst, liftoff::GetInstanceOperand(), r0);
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -815,7 +811,6 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
case kRef:
case kRtt:
case kOptRef:
- case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(fp, offset), r0);
break;
@@ -890,7 +885,6 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
case kRef:
case kRtt:
case kOptRef:
- case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(sp, offset), r0);
break;
@@ -969,7 +963,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
StoreU64(reg.gp(), liftoff::GetStackSlot(offset), r0);
break;
case kF32:
@@ -1018,7 +1011,6 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
LoadU64(reg.gp(), liftoff::GetStackSlot(offset), r0);
break;
case kF32:
@@ -1120,7 +1112,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
LFR_TO_REG, LFR_TO_REG, USE, , void) \
V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(u32_to_intptr, ZeroExtWord32, Register, Register, , , USE, , void) \
+ V(u32_to_uintptr, ZeroExtWord32, Register, Register, , , USE, , void) \
V(i32_signextend_i8, extsb, Register, Register, , , USE, , void) \
V(i32_signextend_i16, extsh, Register, Register, , , USE, , void) \
V(i64_signextend_i8, extsb, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
@@ -1602,7 +1594,6 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case kI64:
@@ -2902,15 +2893,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
MultiPop(regs.GetGpList());
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetLastRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
@@ -2997,7 +2987,6 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
LoadU64(result_reg->gp(), MemOperand(sp));
break;
case kF32:
@@ -3077,7 +3066,6 @@ void LiftoffStackSlots::Construct(int param_slots) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
case kI64: {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
UseScratchRegisterScope temps(asm_);
@@ -3120,7 +3108,6 @@ void LiftoffStackSlots::Construct(int param_slots) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
asm_->push(src.reg().gp());
break;
case kF32:
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index e53797ff74..cb6851d663 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -55,7 +55,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
+// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
// -4 | tiering budget |
@@ -123,19 +123,19 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
MemOperand dst(base, offset);
switch (kind) {
case kI32:
- assm->Usw(src.gp(), dst);
+ assm->Sw(src.gp(), dst);
break;
case kI64:
case kOptRef:
case kRef:
case kRtt:
- assm->Usd(src.gp(), dst);
+ assm->Sd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst, kScratchReg);
+ assm->StoreFloat(src.fp(), dst);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst, kScratchReg);
+ assm->StoreDouble(src.fp(), dst);
break;
default:
UNREACHABLE();
@@ -473,10 +473,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- Ld(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -539,27 +535,27 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
- TurboAssembler::Ulhu(dst.gp(), src_op);
+ TurboAssembler::Lhu(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
- TurboAssembler::Ulh(dst.gp(), src_op);
+ TurboAssembler::Lh(dst.gp(), src_op);
break;
case LoadType::kI64Load32U:
- TurboAssembler::Ulwu(dst.gp(), src_op);
+ TurboAssembler::Lwu(dst.gp(), src_op);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
- TurboAssembler::Ulw(dst.gp(), src_op);
+ TurboAssembler::Lw(dst.gp(), src_op);
break;
case LoadType::kI64Load:
- TurboAssembler::Uld(dst.gp(), src_op);
+ TurboAssembler::Ld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
+ TurboAssembler::LoadFloat(dst.fp(), src_op);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
+ TurboAssembler::LoadDouble(dst.fp(), src_op);
break;
case LoadType::kS128Load: {
VU.set(kScratchReg, E8, m1);
@@ -610,20 +606,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
- TurboAssembler::Ush(src.gp(), dst_op);
+ TurboAssembler::Sh(src.gp(), dst_op);
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
- TurboAssembler::Usw(src.gp(), dst_op);
+ TurboAssembler::Sw(src.gp(), dst_op);
break;
case StoreType::kI64Store:
- TurboAssembler::Usd(src.gp(), dst_op);
+ TurboAssembler::Sd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
+ TurboAssembler::StoreFloat(src.fp(), dst_op);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
+ TurboAssembler::StoreDouble(src.fp(), dst_op);
break;
case StoreType::kS128Store: {
VU.set(kScratchReg, E8, m1);
@@ -990,7 +986,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
Sd(reg.gp(), dst);
break;
case kF32:
@@ -1327,7 +1322,7 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
addw(dst, src, zero_reg);
}
@@ -1734,16 +1729,12 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
if (memtype == MachineType::Int32()) {
VU.set(kScratchReg, E32, m1);
Lwu(scratch, src_op);
- li(kScratchReg, 0x1 << 0);
- vmv_sx(v0, kScratchReg);
- vmerge_vx(dst_v, scratch, dst_v);
+ vmv_sx(dst_v, scratch);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
VU.set(kScratchReg, E64, m1);
Ld(scratch, src_op);
- li(kScratchReg, 0x1 << 0);
- vmv_sx(v0, kScratchReg);
- vmerge_vx(dst_v, scratch, dst_v);
+ vmv_sx(dst_v, scratch);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
@@ -1849,13 +1840,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
VU.set(kScratchReg, VSew::E64, Vlmul::m1);
- li(kScratchReg, 1);
- vmv_vx(v0, kScratchReg);
- li(kScratchReg, imm1);
- vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
li(kScratchReg, imm2);
- vsll_vi(v0, v0, 1);
- vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vslideup_vi(kSimd128ScratchReg, kSimd128ScratchReg2, 1);
+ li(kScratchReg, imm1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
VU.set(kScratchReg, E8, m1);
VRegister temp =
@@ -1877,7 +1866,22 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_popcnt");
+ VRegister src_v = src.fp().toV();
+ VRegister dst_v = dst.fp().toV();
+ Label t;
+
+ VU.set(kScratchReg, E8, m1);
+ vmv_vv(kSimd128ScratchReg, src_v);
+ vmv_vv(dst_v, kSimd128RegZero);
+
+ bind(&t);
+ vmsne_vv(v0, kSimd128ScratchReg, kSimd128RegZero);
+ vadd_vi(dst_v, dst_v, 1, Mask);
+ vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, Mask);
+ vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2);
+ // kScratchReg = -1 if kSimd128ScratchReg == 0 i.e. no active element
+ vfirst_m(kScratchReg, kSimd128ScratchReg);
+ bgez(kScratchReg, &t);
}
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -3575,7 +3579,7 @@ void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- TurboAssembler::Uld(limit_address, MemOperand(limit_address));
+ TurboAssembler::Ld(limit_address, MemOperand(limit_address));
TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
@@ -3638,15 +3642,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
Add64(sp, sp, Operand(gp_offset));
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index abd3462050..9b7abea5f3 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -71,8 +71,8 @@ inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM |
-// -2 | instance |
+// -1 | StackFrame::WASM |
+// -2 | instance |
// -3 | feedback vector |
// -4 | tiering budget |
// -----+--------------------+---------------------------
@@ -277,10 +277,6 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- LoadU64(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -1251,7 +1247,6 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
case kRef:
case kRtt:
case kOptRef:
- case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(fp, offset));
break;
@@ -1330,7 +1325,6 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
case kRef:
case kRtt:
case kOptRef:
- case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(sp, offset));
break;
@@ -1432,7 +1426,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
StoreU64(reg.gp(), liftoff::GetStackSlot(offset));
break;
case kF32:
@@ -1483,7 +1476,6 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
LoadU64(reg.gp(), liftoff::GetStackSlot(offset));
break;
case kF32:
@@ -1565,7 +1557,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
LFR_TO_REG, USE, true, bool) \
- V(u32_to_intptr, LoadU32, Register, Register, , , USE, , void) \
+ V(u32_to_uintptr, LoadU32, Register, Register, , , USE, , void) \
V(i32_signextend_i8, lbr, Register, Register, , , USE, , void) \
V(i32_signextend_i16, lhr, Register, Register, , , USE, , void) \
V(i64_signextend_i8, lgbr, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
@@ -2142,7 +2134,6 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case kI64:
@@ -2265,95 +2256,112 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_RR_LIST(V) \
- V(f64x2_add, F64x2Add, fp) \
- V(f64x2_sub, F64x2Sub, fp) \
- V(f64x2_mul, F64x2Mul, fp) \
- V(f64x2_div, F64x2Div, fp) \
- V(f64x2_min, F64x2Min, fp) \
- V(f64x2_max, F64x2Max, fp) \
- V(f64x2_eq, F64x2Eq, fp) \
- V(f64x2_ne, F64x2Ne, fp) \
- V(f64x2_lt, F64x2Lt, fp) \
- V(f64x2_le, F64x2Le, fp) \
- V(f32x4_add, F32x4Add, fp) \
- V(f32x4_sub, F32x4Sub, fp) \
- V(f32x4_mul, F32x4Mul, fp) \
- V(f32x4_div, F32x4Div, fp) \
- V(f32x4_min, F32x4Min, fp) \
- V(f32x4_max, F32x4Max, fp) \
- V(f32x4_eq, F32x4Eq, fp) \
- V(f32x4_ne, F32x4Ne, fp) \
- V(f32x4_lt, F32x4Lt, fp) \
- V(f32x4_le, F32x4Le, fp) \
- V(i64x2_add, I64x2Add, fp) \
- V(i64x2_sub, I64x2Sub, fp) \
- V(i64x2_mul, I64x2Mul, fp) \
- V(i64x2_eq, I64x2Eq, fp) \
- V(i64x2_ne, I64x2Ne, fp) \
- V(i64x2_gt_s, I64x2GtS, fp) \
- V(i64x2_ge_s, I64x2GeS, fp) \
- V(i64x2_shl, I64x2Shl, gp) \
- V(i64x2_shr_s, I64x2ShrS, gp) \
- V(i64x2_shr_u, I64x2ShrU, gp) \
- V(i32x4_add, I32x4Add, fp) \
- V(i32x4_sub, I32x4Sub, fp) \
- V(i32x4_mul, I32x4Mul, fp) \
- V(i32x4_eq, I32x4Eq, fp) \
- V(i32x4_ne, I32x4Ne, fp) \
- V(i32x4_gt_s, I32x4GtS, fp) \
- V(i32x4_ge_s, I32x4GeS, fp) \
- V(i32x4_gt_u, I32x4GtU, fp) \
- V(i32x4_ge_u, I32x4GeU, fp) \
- V(i32x4_min_s, I32x4MinS, fp) \
- V(i32x4_min_u, I32x4MinU, fp) \
- V(i32x4_max_s, I32x4MaxS, fp) \
- V(i32x4_max_u, I32x4MaxU, fp) \
- V(i32x4_shl, I32x4Shl, gp) \
- V(i32x4_shr_s, I32x4ShrS, gp) \
- V(i32x4_shr_u, I32x4ShrU, gp) \
- V(i16x8_add, I16x8Add, fp) \
- V(i16x8_sub, I16x8Sub, fp) \
- V(i16x8_mul, I16x8Mul, fp) \
- V(i16x8_eq, I16x8Eq, fp) \
- V(i16x8_ne, I16x8Ne, fp) \
- V(i16x8_gt_s, I16x8GtS, fp) \
- V(i16x8_ge_s, I16x8GeS, fp) \
- V(i16x8_gt_u, I16x8GtU, fp) \
- V(i16x8_ge_u, I16x8GeU, fp) \
- V(i16x8_min_s, I16x8MinS, fp) \
- V(i16x8_min_u, I16x8MinU, fp) \
- V(i16x8_max_s, I16x8MaxS, fp) \
- V(i16x8_max_u, I16x8MaxU, fp) \
- V(i16x8_shl, I16x8Shl, gp) \
- V(i16x8_shr_s, I16x8ShrS, gp) \
- V(i16x8_shr_u, I16x8ShrU, gp) \
- V(i8x16_add, I8x16Add, fp) \
- V(i8x16_sub, I8x16Sub, fp) \
- V(i8x16_eq, I8x16Eq, fp) \
- V(i8x16_ne, I8x16Ne, fp) \
- V(i8x16_gt_s, I8x16GtS, fp) \
- V(i8x16_ge_s, I8x16GeS, fp) \
- V(i8x16_gt_u, I8x16GtU, fp) \
- V(i8x16_ge_u, I8x16GeU, fp) \
- V(i8x16_min_s, I8x16MinS, fp) \
- V(i8x16_min_u, I8x16MinU, fp) \
- V(i8x16_max_s, I8x16MaxS, fp) \
- V(i8x16_max_u, I8x16MaxU, fp) \
- V(i8x16_shl, I8x16Shl, gp) \
- V(i8x16_shr_s, I8x16ShrS, gp) \
- V(i8x16_shr_u, I8x16ShrU, gp)
-
-#define EMIT_SIMD_BINOP_RR(name, op, stype) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add) \
+ V(f64x2_sub, F64x2Sub) \
+ V(f64x2_mul, F64x2Mul) \
+ V(f64x2_div, F64x2Div) \
+ V(f64x2_min, F64x2Min) \
+ V(f64x2_max, F64x2Max) \
+ V(f64x2_eq, F64x2Eq) \
+ V(f64x2_ne, F64x2Ne) \
+ V(f64x2_lt, F64x2Lt) \
+ V(f64x2_le, F64x2Le) \
+ V(f64x2_pmin, F64x2Pmin) \
+ V(f64x2_pmax, F64x2Pmax) \
+ V(f32x4_add, F32x4Add) \
+ V(f32x4_sub, F32x4Sub) \
+ V(f32x4_mul, F32x4Mul) \
+ V(f32x4_div, F32x4Div) \
+ V(f32x4_min, F32x4Min) \
+ V(f32x4_max, F32x4Max) \
+ V(f32x4_eq, F32x4Eq) \
+ V(f32x4_ne, F32x4Ne) \
+ V(f32x4_lt, F32x4Lt) \
+ V(f32x4_le, F32x4Le) \
+ V(f32x4_pmin, F32x4Pmin) \
+ V(f32x4_pmax, F32x4Pmax) \
+ V(i64x2_add, I64x2Add) \
+ V(i64x2_sub, I64x2Sub) \
+ V(i64x2_eq, I64x2Eq) \
+ V(i64x2_ne, I64x2Ne) \
+ V(i64x2_gt_s, I64x2GtS) \
+ V(i64x2_ge_s, I64x2GeS) \
+ V(i32x4_add, I32x4Add) \
+ V(i32x4_sub, I32x4Sub) \
+ V(i32x4_mul, I32x4Mul) \
+ V(i32x4_eq, I32x4Eq) \
+ V(i32x4_ne, I32x4Ne) \
+ V(i32x4_gt_s, I32x4GtS) \
+ V(i32x4_ge_s, I32x4GeS) \
+ V(i32x4_gt_u, I32x4GtU) \
+ V(i32x4_min_s, I32x4MinS) \
+ V(i32x4_min_u, I32x4MinU) \
+ V(i32x4_max_s, I32x4MaxS) \
+ V(i32x4_max_u, I32x4MaxU) \
+ V(i16x8_add, I16x8Add) \
+ V(i16x8_sub, I16x8Sub) \
+ V(i16x8_mul, I16x8Mul) \
+ V(i16x8_eq, I16x8Eq) \
+ V(i16x8_ne, I16x8Ne) \
+ V(i16x8_gt_s, I16x8GtS) \
+ V(i16x8_ge_s, I16x8GeS) \
+ V(i16x8_gt_u, I16x8GtU) \
+ V(i16x8_min_s, I16x8MinS) \
+ V(i16x8_min_u, I16x8MinU) \
+ V(i16x8_max_s, I16x8MaxS) \
+ V(i16x8_max_u, I16x8MaxU) \
+ V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
+ V(i8x16_add, I8x16Add) \
+ V(i8x16_sub, I8x16Sub) \
+ V(i8x16_eq, I8x16Eq) \
+ V(i8x16_ne, I8x16Ne) \
+ V(i8x16_gt_s, I8x16GtS) \
+ V(i8x16_ge_s, I8x16GeS) \
+ V(i8x16_gt_u, I8x16GtU) \
+ V(i8x16_min_s, I8x16MinS) \
+ V(i8x16_min_u, I8x16MinU) \
+ V(i8x16_max_s, I8x16MaxS) \
+ V(i8x16_max_u, I8x16MaxU) \
+ V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
+ V(s128_and, S128And) \
+ V(s128_or, S128Or) \
+ V(s128_xor, S128Xor) \
+ V(s128_and_not, S128AndNot)
+
+#define EMIT_SIMD_BINOP_RR(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.stype()); \
+ op(dst.fp(), lhs.fp(), rhs.fp()); \
}
SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
#undef EMIT_SIMD_BINOP_RR
#undef SIMD_BINOP_RR_LIST
-#define SIMD_BINOP_RI_LIST(V) \
+#define SIMD_SHIFT_RR_LIST(V) \
+ V(i64x2_shl, I64x2Shl) \
+ V(i64x2_shr_s, I64x2ShrS) \
+ V(i64x2_shr_u, I64x2ShrU) \
+ V(i32x4_shl, I32x4Shl) \
+ V(i32x4_shr_s, I32x4ShrS) \
+ V(i32x4_shr_u, I32x4ShrU) \
+ V(i16x8_shl, I16x8Shl) \
+ V(i16x8_shr_s, I16x8ShrS) \
+ V(i16x8_shr_u, I16x8ShrU) \
+ V(i8x16_shl, I8x16Shl) \
+ V(i8x16_shr_s, I8x16ShrS) \
+ V(i8x16_shr_u, I8x16ShrU)
+
+#define EMIT_SIMD_SHIFT_RR(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ op(dst.fp(), lhs.fp(), rhs.gp(), kScratchDoubleReg); \
+ }
+SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
+#undef EMIT_SIMD_SHIFT_RR
+#undef SIMD_SHIFT_RR_LIST
+
+#define SIMD_SHIFT_RI_LIST(V) \
V(i64x2_shli, I64x2Shl) \
V(i64x2_shri_s, I64x2ShrS) \
V(i64x2_shri_u, I64x2ShrU) \
@@ -2367,27 +2375,63 @@ SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
V(i8x16_shri_s, I8x16ShrS) \
V(i8x16_shri_u, I8x16ShrU)
-#define EMIT_SIMD_BINOP_RI(name, op) \
+#define EMIT_SIMD_SHIFT_RI(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t rhs) { \
- op(dst.fp(), lhs.fp(), Operand(rhs)); \
+ op(dst.fp(), lhs.fp(), Operand(rhs), r0, kScratchDoubleReg); \
}
-SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
-#undef EMIT_SIMD_BINOP_RI
-#undef SIMD_BINOP_RI_LIST
-
-#define SIMD_UNOP_LIST(V) \
- V(f64x2_splat, F64x2Splat, fp, fp) \
- V(f32x4_splat, F32x4Splat, fp, fp) \
- V(i64x2_splat, I64x2Splat, fp, gp) \
- V(i32x4_splat, I32x4Splat, fp, gp) \
- V(i16x8_splat, I16x8Splat, fp, gp) \
- V(i8x16_splat, I8x16Splat, fp, gp)
-
-#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
- LiftoffRegister src) { \
- op(dst.dtype(), src.stype()); \
+SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
+#undef EMIT_SIMD_SHIFT_RI
+#undef SIMD_SHIFT_RI_LIST
+
+#define SIMD_UNOP_LIST(V) \
+ V(f64x2_splat, F64x2Splat, fp, fp, , void) \
+ V(f64x2_abs, F64x2Abs, fp, fp, , void) \
+ V(f64x2_neg, F64x2Neg, fp, fp, , void) \
+ V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void) \
+ V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool) \
+ V(f64x2_floor, F64x2Floor, fp, fp, true, bool) \
+ V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool) \
+ V(f64x2_nearest_int, F64x2NearestInt, fp, fp, true, bool) \
+ V(f32x4_abs, F32x4Abs, fp, fp, , void) \
+ V(f32x4_splat, F32x4Splat, fp, fp, , void) \
+ V(f32x4_neg, F32x4Neg, fp, fp, , void) \
+ V(f32x4_sqrt, F32x4Sqrt, fp, fp, , void) \
+ V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool) \
+ V(f32x4_floor, F32x4Floor, fp, fp, true, bool) \
+ V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool) \
+ V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool) \
+ V(i64x2_abs, I64x2Abs, fp, fp, , void) \
+ V(i64x2_splat, I64x2Splat, fp, gp, , void) \
+ V(i64x2_neg, I64x2Neg, fp, fp, , void) \
+ V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, fp, fp, , void) \
+ V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, fp, fp, , void) \
+ V(i64x2_uconvert_i32x4_low, I64x2UConvertI32x4Low, fp, fp, , void) \
+ V(i64x2_uconvert_i32x4_high, I64x2UConvertI32x4High, fp, fp, , void) \
+ V(i32x4_abs, I32x4Abs, fp, fp, , void) \
+ V(i32x4_neg, I32x4Neg, fp, fp, , void) \
+ V(i32x4_splat, I32x4Splat, fp, gp, , void) \
+ V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, fp, fp, , void) \
+ V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, fp, fp, , void) \
+ V(i32x4_uconvert_i16x8_low, I32x4UConvertI16x8Low, fp, fp, , void) \
+ V(i32x4_uconvert_i16x8_high, I32x4UConvertI16x8High, fp, fp, , void) \
+ V(i16x8_abs, I16x8Abs, fp, fp, , void) \
+ V(i16x8_neg, I16x8Neg, fp, fp, , void) \
+ V(i16x8_splat, I16x8Splat, fp, gp, , void) \
+ V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, fp, fp, , void) \
+ V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, fp, fp, , void) \
+ V(i16x8_uconvert_i8x16_low, I16x8UConvertI8x16Low, fp, fp, , void) \
+ V(i16x8_uconvert_i8x16_high, I16x8UConvertI8x16High, fp, fp, , void) \
+ V(i8x16_abs, I8x16Abs, fp, fp, , void) \
+ V(i8x16_neg, I8x16Neg, fp, fp, , void) \
+ V(i8x16_splat, I8x16Splat, fp, gp, , void) \
+ V(s128_not, S128Not, fp, fp, , void)
+
+#define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
+ return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.dtype(), src.stype()); \
+ return return_val; \
}
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
@@ -2406,7 +2450,7 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#define EMIT_SIMD_EXTRACT_LANE(name, op, dtype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
uint8_t imm_lane_idx) { \
- op(dst.dtype(), src.fp(), imm_lane_idx); \
+ op(dst.dtype(), src.fp(), imm_lane_idx, r0); \
}
SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
#undef EMIT_SIMD_EXTRACT_LANE
@@ -2424,344 +2468,307 @@ SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
void LiftoffAssembler::emit_##name( \
LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \
- op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx); \
+ op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx, r0); \
}
SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
#undef EMIT_SIMD_REPLACE_LANE
#undef SIMD_REPLACE_LANE_LIST
+#define SIMD_EXT_MUL_LIST(V) \
+ V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S) \
+ V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U) \
+ V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
+ V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
+ V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S) \
+ V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U) \
+ V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
+ V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
+ V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S) \
+ V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U) \
+ V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
+ V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U)
+
+#define EMIT_SIMD_EXT_MUL(name, op) \
+ void LiftoffAssembler::emit_##name( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ op(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg); \
+ }
+SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
+#undef EMIT_SIMD_EXT_MUL
+#undef SIMD_EXT_MUL_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+ V(i64x2_alltrue, I64x2AllTrue) \
+ V(i32x4_alltrue, I32x4AllTrue) \
+ V(i16x8_alltrue, I16x8AllTrue) \
+ V(i8x16_alltrue, I8x16AllTrue)
+
+#define EMIT_SIMD_ALL_TRUE(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.gp(), src.fp(), r0, kScratchDoubleReg); \
+ }
+SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
+#undef EMIT_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
+#define SIMD_ADD_SUB_SAT_LIST(V) \
+ V(i16x8_add_sat_s, I16x8AddSatS) \
+ V(i16x8_sub_sat_s, I16x8SubSatS) \
+ V(i16x8_add_sat_u, I16x8AddSatU) \
+ V(i16x8_sub_sat_u, I16x8SubSatU) \
+ V(i8x16_add_sat_s, I8x16AddSatS) \
+ V(i8x16_sub_sat_s, I8x16SubSatS) \
+ V(i8x16_add_sat_u, I8x16AddSatU) \
+ V(i8x16_sub_sat_u, I8x16SubSatU)
+
+#define EMIT_SIMD_ADD_SUB_SAT(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ Simd128Register src1 = lhs.fp(); \
+ Simd128Register src2 = rhs.fp(); \
+ Simd128Register dest = dst.fp(); \
+ /* lhs and rhs are unique based on their selection under liftoff-compiler \
+ * `EmitBinOp`. */ \
+ /* Make sure dst and temp are also unique. */ \
+ if (dest == src1 || dest == src2) { \
+ dest = \
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src1, src2)).fp(); \
+ } \
+ Simd128Register temp = \
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1, src2)) \
+ .fp(); \
+ op(dest, src1, src2, kScratchDoubleReg, temp); \
+ /* Original dst register needs to be populated. */ \
+ if (dest != dst.fp()) { \
+ vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
+ } \
+ }
+SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
+#undef EMIT_SIMD_ADD_SUB_SAT
+#undef SIMD_ADD_SUB_SAT_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
+ V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
+ V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
+ V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
+
+#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ Simd128Register src1 = src.fp(); \
+ Simd128Register dest = dst.fp(); \
+ /* Make sure dst and temp are unique. */ \
+ if (dest == src1) { \
+ dest = GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src1)).fp(); \
+ } \
+ Simd128Register temp = \
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1)).fp(); \
+ op(dest, src1, kScratchDoubleReg, temp); \
+ if (dest != dst.fp()) { \
+ vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
+ } \
+ }
+SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
+#undef EMIT_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
- bailout(kSimd, "Load transform unimplemented");
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand src_op =
+ MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ LoadAndExtend8x8SLE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Uint8()) {
+ LoadAndExtend8x8ULE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Int16()) {
+ LoadAndExtend16x4SLE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Uint16()) {
+ LoadAndExtend16x4ULE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Int32()) {
+ LoadAndExtend32x2SLE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Uint32()) {
+ LoadAndExtend32x2ULE(dst.fp(), src_op, r1);
+ }
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ LoadV32ZeroLE(dst.fp(), src_op, r1);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ LoadV64ZeroLE(dst.fp(), src_op, r1);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ LoadAndSplat8x16LE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Int16()) {
+ LoadAndSplat16x8LE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Int32()) {
+ LoadAndSplat32x4LE(dst.fp(), src_op, r1);
+ } else if (memtype == MachineType::Int64()) {
+ LoadAndSplat64x2LE(dst.fp(), src_op, r1);
+ }
+ }
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand src_op =
+ MemOperand(addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+
+ MachineType mem_type = type.mem_type();
+ if (dst != src) {
+ vlr(dst.fp(), src.fp(), Condition(0), Condition(0), Condition(0));
+ }
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ if (mem_type == MachineType::Int8()) {
+ LoadLane8LE(dst.fp(), src_op, 15 - laneidx, r1);
+ } else if (mem_type == MachineType::Int16()) {
+ LoadLane16LE(dst.fp(), src_op, 7 - laneidx, r1);
+ } else if (mem_type == MachineType::Int32()) {
+ LoadLane32LE(dst.fp(), src_op, 3 - laneidx, r1);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), mem_type);
+ LoadLane64LE(dst.fp(), src_op, 1 - laneidx, r1);
+ }
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
uint32_t* protected_store_pc) {
- bailout(kSimd, "store lane");
-}
-
-void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_swizzle");
-}
-
-void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f64x2_abs");
-}
-
-void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f64x2neg");
-}
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset != no_reg) {
+ AddS64(ip, offset);
+ }
+ offset = ip;
+ offset_imm = 0;
+ }
+ MemOperand dst_op =
+ MemOperand(dst, offset == no_reg ? r0 : offset, offset_imm);
-void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f64x2sqrt");
-}
+ if (protected_store_pc) *protected_store_pc = pc_offset();
-bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.ceil");
- return true;
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ StoreLane8LE(src.fp(), dst_op, 15 - lane, r1);
+ } else if (rep == MachineRepresentation::kWord16) {
+ StoreLane16LE(src.fp(), dst_op, 7 - lane, r1);
+ } else if (rep == MachineRepresentation::kWord32) {
+ StoreLane32LE(src.fp(), dst_op, 3 - lane, r1);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ StoreLane64LE(src.fp(), dst_op, 1 - lane, r1);
+ }
}
-bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.floor");
- return true;
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), r0, r1, ip);
}
-bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.trunc");
- return true;
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I32x4GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
-bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.nearest_int");
- return true;
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I16x8GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
-void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "pmin unimplemented");
+ I8x16GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
-void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmax unimplemented");
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Simd128Register src1 = lhs.fp();
+ Simd128Register src2 = rhs.fp();
+ Simd128Register dest = dst.fp();
+ Simd128Register temp =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1, src2)).fp();
+ I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg, temp);
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_s");
+ F64x2ConvertLowI32x4S(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_u");
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.promote_low_f32x4");
-}
-
-void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_abs");
-}
-
-void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f32x4neg");
-}
-
-void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f32x4sqrt");
-}
-
-bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4.ceil");
- return true;
-}
-
-bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4.floor");
- return true;
-}
-
-bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4.trunc");
- return true;
-}
-
-bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4.nearest_int");
- return true;
-}
-
-void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmin unimplemented");
-}
-
-void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmax unimplemented");
-}
-
-void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64x2neg");
-}
-
-void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_alltrue");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_low_i32x4_s unsupported");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_low_i32x4_u unsupported");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_high_i32x4_s unsupported");
+ F64x2PromoteLowF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
}
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
-}
-
-void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_low");
-}
-
-void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_high");
-}
-
-void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_low");
-}
-
-void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_high");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4neg");
-}
-
-void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4_alltrue");
+ I64x2BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4_bitmask");
+ I32x4BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_dot_i16x8_s");
-}
-
-void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
-}
-
-void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_low_i16x8_s unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_low_i16x8_u unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_high_i16x8_s unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8neg");
-}
-
-void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8_alltrue");
+ I32x4DotI16x8S(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8_bitmask");
-}
-
-void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
-}
-
-void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
-}
-
-void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
-}
-
-void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
-}
-
-void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
-}
-
-void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_low_i8x16_s unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_low_i8x16_u unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
+ I16x8BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- bailout(kSimd, "i16x8_q15mulr_sat_s");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8_extmul_high_i8x16_u unsupported");
+ Simd128Register s1 = src1.fp();
+ Simd128Register s2 = src2.fp();
+ Simd128Register dest = dst.fp();
+ // Make sure temp registers are unique.
+ Simd128Register temp1 =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, s1, s2)).fp();
+ Simd128Register temp2 =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, s1, s2, temp1))
+ .fp();
+ I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -2769,224 +2776,118 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "i8x16_shuffle");
+ // Remap the shuffle indices to match IBM lane numbering.
+ // TODO(miladfarca): Put this in a function and share it with the instrction
+ // selector.
+ int max_index = 15;
+ int total_lane_count = 2 * kSimd128Size;
+ uint8_t shuffle_remapped[kSimd128Size];
+ for (int i = 0; i < kSimd128Size; i++) {
+ uint8_t current_index = shuffle[i];
+ shuffle_remapped[i] = (current_index <= max_index
+ ? max_index - current_index
+ : total_lane_count - current_index + max_index);
+ }
+ uint64_t vals[2];
+ memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
+#ifdef V8_TARGET_BIG_ENDIAN
+ vals[0] = ByteReverse(vals[0]);
+ vals[1] = ByteReverse(vals[1]);
+#endif
+ I8x16Shuffle(dst.fp(), lhs.fp(), rhs.fp(), vals[1], vals[0], r0, ip,
+ kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i8x16.popcnt");
-}
-
-void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i8x16neg");
+ I8x16Popcnt(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v8x16_anytrue");
-}
-
-void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i8x16_alltrue");
+ V128AnyTrue(dst.gp(), src.fp(), r0);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i8x16_bitmask");
-}
-
-void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
-}
-
-void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
-}
-
-void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
-}
-
-void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
+ I8x16BitMask(dst.gp(), src.fp(), r0, ip, kScratchDoubleReg);
}
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kUnsupportedArchitecture, "emit_s128_const");
-}
-
-void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_s128_not");
-}
-
-void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_and");
-}
-
-void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_or");
-}
-
-void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_xor");
+ uint64_t vals[2];
+ memcpy(vals, imms, sizeof(vals));
+#ifdef V8_TARGET_BIG_ENDIAN
+ vals[0] = ByteReverse(vals[0]);
+ vals[1] = ByteReverse(vals[1]);
+#endif
+ S128Const(dst.fp(), vals[1], vals[0], r0, ip);
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kUnsupportedArchitecture, "emit_s128select");
+ S128Select(dst.fp(), src1.fp(), src2.fp(), mask.fp());
}
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4_sconvert_f32x4");
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4_uconvert_f32x4");
+ I32x4UConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
}
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4_sconvert_i32x4");
+ F32x4SConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
}
void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4_uconvert_i32x4");
+ F32x4UConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
}
void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4.demote_f64x2_zero");
+ F32x4DemoteF64x2Zero(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
}
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_sconvert_i16x8");
+ I8x16SConvertI16x8(dst.fp(), lhs.fp(), rhs.fp());
}
void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_uconvert_i16x8");
+ I8x16UConvertI16x8(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i32x4");
+ I16x8SConvertI32x4(dst.fp(), lhs.fp(), rhs.fp());
}
void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i32x4");
-}
-
-void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_low");
-}
-
-void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_high");
-}
-
-void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_low");
-}
-
-void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_high");
-}
-
-void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_low");
-}
-
-void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_high");
-}
-
-void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_low");
-}
-
-void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
+ I16x8UConvertI32x4(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
-}
-
-void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_and_not");
-}
-
-void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_rounding_average_u");
-}
-
-void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
-}
-
-void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_abs");
-}
-
-void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_abs");
-}
-
-void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
-}
-
-void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2.abs");
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -3015,15 +2916,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
MultiPop(regs.GetGpList());
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetLastRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
@@ -3110,7 +3010,6 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
LoadU64(result_reg->gp(), MemOperand(sp));
break;
case kF32:
@@ -3216,7 +3115,6 @@ void LiftoffStackSlots::Construct(int param_slots) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
case kI64: {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
UseScratchRegisterScope temps(asm_);
@@ -3264,7 +3162,6 @@ void LiftoffStackSlots::Construct(int param_slots) {
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
asm_->push(src.reg().gp());
break;
case kF32:
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 895ba42c86..960e7ba273 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -53,14 +53,15 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
constexpr Register kScratchRegister2 = r11;
static_assert(kScratchRegister != kScratchRegister2, "collision");
static_assert((kLiftoffAssemblerGpCacheRegs &
- Register::ListOf(kScratchRegister, kScratchRegister2)) == 0,
+ RegList{kScratchRegister, kScratchRegister2})
+ .is_empty(),
"scratch registers must not be used as cache registers");
constexpr DoubleRegister kScratchDoubleReg2 = xmm14;
static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision");
static_assert((kLiftoffAssemblerFpCacheRegs &
- DoubleRegister::ListOf(kScratchDoubleReg, kScratchDoubleReg2)) ==
- 0,
+ DoubleRegList{kScratchDoubleReg, kScratchDoubleReg2})
+ .is_empty(),
"scratch registers must not be used as cache registers");
// rbp-8 holds the stack marker, rbp-16 is the instance parameter.
@@ -99,7 +100,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->movq(dst.gp(), src);
break;
case kF32:
@@ -128,7 +128,6 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
assm->StoreTaggedField(dst, src.gp());
break;
case kF32:
@@ -375,10 +374,6 @@ void LiftoffAssembler::ResetOSRTarget() {
movq(liftoff::GetOSRTargetSlot(), Immediate(0));
}
-void LiftoffAssembler::FillInstanceInto(Register dst) {
- movq(dst, liftoff::GetInstanceOperand());
-}
-
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
@@ -603,8 +598,10 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- DCHECK(!cache_state()->is_used(result));
- if (cache_state()->is_used(value)) {
+ LiftoffRegList dont_overwrite = cache_state()->used_registers |
+ LiftoffRegList::ForRegs(dst_addr, offset_reg);
+ DCHECK(!dont_overwrite.has(result));
+ if (dont_overwrite.has(value)) {
// We cannot overwrite {value}, but the {value} register is changed in the
// code we generate. Therefore we copy {value} to {result} and use the
// {result} register in the code below.
@@ -875,13 +872,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
- if (element_size_log2(kind) == 2) {
- movl(kScratchRegister, src);
- movl(dst, kScratchRegister);
- } else {
- DCHECK_EQ(3, element_size_log2(kind));
- movq(kScratchRegister, src);
- movq(dst, kScratchRegister);
+ size_t size = element_size_log2(kind);
+ if (kind == kRef || kind == kOptRef || kind == kRtt) {
+ // Pointers are uncompressed on the stack!
+ size = kSystemPointerSizeLog2;
+ }
+ switch (size) {
+ case 2:
+ movl(kScratchRegister, src);
+ movl(dst, kScratchRegister);
+ break;
+ case 3:
+ movq(kScratchRegister, src);
+ movq(dst, kScratchRegister);
+ break;
+ case 4:
+ Movdqu(kScratchDoubleReg, src);
+ Movdqu(dst, kScratchDoubleReg);
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -919,7 +929,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
movq(dst, reg.gp());
break;
case kF32:
@@ -1466,7 +1475,7 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
movl(dst, src);
}
@@ -2137,7 +2146,6 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kOptRef:
case kRtt:
- case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case kI64:
@@ -4051,15 +4059,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
}
-void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
- LiftoffRegList all_spills,
- LiftoffRegList ref_spills,
- int spill_offset) {
+void LiftoffAssembler::RecordSpillsInSafepoint(
+ SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
+ LiftoffRegList ref_spills, int spill_offset) {
int spill_space_size = 0;
while (!all_spills.is_empty()) {
LiftoffRegister reg = all_spills.GetFirstRegSet();
if (ref_spills.has(reg)) {
- safepoint.DefinePointerSlot(spill_offset);
+ safepoint.DefineTaggedStackSlot(spill_offset);
}
all_spills.clear(reg);
++spill_offset;
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index f501d7e4a9..a217babc73 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -31,9 +31,9 @@
#include "src/base/platform/wrappers.h"
#include "src/builtins/builtins.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-arguments.h"
@@ -84,9 +84,7 @@ ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
switch (v8_valtype.heap_representation()) {
case i::wasm::HeapType::kFunc:
return FUNCREF;
- case i::wasm::HeapType::kExtern:
- // TODO(7748): Rename this to EXTERNREF if/when third-party API
- // changes.
+ case i::wasm::HeapType::kAny:
return ANYREF;
default:
// TODO(wasm+): support new value types
@@ -111,7 +109,7 @@ i::wasm::ValueType WasmValKindToV8(ValKind kind) {
case FUNCREF:
return i::wasm::kWasmFuncRef;
case ANYREF:
- return i::wasm::kWasmExternRef;
+ return i::wasm::kWasmAnyRef;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -391,18 +389,26 @@ Engine::~Engine() { impl(this)->~EngineImpl(); }
void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config>&& config) -> own<Engine> {
- i::FLAG_expose_gc = true;
- i::FLAG_experimental_wasm_reftypes = true;
auto engine = new (std::nothrow) EngineImpl;
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(engine->platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
v8::V8::Initialize();
+ // The commandline flags get loaded in V8::Initialize(), so we can override
+ // the flag values only afterwards.
+ i::FLAG_expose_gc = true;
+ // We disable dynamic tiering because it interferes with serialization. We
+ // only serialize optimized code, but with dynamic tiering not all code gets
+ // optimized. It is then unclear what we should serialize in the first place.
+ i::FLAG_wasm_dynamic_tiering = false;
+ // We disable speculative inlining, because speculative inlining depends on
+ // dynamic tiering.
+ i::FLAG_wasm_speculative_inlining = false;
return make_own(seal<Engine>(engine));
}
@@ -1041,11 +1047,11 @@ own<Instance> GetInstance(StoreImpl* store,
own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
i::Isolate* isolate, StoreImpl* store) {
- i::Handle<i::StackFrameInfo> frame(
- i::StackFrameInfo::cast(frames->get(index)), isolate);
+ i::Handle<i::CallSiteInfo> frame(i::CallSiteInfo::cast(frames->get(index)),
+ isolate);
i::Handle<i::WasmInstanceObject> instance(frame->GetWasmInstance(), isolate);
uint32_t func_index = frame->GetWasmFunctionIndex();
- size_t module_offset = i::StackFrameInfo::GetSourcePosition(frame);
+ size_t module_offset = i::CallSiteInfo::GetSourcePosition(frame);
size_t func_offset = module_offset - i::wasm::GetWasmFunctionOffset(
instance->module(), func_index);
return own<Frame>(seal<Frame>(new (std::nothrow) FrameImpl(
@@ -1058,10 +1064,8 @@ own<Frame> Trap::origin() const {
i::Isolate* isolate = impl(this)->isolate();
i::HandleScope handle_scope(isolate);
- i::Handle<i::JSMessageObject> message =
- isolate->CreateMessage(impl(this)->v8_object(), nullptr);
- i::Handle<i::FixedArray> frames(i::FixedArray::cast(message->stack_frames()),
- isolate);
+ i::Handle<i::FixedArray> frames =
+ isolate->GetSimpleStackTrace(impl(this)->v8_object());
if (frames->length() == 0) {
return own<Frame>();
}
@@ -1072,10 +1076,8 @@ ownvec<Frame> Trap::trace() const {
i::Isolate* isolate = impl(this)->isolate();
i::HandleScope handle_scope(isolate);
- i::Handle<i::JSMessageObject> message =
- isolate->CreateMessage(impl(this)->v8_object(), nullptr);
- i::Handle<i::FixedArray> frames(i::FixedArray::cast(message->stack_frames()),
- isolate);
+ i::Handle<i::FixedArray> frames =
+ isolate->GetSimpleStackTrace(impl(this)->v8_object());
int num_frames = frames->length();
// {num_frames} can be 0; the code below can handle that case.
ownvec<Frame> result = ownvec<Frame>::make_uninitialized(num_frames);
@@ -1523,9 +1525,7 @@ void PrepareFunctionData(i::Isolate* isolate,
const i::wasm::FunctionSig* sig,
const i::wasm::WasmModule* module) {
// If the data is already populated, return immediately.
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- if (function_data->c_wrapper_code() !=
- ToCodeT(*BUILTIN_CODE(isolate, Illegal))) {
+ if (function_data->c_wrapper_code() != *BUILTIN_CODE(isolate, Illegal)) {
return;
}
// Compile wrapper code.
@@ -1560,7 +1560,6 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kS128:
// TODO(7748): Implement.
UNIMPLEMENTED();
@@ -1600,7 +1599,6 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
break;
}
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kS128:
// TODO(7748): Implement.
UNIMPLEMENTED();
@@ -1667,9 +1665,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
const i::wasm::FunctionSig* sig =
instance->module()->functions[function_index].sig;
PrepareFunctionData(isolate, function_data, sig, instance->module());
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- i::Handle<i::CodeT> wrapper_code = i::Handle<i::CodeT>(
- i::CodeT::cast(function_data->c_wrapper_code()), isolate);
+ i::Handle<i::CodeT> wrapper_code(function_data->c_wrapper_code(), isolate);
i::Address call_target = function_data->internal().foreign_address();
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
@@ -1865,7 +1861,6 @@ auto Global::get() const -> Val {
return Val(V8RefValueToWasm(store, v8_global->GetRef()));
}
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kS128:
// TODO(7748): Implement these.
UNIMPLEMENTED();
@@ -1931,8 +1926,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
break;
case ANYREF:
// See Engine::make().
- DCHECK(i::wasm::WasmFeatures::FromFlags().has_reftypes());
- i_type = i::wasm::kWasmExternRef;
+ i_type = i::wasm::kWasmAnyRef;
break;
default:
UNREACHABLE();
@@ -1978,7 +1972,7 @@ auto Table::type() const -> own<TableType> {
case i::wasm::HeapType::kFunc:
kind = FUNCREF;
break;
- case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kAny:
kind = ANYREF;
break;
default:
@@ -2299,12 +2293,20 @@ struct borrowed_vec {
// Vectors
+#ifdef V8_GC_MOLE
+#define ASSERT_VEC_BASE_SIZE(name, Name, vec, ptr_or_none)
+
+#else
+#define ASSERT_VEC_BASE_SIZE(name, Name, vec, ptr_or_none) \
+ static_assert(sizeof(wasm_##name##_vec_t) == sizeof(vec<Name>), \
+ "C/C++ incompatibility"); \
+ static_assert( \
+ sizeof(wasm_##name##_t ptr_or_none) == sizeof(vec<Name>::elem_type), \
+ "C/C++ incompatibility");
+#endif
+
#define WASM_DEFINE_VEC_BASE(name, Name, vec, ptr_or_none) \
- static_assert(sizeof(wasm_##name##_vec_t) == sizeof(vec<Name>), \
- "C/C++ incompatibility"); \
- static_assert( \
- sizeof(wasm_##name##_t ptr_or_none) == sizeof(vec<Name>::elem_type), \
- "C/C++ incompatibility"); \
+ ASSERT_VEC_BASE_SIZE(name, Name, vec, ptr_or_none) \
extern "C++" inline auto hide_##name##_vec(vec<Name>& v) \
->wasm_##name##_vec_t* { \
return reinterpret_cast<wasm_##name##_vec_t*>(&v); \
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index b27a7dbce6..f473439441 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -11,35 +11,27 @@ namespace v8 {
namespace internal {
namespace wasm {
-thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
-// The thread-local counter (above) is only valid if a single thread only works
-// on one module at a time. This second thread-local checks that.
-#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
nullptr;
-#endif
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
-#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
-CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
-#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
- : native_module_(native_module) {
-#ifdef DEBUG
- if (code_space_write_nesting_level_ == 0) {
- current_native_module_ = native_module;
+ : previous_native_module_(current_native_module_) {
+ DCHECK_NOT_NULL(native_module);
+ if (previous_native_module_ == native_module) return;
+ current_native_module_ = native_module;
+ if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
+ SetWritable();
}
- DCHECK_EQ(native_module, current_native_module_);
-#endif // DEBUG
-#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
- if (code_space_write_nesting_level_ == 0) SetWritable();
- code_space_write_nesting_level_++;
}
CodeSpaceWriteScope::~CodeSpaceWriteScope() {
- code_space_write_nesting_level_--;
- if (code_space_write_nesting_level_ == 0) SetExecutable();
+ if (previous_native_module_ == current_native_module_) return;
+ if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
+ SetExecutable();
+ }
+ current_native_module_ = previous_native_module_;
}
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
@@ -48,37 +40,49 @@ CodeSpaceWriteScope::~CodeSpaceWriteScope() {
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-void CodeSpaceWriteScope::SetWritable() const {
+// static
+void CodeSpaceWriteScope::SetWritable() {
pthread_jit_write_protect_np(0);
}
-void CodeSpaceWriteScope::SetExecutable() const {
+// static
+void CodeSpaceWriteScope::SetExecutable() {
pthread_jit_write_protect_np(1);
}
#pragma clang diagnostic pop
+// static
+bool CodeSpaceWriteScope::SwitchingPerNativeModule() { return false; }
+
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
-void CodeSpaceWriteScope::SetWritable() const {
- DCHECK_NOT_NULL(native_module_);
+// static
+void CodeSpaceWriteScope::SetWritable() {
auto* code_manager = GetWasmCodeManager();
if (code_manager->MemoryProtectionKeysEnabled()) {
code_manager->SetThreadWritable(true);
} else if (FLAG_wasm_write_protect_code_memory) {
- native_module_->AddWriter();
+ current_native_module_->AddWriter();
}
}
-void CodeSpaceWriteScope::SetExecutable() const {
+// static
+void CodeSpaceWriteScope::SetExecutable() {
auto* code_manager = GetWasmCodeManager();
if (code_manager->MemoryProtectionKeysEnabled()) {
DCHECK(FLAG_wasm_memory_protection_keys);
code_manager->SetThreadWritable(false);
} else if (FLAG_wasm_write_protect_code_memory) {
- native_module_->RemoveWriter();
+ current_native_module_->RemoveWriter();
}
}
+// static
+bool CodeSpaceWriteScope::SwitchingPerNativeModule() {
+ return !GetWasmCodeManager()->MemoryProtectionKeysEnabled() &&
+ FLAG_wasm_write_protect_code_memory;
+}
+
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
} // namespace wasm
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 788bb8eca3..502c406b2b 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -45,7 +45,7 @@ class NativeModule;
// permissions for all code pages.
class V8_NODISCARD CodeSpaceWriteScope final {
public:
- explicit V8_EXPORT_PRIVATE CodeSpaceWriteScope(NativeModule* native_module);
+ explicit V8_EXPORT_PRIVATE CodeSpaceWriteScope(NativeModule*);
V8_EXPORT_PRIVATE ~CodeSpaceWriteScope();
// Disable copy constructor and copy-assignment operator, since this manages
@@ -53,22 +53,27 @@ class V8_NODISCARD CodeSpaceWriteScope final {
CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
- private:
- static thread_local int code_space_write_nesting_level_;
-#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
- static thread_local NativeModule* current_native_module_;
-#endif
-
- void SetWritable() const;
- void SetExecutable() const;
+ static bool IsInScope() { return current_native_module_ != nullptr; }
+ private:
// The M1 implementation knows implicitly from the {MAP_JIT} flag during
// allocation which region to switch permissions for. On non-M1 hardware
// without memory protection key support, we need the code space from the
- // {native_module_}.
-#if !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
- NativeModule* const native_module_;
-#endif
+ // {NativeModule}.
+ static thread_local NativeModule* current_native_module_;
+
+ // {SetWritable} and {SetExecutable} implicitly operate on
+ // {current_native_module_} (for mprotect-based protection).
+ static void SetWritable();
+ static void SetExecutable();
+
+ // Returns {true} if switching permissions happens on a per-module level, and
+ // not globally (like for MAP_JIT and PKU).
+ static bool SwitchingPerNativeModule();
+
+ // Save the previous module to put it back in {current_native_module_} when
+ // exiting this scope.
+ NativeModule* const previous_native_module_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index f64ab1cdf7..7517679b9b 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -103,6 +103,9 @@ class WireBytesStorage {
public:
virtual ~WireBytesStorage() = default;
virtual base::Vector<const uint8_t> GetCode(WireBytesRef) const = 0;
+ // Returns the ModuleWireBytes corresponding to the underlying module if
+ // available. Not supported if the wire bytes are owned by a StreamingDecoder.
+ virtual base::Optional<ModuleWireBytes> GetModuleBytes() const = 0;
};
// Callbacks will receive either {kFailedCompilation} or both
@@ -155,7 +158,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
void InitializeAfterDeserialization(
- base::Vector<const int> missing_functions);
+ base::Vector<const int> lazy_functions,
+ base::Vector<const int> liftoff_functions);
// Wait until top tier compilation finished, or compilation failed.
void WaitForTopTierFinished();
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 7597205246..fa2bffc589 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -208,6 +208,15 @@ class Decoder {
return result;
}
+ // Reads a LEB128 variable-length signed 64-bit integer and advances {pc_}.
+ int64_t consume_i64v(const char* name = nullptr) {
+ uint32_t length = 0;
+ int64_t result =
+ read_leb<int64_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
+ }
+
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
void consume_bytes(uint32_t size, const char* name = "skip") {
// Only trace if the name is not null.
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 0d3517c554..5b7201abb6 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -183,21 +183,6 @@ void DecodeError(Decoder* decoder, const char* str) {
namespace value_type_reader {
-V8_INLINE WasmFeature feature_for_heap_type(HeapType heap_type) {
- switch (heap_type.representation()) {
- case HeapType::kFunc:
- case HeapType::kExtern:
- return WasmFeature::kFeature_reftypes;
- case HeapType::kEq:
- case HeapType::kI31:
- case HeapType::kData:
- case HeapType::kAny:
- return WasmFeature::kFeature_gc;
- case HeapType::kBottom:
- UNREACHABLE();
- }
-}
-
// If {module} is not null, the read index will be checked against the module's
// type capacity.
template <Decoder::ValidateFlag validate>
@@ -215,29 +200,27 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
uint8_t uint_7_mask = 0x7F;
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
switch (code) {
- case kFuncRefCode:
case kEqRefCode:
- case kExternRefCode:
case kI31RefCode:
case kDataRefCode:
- case kAnyRefCode: {
- HeapType result = HeapType::from_code(code);
- if (!VALIDATE(enabled.contains(feature_for_heap_type(result)))) {
+ case kArrayRefCode:
+ case kAnyRefCodeAlias:
+ if (!VALIDATE(enabled.has_gc())) {
DecodeError<validate>(
decoder, pc,
- "invalid heap type '%s', enable with --experimental-wasm-%s",
- result.name().c_str(),
- WasmFeatures::name_for_feature(feature_for_heap_type(result)));
+ "invalid heap type '%s', enable with --experimental-wasm-gc",
+ HeapType::from_code(code).name().c_str());
return HeapType(HeapType::kBottom);
}
- return result;
- }
+ V8_FALLTHROUGH;
+ case kAnyRefCode:
+ case kFuncRefCode:
+ return HeapType::from_code(code);
default:
DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
heap_index);
return HeapType(HeapType::kBottom);
}
- UNREACHABLE();
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
DecodeError<validate>(decoder, pc,
@@ -264,6 +247,9 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
}
}
+HeapType consume_heap_type(Decoder* decoder, const WasmModule* module,
+ const WasmFeatures& enabled);
+
// Read a value type starting at address {pc} using {decoder}.
// No bytes are consumed.
// The length of the read value type is written in {length}.
@@ -281,26 +267,27 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
ValueTypeCode code = static_cast<ValueTypeCode>(val);
switch (code) {
- case kFuncRefCode:
case kEqRefCode:
- case kExternRefCode:
case kI31RefCode:
case kDataRefCode:
- case kAnyRefCode: {
- HeapType heap_type = HeapType::from_code(code);
- Nullability nullability = code == kI31RefCode || code == kDataRefCode
- ? kNonNullable
- : kNullable;
- ValueType result = ValueType::Ref(heap_type, nullability);
- if (!VALIDATE(enabled.contains(feature_for_heap_type(heap_type)))) {
+ case kArrayRefCode:
+ case kAnyRefCodeAlias:
+ if (!VALIDATE(enabled.has_gc())) {
DecodeError<validate>(
decoder, pc,
- "invalid value type '%s', enable with --experimental-wasm-%s",
- result.name().c_str(),
- WasmFeatures::name_for_feature(feature_for_heap_type(heap_type)));
+ "invalid value type '%sref', enable with --experimental-wasm-gc",
+ HeapType::from_code(code).name().c_str());
return kWasmBottom;
}
- return result;
+ V8_FALLTHROUGH;
+ case kAnyRefCode:
+ case kFuncRefCode: {
+ HeapType heap_type = HeapType::from_code(code);
+ Nullability nullability =
+ code == kI31RefCode || code == kDataRefCode || code == kArrayRefCode
+ ? kNonNullable
+ : kNullable;
+ return ValueType::Ref(heap_type, nullability);
}
case kI32Code:
return kWasmI32;
@@ -326,22 +313,27 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
return heap_type.is_bottom() ? kWasmBottom
: ValueType::Ref(heap_type, nullability);
}
- case kRttWithDepthCode: {
+ // TODO(7748): This is here only for backwards compatibility, and the parsed
+ // depth is ignored.
+ case kRttWithDepthCode:
+ case kRttCode: {
if (!VALIDATE(enabled.has_gc())) {
DecodeError<validate>(
decoder, pc,
"invalid value type 'rtt', enable with --experimental-wasm-gc");
return kWasmBottom;
}
- uint32_t depth = decoder->read_u32v<validate>(pc + 1, length, "depth");
- *length += 1;
- if (!VALIDATE(depth <= kV8MaxRttSubtypingDepth)) {
- DecodeError<validate>(
- decoder, pc,
- "subtyping depth %u is greater than the maximum depth "
- "%u supported by V8",
- depth, kV8MaxRttSubtypingDepth);
- return kWasmBottom;
+ if (code == kRttWithDepthCode) {
+ uint32_t depth = decoder->read_u32v<validate>(pc + 1, length, "depth");
+ *length += 1;
+ if (!VALIDATE(depth <= kV8MaxRttSubtypingDepth)) {
+ DecodeError<validate>(
+ decoder, pc,
+ "subtyping depth %u is greater than the maximum depth "
+ "%u supported by V8",
+ depth, kV8MaxRttSubtypingDepth);
+ return kWasmBottom;
+ }
}
uint32_t type_index_length;
uint32_t type_index =
@@ -362,32 +354,6 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
type_index);
return kWasmBottom;
}
- return ValueType::Rtt(type_index, depth);
- }
- case kRttCode: {
- if (!VALIDATE(enabled.has_gc())) {
- DecodeError<validate>(
- decoder, pc,
- "invalid value type 'rtt', enable with --experimental-wasm-gc");
- return kWasmBottom;
- }
- uint32_t type_index = decoder->read_u32v<validate>(pc + 1, length);
- *length += 1;
- if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
- DecodeError<validate>(
- decoder, pc,
- "Type index %u is greater than the maximum number %zu "
- "of type definitions supported by V8",
- type_index, kV8MaxWasmTypes);
- return kWasmBottom;
- }
- // We use capacity over size so this works mid-DecodeTypeSection.
- if (!VALIDATE(module == nullptr ||
- type_index < module->types.capacity())) {
- DecodeError<validate>(decoder, pc, "Type index %u is out of bounds",
- type_index);
- return kWasmBottom;
- }
return ValueType::Rtt(type_index);
}
case kS128Code: {
@@ -944,6 +910,8 @@ struct ControlBase : public PcForErrors<validate> {
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
F(S128Const, Simd128Immediate<validate>& imm, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
F(RefNull, ValueType type, Value* result) \
F(RefFunc, uint32_t function_index, Value* result) \
F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
@@ -953,149 +921,153 @@ struct ControlBase : public PcForErrors<validate> {
const Value& rtt, Value* result) \
F(ArrayInit, const ArrayIndexImmediate<validate>& imm, \
const base::Vector<Value>& elements, const Value& rtt, Value* result) \
+ F(ArrayInitFromData, const ArrayIndexImmediate<validate>& array_imm, \
+ const IndexImmediate<validate>& data_segment, const Value& offset, \
+ const Value& length, const Value& rtt, Value* result) \
F(RttCanon, uint32_t type_index, Value* result) \
- F(RttSub, uint32_t type_index, const Value& parent, Value* result, \
- WasmRttSubMode mode) \
F(DoReturn, uint32_t drop_values)
-#define INTERFACE_NON_CONSTANT_FUNCTIONS(F) \
- /* Control: */ \
- F(Block, Control* block) \
- F(Loop, Control* block) \
- F(Try, Control* block) \
- F(If, const Value& cond, Control* if_block) \
- F(FallThruTo, Control* c) \
- F(PopControl, Control* block) \
- /* Instructions: */ \
- F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
- Value* result) \
- F(RefAsNonNull, const Value& arg, Value* result) \
- F(Drop) \
- F(LocalGet, Value* result, const IndexImmediate<validate>& imm) \
- F(LocalSet, const Value& value, const IndexImmediate<validate>& imm) \
- F(LocalTee, const Value& value, Value* result, \
- const IndexImmediate<validate>& imm) \
- F(AllocateLocals, base::Vector<Value> local_values) \
- F(DeallocateLocals, uint32_t count) \
- F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(TableGet, const Value& index, Value* result, \
- const IndexImmediate<validate>& imm) \
- F(TableSet, const Value& index, const Value& value, \
- const IndexImmediate<validate>& imm) \
- F(Trap, TrapReason reason) \
- F(NopForTestingUnsupportedInLiftoff) \
- F(Select, const Value& cond, const Value& fval, const Value& tval, \
- Value* result) \
- F(BrOrRet, uint32_t depth, uint32_t drop_values) \
- F(BrIf, const Value& cond, uint32_t depth) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
- F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, Value* result) \
- F(LoadTransform, LoadType type, LoadTransformationKind transform, \
- const MemoryAccessImmediate<validate>& imm, const Value& index, \
- Value* result) \
- F(LoadLane, LoadType type, const Value& value, const Value& index, \
- const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
- Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value) \
- F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value, const uint8_t laneidx) \
- F(CurrentMemoryPages, Value* result) \
- F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
- Value returns[]) \
- F(CallRef, const Value& func_ref, const FunctionSig* sig, \
- uint32_t sig_index, const Value args[], const Value returns[]) \
- F(ReturnCallRef, const Value& func_ref, const FunctionSig* sig, \
- uint32_t sig_index, const Value args[]) \
- F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
- const Value args[]) \
- F(ReturnCallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[]) \
- F(BrOnNull, const Value& ref_object, uint32_t depth, \
- bool pass_null_along_branch, Value* result_on_fallthrough) \
- F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
- F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
- const base::Vector<Value> inputs, Value* result) \
- F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
- F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const TagIndexImmediate<validate>& imm, \
- const base::Vector<Value>& args) \
- F(Rethrow, Control* block) \
- F(CatchException, const TagIndexImmediate<validate>& imm, Control* block, \
- base::Vector<Value> caught_values) \
- F(Delegate, uint32_t depth, Control* block) \
- F(CatchAll, Control* block) \
- F(AtomicOp, WasmOpcode opcode, base::Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
- F(AtomicFence) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(DataDrop, const IndexImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
- const Value& value, const Value& size) \
- F(TableInit, const TableInitImmediate<validate>& imm, \
- base::Vector<Value> args) \
- F(ElemDrop, const IndexImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, \
- base::Vector<Value> args) \
- F(TableGrow, const IndexImmediate<validate>& imm, const Value& value, \
- const Value& delta, Value* result) \
- F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
- F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
- const Value& value, const Value& count) \
- F(StructGet, const Value& struct_object, \
- const FieldImmediate<validate>& field, bool is_signed, Value* result) \
- F(StructSet, const Value& struct_object, \
- const FieldImmediate<validate>& field, const Value& field_value) \
- F(ArrayNewWithRtt, const ArrayIndexImmediate<validate>& imm, \
- const Value& length, const Value& initial_value, const Value& rtt, \
- Value* result) \
- F(ArrayNewDefault, const ArrayIndexImmediate<validate>& imm, \
- const Value& length, const Value& rtt, Value* result) \
- F(ArrayGet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- bool is_signed, Value* result) \
- F(ArraySet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- const Value& value) \
- F(ArrayLen, const Value& array_obj, Value* result) \
- F(ArrayCopy, const Value& src, const Value& src_index, const Value& dst, \
- const Value& dst_index, const Value& length) \
- F(I31New, const Value& input, Value* result) \
- F(I31GetS, const Value& input, Value* result) \
- F(I31GetU, const Value& input, Value* result) \
- F(RefTest, const Value& obj, const Value& rtt, Value* result) \
- F(RefCast, const Value& obj, const Value& rtt, Value* result) \
- F(AssertNull, const Value& obj, Value* result) \
- F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
- uint32_t depth) \
- F(BrOnCastFail, const Value& obj, const Value& rtt, \
- Value* result_on_fallthrough, uint32_t depth) \
- F(RefIsFunc, const Value& object, Value* result) \
- F(RefIsData, const Value& object, Value* result) \
- F(RefIsI31, const Value& object, Value* result) \
- F(RefAsFunc, const Value& object, Value* result) \
- F(RefAsData, const Value& object, Value* result) \
- F(RefAsI31, const Value& object, Value* result) \
- F(BrOnFunc, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(BrOnNonFunc, const Value& object, Value* value_on_fallthrough, \
- uint32_t br_depth) \
- F(BrOnNonData, const Value& object, Value* value_on_fallthrough, \
- uint32_t br_depth) \
- F(BrOnNonI31, const Value& object, Value* value_on_fallthrough, \
+#define INTERFACE_NON_CONSTANT_FUNCTIONS(F) /* force 80 columns */ \
+ /* Control: */ \
+ F(Block, Control* block) \
+ F(Loop, Control* block) \
+ F(Try, Control* block) \
+ F(If, const Value& cond, Control* if_block) \
+ F(FallThruTo, Control* c) \
+ F(PopControl, Control* block) \
+ /* Instructions: */ \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(RefAsNonNull, const Value& arg, Value* result) \
+ F(Drop) \
+ F(LocalGet, Value* result, const IndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const IndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
+ const IndexImmediate<validate>& imm) \
+ F(AllocateLocals, base::Vector<Value> local_values) \
+ F(DeallocateLocals, uint32_t count) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(TableGet, const Value& index, Value* result, \
+ const IndexImmediate<validate>& imm) \
+ F(TableSet, const Value& index, const Value& value, \
+ const IndexImmediate<validate>& imm) \
+ F(Trap, TrapReason reason) \
+ F(NopForTestingUnsupportedInLiftoff) \
+ F(Select, const Value& cond, const Value& fval, const Value& tval, \
+ Value* result) \
+ F(BrOrRet, uint32_t depth, uint32_t drop_values) \
+ F(BrIf, const Value& cond, uint32_t depth) \
+ F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(Else, Control* if_block) \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, Value* result) \
+ F(LoadTransform, LoadType type, LoadTransformationKind transform, \
+ const MemoryAccessImmediate<validate>& imm, const Value& index, \
+ Value* result) \
+ F(LoadLane, LoadType type, const Value& value, const Value& index, \
+ const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
+ Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value) \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value, const uint8_t laneidx) \
+ F(CurrentMemoryPages, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
+ F(CallDirect, const CallFunctionImmediate<validate>& imm, \
+ const Value args[], Value returns[]) \
+ F(CallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[], \
+ Value returns[]) \
+ F(CallRef, const Value& func_ref, const FunctionSig* sig, \
+ uint32_t sig_index, const Value args[], const Value returns[]) \
+ F(ReturnCallRef, const Value& func_ref, const FunctionSig* sig, \
+ uint32_t sig_index, const Value args[]) \
+ F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[]) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth, \
+ bool pass_null_along_branch, Value* result_on_fallthrough) \
+ F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
+ F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ const base::Vector<Value> inputs, Value* result) \
+ F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
+ const Value& input0, const Value& input1, Value* result) \
+ F(Throw, const TagIndexImmediate<validate>& imm, \
+ const base::Vector<Value>& args) \
+ F(Rethrow, Control* block) \
+ F(CatchException, const TagIndexImmediate<validate>& imm, Control* block, \
+ base::Vector<Value> caught_values) \
+ F(Delegate, uint32_t depth, Control* block) \
+ F(CatchAll, Control* block) \
+ F(AtomicOp, WasmOpcode opcode, base::Vector<Value> args, \
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(DataDrop, const IndexImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, \
+ base::Vector<Value> args) \
+ F(ElemDrop, const IndexImmediate<validate>& imm) \
+ F(TableCopy, const TableCopyImmediate<validate>& imm, \
+ base::Vector<Value> args) \
+ F(TableGrow, const IndexImmediate<validate>& imm, const Value& value, \
+ const Value& delta, Value* result) \
+ F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
+ F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
+ const Value& value, const Value& count) \
+ F(StructGet, const Value& struct_object, \
+ const FieldImmediate<validate>& field, bool is_signed, Value* result) \
+ F(StructSet, const Value& struct_object, \
+ const FieldImmediate<validate>& field, const Value& field_value) \
+ F(ArrayNewWithRtt, const ArrayIndexImmediate<validate>& imm, \
+ const Value& length, const Value& initial_value, const Value& rtt, \
+ Value* result) \
+ F(ArrayNewDefault, const ArrayIndexImmediate<validate>& imm, \
+ const Value& length, const Value& rtt, Value* result) \
+ F(ArrayGet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ bool is_signed, Value* result) \
+ F(ArraySet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ const Value& value) \
+ F(ArrayLen, const Value& array_obj, Value* result) \
+ F(ArrayCopy, const Value& src, const Value& src_index, const Value& dst, \
+ const Value& dst_index, const Value& length) \
+ F(I31New, const Value& input, Value* result) \
+ F(I31GetS, const Value& input, Value* result) \
+ F(I31GetU, const Value& input, Value* result) \
+ F(RefTest, const Value& obj, const Value& rtt, Value* result) \
+ F(RefCast, const Value& obj, const Value& rtt, Value* result) \
+ F(AssertNull, const Value& obj, Value* result) \
+ F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
+ uint32_t depth) \
+ F(BrOnCastFail, const Value& obj, const Value& rtt, \
+ Value* result_on_fallthrough, uint32_t depth) \
+ F(RefIsFunc, const Value& object, Value* result) \
+ F(RefIsData, const Value& object, Value* result) \
+ F(RefIsI31, const Value& object, Value* result) \
+ F(RefIsArray, const Value& object, Value* result) \
+ F(RefAsFunc, const Value& object, Value* result) \
+ F(RefAsData, const Value& object, Value* result) \
+ F(RefAsI31, const Value& object, Value* result) \
+ F(RefAsArray, const Value& object, Value* result) \
+ F(BrOnFunc, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnArray, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnNonFunc, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth) \
+ F(BrOnNonData, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth) \
+ F(BrOnNonI31, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth) \
+ F(BrOnNonArray, const Value& object, Value* value_on_fallthrough, \
uint32_t br_depth)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
@@ -1281,6 +1253,8 @@ class WasmDecoder : public Decoder {
}
bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
+ // We compare with the current size of the globals vector. This is important
+ // if we are decoding a constant expression in the global section.
if (!VALIDATE(imm.index < module_->globals.size())) {
DecodeError(pc, "Invalid global index: %u", imm.index);
return false;
@@ -1354,13 +1328,6 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
if (!ValidateSignature(pc, imm.sig_imm)) return false;
- // call_indirect is not behind the reftypes feature, so we have to impose
- // the older format if reftypes is not enabled.
- if (!VALIDATE((imm.table_imm.index == 0 && imm.table_imm.length == 1) ||
- this->enabled_.has_reftypes())) {
- DecodeError(pc + imm.sig_imm.length, "expected table index 0, found %u",
- imm.table_imm.index);
- }
if (!ValidateTable(pc + imm.sig_imm.length, imm.table_imm)) {
return false;
}
@@ -1538,6 +1505,9 @@ class WasmDecoder : public Decoder {
// The following Validate* functions all validate an IndexImmediate, albeit
// differently according to context.
bool ValidateTable(const byte* pc, IndexImmediate<validate>& imm) {
+ if (imm.index > 0 || imm.length > 1) {
+ this->detected_->Add(kFeature_reftypes);
+ }
if (!VALIDATE(imm.index < module_->tables.size())) {
DecodeError(pc, "invalid table index: %u", imm.index);
return false;
@@ -1717,7 +1687,7 @@ class WasmDecoder : public Decoder {
case kExprRefAsNonNull:
return 1;
-#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+#define DECLARE_OPCODE_CASE(name, ...) case kExpr##name:
// clang-format off
/********** Simple and memory opcodes **********/
FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
@@ -1892,6 +1862,13 @@ class WasmDecoder : public Decoder {
pc + length + dst_imm.length);
return length + dst_imm.length + src_imm.length;
}
+ case kExprArrayInitFromData:
+ case kExprArrayInitFromDataStatic: {
+ ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
+ IndexImmediate<validate> data_imm(
+ decoder, pc + length + array_imm.length, "data segment index");
+ return length + array_imm.length + data_imm.length;
+ }
case kExprBrOnCast:
case kExprBrOnCastFail:
case kExprBrOnData:
@@ -1901,8 +1878,6 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprRttCanon:
- case kExprRttSub:
- case kExprRttFreshSub:
case kExprRefTestStatic:
case kExprRefCastStatic:
case kExprBrOnCastStatic:
@@ -1939,7 +1914,7 @@ class WasmDecoder : public Decoder {
// Prefixed opcodes (already handled, included here for completeness of
// switch)
FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_NUMERIC_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_NUMERIC_OPCODE(DECLARE_OPCODE_CASE, DECLARE_OPCODE_CASE)
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_GC_OPCODE(DECLARE_OPCODE_CASE)
@@ -2047,13 +2022,19 @@ class WasmDecoder : public Decoder {
return {2, 1};
FOREACH_SIMD_CONST_OPCODE(DECLARE_OPCODE_CASE)
return {0, 1};
+ // Special case numeric opcodes without fixed signature.
+ case kExprMemoryInit:
+ case kExprMemoryCopy:
+ case kExprMemoryFill:
+ return {3, 0};
+ case kExprTableGrow:
+ return {2, 1};
+ case kExprTableFill:
+ return {3, 0};
default: {
sig = WasmOpcodes::Signature(opcode);
- if (sig) {
- return {sig->parameter_count(), sig->return_count()};
- } else {
- UNREACHABLE();
- }
+ DCHECK_NOT_NULL(sig);
+ return {sig->parameter_count(), sig->return_count()};
}
}
}
@@ -2069,8 +2050,6 @@ class WasmDecoder : public Decoder {
case kExprI31GetU:
case kExprArrayNewDefault:
case kExprArrayLen:
- case kExprRttSub:
- case kExprRttFreshSub:
case kExprRefTestStatic:
case kExprRefCastStatic:
case kExprBrOnCastStatic:
@@ -2080,6 +2059,7 @@ class WasmDecoder : public Decoder {
return {2, 0};
case kExprArrayNew:
case kExprArrayNewDefaultWithRtt:
+ case kExprArrayInitFromDataStatic:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -2096,6 +2076,7 @@ class WasmDecoder : public Decoder {
case kExprStructNewDefault:
return {0, 1};
case kExprArrayNewWithRtt:
+ case kExprArrayInitFromData:
return {3, 1};
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, pc + 2);
@@ -2253,6 +2234,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DCHECK_LE(this->pc_, this->end_);
DCHECK_EQ(this->num_locals(), 0);
+ locals_offset_ = this->pc_offset();
this->InitializeLocalsFromSig();
uint32_t params_count = static_cast<uint32_t>(this->num_locals());
uint32_t locals_length;
@@ -2359,7 +2341,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
uint32_t pc_relative_offset() const {
- return this->pc_offset() - first_instruction_offset;
+ return this->pc_offset() - locals_offset_;
}
void DecodeFunctionBody() {
@@ -2393,7 +2375,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
}
- first_instruction_offset = this->pc_offset();
// Decode the function body.
while (this->pc_ < this->end_) {
// Most operations only grow the stack by at least one element (unary and
@@ -2426,7 +2407,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
private:
- uint32_t first_instruction_offset = 0;
+ uint32_t locals_offset_ = 0;
Interface interface_;
// The value stack, stored as individual pointers for maximum performance.
@@ -2518,9 +2499,16 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Append("T");
break;
case kControlIfElse:
+ Append("E");
+ break;
case kControlTryCatch:
+ Append("C");
+ break;
case kControlTryCatchAll:
- case kControlLet: // TODO(7748): Implement
+ Append("A");
+ break;
+ case kControlLet:
+ Append("D");
break;
}
if (c.start_merge.arity) Append("%u-", c.start_merge.arity);
@@ -2572,7 +2560,20 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
DECODE(op) { return BuildSimpleOperator_##sig(kExpr##op); }
- FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+ FOREACH_SIMPLE_NON_CONST_OPCODE(BUILD_SIMPLE_OPCODE)
+#undef BUILD_SIMPLE_OPCODE
+
+#define BUILD_SIMPLE_OPCODE(op, _, sig) \
+ DECODE(op) { \
+ if (decoding_mode == kInitExpression) { \
+ if (!VALIDATE(this->enabled_.has_extended_const())) { \
+ NonConstError(this, kExpr##op); \
+ return 0; \
+ } \
+ } \
+ return BuildSimpleOperator_##sig(kExpr##op); \
+ }
+ FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
DECODE(Block) {
@@ -2712,7 +2713,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
- Value ref_object = Peek(0, 0);
+ Value ref_object = Peek(0);
Control* c = control_at(imm.depth);
if (!VALIDATE(TypeCheckBranch<true>(c, 1))) return 0;
switch (ref_object.type.kind()) {
@@ -2768,6 +2769,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (V8_LIKELY(current_code_reachable_and_ok_)) {
CALL_INTERFACE(Forward, ref_object, stack_value(1));
CALL_INTERFACE(BrOrRet, imm.depth, 0);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
c->br_merge()->reached = true;
}
break;
@@ -2802,8 +2806,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
ArgVector let_local_values =
- PeekArgs(static_cast<uint32_t>(imm.in_arity()),
- base::VectorOf(this->local_types_.data(), new_locals_count));
+ PeekArgs(base::VectorOf(this->local_types_.data(), new_locals_count));
ArgVector args = PeekArgs(imm.sig, new_locals_count);
Control* let_block = PushControl(kControlLet, new_locals_count,
let_local_values.length() + args.length());
@@ -2925,7 +2928,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(Select) {
Value cond = Peek(0, 2, kWasmI32);
- Value fval = Peek(1, 1);
+ Value fval = Peek(1);
Value tval = Peek(2, 0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
if (!VALIDATE(!type.is_reference())) {
@@ -2941,7 +2944,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(SelectWithType) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (this->failed()) return 0;
@@ -3075,7 +3078,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(RefNull) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (!VALIDATE(this->ok())) return 0;
@@ -3087,8 +3090,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(RefIsNull) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
- Value value = Peek(0, 0);
+ this->detected_->Add(kFeature_reftypes);
+ Value value = Peek(0);
Value result = CreateValue(kWasmI32);
switch (value.type.kind()) {
case kOptRef:
@@ -3116,7 +3119,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(RefFunc) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
IndexImmediate<validate> imm(this, this->pc_ + 1, "function index");
if (!this->ValidateFunction(this->pc_ + 1, imm)) return 0;
HeapType heap_type(this->enabled_.has_typed_funcref()
@@ -3130,7 +3133,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(RefAsNonNull) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- Value value = Peek(0, 0);
+ Value value = Peek(0);
switch (value.type.kind()) {
case kBottom:
// We are in unreachable code. Forward the bottom value.
@@ -3192,7 +3195,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Drop) {
- Peek(0, 0);
+ Peek(0);
CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
Drop(1);
return 1;
@@ -3221,7 +3224,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(TableGet) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value index = Peek(0, 0, kWasmI32);
@@ -3233,7 +3236,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(TableSet) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value value = Peek(0, 1, this->module_->tables[imm.index].type);
@@ -3366,7 +3369,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(CallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- Value func_ref = Peek(0, 0);
+ Value func_ref = Peek(0);
ValueType func_type = func_ref.type;
if (func_type == kWasmBottom) {
// We are in unreachable code, maintain the polymorphic stack.
@@ -3392,7 +3395,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(ReturnCallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
CHECK_PROTOTYPE_OPCODE(return_call);
- Value func_ref = Peek(0, 0);
+ Value func_ref = Peek(0);
ValueType func_type = func_ref.type;
if (func_type == kWasmBottom) {
// We are in unreachable code, maintain the polymorphic stack.
@@ -3419,7 +3422,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
this->pc_, &opcode_length, "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
- CHECK_PROTOTYPE_OPCODE(reftypes);
+ this->detected_->Add(kFeature_reftypes);
}
trace_msg->AppendOpcode(full_opcode);
return DecodeNumericOpcode(full_opcode, opcode_length);
@@ -3511,8 +3514,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
static constexpr OpcodeHandler GetOpcodeHandlerTableEntry(size_t idx) {
DECODE_IMPL(Nop);
#define BUILD_SIMPLE_OPCODE(op, _, sig) DECODE_IMPL(op);
- FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+ FOREACH_SIMPLE_NON_CONST_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
+#define BUILD_SIMPLE_EXTENDED_CONST_OPCODE(op, _, sig) DECODE_IMPL_CONST(op);
+ FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(BUILD_SIMPLE_EXTENDED_CONST_OPCODE)
+#undef BUILD_SIMPLE_EXTENDED_CONST_OPCODE
DECODE_IMPL(Block);
DECODE_IMPL(Rethrow);
DECODE_IMPL(Throw);
@@ -3692,8 +3698,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(static_cast<int>(type->field_count()));
}
- V8_INLINE ArgVector PeekArgs(uint32_t base_index,
- base::Vector<ValueType> arg_types) {
+ V8_INLINE ArgVector PeekArgs(base::Vector<ValueType> arg_types) {
int size = static_cast<int>(arg_types.size());
EnsureStackArguments(size);
ArgVector args(stack_value(size), arg_types.size());
@@ -4050,28 +4055,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ ValueType rtt_type = ValueType::Rtt(imm.index);
Value rtt = opcode == kExprStructNew
- ? CreateValue(ValueType::Rtt(imm.index))
- : Peek(0, imm.struct_type->field_count());
+ ? CreateValue(rtt_type)
+ : Peek(0, imm.struct_type->field_count(), rtt_type);
if (opcode == kExprStructNew) {
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
- } else {
- DCHECK_EQ(opcode, kExprStructNewWithRtt);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index &&
- rtt.type.has_depth()))) {
- PopTypeError(
- imm.struct_type->field_count(), rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
- }
}
ArgVector args = PeekArgs(imm.struct_type, 1);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4098,27 +4088,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
}
}
- Value rtt = opcode == kExprStructNewDefault
- ? CreateValue(ValueType::Rtt(imm.index))
- : Peek(0, 0);
+ ValueType rtt_type = ValueType::Rtt(imm.index);
+ Value rtt = opcode == kExprStructNewDefault ? CreateValue(rtt_type)
+ : Peek(0, 0, rtt_type);
if (opcode == kExprStructNewDefault) {
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
- } else {
- DCHECK_EQ(opcode, kExprStructNewDefaultWithRtt);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(0, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index &&
- rtt.type.has_depth()))) {
- PopTypeError(
- 0, rtt, "rtt with depth for type " + std::to_string(imm.index));
- return 0;
- }
}
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewDefault, imm, rtt, &value);
@@ -4197,27 +4172,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = opcode == kExprArrayNew
- ? CreateValue(ValueType::Rtt(imm.index))
- : Peek(0, 2);
+ ValueType rtt_type = ValueType::Rtt(imm.index);
+ Value rtt = opcode == kExprArrayNew ? CreateValue(rtt_type)
+ : Peek(0, 2, rtt_type);
if (opcode == kExprArrayNew) {
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
- } else {
- DCHECK_EQ(opcode, kExprArrayNewWithRtt);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(2, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index &&
- rtt.type.has_depth()))) {
- PopTypeError(
- 2, rtt, "rtt with depth for type " + std::to_string(imm.index));
- return 0;
- }
}
Value length = Peek(1, 1, kWasmI32);
Value initial_value =
@@ -4241,27 +4201,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
imm.array_type->element_type().name().c_str());
return 0;
}
- Value rtt = opcode == kExprArrayNewDefault
- ? CreateValue(ValueType::Rtt(imm.index))
- : Peek(0, 1);
+ ValueType rtt_type = ValueType::Rtt(imm.index);
+ Value rtt = opcode == kExprArrayNewDefault ? CreateValue(rtt_type)
+ : Peek(0, 1, rtt_type);
if (opcode == kExprArrayNewDefault) {
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
- } else {
- DCHECK_EQ(opcode, kExprArrayNewDefaultWithRtt);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index &&
- rtt.type.has_depth()))) {
- PopTypeError(
- 1, rtt, "rtt with depth for type " + std::to_string(imm.index));
- return 0;
- }
}
Value length = Peek(1, 0, kWasmI32);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4271,6 +4216,54 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
+ case kExprArrayInitFromData:
+ case kExprArrayInitFromDataStatic: {
+ ArrayIndexImmediate<validate> array_imm(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
+ ValueType element_type = array_imm.array_type->element_type();
+ if (element_type.is_reference()) {
+ this->DecodeError(
+ "array.init_from_data can only be used with value-type arrays, "
+ "found array type #%d instead",
+ array_imm.index);
+ return 0;
+ }
+#if V8_TARGET_BIG_ENDIAN
+ // Byte sequences in data segments are interpreted as little endian for
+ // the purposes of this instruction. This means that those will have to
+ // be transformed in big endian architectures. TODO(7748): Implement.
+ if (element_type.element_size_bytes() > 1) {
+ UNIMPLEMENTED();
+ }
+#endif
+ const byte* data_index_pc =
+ this->pc_ + opcode_length + array_imm.length;
+ IndexImmediate<validate> data_segment(this, data_index_pc,
+ "data segment");
+ if (!this->ValidateDataSegment(data_index_pc, data_segment)) return 0;
+
+ ValueType rtt_type = ValueType::Rtt(array_imm.index);
+ Value rtt = opcode == kExprArrayInitFromDataStatic
+ ? CreateValue(rtt_type)
+ : Peek(0, 2, rtt_type);
+ if (opcode == kExprArrayInitFromDataStatic) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, array_imm.index, &rtt);
+ Push(rtt);
+ }
+
+ Value length = Peek(1, 1, kWasmI32);
+ Value offset = Peek(2, 0, kWasmI32);
+
+ Value array =
+ CreateValue(ValueType::Ref(array_imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayInitFromData, array_imm,
+ data_segment, offset, length, rtt,
+ &array);
+ Drop(3); // rtt, length, offset
+ Push(array);
+ return opcode_length + array_imm.length + data_segment.length;
+ }
case kExprArrayGetS:
case kExprArrayGetU: {
NON_CONST_ONLY
@@ -4332,9 +4325,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayLen: {
NON_CONST_ONLY
+ // Read but ignore an immediate array type index.
+ // TODO(7748): Remove this once we are ready to make breaking changes.
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value array_obj = Peek(0, 0, ValueType::Ref(imm.index, kNullable));
+ Value array_obj =
+ Peek(0, 0, ValueType::Ref(HeapType::kArray, kNullable));
Value value = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayLen, array_obj, &value);
Drop(array_obj);
@@ -4442,57 +4437,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- Value value = CreateValue(ValueType::Rtt(
- imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ Value value = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
Push(value);
return opcode_length + imm.length;
}
- case kExprRttFreshSub:
- case kExprRttSub: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "type index");
- if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- Value parent = Peek(0, 0);
- if (parent.type.is_bottom()) {
- DCHECK(!current_code_reachable_and_ok_);
- // Just leave the unreachable/bottom value on the stack.
- } else {
- if (!VALIDATE(parent.type.is_rtt() &&
- IsHeapSubtypeOf(imm.index, parent.type.ref_index(),
- this->module_))) {
- PopTypeError(
- 0, parent,
- "rtt for a supertype of type " + std::to_string(imm.index));
- return 0;
- }
- Value value = parent.type.has_depth()
- ? CreateValue(ValueType::Rtt(
- imm.index, parent.type.depth() + 1))
- : CreateValue(ValueType::Rtt(imm.index));
-
- WasmRttSubMode mode = opcode == kExprRttSub
- ? WasmRttSubMode::kCanonicalize
- : WasmRttSubMode::kFresh;
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RttSub, imm.index, parent, &value,
- mode);
- Drop(parent);
- Push(value);
- }
- return opcode_length + imm.length;
- }
case kExprRefTest:
case kExprRefTestStatic: {
NON_CONST_ONLY
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ Value rtt = Peek(0); // This is safe for the ...Static instruction.
if (opcode == kExprRefTestStatic) {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
- rtt = CreateValue(ValueType::Rtt(
- imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ rtt = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
} else {
@@ -4502,7 +4462,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
}
- Value obj = Peek(1, 0);
+ Value obj = Peek(1);
Value value = CreateValue(kWasmI32);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
@@ -4542,14 +4502,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprRefCast:
case kExprRefCastStatic: {
NON_CONST_ONLY
- Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ Value rtt = Peek(0); // This is safe for the ...Static instruction.
if (opcode == kExprRefCastStatic) {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
- rtt = CreateValue(ValueType::Rtt(
- imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ rtt = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
} else {
@@ -4559,7 +4518,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
}
- Value obj = Peek(1, 0);
+ Value obj = Peek(1);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4593,7 +4552,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
CALL_INTERFACE(AssertNull, obj, &value);
} else {
CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
- EndControl();
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
}
} else {
CALL_INTERFACE(RefCast, obj, rtt, &value);
@@ -4612,14 +4573,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ uint32_t pc_offset = opcode_length + branch_depth.length;
+ Value rtt = Peek(0); // This is safe for the ...Static instruction.
if (opcode == kExprBrOnCastStatic) {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ IndexImmediate<validate> imm(this, this->pc_ + pc_offset,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- opcode_length += imm.length;
- rtt = CreateValue(ValueType::Rtt(
- imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ pc_offset += imm.length;
+ rtt = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
} else {
@@ -4629,7 +4590,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
}
- Value obj = Peek(1, 0);
+ Value obj = Peek(1);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4655,24 +4616,28 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), kNonNullable));
Push(result_on_branch);
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack, so we can't
+ // reuse {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
if (V8_LIKELY(current_code_reachable_and_ok_)) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, rtt))) {
CALL_INTERFACE(Drop); // rtt
+ CALL_INTERFACE(Forward, obj, value_on_branch);
// The branch will still not be taken on null.
if (obj.type.is_nullable()) {
CALL_INTERFACE(BrOnNonNull, obj, branch_depth.depth);
} else {
CALL_INTERFACE(BrOrRet, branch_depth.depth, 0);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
}
c->br_merge()->reached = true;
} else if (V8_LIKELY(!TypeCheckAlwaysFails(obj, rtt))) {
- // The {value_on_branch} parameter we pass to the interface must
- // be pointer-identical to the object on the stack, so we can't
- // reuse {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
CALL_INTERFACE(BrOnCast, obj, rtt, value_on_branch,
branch_depth.depth);
c->br_merge()->reached = true;
@@ -4682,7 +4647,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
- return opcode_length + branch_depth.length;
+ return pc_offset;
}
case kExprBrOnCastFail:
case kExprBrOnCastStaticFail: {
@@ -4693,14 +4658,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ uint32_t pc_offset = opcode_length + branch_depth.length;
+ Value rtt = Peek(0); // This is safe for the ...Static instruction.
if (opcode == kExprBrOnCastStaticFail) {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ IndexImmediate<validate> imm(this, this->pc_ + pc_offset,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- opcode_length += imm.length;
- rtt = CreateValue(ValueType::Rtt(
- imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ pc_offset += imm.length;
+ rtt = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Push(rtt);
} else {
@@ -4710,7 +4675,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
}
- Value obj = Peek(1, 0);
+ Value obj = Peek(1);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4771,43 +4736,83 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Make sure the correct value is on the stack state on fallthrough.
Drop(obj);
Push(result_on_fallthrough);
- return opcode_length + branch_depth.length;
+ return pc_offset;
}
-#define ABSTRACT_TYPE_CHECK(heap_type) \
- case kExprRefIs##heap_type: { \
- NON_CONST_ONLY \
- Value arg = Peek(0, 0, kWasmAnyRef); \
- Value result = CreateValue(kWasmI32); \
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RefIs##heap_type, arg, &result); \
- Drop(arg); \
- Push(result); \
- return opcode_length; \
+#define ABSTRACT_TYPE_CHECK(h_type) \
+ case kExprRefIs##h_type: { \
+ NON_CONST_ONLY \
+ Value arg = Peek(0, 0, kWasmAnyRef); \
+ if (this->failed()) return 0; \
+ Value result = CreateValue(kWasmI32); \
+ if (V8_LIKELY(current_code_reachable_and_ok_)) { \
+ if (IsHeapSubtypeOf(arg.type.heap_representation(), HeapType::k##h_type, \
+ this->module_)) { \
+ if (arg.type.is_nullable()) { \
+ /* We abuse ref.as_non_null, which isn't otherwise used as a unary \
+ * operator, as a sentinel for the negation of ref.is_null. */ \
+ CALL_INTERFACE(UnOp, kExprRefAsNonNull, arg, &result); \
+ } else { \
+ CALL_INTERFACE(Drop); \
+ CALL_INTERFACE(I32Const, &result, 1); \
+ } \
+ } else if (!IsHeapSubtypeOf(HeapType::k##h_type, \
+ arg.type.heap_representation(), \
+ this->module_)) { \
+ CALL_INTERFACE(Drop); \
+ CALL_INTERFACE(I32Const, &result, 0); \
+ } else { \
+ CALL_INTERFACE(RefIs##h_type, arg, &result); \
+ } \
+ } \
+ Drop(arg); \
+ Push(result); \
+ return opcode_length; \
}
-
ABSTRACT_TYPE_CHECK(Data)
ABSTRACT_TYPE_CHECK(Func)
ABSTRACT_TYPE_CHECK(I31)
+ ABSTRACT_TYPE_CHECK(Array)
#undef ABSTRACT_TYPE_CHECK
-#define ABSTRACT_TYPE_CAST(heap_type) \
- case kExprRefAs##heap_type: { \
- NON_CONST_ONLY \
- Value arg = Peek(0, 0, kWasmAnyRef); \
- Value result = \
- CreateValue(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RefAs##heap_type, arg, &result); \
- Drop(arg); \
- Push(result); \
- return opcode_length; \
+#define ABSTRACT_TYPE_CAST(h_type) \
+ case kExprRefAs##h_type: { \
+ NON_CONST_ONLY \
+ Value arg = Peek(0, 0, kWasmAnyRef); \
+ ValueType non_nullable_abstract_type = \
+ ValueType::Ref(HeapType::k##h_type, kNonNullable); \
+ Value result = CreateValue(non_nullable_abstract_type); \
+ if (V8_LIKELY(current_code_reachable_and_ok_)) { \
+ if (IsHeapSubtypeOf(arg.type.heap_representation(), HeapType::k##h_type, \
+ this->module_)) { \
+ if (arg.type.is_nullable()) { \
+ CALL_INTERFACE(RefAsNonNull, arg, &result); \
+ } else { \
+ CALL_INTERFACE(Forward, arg, &result); \
+ } \
+ } else if (!IsHeapSubtypeOf(HeapType::k##h_type, \
+ arg.type.heap_representation(), \
+ this->module_)) { \
+ CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast); \
+ /* We know that the following code is not reachable, but according */ \
+ /* to the spec it technically is. Set it to spec-only reachable. */ \
+ SetSucceedingCodeDynamicallyUnreachable(); \
+ } else { \
+ CALL_INTERFACE(RefAs##h_type, arg, &result); \
+ } \
+ } \
+ Drop(arg); \
+ Push(result); \
+ return opcode_length; \
}
-
ABSTRACT_TYPE_CAST(Data)
ABSTRACT_TYPE_CAST(Func)
ABSTRACT_TYPE_CAST(I31)
+ ABSTRACT_TYPE_CAST(Array)
#undef ABSTRACT_TYPE_CAST
case kExprBrOnData:
case kExprBrOnFunc:
+ case kExprBrOnArray:
case kExprBrOnI31: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
@@ -4834,7 +4839,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
HeapType::Representation heap_type =
opcode == kExprBrOnFunc
? HeapType::kFunc
- : opcode == kExprBrOnData ? HeapType::kData : HeapType::kI31;
+ : opcode == kExprBrOnData
+ ? HeapType::kData
+ : opcode == kExprBrOnArray ? HeapType::kArray
+ : HeapType::kI31;
Value result_on_branch =
CreateValue(ValueType::Ref(heap_type, kNonNullable));
Push(result_on_branch);
@@ -4848,6 +4856,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
CALL_INTERFACE(BrOnFunc, obj, value_on_branch, branch_depth.depth);
} else if (opcode == kExprBrOnData) {
CALL_INTERFACE(BrOnData, obj, value_on_branch, branch_depth.depth);
+ } else if (opcode == kExprBrOnArray) {
+ CALL_INTERFACE(BrOnArray, obj, value_on_branch, branch_depth.depth);
} else {
CALL_INTERFACE(BrOnI31, obj, value_on_branch, branch_depth.depth);
}
@@ -4859,6 +4869,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprBrOnNonData:
case kExprBrOnNonFunc:
+ case kExprBrOnNonArray:
case kExprBrOnNonI31: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
@@ -4880,7 +4891,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
HeapType::Representation heap_type =
opcode == kExprBrOnNonFunc
? HeapType::kFunc
- : opcode == kExprBrOnNonData ? HeapType::kData : HeapType::kI31;
+ : opcode == kExprBrOnNonData
+ ? HeapType::kData
+ : opcode == kExprBrOnNonArray ? HeapType::kArray
+ : HeapType::kI31;
Value value_on_fallthrough =
CreateValue(ValueType::Ref(heap_type, kNonNullable));
@@ -4891,6 +4905,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
} else if (opcode == kExprBrOnNonData) {
CALL_INTERFACE(BrOnNonData, obj, &value_on_fallthrough,
branch_depth.depth);
+ } else if (opcode == kExprBrOnNonArray) {
+ CALL_INTERFACE(BrOnNonArray, obj, &value_on_fallthrough,
+ branch_depth.depth);
} else {
CALL_INTERFACE(BrOnNonI31, obj, &value_on_fallthrough,
branch_depth.depth);
@@ -4973,10 +4990,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
unsigned DecodeNumericOpcode(WasmOpcode opcode, uint32_t opcode_length) {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (!VALIDATE(sig != nullptr)) {
- this->DecodeError("invalid numeric opcode");
- return 0;
- }
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -4992,10 +5005,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryInit: {
MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Peek(0, 2, sig->GetParam(2));
- Value src = Peek(1, 1, sig->GetParam(1));
- Value dst = Peek(2, 0, sig->GetParam(0));
- CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryInit, imm, dst, src, size);
+ ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value size = Peek(0, 2, kWasmI32);
+ Value offset = Peek(1, 1, kWasmI32);
+ Value dst = Peek(2, 0, mem_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryInit, imm, dst, offset, size);
Drop(3);
return opcode_length + imm.length;
}
@@ -5011,9 +5025,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryCopy: {
MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Peek(0, 2, sig->GetParam(2));
- Value src = Peek(1, 1, sig->GetParam(1));
- Value dst = Peek(2, 0, sig->GetParam(0));
+ ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value size = Peek(0, 2, mem_type);
+ Value src = Peek(1, 1, mem_type);
+ Value dst = Peek(2, 0, mem_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryCopy, imm, dst, src, size);
Drop(3);
return opcode_length + imm.length;
@@ -5021,9 +5036,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Peek(0, 2, sig->GetParam(2));
- Value value = Peek(1, 1, sig->GetParam(1));
- Value dst = Peek(2, 0, sig->GetParam(0));
+ ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value size = Peek(0, 2, mem_type);
+ Value value = Peek(1, 1, kWasmI32);
+ Value dst = Peek(2, 0, mem_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryFill, imm, dst, value, size);
Drop(3);
return opcode_length + imm.length;
@@ -5059,7 +5075,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"table index");
if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
- Value delta = Peek(0, 1, sig->GetParam(1));
+ Value delta = Peek(0, 1, kWasmI32);
Value value = Peek(1, 0, this->module_->tables[imm.index].type);
Value result = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableGrow, imm, value, delta,
@@ -5081,9 +5097,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"table index");
if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
- Value count = Peek(0, 2, sig->GetParam(2));
+ Value count = Peek(0, 2, kWasmI32);
Value value = Peek(1, 1, this->module_->tables[imm.index].type);
- Value start = Peek(2, 0, sig->GetParam(0));
+ Value start = Peek(2, 0, kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableFill, imm, start, value, count);
Drop(3);
return opcode_length + imm.length;
@@ -5183,7 +5199,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
V8_INLINE Value Peek(int depth, int index, ValueType expected) {
- Value val = Peek(depth, index);
+ Value val = Peek(depth);
if (!VALIDATE(IsSubtypeOf(val.type, expected, this->module_) ||
val.type == kWasmBottom || expected == kWasmBottom)) {
PopTypeError(index, val, expected);
@@ -5191,7 +5207,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return val;
}
- V8_INLINE Value Peek(int depth, int index) {
+ V8_INLINE Value Peek(int depth) {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
if (V8_UNLIKELY(stack_size() <= limit + depth)) {
@@ -5407,11 +5423,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int BuildSimpleOperator(WasmOpcode opcode, const FunctionSig* sig) {
DCHECK_GE(1, sig->return_count());
- ValueType ret = sig->return_count() == 0 ? kWasmVoid : sig->GetReturn(0);
if (sig->parameter_count() == 1) {
- return BuildSimpleOperator(opcode, ret, sig->GetParam(0));
+ // All current simple unary operators have exactly 1 return value.
+ DCHECK_EQ(1, sig->return_count());
+ return BuildSimpleOperator(opcode, sig->GetReturn(0), sig->GetParam(0));
} else {
DCHECK_EQ(2, sig->parameter_count());
+ ValueType ret = sig->return_count() == 0 ? kWasmVoid : sig->GetReturn(0);
return BuildSimpleOperator(opcode, ret, sig->GetParam(0),
sig->GetParam(1));
}
@@ -5419,16 +5437,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType arg_type) {
+ DCHECK_NE(kWasmVoid, return_type);
Value val = Peek(0, 0, arg_type);
- if (return_type == kWasmVoid) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, opcode, val, nullptr);
- Drop(val);
- } else {
- Value ret = CreateValue(return_type);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, opcode, val, &ret);
- Drop(val);
- Push(ret);
- }
+ Value ret = CreateValue(return_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, opcode, val, &ret);
+ Drop(val);
+ Push(ret);
return 1;
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index d5a82073d2..99928ed6e6 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -20,6 +20,17 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace value_type_reader {
+HeapType consume_heap_type(Decoder* decoder, const WasmModule* module,
+ const WasmFeatures& enabled) {
+ uint32_t length;
+ HeapType result = value_type_reader::read_heap_type<Decoder::kFullValidation>(
+ decoder, decoder->pc(), &length, module, enabled);
+ decoder->consume_bytes(length, "heap type");
+ return result;
+}
+} // namespace value_type_reader
+
bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const WasmModule* module, const byte* start,
const byte* end) {
@@ -95,8 +106,8 @@ void PrintRawWasmCode(const byte* start, const byte* end) {
namespace {
const char* RawOpcodeName(WasmOpcode opcode) {
switch (opcode) {
-#define DECLARE_NAME_CASE(name, opcode, sig) \
- case kExpr##name: \
+#define DECLARE_NAME_CASE(name, ...) \
+ case kExpr##name: \
return "kExpr" #name;
FOREACH_OPCODE(DECLARE_NAME_CASE)
#undef DECLARE_NAME_CASE
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index e520a7d680..5aa2eabd4b 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -64,7 +64,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
bool source_positions = is_asmjs_module(env->module);
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
env, kind, sig, source_positions,
- static_cast<int>(sig->parameter_count()));
+ static_cast<int>(sig->parameter_count()), wasm::kNoSuspend);
return result;
}
@@ -175,14 +175,21 @@ bool UseGenericWrapper(const FunctionSig* sig) {
if (sig->returns().size() > 1) {
return false;
}
- if (sig->returns().size() == 1 && sig->GetReturn(0).kind() != kI32 &&
- sig->GetReturn(0).kind() != kI64 && sig->GetReturn(0).kind() != kF32 &&
- sig->GetReturn(0).kind() != kF64) {
- return false;
+ if (sig->returns().size() == 1) {
+ ValueType ret = sig->GetReturn(0);
+ if (ret.kind() == kS128) return false;
+ if (ret.is_reference()) {
+ if (ret.heap_representation() != wasm::HeapType::kAny &&
+ ret.heap_representation() != wasm::HeapType::kFunc) {
+ return false;
+ }
+ }
}
for (ValueType type : sig->parameters()) {
if (type.kind() != kI32 && type.kind() != kI64 && type.kind() != kF32 &&
- type.kind() != kF64) {
+ type.kind() != kF64 &&
+ !(type.is_reference() &&
+ type.heap_representation() == wasm::HeapType::kAny)) {
return false;
}
}
@@ -220,7 +227,9 @@ void JSToWasmWrapperCompilationUnit::Execute() {
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
if (use_generic_wrapper_) {
- return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
+ return FromCodeT(
+ isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper),
+ isolate_);
}
CompilationJob::Status status = job_->FinalizeJob(isolate_);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 53bfaccf74..bd3317b3b2 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -212,14 +212,14 @@ class WasmGraphBuildingInterface {
nesting_depth++;
}
}
- // If this loop is nested, the parent loop's is_innermost field needs to
- // be false. If the last loop in loop_infos_ has less depth, it has to be
- // the parent loop. If it does not, it means another loop has been found
- // within the parent loop, and that loop will have set the parent's
- // is_innermost to false, so we do not need to do anything.
+ // If this loop is nested, the parent loop's can_be_innermost field needs
+ // to be false. If the last loop in loop_infos_ has less depth, it has to
+ // be the parent loop. If it does not, it means another loop has been
+ // found within the parent loop, and that loop will have set the parent's
+ // can_be_innermost to false, so we do not need to do anything.
if (nesting_depth > 0 &&
loop_infos_.back().nesting_depth < nesting_depth) {
- loop_infos_.back().is_innermost = false;
+ loop_infos_.back().can_be_innermost = false;
}
loop_infos_.emplace_back(loop_node, nesting_depth, true);
}
@@ -235,11 +235,16 @@ class WasmGraphBuildingInterface {
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->num_locals(), decoder->zone());
if (decoder->failed()) return;
+ int instance_cache_index = decoder->num_locals();
+ // If the module has shared memory, the stack guard might reallocate the
+ // shared memory. We have to assume the instance cache will be updated.
+ if (decoder->module_->has_shared_memory) {
+ assigned->Add(instance_cache_index);
+ }
DCHECK_NOT_NULL(assigned);
decoder->control_at(0)->loop_assignments = assigned;
// Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->num_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
TFNode* inputs[] = {ssa_env_->locals[i], control()};
@@ -253,7 +258,10 @@ class WasmGraphBuildingInterface {
// Now we setup a new environment for the inside of the loop.
SetEnv(Split(decoder->zone(), ssa_env_));
- builder_->StackCheck(decoder->position());
+ builder_->StackCheck(decoder->module_->has_shared_memory
+ ? &ssa_env_->instance_cache
+ : nullptr,
+ decoder->position());
ssa_env_->SetNotMerged();
// Wrap input merge into phis.
@@ -442,11 +450,6 @@ class WasmGraphBuildingInterface {
}
void Trap(FullDecoder* decoder, TrapReason reason) {
- ValueVector values;
- if (emit_loop_exits()) {
- BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
- values);
- }
builder_->Trap(reason, decoder->position());
}
@@ -845,7 +848,7 @@ class WasmGraphBuildingInterface {
CheckForException(decoder,
builder_->Throw(imm.index, imm.tag, base::VectorOf(args),
decoder->position()));
- TerminateThrow(decoder);
+ builder_->TerminateThrow(effect(), control());
}
void Rethrow(FullDecoder* decoder, Control* block) {
@@ -853,7 +856,7 @@ class WasmGraphBuildingInterface {
TFNode* exception = block->try_info->exception;
DCHECK_NOT_NULL(exception);
CheckForException(decoder, builder_->Rethrow(exception));
- TerminateThrow(decoder);
+ builder_->TerminateThrow(effect(), control());
}
void CatchException(FullDecoder* decoder,
@@ -910,7 +913,7 @@ class WasmGraphBuildingInterface {
// We just throw to the caller here, so no need to generate IfSuccess
// and IfFailure nodes.
builder_->Rethrow(block->try_info->exception);
- TerminateThrow(decoder);
+ builder_->TerminateThrow(effect(), control());
return;
}
DCHECK(decoder->control_at(depth)->is_try());
@@ -1064,7 +1067,7 @@ class WasmGraphBuildingInterface {
rtt.node, decoder->position());
// array.new_with_rtt introduces a loop. Therefore, we have to mark the
// immediately nesting loop (if any) as non-innermost.
- if (!loop_infos_.empty()) loop_infos_.back().is_innermost = false;
+ if (!loop_infos_.empty()) loop_infos_.back().can_be_innermost = false;
}
void ArrayNewDefault(FullDecoder* decoder,
@@ -1113,8 +1116,18 @@ class WasmGraphBuildingInterface {
for (uint32_t i = 0; i < elements.size(); i++) {
element_nodes[i] = elements[i].node;
}
- result->node = builder_->ArrayInit(imm.index, imm.array_type, rtt.node,
- VectorOf(element_nodes));
+ result->node =
+ builder_->ArrayInit(imm.array_type, rtt.node, VectorOf(element_nodes));
+ }
+
+ void ArrayInitFromData(FullDecoder* decoder,
+ const ArrayIndexImmediate<validate>& array_imm,
+ const IndexImmediate<validate>& data_segment,
+ const Value& offset, const Value& length,
+ const Value& rtt, Value* result) {
+ result->node = builder_->ArrayInitFromData(
+ array_imm.array_type, data_segment.index, offset.node, length.node,
+ rtt.node, decoder->position());
}
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
@@ -1133,11 +1146,6 @@ class WasmGraphBuildingInterface {
result->node = builder_->RttCanon(type_index);
}
- void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
- Value* result, WasmRttSubMode mode) {
- result->node = builder_->RttSub(type_index, parent.node, mode);
- }
-
using StaticKnowledge = compiler::WasmGraphBuilder::ObjectReferenceKnowledge;
StaticKnowledge ComputeStaticKnowledge(ValueType object_type,
@@ -1148,10 +1156,13 @@ class WasmGraphBuildingInterface {
DCHECK(object_type.is_object_reference()); // Checked by validation.
// In the bottom case, the result is irrelevant.
result.reference_kind =
- rtt_type != kWasmBottom && module->has_signature(rtt_type.ref_index())
+ !rtt_type.is_bottom() && module->has_signature(rtt_type.ref_index())
? compiler::WasmGraphBuilder::kFunction
: compiler::WasmGraphBuilder::kArrayOrStruct;
- result.rtt_depth = rtt_type.has_depth() ? rtt_type.depth() : -1;
+ result.rtt_depth = rtt_type.is_bottom()
+ ? 0 /* unused */
+ : static_cast<uint8_t>(GetSubtypingDepth(
+ module, rtt_type.ref_index()));
return result;
}
@@ -1253,6 +1264,29 @@ class WasmGraphBuildingInterface {
br_depth, false);
}
+ void RefIsArray(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = builder_->RefIsArray(object.node, object.type.is_nullable());
+ }
+
+ void RefAsArray(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = builder_->RefAsArray(object.node, object.type.is_nullable(),
+ decoder->position());
+ }
+
+ void BrOnArray(FullDecoder* decoder, const Value& object,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnArray>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
+ true);
+ }
+
+ void BrOnNonArray(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnArray>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
+ br_depth, false);
+ }
+
void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
result->node = builder_->RefIsI31(object.node);
}
@@ -1304,12 +1338,19 @@ class WasmGraphBuildingInterface {
->try_info;
}
- // Loop exits are only used during loop unrolling and are then removed, as
- // they cannot be handled by later optimization stages. Since unrolling comes
- // before inlining in the compilation pipeline, we should not emit loop exits
- // in inlined functions. Also, we should not do so when unrolling is disabled.
+ // If {emit_loop_exits()} returns true, we need to emit LoopExit,
+ // LoopExitEffect, and LoopExit nodes whenever a control resp. effect resp.
+ // value escapes a loop. We emit loop exits in the following cases:
+ // - When popping the control of a loop.
+ // - At some nodes which connect to the graph's end. We do not always need to
+ // emit loop exits for such nodes, since the wasm loop analysis algorithm
+ // can handle a loop body which connects directly to the graph's end.
+ // However, we need to emit them anyway for nodes that may be rewired to
+ // different nodes during inlining. These are Return and TailCall nodes.
+ // - After IfFailure nodes.
+ // - When exiting a loop through Delegate.
bool emit_loop_exits() {
- return FLAG_wasm_loop_unrolling && inlined_status_ == kRegularFunction;
+ return FLAG_wasm_loop_unrolling || FLAG_wasm_loop_peeling;
}
void GetNodes(TFNode** nodes, Value* values, size_t count) {
@@ -1354,17 +1395,18 @@ class WasmGraphBuildingInterface {
builder_->set_instance_cache(&env->instance_cache);
}
- V8_INLINE TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
- if (node == nullptr) return nullptr;
+ TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
+ DCHECK_NOT_NULL(node);
+ // We need to emit IfSuccess/IfException nodes if this node throws and has
+ // an exception handler. An exception handler can either be a try-scope
+ // around this node, or if this function is being inlined, the IfException
+ // output of the inlined Call node.
const bool inside_try_scope = decoder->current_catch() != -1;
- if (!inside_try_scope) return node;
-
- return CheckForExceptionImpl(decoder, node);
- }
+ if (inlined_status_ != kInlinedHandledCall && !inside_try_scope) {
+ return node;
+ }
- V8_NOINLINE TFNode* CheckForExceptionImpl(FullDecoder* decoder,
- TFNode* node) {
TFNode* if_success = nullptr;
TFNode* if_exception = nullptr;
if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
@@ -1378,21 +1420,33 @@ class WasmGraphBuildingInterface {
exception_env->control = if_exception;
exception_env->effect = if_exception;
SetEnv(exception_env);
- TryInfo* try_info = current_try_info(decoder);
+
if (emit_loop_exits()) {
ValueVector values;
- BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
+ BuildNestedLoopExits(decoder,
+ inside_try_scope
+ ? decoder->control_depth_of_current_catch()
+ : decoder->control_depth() - 1,
true, values, &if_exception);
}
- Goto(decoder, try_info->catch_env);
- if (try_info->exception == nullptr) {
- DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
- try_info->exception = if_exception;
+ if (inside_try_scope) {
+ TryInfo* try_info = current_try_info(decoder);
+ Goto(decoder, try_info->catch_env);
+ if (try_info->exception == nullptr) {
+ DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
+ try_info->exception = if_exception;
+ } else {
+ DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
+ try_info->exception = builder_->CreateOrMergeIntoPhi(
+ MachineRepresentation::kTaggedPointer, try_info->catch_env->control,
+ try_info->exception, if_exception);
+ }
} else {
- DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
- try_info->exception = builder_->CreateOrMergeIntoPhi(
- MachineRepresentation::kTaggedPointer, try_info->catch_env->control,
- try_info->exception, if_exception);
+ DCHECK_EQ(inlined_status_, kInlinedHandledCall);
+ // Leave the IfException/LoopExit node dangling. We will connect it during
+ // inlining to the handler of the inlined call.
+ // Note: We have to generate the handler now since we have no way of
+ // generating a LoopExit if needed in the inlining code.
}
SetEnv(success_env);
@@ -1417,7 +1471,6 @@ class WasmGraphBuildingInterface {
case kOptRef:
return builder_->RefNull();
case kRtt:
- case kRttWithDepth:
case kVoid:
case kBottom:
case kRef:
@@ -1683,9 +1736,11 @@ class WasmGraphBuildingInterface {
for (size_t i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
- // The invoked function could have used grow_memory, so we need to
- // reload mem_size and mem_start.
- LoadContextIntoSsa(ssa_env_);
+ if (decoder->module_->initial_pages != decoder->module_->maximum_pages) {
+ // The invoked function could have used grow_memory, so we need to
+ // reload mem_size and mem_start.
+ LoadContextIntoSsa(ssa_env_);
+ }
}
void DoReturnCall(FullDecoder* decoder, CallInfo call_info,
@@ -1725,23 +1780,17 @@ class WasmGraphBuildingInterface {
switch (call_info.call_mode()) {
case CallInfo::kCallIndirect:
- CheckForException(
- decoder,
- builder_->ReturnCallIndirect(
- call_info.table_index(), call_info.sig_index(), real_sig,
- base::VectorOf(arg_nodes), decoder->position()));
+ builder_->ReturnCallIndirect(
+ call_info.table_index(), call_info.sig_index(), real_sig,
+ base::VectorOf(arg_nodes), decoder->position());
break;
case CallInfo::kCallDirect:
- CheckForException(
- decoder, builder_->ReturnCall(call_info.callee_index(), real_sig,
- base::VectorOf(arg_nodes),
- decoder->position()));
+ builder_->ReturnCall(call_info.callee_index(), real_sig,
+ base::VectorOf(arg_nodes), decoder->position());
break;
case CallInfo::kCallRef:
- CheckForException(decoder,
- builder_->ReturnCallRef(
- real_sig, base::VectorOf(arg_nodes),
- call_info.null_check(), decoder->position()));
+ builder_->ReturnCallRef(real_sig, base::VectorOf(arg_nodes),
+ call_info.null_check(), decoder->position());
break;
}
}
@@ -1804,21 +1853,6 @@ class WasmGraphBuildingInterface {
}
}
- void TerminateThrow(FullDecoder* decoder) {
- if (emit_loop_exits()) {
- SsaEnv* internal_env = ssa_env_;
- SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
- SetEnv(exit_env);
- ValueVector stack_values;
- BuildNestedLoopExits(decoder, decoder->control_depth(), false,
- stack_values);
- builder_->TerminateThrow(effect(), control());
- SetEnv(internal_env);
- } else {
- builder_->TerminateThrow(effect(), control());
- }
- }
-
CheckForNull NullCheckFor(ValueType type) {
DCHECK(type.is_object_reference());
return (!FLAG_experimental_wasm_skip_null_checks && type.is_nullable())
@@ -1847,9 +1881,8 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
- if (FLAG_wasm_loop_unrolling && inlined_status == kRegularFunction) {
- *loop_infos = decoder.interface().loop_infos();
- }
+ *loop_infos = decoder.interface().loop_infos();
+
return decoder.toResult(nullptr);
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index 49d9dd353c..a22ee8f410 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -27,7 +27,15 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
-enum InlinedStatus { kInlinedFunction, kRegularFunction };
+enum InlinedStatus {
+ // Inlined function whose call node has IfSuccess/IfException outputs.
+ kInlinedHandledCall,
+ // Inlined function whose call node does not have IfSuccess/IfException
+ // outputs.
+ kInlinedNonHandledCall,
+ // Not an inlined call.
+ kRegularFunction
+};
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
diff --git a/deps/v8/src/wasm/init-expr-interface.cc b/deps/v8/src/wasm/init-expr-interface.cc
index 48cea65260..685dab463b 100644
--- a/deps/v8/src/wasm/init-expr-interface.cc
+++ b/deps/v8/src/wasm/init-expr-interface.cc
@@ -19,65 +19,108 @@ namespace wasm {
void InitExprInterface::I32Const(FullDecoder* decoder, Value* result,
int32_t value) {
- if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+ if (generate_result()) result->runtime_value = WasmValue(value);
}
void InitExprInterface::I64Const(FullDecoder* decoder, Value* result,
int64_t value) {
- if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+ if (generate_result()) result->runtime_value = WasmValue(value);
}
void InitExprInterface::F32Const(FullDecoder* decoder, Value* result,
float value) {
- if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+ if (generate_result()) result->runtime_value = WasmValue(value);
}
void InitExprInterface::F64Const(FullDecoder* decoder, Value* result,
double value) {
- if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+ if (generate_result()) result->runtime_value = WasmValue(value);
}
void InitExprInterface::S128Const(FullDecoder* decoder,
Simd128Immediate<validate>& imm,
Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
result->runtime_value = WasmValue(imm.value, kWasmS128);
}
+void InitExprInterface::BinOp(FullDecoder* decoder, WasmOpcode opcode,
+ const Value& lhs, const Value& rhs,
+ Value* result) {
+ if (!generate_result()) return;
+ switch (opcode) {
+ case kExprI32Add:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i32() + rhs.runtime_value.to_i32());
+ break;
+ case kExprI32Sub:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i32() - rhs.runtime_value.to_i32());
+ break;
+ case kExprI32Mul:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i32() * rhs.runtime_value.to_i32());
+ break;
+ case kExprI64Add:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i64() + rhs.runtime_value.to_i64());
+ break;
+ case kExprI64Sub:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i64() - rhs.runtime_value.to_i64());
+ break;
+ case kExprI64Mul:
+ result->runtime_value =
+ WasmValue(lhs.runtime_value.to_i64() * rhs.runtime_value.to_i64());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
void InitExprInterface::RefNull(FullDecoder* decoder, ValueType type,
Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
result->runtime_value = WasmValue(isolate_->factory()->null_value(), type);
}
void InitExprInterface::RefFunc(FullDecoder* decoder, uint32_t function_index,
Value* result) {
- if (isolate_ != nullptr) {
- auto internal = WasmInstanceObject::GetOrCreateWasmInternalFunction(
- isolate_, instance_, function_index);
- result->runtime_value = WasmValue(
- internal, ValueType::Ref(module_->functions[function_index].sig_index,
- kNonNullable));
- } else {
+ if (isolate_ == nullptr) {
outer_module_->functions[function_index].declared = true;
+ return;
}
+ if (!generate_result()) return;
+ ValueType type = ValueType::Ref(module_->functions[function_index].sig_index,
+ kNonNullable);
+ Handle<WasmInternalFunction> internal =
+ WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate_, instance_,
+ function_index);
+ result->runtime_value = WasmValue(internal, type);
}
void InitExprInterface::GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
const WasmGlobal& global = module_->globals[imm.index];
+ DCHECK(!global.mutability);
result->runtime_value =
global.type.is_numeric()
- ? WasmValue(GetRawUntaggedGlobalPtr(global), global.type)
- : WasmValue(handle(tagged_globals_->get(global.offset), isolate_),
- global.type);
+ ? WasmValue(
+ reinterpret_cast<byte*>(
+ instance_->untagged_globals_buffer().backing_store()) +
+ global.offset,
+ global.type)
+ : WasmValue(
+ handle(instance_->tagged_globals_buffer().get(global.offset),
+ isolate_),
+ global.type);
}
void InitExprInterface::StructNewWithRtt(
FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
std::vector<WasmValue> field_values(imm.struct_type->field_count());
for (size_t i = 0; i < field_values.size(); i++) {
field_values[i] = args[i].runtime_value;
@@ -108,7 +151,6 @@ WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
return WasmValue(isolate->factory()->null_value(), type);
case kVoid:
case kRtt:
- case kRttWithDepth:
case kRef:
case kBottom:
UNREACHABLE();
@@ -119,7 +161,7 @@ WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
void InitExprInterface::StructNewDefault(
FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
const Value& rtt, Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
std::vector<WasmValue> field_values(imm.struct_type->field_count());
for (uint32_t i = 0; i < field_values.size(); i++) {
field_values[i] = DefaultValueForType(imm.struct_type->field(i), isolate_);
@@ -135,36 +177,55 @@ void InitExprInterface::ArrayInit(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements,
const Value& rtt, Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
std::vector<WasmValue> element_values;
for (Value elem : elements) element_values.push_back(elem.runtime_value);
result->runtime_value =
- WasmValue(isolate_->factory()->NewWasmArray(
+ WasmValue(isolate_->factory()->NewWasmArrayFromElements(
imm.array_type, element_values,
Handle<Map>::cast(rtt.runtime_value.to_ref())),
ValueType::Ref(HeapType(imm.index), kNonNullable));
}
+void InitExprInterface::ArrayInitFromData(
+ FullDecoder* decoder, const ArrayIndexImmediate<validate>& array_imm,
+ const IndexImmediate<validate>& data_segment_imm, const Value& offset_value,
+ const Value& length_value, const Value& rtt, Value* result) {
+ if (!generate_result()) return;
+
+ uint32_t length = length_value.runtime_value.to_u32();
+ uint32_t offset = offset_value.runtime_value.to_u32();
+ const WasmDataSegment& data_segment =
+ module_->data_segments[data_segment_imm.index];
+ uint32_t length_in_bytes =
+ length * array_imm.array_type->element_type().element_size_bytes();
+
+ // Error handling.
+ if (length >
+ static_cast<uint32_t>(WasmArray::MaxLength(array_imm.array_type))) {
+ error_ = "length for array.init_from_data too large";
+ return;
+ }
+ if (!base::IsInBounds<uint32_t>(offset, length_in_bytes,
+ data_segment.source.length())) {
+ error_ = "data segment is out of bounds";
+ return;
+ }
+
+ Address source =
+ instance_->data_segment_starts()[data_segment_imm.index] + offset;
+ Handle<WasmArray> array_value = isolate_->factory()->NewWasmArrayFromMemory(
+ length, Handle<Map>::cast(rtt.runtime_value.to_ref()), source);
+ result->runtime_value = WasmValue(
+ array_value, ValueType::Ref(HeapType(array_imm.index), kNonNullable));
+}
+
void InitExprInterface::RttCanon(FullDecoder* decoder, uint32_t type_index,
Value* result) {
- if (isolate_ == nullptr) return;
+ if (!generate_result()) return;
result->runtime_value = WasmValue(
handle(instance_->managed_object_maps().get(type_index), isolate_),
- ValueType::Rtt(type_index, 0));
-}
-
-void InitExprInterface::RttSub(FullDecoder* decoder, uint32_t type_index,
- const Value& parent, Value* result,
- WasmRttSubMode mode) {
- if (isolate_ == nullptr) return;
- ValueType type = parent.type.has_depth()
- ? ValueType::Rtt(type_index, parent.type.depth() + 1)
- : ValueType::Rtt(type_index);
- result->runtime_value =
- WasmValue(Handle<Object>::cast(AllocateSubRtt(
- isolate_, instance_, type_index,
- Handle<Map>::cast(parent.runtime_value.to_ref()), mode)),
- type);
+ ValueType::Rtt(type_index));
}
void InitExprInterface::DoReturn(FullDecoder* decoder,
@@ -172,12 +233,7 @@ void InitExprInterface::DoReturn(FullDecoder* decoder,
end_found_ = true;
// End decoding on "end".
decoder->set_end(decoder->pc() + 1);
- if (isolate_ != nullptr) result_ = decoder->stack_value(1)->runtime_value;
-}
-
-byte* InitExprInterface::GetRawUntaggedGlobalPtr(const WasmGlobal& global) {
- return reinterpret_cast<byte*>(untagged_globals_->backing_store()) +
- global.offset;
+ if (generate_result()) result_ = decoder->stack_value(1)->runtime_value;
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/init-expr-interface.h b/deps/v8/src/wasm/init-expr-interface.h
index bf08fbf51a..c936faf3ad 100644
--- a/deps/v8/src/wasm/init-expr-interface.h
+++ b/deps/v8/src/wasm/init-expr-interface.h
@@ -45,15 +45,11 @@ class InitExprInterface {
WasmFullDecoder<validate, InitExprInterface, decoding_mode>;
InitExprInterface(const WasmModule* module, Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Handle<FixedArray> tagged_globals,
- Handle<JSArrayBuffer> untagged_globals)
+ Handle<WasmInstanceObject> instance)
: module_(module),
outer_module_(nullptr),
isolate_(isolate),
- instance_(instance),
- tagged_globals_(tagged_globals),
- untagged_globals_(untagged_globals) {
+ instance_(instance) {
DCHECK_NOT_NULL(isolate);
}
@@ -67,7 +63,7 @@ class InitExprInterface {
#define UNREACHABLE_INTERFACE_FUNCTION(name, ...) \
V8_INLINE void name(FullDecoder* decoder, ##__VA_ARGS__) { UNREACHABLE(); }
INTERFACE_NON_CONSTANT_FUNCTIONS(UNREACHABLE_INTERFACE_FUNCTION)
-#undef EMPTY_INTERFACE_FUNCTION
+#undef UNREACHABLE_INTERFACE_FUNCTION
#define DECLARE_INTERFACE_FUNCTION(name, ...) \
void name(FullDecoder* decoder, ##__VA_ARGS__);
@@ -79,18 +75,18 @@ class InitExprInterface {
return result_;
}
bool end_found() { return end_found_; }
+ bool runtime_error() { return error_ != nullptr; }
+ const char* runtime_error_msg() { return error_; }
private:
- byte* GetRawUntaggedGlobalPtr(const WasmGlobal& global);
-
+ bool generate_result() { return isolate_ != nullptr && !runtime_error(); }
bool end_found_ = false;
+ const char* error_ = nullptr;
WasmValue result_;
const WasmModule* module_;
WasmModule* outer_module_;
Isolate* isolate_;
Handle<WasmInstanceObject> instance_;
- Handle<FixedArray> tagged_globals_;
- Handle<JSArrayBuffer> untagged_globals_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index a26e306b83..02683ab997 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -37,10 +37,6 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
LEBHelper::write_u32v(&pos, locals_count);
*pos = locals_type.value_type_code();
++pos;
- if (locals_type.has_depth()) {
- *pos = locals_type.depth();
- ++pos;
- }
if (locals_type.is_rtt()) {
LEBHelper::write_u32v(&pos, locals_type.ref_index());
}
@@ -72,7 +68,6 @@ size_t LocalDeclEncoder::Size() const {
size +=
LEBHelper::sizeof_u32v(p.first) + // number of locals
1 + // Opcode
- (p.second.has_depth() ? 1 : 0) + // Inheritance depth
(p.second.encoding_needs_heap_type()
? LEBHelper::sizeof_i32v(p.second.heap_type().code())
: 0) +
diff --git a/deps/v8/src/wasm/memory-protection-key.cc b/deps/v8/src/wasm/memory-protection-key.cc
index 5bf89edf89..6812faa925 100644
--- a/deps/v8/src/wasm/memory-protection-key.cc
+++ b/deps/v8/src/wasm/memory-protection-key.cc
@@ -5,7 +5,8 @@
#include "src/wasm/memory-protection-key.h"
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
-#include <sys/mman.h> // For {mprotect()} protection macros.
+#include <sys/mman.h> // For {mprotect()} protection macros.
+#include <sys/utsname.h> // For {uname()}.
#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h
#endif
@@ -43,6 +44,66 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace {
+using pkey_alloc_t = int (*)(unsigned, unsigned);
+using pkey_free_t = int (*)(int);
+using pkey_mprotect_t = int (*)(void*, size_t, int, int);
+using pkey_get_t = int (*)(int);
+using pkey_set_t = int (*)(int, unsigned);
+
+pkey_alloc_t pkey_alloc = nullptr;
+pkey_free_t pkey_free = nullptr;
+pkey_mprotect_t pkey_mprotect = nullptr;
+pkey_get_t pkey_get = nullptr;
+pkey_set_t pkey_set = nullptr;
+
+#ifdef DEBUG
+bool pkey_initialized = false;
+#endif
+} // namespace
+
+void InitializeMemoryProtectionKeySupport() {
+ // Flip {pkey_initialized} (in debug mode) and check the new value.
+ DCHECK_EQ(true, pkey_initialized = !pkey_initialized);
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ // PKU was broken on Linux kernels before 5.13 (see
+ // https://lore.kernel.org/all/20210623121456.399107624@linutronix.de/).
+ // A fix is also included in the 5.4.182 and 5.10.103 versions ("x86/fpu:
+ // Correct pkru/xstate inconsistency" by Brian Geffon <bgeffon@google.com>).
+ // Thus check the kernel version we are running on, and bail out if does not
+ // contain the fix.
+ struct utsname uname_buffer;
+ CHECK_EQ(0, uname(&uname_buffer));
+ int kernel, major, minor;
+ // Conservatively return if the release does not match the format we expect.
+ if (sscanf(uname_buffer.release, "%d.%d.%d", &kernel, &major, &minor) != 3) {
+ return;
+ }
+ bool kernel_has_pkru_fix =
+ kernel > 5 || (kernel == 5 && major >= 13) || // anything >= 5.13
+ (kernel == 5 && major == 4 && minor >= 182) || // 5.4 >= 5.4.182
+ (kernel == 5 && major == 10 && minor >= 103); // 5.10 >= 5.10.103
+ if (!kernel_has_pkru_fix) return;
+
+ // Try to find the pkey functions in glibc.
+ void* pkey_alloc_ptr = dlsym(RTLD_DEFAULT, "pkey_alloc");
+ if (!pkey_alloc_ptr) return;
+
+ // If {pkey_alloc} is available, the others must also be available.
+ void* pkey_free_ptr = dlsym(RTLD_DEFAULT, "pkey_free");
+ void* pkey_mprotect_ptr = dlsym(RTLD_DEFAULT, "pkey_mprotect");
+ void* pkey_get_ptr = dlsym(RTLD_DEFAULT, "pkey_get");
+ void* pkey_set_ptr = dlsym(RTLD_DEFAULT, "pkey_set");
+ CHECK(pkey_free_ptr && pkey_mprotect_ptr && pkey_get_ptr && pkey_set_ptr);
+
+ pkey_alloc = reinterpret_cast<pkey_alloc_t>(pkey_alloc_ptr);
+ pkey_free = reinterpret_cast<pkey_free_t>(pkey_free_ptr);
+ pkey_mprotect = reinterpret_cast<pkey_mprotect_t>(pkey_mprotect_ptr);
+ pkey_get = reinterpret_cast<pkey_get_t>(pkey_get_ptr);
+ pkey_set = reinterpret_cast<pkey_set_t>(pkey_set_ptr);
+#endif
+}
+
// TODO(dlehmann) Security: Are there alternatives to disabling CFI altogether
// for the functions below? Since they are essentially an arbitrary indirect
// call gadget, disabling CFI should be only a last resort. In Chromium, there
@@ -57,139 +118,104 @@ namespace wasm {
// level code (probably in the order of 100 lines).
DISABLE_CFI_ICALL
int AllocateMemoryProtectionKey() {
-// See comment on the import on feature testing for PKEY support.
-#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
- // Try to to find {pkey_alloc()} support in glibc.
- typedef int (*pkey_alloc_t)(unsigned int, unsigned int);
- // Cache the {dlsym()} lookup in a {static} variable.
- static auto* pkey_alloc =
- bit_cast<pkey_alloc_t>(dlsym(RTLD_DEFAULT, "pkey_alloc"));
- if (pkey_alloc != nullptr) {
- // If there is support in glibc, try to allocate a new key.
- // This might still return -1, e.g., because the kernel does not support
- // PKU or because there is no more key available.
- // Different reasons for why {pkey_alloc()} failed could be checked with
- // errno, e.g., EINVAL vs ENOSPC vs ENOSYS. See manpages and glibc manual
- // (the latter is the authorative source):
- // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
- return pkey_alloc(/* flags, unused */ 0, kDisableAccess);
- }
-#endif
- return kNoMemoryProtectionKey;
+ DCHECK(pkey_initialized);
+ if (!pkey_alloc) return kNoMemoryProtectionKey;
+
+ // If there is support in glibc, try to allocate a new key.
+ // This might still return -1, e.g., because the kernel does not support
+ // PKU or because there is no more key available.
+ // Different reasons for why {pkey_alloc()} failed could be checked with
+ // errno, e.g., EINVAL vs ENOSPC vs ENOSYS. See manpages and glibc manual
+ // (the latter is the authorative source):
+ // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
+ STATIC_ASSERT(kNoMemoryProtectionKey == -1);
+ return pkey_alloc(/* flags, unused */ 0, kDisableAccess);
}
DISABLE_CFI_ICALL
void FreeMemoryProtectionKey(int key) {
+ DCHECK(pkey_initialized);
// Only free the key if one was allocated.
if (key == kNoMemoryProtectionKey) return;
-#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
- typedef int (*pkey_free_t)(int);
- static auto* pkey_free =
- bit_cast<pkey_free_t>(dlsym(RTLD_DEFAULT, "pkey_free"));
- // If a valid key was allocated, {pkey_free()} must also be available.
- DCHECK_NOT_NULL(pkey_free);
-
- int ret = pkey_free(key);
- CHECK_EQ(/* success */ 0, ret);
-#else
// On platforms without PKU support, we should have already returned because
// the key must be {kNoMemoryProtectionKey}.
- UNREACHABLE();
-#endif
+ DCHECK_NOT_NULL(pkey_free);
+ CHECK_EQ(/* success */ 0, pkey_free(key));
}
+int GetProtectionFromMemoryPermission(PageAllocator::Permission permission) {
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
-// TODO(dlehmann): Copied from base/platform/platform-posix.cc. Should be
-// removed once this code is integrated in base/platform/platform-linux.cc.
-int GetProtectionFromMemoryPermission(base::OS::MemoryPermission access) {
- switch (access) {
- case base::OS::MemoryPermission::kNoAccess:
- case base::OS::MemoryPermission::kNoAccessWillJitLater:
+ // Mappings for PKU are either RWX (on this level) or no access.
+ switch (permission) {
+ case PageAllocator::kNoAccess:
return PROT_NONE;
- case base::OS::MemoryPermission::kRead:
- return PROT_READ;
- case base::OS::MemoryPermission::kReadWrite:
- return PROT_READ | PROT_WRITE;
- case base::OS::MemoryPermission::kReadWriteExecute:
+ case PageAllocator::kReadWriteExecute:
return PROT_READ | PROT_WRITE | PROT_EXEC;
- case base::OS::MemoryPermission::kReadExecute:
- return PROT_READ | PROT_EXEC;
+ default:
+ UNREACHABLE();
}
+#endif
+ // Other platforms do not use PKU.
UNREACHABLE();
}
-#endif
DISABLE_CFI_ICALL
bool SetPermissionsAndMemoryProtectionKey(
PageAllocator* page_allocator, base::AddressRegion region,
PageAllocator::Permission page_permissions, int key) {
- DCHECK_NOT_NULL(page_allocator);
+ DCHECK(pkey_initialized);
void* address = reinterpret_cast<void*>(region.begin());
size_t size = region.size();
-#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
- typedef int (*pkey_mprotect_t)(void*, size_t, int, int);
- static auto* pkey_mprotect =
- bit_cast<pkey_mprotect_t>(dlsym(RTLD_DEFAULT, "pkey_mprotect"));
+ if (pkey_mprotect) {
+ // Copied with slight modifications from base/platform/platform-posix.cc
+ // {OS::SetPermissions()}.
+ // TODO(dlehmann): Move this block into its own function at the right
+ // abstraction boundary (likely some static method in platform.h {OS})
+ // once the whole PKU code is moved into base/platform/.
+ DCHECK_EQ(0, region.begin() % page_allocator->CommitPageSize());
+ DCHECK_EQ(0, size % page_allocator->CommitPageSize());
- if (pkey_mprotect == nullptr) {
- // If there is no runtime support for {pkey_mprotect()}, no key should have
- // been allocated in the first place.
- DCHECK_EQ(kNoMemoryProtectionKey, key);
+ int protection = GetProtectionFromMemoryPermission(page_permissions);
- // Without PKU support, fallback to regular {mprotect()}.
- return page_allocator->SetPermissions(address, size, page_permissions);
- }
+ int ret = pkey_mprotect(address, size, protection, key);
- // Copied with slight modifications from base/platform/platform-posix.cc
- // {OS::SetPermissions()}.
- // TODO(dlehmann): Move this block into its own function at the right
- // abstraction boundary (likely some static method in platform.h {OS})
- // once the whole PKU code is moved into base/platform/.
- DCHECK_EQ(0, region.begin() % page_allocator->CommitPageSize());
- DCHECK_EQ(0, size % page_allocator->CommitPageSize());
+ if (ret == 0 && page_permissions == PageAllocator::kNoAccess) {
+ // Similar to {OS::SetPermissions}, also discard the pages after switching
+ // to no access. This is advisory; ignore errors and continue execution.
+ USE(page_allocator->DiscardSystemPages(address, size));
+ }
- int protection = GetProtectionFromMemoryPermission(
- static_cast<base::OS::MemoryPermission>(page_permissions));
+ return ret == /* success */ 0;
+ }
- int ret = pkey_mprotect(address, size, protection, key);
+ // If there is no runtime support for {pkey_mprotect()}, no key should have
+ // been allocated in the first place.
+ DCHECK_EQ(kNoMemoryProtectionKey, key);
- return ret == /* success */ 0;
-#else
// Without PKU support, fallback to regular {mprotect()}.
return page_allocator->SetPermissions(address, size, page_permissions);
-#endif
}
DISABLE_CFI_ICALL
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions) {
+ DCHECK(pkey_initialized);
DCHECK_NE(kNoMemoryProtectionKey, key);
-#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
- typedef int (*pkey_set_t)(int, unsigned int);
- static auto* pkey_set = bit_cast<pkey_set_t>(dlsym(RTLD_DEFAULT, "pkey_set"));
// If a valid key was allocated, {pkey_set()} must also be available.
DCHECK_NOT_NULL(pkey_set);
- int ret = pkey_set(key, permissions);
- CHECK_EQ(0 /* success */, ret);
-#else
- // On platforms without PKU support, this method cannot be called because
- // no protection key can have been allocated.
- UNREACHABLE();
-#endif
+ CHECK_EQ(0 /* success */, pkey_set(key, permissions));
}
DISABLE_CFI_ICALL
MemoryProtectionKeyPermission GetMemoryProtectionKeyPermission(int key) {
+ DCHECK(pkey_initialized);
DCHECK_NE(kNoMemoryProtectionKey, key);
-#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
- typedef int (*pkey_get_t)(int);
- static auto* pkey_get = bit_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
// If a valid key was allocated, {pkey_get()} must also be available.
DCHECK_NOT_NULL(pkey_get);
@@ -197,11 +223,6 @@ MemoryProtectionKeyPermission GetMemoryProtectionKeyPermission(int key) {
CHECK(permission == kNoRestrictions || permission == kDisableAccess ||
permission == kDisableWrite);
return static_cast<MemoryProtectionKeyPermission>(permission);
-#else
- // On platforms without PKU support, this method cannot be called because
- // no protection key can have been allocated.
- UNREACHABLE();
-#endif
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/memory-protection-key.h b/deps/v8/src/wasm/memory-protection-key.h
index dd11b419ac..3fffee51f0 100644
--- a/deps/v8/src/wasm/memory-protection-key.h
+++ b/deps/v8/src/wasm/memory-protection-key.h
@@ -48,6 +48,10 @@ STATIC_ASSERT(kDisableAccess == PKEY_DISABLE_ACCESS);
STATIC_ASSERT(kDisableWrite == PKEY_DISABLE_WRITE);
#endif
+// Call exactly once per process to determine if PKU is supported on this
+// platform and initialize global data structures.
+void InitializeMemoryProtectionKeySupport();
+
// Allocates a memory protection key on platforms with PKU support, returns
// {kNoMemoryProtectionKey} on platforms without support or when allocation
// failed at runtime.
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index af7551e535..9de513ae4b 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -556,7 +556,8 @@ class CompilationStateImpl {
// Initialize the compilation progress after deserialization. This is needed
// for recompilation (e.g. for tier down) to work later.
void InitializeCompilationProgressAfterDeserialization(
- base::Vector<const int> missing_functions);
+ base::Vector<const int> lazy_functions,
+ base::Vector<const int> liftoff_functions);
// Initializes compilation units based on the information encoded in the
// {compilation_progress_}.
@@ -666,7 +667,7 @@ class CompilationStateImpl {
private:
uint8_t SetupCompilationProgressForFunction(
- bool lazy_module, NativeModule* module,
+ bool lazy_function, NativeModule* module,
const WasmFeatures& enabled_features, int func_index);
// Returns the potentially-updated {function_progress}.
@@ -849,9 +850,10 @@ void CompilationState::WaitForTopTierFinished() {
void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); }
void CompilationState::InitializeAfterDeserialization(
- base::Vector<const int> missing_functions) {
+ base::Vector<const int> lazy_functions,
+ base::Vector<const int> liftoff_functions) {
Impl(this)->InitializeCompilationProgressAfterDeserialization(
- missing_functions);
+ lazy_functions, liftoff_functions);
}
bool CompilationState::failed() const { return Impl(this)->failed(); }
@@ -1599,7 +1601,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
}
WasmImportWrapperCache::CacheKey key(
compiler::kDefaultImportCallKind, sig,
- static_cast<int>(sig->parameter_count()));
+ static_cast<int>(sig->parameter_count()), kNoSuspend);
auto it = keys.insert(key);
if (it.second) {
// Ensure that all keys exist in the cache, so that we can populate the
@@ -1879,10 +1881,13 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
}
// Create a new {NativeModule} first.
- const bool uses_liftoff = module->origin == kWasmOrigin && FLAG_liftoff;
+ const bool include_liftoff = module->origin == kWasmOrigin && FLAG_liftoff;
+ DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
size_t code_size_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(),
- uses_liftoff);
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ module.get(), include_liftoff, dynamic_tiering);
native_module =
engine->NewNativeModule(isolate, enabled, module, code_size_estimate);
native_module->SetWireBytes(std::move(wire_bytes_copy));
@@ -1953,6 +1958,9 @@ AsyncCompileJob::AsyncCompileJob(
: isolate_(isolate),
api_method_name_(api_method_name),
enabled_features_(enabled),
+ dynamic_tiering_(isolate_->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled),
wasm_lazy_compilation_(FLAG_wasm_lazy_compilation),
start_time_(base::TimeTicks::Now()),
bytes_copy_(std::move(bytes_copy)),
@@ -2485,10 +2493,10 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
} else {
// Decode passed.
std::shared_ptr<WasmModule> module = std::move(result).value();
- const bool kUsesLiftoff = false;
+ const bool include_liftoff = FLAG_liftoff;
size_t code_size_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(),
- kUsesLiftoff);
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ module.get(), include_liftoff, job->dynamic_tiering_);
job->DoSync<PrepareAndStartCompile>(std::move(module), true,
code_size_estimate);
}
@@ -2528,10 +2536,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
code_size_estimate_(code_size_estimate) {}
private:
- const std::shared_ptr<const WasmModule> module_;
- const bool start_compilation_;
- const size_t code_size_estimate_;
-
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
@@ -2575,6 +2579,10 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
}
}
+
+ const std::shared_ptr<const WasmModule> module_;
+ const bool start_compilation_;
+ const size_t code_size_estimate_;
};
//==========================================================================
@@ -2785,11 +2793,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
int num_imported_functions =
static_cast<int>(decoder_.module()->num_imported_functions);
DCHECK_EQ(kWasmOrigin, decoder_.module()->origin);
- const bool uses_liftoff = FLAG_liftoff;
+ const bool include_liftoff = FLAG_liftoff;
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
num_functions, num_imported_functions, code_section_length,
- uses_liftoff);
+ include_liftoff, job_->dynamic_tiering_);
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false, code_size_estimate);
@@ -2889,9 +2897,10 @@ void AsyncStreamingProcessor::OnFinishedStream(
if (prefix_cache_hit_) {
// Restart as an asynchronous, non-streaming compilation. Most likely
// {PrepareAndStartCompile} will get the native module from the cache.
+ const bool include_liftoff = FLAG_liftoff;
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
- result.value().get(), FLAG_liftoff);
+ result.value().get(), include_liftoff, job_->dynamic_tiering_);
job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(
std::move(result).value(), true, code_size_estimate);
return;
@@ -2948,30 +2957,13 @@ void AsyncStreamingProcessor::OnAbort() {
job_->Abort();
}
-namespace {
-class DeserializationTimeScope {
- public:
- explicit DeserializationTimeScope(TimedHistogram* counter)
- : counter_(counter), start_(base::TimeTicks::Now()) {}
-
- ~DeserializationTimeScope() {
- base::TimeDelta duration = base::TimeTicks::Now() - start_;
- int duration_usecs = static_cast<int>(duration.InMilliseconds());
- counter_->AddSample(duration_usecs);
- }
-
- private:
- TimedHistogram* counter_;
- base::TimeTicks start_;
-};
-} // namespace
-
bool AsyncStreamingProcessor::Deserialize(
base::Vector<const uint8_t> module_bytes,
base::Vector<const uint8_t> wire_bytes) {
TRACE_EVENT0("v8.wasm", "wasm.Deserialize");
- DeserializationTimeScope time_scope(
- job_->isolate()->counters()->wasm_deserialization_time());
+ TimedHistogramScope time_scope(
+ job_->isolate()->counters()->wasm_deserialization_time(),
+ job_->isolate());
// DeserializeNativeModule and FinishCompile assume that they are executed in
// a HandleScope, and that a context is set on the isolate.
HandleScope scope(job_->isolate_);
@@ -3030,12 +3022,12 @@ bool CompilationStateImpl::cancelled() const {
}
uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
- bool lazy_module, NativeModule* native_module,
+ bool lazy_function, NativeModule* native_module,
const WasmFeatures& enabled_features, int func_index) {
ExecutionTierPair requested_tiers =
GetRequestedExecutionTiers(native_module, enabled_features, func_index);
CompileStrategy strategy = GetCompileStrategy(
- native_module->module(), enabled_features, func_index, lazy_module);
+ native_module->module(), enabled_features, func_index, lazy_function);
bool required_for_baseline = strategy == CompileStrategy::kEager;
bool required_for_top_tier = strategy != CompileStrategy::kLazy;
@@ -3199,33 +3191,62 @@ void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
}
void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
- base::Vector<const int> missing_functions) {
- TRACE_EVENT1("v8.wasm", "wasm.CompilationAfterDeserialization",
- "num_missing_functions", missing_functions.size());
+ base::Vector<const int> lazy_functions,
+ base::Vector<const int> liftoff_functions) {
+ TRACE_EVENT2("v8.wasm", "wasm.CompilationAfterDeserialization",
+ "num_lazy_functions", lazy_functions.size(),
+ "num_liftoff_functions", liftoff_functions.size());
TimedHistogramScope lazy_compile_time_scope(
counters()->wasm_compile_after_deserialize());
auto* module = native_module_->module();
auto enabled_features = native_module_->enabled_features();
const bool lazy_module = IsLazyModule(module);
+ base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
+ if (lazy_module || !lazy_functions.empty()) {
+ lazy_code_space_write_scope.emplace(native_module_);
+ }
{
base::MutexGuard guard(&callbacks_mutex_);
DCHECK(compilation_progress_.empty());
- constexpr uint8_t kProgressAfterDeserialization =
+ constexpr uint8_t kProgressAfterTurbofanDeserialization =
RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
ReachedTierField::encode(ExecutionTier::kTurbofan);
finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
- if (missing_functions.empty() || FLAG_wasm_lazy_compilation) {
+ if (liftoff_functions.empty() || lazy_module) {
+ // We have to trigger the compilation events to finish compilation.
+ // Typically the events get triggered when a CompilationUnit finishes, but
+ // with lazy compilation there are no compilation units.
+ // The {kFinishedBaselineCompilation} event is needed for module
+ // compilation to finish.
finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
- finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
+ if (liftoff_functions.empty() && lazy_functions.empty()) {
+ // All functions exist now as TurboFan functions, so we can trigger the
+ // {kFinishedTopTierCompilation} event.
+ // The {kFinishedTopTierCompilation} event is needed for the C-API so
+ // that {serialize()} works after {deserialize()}.
+ finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
+ }
}
compilation_progress_.assign(module->num_declared_functions,
- kProgressAfterDeserialization);
- for (auto func_index : missing_functions) {
- if (FLAG_wasm_lazy_compilation) {
+ kProgressAfterTurbofanDeserialization);
+ for (auto func_index : lazy_functions) {
+ native_module_->UseLazyStub(func_index);
+
+ compilation_progress_[declared_function_index(module, func_index)] =
+ SetupCompilationProgressForFunction(/*lazy_function =*/true,
+ native_module_, enabled_features,
+ func_index);
+ }
+ for (auto func_index : liftoff_functions) {
+ if (lazy_module) {
native_module_->UseLazyStub(func_index);
}
+ // Check that {func_index} is not contained in {lazy_functions}.
+ DCHECK_EQ(
+ compilation_progress_[declared_function_index(module, func_index)],
+ kProgressAfterTurbofanDeserialization);
compilation_progress_[declared_function_index(module, func_index)] =
SetupCompilationProgressForFunction(lazy_module, native_module_,
enabled_features, func_index);
@@ -3634,7 +3655,7 @@ void CompilationStateImpl::PublishCompilationResults(
native_module_->module()->functions[func_index].sig;
WasmImportWrapperCache::CacheKey key(
compiler::kDefaultImportCallKind, sig,
- static_cast<int>(sig->parameter_count()));
+ static_cast<int>(sig->parameter_count()), kNoSuspend);
// If two imported functions have the same key, only one of them should
// have been added as a compilation unit. So it is always the first time
// we compile a wrapper for this key here.
@@ -3673,6 +3694,7 @@ void CompilationStateImpl::SchedulePublishCompilationResults(
}
publisher_running_ = true;
}
+ CodeSpaceWriteScope code_space_write_scope(native_module_);
while (true) {
PublishCompilationResults(std::move(unpublished_code));
unpublished_code.clear();
@@ -3874,19 +3896,19 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
WasmCode* CompileImportWrapper(
NativeModule* native_module, Counters* counters,
compiler::WasmImportCallKind kind, const FunctionSig* sig,
- int expected_arity,
+ int expected_arity, Suspend suspend,
WasmImportWrapperCache::ModificationScope* cache_scope) {
// Entry should exist, so that we don't insert a new one and invalidate
// other threads' iterators/references, but it should not have been compiled
// yet.
- WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
+ WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity, suspend);
DCHECK_NULL((*cache_scope)[key]);
bool source_positions = is_asmjs_module(native_module->module());
// Keep the {WasmCode} alive until we explicitly call {IncRef}.
WasmCodeRefScope code_ref_scope;
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- &env, kind, sig, source_positions, expected_arity);
+ &env, kind, sig, source_positions, expected_arity, suspend);
WasmCode* published_code;
{
CodeSpaceWriteScope code_space_write_scope(native_module);
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 1aab188d29..0e7ad2c0b4 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -70,7 +70,8 @@ V8_EXPORT_PRIVATE
WasmCode* CompileImportWrapper(
NativeModule* native_module, Counters* counters,
compiler::WasmImportCallKind kind, const FunctionSig* sig,
- int expected_arity, WasmImportWrapperCache::ModificationScope* cache_scope);
+ int expected_arity, Suspend suspend,
+ WasmImportWrapperCache::ModificationScope* cache_scope);
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
@@ -214,6 +215,7 @@ class AsyncCompileJob {
Isolate* const isolate_;
const char* const api_method_name_;
const WasmFeatures enabled_features_;
+ const DynamicTiering dynamic_tiering_;
const bool wasm_lazy_compilation_;
base::TimeTicks start_time_;
// Copy of the module wire bytes, moved into the {native_module_} on its
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 710424e95c..1c5aefb4a2 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -36,7 +36,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
-constexpr char kBranchHintsString[] = "branchHints";
+constexpr char kBranchHintsString[] = "metadata.code.branch_hint";
constexpr char kDebugInfoString[] = ".debug_info";
constexpr char kExternalDebugInfoString[] = "external_debug_info";
@@ -409,15 +409,20 @@ class ModuleDecoderImpl : public Decoder {
break;
case kDataCountSectionCode:
if (!CheckUnorderedSection(section_code)) return;
- if (!CheckSectionOrder(section_code, kElementSectionCode,
- kCodeSectionCode))
+ // If wasm-gc is enabled, we allow the data cound section anywhere in
+ // the module.
+ if (!enabled_features_.has_gc() &&
+ !CheckSectionOrder(section_code, kElementSectionCode,
+ kCodeSectionCode)) {
return;
+ }
break;
case kTagSectionCode:
if (!CheckUnorderedSection(section_code)) return;
if (!CheckSectionOrder(section_code, kMemorySectionCode,
- kGlobalSectionCode))
+ kGlobalSectionCode)) {
return;
+ }
break;
case kNameSectionCode:
// TODO(titzer): report out of place name section as a warning.
@@ -549,135 +554,203 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ TypeDefinition consume_base_type_definition() {
+ DCHECK(enabled_features_.has_gc());
+ uint8_t kind = consume_u8("type kind");
+ switch (kind) {
+ case kWasmFunctionTypeCode: {
+ const FunctionSig* sig = consume_sig(module_->signature_zone.get());
+ return {sig, kNoSuperType};
+ }
+ case kWasmStructTypeCode: {
+ const StructType* type = consume_struct(module_->signature_zone.get());
+ return {type, kNoSuperType};
+ }
+ case kWasmArrayTypeCode: {
+ const ArrayType* type = consume_array(module_->signature_zone.get());
+ return {type, kNoSuperType};
+ }
+ case kWasmFunctionNominalCode:
+ case kWasmArrayNominalCode:
+ case kWasmStructNominalCode:
+ errorf(pc() - 1,
+ "mixing nominal and isorecursive types is not allowed");
+ return {};
+ default:
+ errorf(pc() - 1, "unknown type form: %d", kind);
+ return {};
+ }
+ }
+
+ bool check_supertype(uint32_t supertype) {
+ if (V8_UNLIKELY(supertype >= module_->types.size())) {
+ errorf(pc(), "type %zu: forward-declared supertype %d",
+ module_->types.size(), supertype);
+ return false;
+ }
+ return true;
+ }
+
+ TypeDefinition consume_nominal_type_definition() {
+ DCHECK(enabled_features_.has_gc());
+ size_t num_types = module_->types.size();
+ uint8_t kind = consume_u8("type kind");
+ switch (kind) {
+ case kWasmFunctionNominalCode: {
+ const FunctionSig* sig = consume_sig(module_->signature_zone.get());
+ uint32_t super_index = kNoSuperType;
+ HeapType super_type = consume_super_type();
+ if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else if (V8_UNLIKELY(super_type != HeapType::kFunc)) {
+ errorf(pc() - 1, "type %zu: invalid supertype %d", num_types,
+ super_type.code());
+ return {};
+ }
+ return {sig, super_index};
+ }
+ case kWasmStructNominalCode: {
+ const StructType* type = consume_struct(module_->signature_zone.get());
+ uint32_t super_index = kNoSuperType;
+ HeapType super_type = consume_super_type();
+ if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else if (V8_UNLIKELY(super_type != HeapType::kData)) {
+ errorf(pc() - 1, "type %zu: invalid supertype %d", num_types,
+ super_type.code());
+ return {};
+ }
+ return {type, super_index};
+ }
+ case kWasmArrayNominalCode: {
+ const ArrayType* type = consume_array(module_->signature_zone.get());
+ uint32_t super_index = kNoSuperType;
+ HeapType super_type = consume_super_type();
+ if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else if (V8_UNLIKELY(super_type != HeapType::kData)) {
+ errorf(pc() - 1, "type %zu: invalid supertype %d", num_types,
+ super_type.code());
+ return {};
+ }
+ return {type, super_index};
+ }
+ case kWasmFunctionTypeCode:
+ case kWasmArrayTypeCode:
+ case kWasmStructTypeCode:
+ case kWasmSubtypeCode:
+ case kWasmRecursiveTypeGroupCode:
+ errorf(pc() - 1,
+ "mixing nominal and isorecursive types is not allowed");
+ return {};
+ default:
+ errorf(pc() - 1, "unknown type form: %d", kind);
+ return {};
+ }
+ }
+
+ TypeDefinition consume_subtype_definition() {
+ DCHECK(enabled_features_.has_gc());
+ uint8_t kind = read_u8<Decoder::kFullValidation>(pc(), "type kind");
+ if (kind == kWasmSubtypeCode) {
+ consume_bytes(1, "subtype definition");
+ constexpr uint32_t kMaximumSupertypes = 1;
+ uint32_t supertype_count =
+ consume_count("supertype count", kMaximumSupertypes);
+ uint32_t supertype =
+ supertype_count == 1 ? consume_u32v("supertype") : kNoSuperType;
+ if (!check_supertype(supertype)) return {};
+ TypeDefinition type = consume_base_type_definition();
+ type.supertype = supertype;
+ return type;
+ } else {
+ return consume_base_type_definition();
+ }
+ }
+
void DecodeTypeSection() {
uint32_t types_count = consume_count("types count", kV8MaxWasmTypes);
- module_->types.reserve(types_count);
- for (uint32_t i = 0; ok() && i < types_count; ++i) {
- TRACE("DecodeSignature[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- uint8_t kind = consume_u8("type kind");
- switch (kind) {
- case kWasmFunctionTypeCode:
- case kWasmFunctionSubtypeCode: {
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
- uint32_t super_index = kNoSuperType;
- if (kind == kWasmFunctionSubtypeCode) {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid function type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- HeapType super_type = consume_super_type();
- if (super_type == HeapType::kFunc) {
- super_index = kGenericSuperType;
- } else if (super_type.is_index()) {
- super_index = super_type.representation();
- } else {
- errorf(pc(), "type %d: invalid supertype %d", i,
- super_type.code());
- break;
- }
- }
- module_->add_signature(s, super_index);
- break;
+
+ // Non wasm-gc type section decoding.
+ if (!enabled_features_.has_gc()) {
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ expect_u8("signature definition", kWasmFunctionTypeCode);
+ const FunctionSig* sig = consume_sig(module_->signature_zone.get());
+ if (!ok()) break;
+ module_->add_signature(sig, kNoSuperType);
+ }
+ return;
+ }
+
+ if (types_count > 0) {
+ uint8_t first_type_opcode = this->read_u8<Decoder::kFullValidation>(pc());
+ if (first_type_opcode == kWasmFunctionNominalCode ||
+ first_type_opcode == kWasmStructNominalCode ||
+ first_type_opcode == kWasmArrayNominalCode) {
+ // wasm-gc nominal type section decoding.
+ // In a nominal module, all types belong in the same recursive group. We
+ // use the type vector's capacity to mark the end of the current
+ // recursive group.
+ module_->types.reserve(types_count);
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ TRACE("DecodeType[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ TypeDefinition type = consume_nominal_type_definition();
+ if (ok()) module_->add_type(type);
}
- case kWasmStructTypeCode:
- case kWasmStructSubtypeCode: {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid struct type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- const StructType* s = consume_struct(module_->signature_zone.get());
- uint32_t super_index = kNoSuperType;
- if (kind == kWasmStructSubtypeCode) {
- HeapType super_type = consume_super_type();
- if (super_type == HeapType::kData) {
- super_index = kGenericSuperType;
- } else if (super_type.is_index()) {
- super_index = super_type.representation();
- } else {
- errorf(pc(), "type %d: invalid supertype %d", i,
- super_type.code());
- break;
+ } else {
+ // wasm-gc isorecursive type section decoding.
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ TRACE("DecodeType[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ uint8_t kind = read_u8<Decoder::kFullValidation>(pc(), "type kind");
+ if (kind == kWasmRecursiveTypeGroupCode) {
+ consume_bytes(1, "rec. group definition");
+ uint32_t group_size =
+ consume_count("recursive group size", kV8MaxWasmTypes);
+ if (module_->types.size() + group_size > kV8MaxWasmTypes) {
+ errorf(pc(), "Type definition count exeeds maximum %zu",
+ kV8MaxWasmTypes);
+ return;
}
- }
- module_->add_struct_type(s, super_index);
- // TODO(7748): Should we canonicalize struct types, like
- // {signature_map} does for function signatures?
- break;
- }
- case kWasmArrayTypeCode:
- case kWasmArraySubtypeCode: {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid array type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- const ArrayType* type = consume_array(module_->signature_zone.get());
- uint32_t super_index = kNoSuperType;
- if (kind == kWasmArraySubtypeCode) {
- HeapType super_type = consume_super_type();
- if (super_type == HeapType::kData) {
- super_index = kGenericSuperType;
- } else if (super_type.is_index()) {
- super_index = super_type.representation();
- } else {
- errorf(pc(), "type %d: invalid supertype %d", i,
- super_type.code());
- break;
+ // Reserve space for the current recursive group, so we are
+ // allowed to reference its elements.
+ module_->types.reserve(module_->types.size() + group_size);
+ for (uint32_t i = 0; i < group_size; i++) {
+ TypeDefinition type = consume_subtype_definition();
+ if (ok()) module_->add_type(type);
}
+ } else {
+ TypeDefinition type = consume_subtype_definition();
+ if (ok()) module_->add_type(type);
}
- module_->add_array_type(type, super_index);
- break;
}
- default:
- errorf(pc(), "unknown type form: %d", kind);
- break;
}
}
+
// Check validity of explicitly defined supertypes.
const WasmModule* module = module_.get();
for (uint32_t i = 0; ok() && i < types_count; ++i) {
uint32_t explicit_super = module_->supertype(i);
if (explicit_super == kNoSuperType) continue;
- if (explicit_super == kGenericSuperType) continue;
DCHECK_LT(explicit_super, types_count); // {consume_super_type} checks.
- // Only types that have an explicit supertype themselves can be explicit
- // supertypes of other types.
- if (!module->has_supertype(explicit_super)) {
- errorf("type %d has invalid explicit supertype %d", i, explicit_super);
- continue;
- }
int depth = GetSubtypingDepth(module, i);
if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) {
errorf("type %d: subtyping depth is greater than allowed", i);
continue;
}
+ // TODO(7748): Replace this with a DCHECK once we reject inheritance
+ // cycles for nominal modules.
if (depth == -1) {
errorf("type %d: cyclic inheritance", i);
continue;
}
- switch (module_->type_kinds[i]) {
- case kWasmStructTypeCode:
- if (!module->has_struct(explicit_super)) break;
- if (!StructIsSubtypeOf(i, explicit_super, module, module)) break;
- continue;
- case kWasmArrayTypeCode:
- if (!module->has_array(explicit_super)) break;
- if (!ArrayIsSubtypeOf(i, explicit_super, module, module)) break;
- continue;
- case kWasmFunctionTypeCode:
- if (!module->has_signature(explicit_super)) break;
- if (!FunctionIsSubtypeOf(i, explicit_super, module, module)) break;
- continue;
- default:
- UNREACHABLE();
+ if (!ValidSubtypeDefinition(i, explicit_super, module, module)) {
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ continue;
}
- errorf("type %d has invalid explicit supertype %d", i, explicit_super);
}
module_->signature_map.Freeze();
}
@@ -722,7 +795,6 @@ class ModuleDecoderImpl : public Decoder {
}
case kExternalTable: {
// ===== Imported table ==============================================
- if (!AddTable(module_.get())) break;
import->index = static_cast<uint32_t>(module_->tables.size());
module_->num_imported_tables++;
module_->tables.emplace_back();
@@ -731,10 +803,7 @@ class ModuleDecoderImpl : public Decoder {
const byte* type_position = pc();
ValueType type = consume_reference_type();
if (!WasmTable::IsValidTableType(type, module_.get())) {
- error(
- type_position,
- "Currently, only externref and function references are allowed "
- "as table types");
+ errorf(type_position, "Invalid table type %s", type.name().c_str());
break;
}
table->type = type;
@@ -818,14 +887,9 @@ class ModuleDecoderImpl : public Decoder {
}
void DecodeTableSection() {
- // TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
- // implementation of ExternRef landed.
- uint32_t max_count =
- enabled_features_.has_reftypes() ? 100000 : kV8MaxWasmTables;
- uint32_t table_count = consume_count("table count", max_count);
+ uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
for (uint32_t i = 0; ok() && i < table_count; i++) {
- if (!AddTable(module_.get())) break;
module_->tables.emplace_back();
WasmTable* table = &module_->tables.back();
const byte* type_position = pc();
@@ -865,13 +929,15 @@ class ModuleDecoderImpl : public Decoder {
void DecodeGlobalSection() {
uint32_t globals_count = consume_count("globals count", kV8MaxWasmGlobals);
uint32_t imported_globals = static_cast<uint32_t>(module_->globals.size());
+ // It is important to not resize the globals vector from the beginning,
+ // because we use its current size when decoding the initializer.
module_->globals.reserve(imported_globals + globals_count);
for (uint32_t i = 0; ok() && i < globals_count; ++i) {
TRACE("DecodeGlobal[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
ValueType type = consume_value_type();
bool mutability = consume_mutability();
if (failed()) break;
- WireBytesRef init = consume_init_expr(module_.get(), type);
+ ConstantExpression init = consume_init_expr(module_.get(), type);
module_->globals.push_back({type, mutability, init, {0}, false, false});
}
if (ok()) CalculateGlobalOffsets(module_.get());
@@ -996,9 +1062,7 @@ class ModuleDecoderImpl : public Decoder {
consume_count("element count", FLAG_wasm_max_table_size);
for (uint32_t i = 0; i < element_count; ++i) {
- bool expressions_as_elements;
- WasmElemSegment segment =
- consume_element_segment_header(&expressions_as_elements);
+ WasmElemSegment segment = consume_element_segment_header();
if (failed()) return;
DCHECK_NE(segment.type, kWasmBottom);
@@ -1006,20 +1070,13 @@ class ModuleDecoderImpl : public Decoder {
consume_count("number of elements", max_table_init_entries());
for (uint32_t j = 0; j < num_elem; j++) {
- WasmElemSegment::Entry init =
- expressions_as_elements
- ? consume_element_expr()
- : WasmElemSegment::Entry(WasmElemSegment::Entry::kRefFuncEntry,
- consume_element_func_index());
+ ConstantExpression entry =
+ segment.element_type == WasmElemSegment::kExpressionElements
+ ? consume_init_expr(module_.get(), segment.type)
+ : ConstantExpression::RefFunc(
+ consume_element_func_index(segment.type));
if (failed()) return;
- if (!IsSubtypeOf(TypeOf(init), segment.type, module_.get())) {
- errorf(pc_,
- "Invalid type in the init expression. The expected type is "
- "'%s', but the actual type is '%s'.",
- segment.type.name().c_str(), TypeOf(init).name().c_str());
- return;
- }
- segment.entries.push_back(init);
+ segment.entries.push_back(entry);
}
module_->elem_segments.push_back(std::move(segment));
}
@@ -1100,7 +1157,7 @@ class ModuleDecoderImpl : public Decoder {
bool is_active;
uint32_t memory_index;
- WireBytesRef dest_addr;
+ ConstantExpression dest_addr;
consume_data_segment_header(&is_active, &memory_index, &dest_addr);
if (failed()) break;
@@ -1242,8 +1299,8 @@ class ModuleDecoderImpl : public Decoder {
hint.top_tier =
static_cast<WasmCompilationHintTier>(hint_byte >> 4 & 0x3);
- // Ensure that the top tier never downgrades a compilation result.
- // If baseline and top tier are the same compilation will be invoked only
+ // Ensure that the top tier never downgrades a compilation result. If
+ // baseline and top tier are the same compilation will be invoked only
// once.
if (hint.top_tier < hint.baseline_tier &&
hint.top_tier != WasmCompilationHintTier::kDefault) {
@@ -1286,11 +1343,6 @@ class ModuleDecoderImpl : public Decoder {
break;
}
last_func_idx = func_idx;
- uint8_t reserved = inner.consume_u8("reserved byte");
- if (reserved != 0x0) {
- inner.errorf("Invalid reserved byte: %#x", reserved);
- break;
- }
uint32_t num_hints = inner.consume_u32v("number of hints");
BranchHintMap func_branch_hints;
TRACE("DecodeBranchHints[%d] module+%d\n", func_idx,
@@ -1298,13 +1350,18 @@ class ModuleDecoderImpl : public Decoder {
// Keep track of the previous branch offset to validate the ordering
int64_t last_br_off = -1;
for (uint32_t j = 0; j < num_hints; ++j) {
- uint32_t br_dir = inner.consume_u32v("branch direction");
uint32_t br_off = inner.consume_u32v("branch instruction offset");
if (int64_t(br_off) <= last_br_off) {
inner.errorf("Invalid branch offset: %d", br_off);
break;
}
last_br_off = br_off;
+ uint32_t data_size = inner.consume_u32v("data size");
+ if (data_size != 1) {
+ inner.errorf("Invalid data size: %#x. Expected 1.", data_size);
+ break;
+ }
+ uint32_t br_dir = inner.consume_u8("branch direction");
TRACE("DecodeBranchHints[%d][%d] module+%d\n", func_idx, br_off,
static_cast<int>(inner.pc() - inner.start()));
WasmBranchHint hint;
@@ -1385,10 +1442,10 @@ class ModuleDecoderImpl : public Decoder {
ModuleResult FinishDecoding(bool verify_functions = true) {
if (ok() && CheckMismatchedCounts()) {
- // We calculate the global offsets here, because there may not be a global
- // section and code section that would have triggered the calculation
- // before. Even without the globals section the calculation is needed
- // because globals can also be defined in the import section.
+ // We calculate the global offsets here, because there may not be a
+ // global section and code section that would have triggered the
+ // calculation before. Even without the globals section the calculation
+ // is needed because globals can also be defined in the import section.
CalculateGlobalOffsets(module_.get());
}
@@ -1472,7 +1529,7 @@ class ModuleDecoderImpl : public Decoder {
return ok() ? result : nullptr;
}
- WireBytesRef DecodeInitExprForTesting(ValueType expected) {
+ ConstantExpression DecodeInitExprForTesting(ValueType expected) {
return consume_init_expr(module_.get(), expected);
}
@@ -1512,18 +1569,6 @@ class ModuleDecoderImpl : public Decoder {
AccountingAllocator allocator_;
Zone init_expr_zone_{&allocator_, "initializer expression zone"};
- ValueType TypeOf(WasmElemSegment::Entry entry) {
- switch (entry.kind) {
- case WasmElemSegment::Entry::kGlobalGetEntry:
- return module_->globals[entry.index].type;
- case WasmElemSegment::Entry::kRefFuncEntry:
- return ValueType::Ref(module_->functions[entry.index].sig_index,
- kNonNullable);
- case WasmElemSegment::Entry::kRefNullEntry:
- return ValueType::Ref(entry.index, kNullable);
- }
- }
-
bool has_seen_unordered_section(SectionCode section_code) {
return seen_unordered_sections_ & (1 << section_code);
}
@@ -1536,16 +1581,6 @@ class ModuleDecoderImpl : public Decoder {
return static_cast<uint32_t>(ptr - start_) + buffer_offset_;
}
- bool AddTable(WasmModule* module) {
- if (enabled_features_.has_reftypes()) return true;
- if (module->tables.size() > 0) {
- error("At most one table is supported");
- return false;
- } else {
- return true;
- }
- }
-
bool AddMemory(WasmModule* module) {
if (module->has_memory) {
error("At most one memory is supported");
@@ -1556,9 +1591,9 @@ class ModuleDecoderImpl : public Decoder {
}
}
- // Calculate individual global offsets and total size of globals table.
- // This function should be called after all globals have been defined, which
- // is after the import section and the global section, but before the global
+ // Calculate individual global offsets and total size of globals table. This
+ // function should be called after all globals have been defined, which is
+ // after the import section and the global section, but before the global
// offsets are accessed, e.g. by the function compilers. The moment when this
// function should be called is not well-defined, as the global section may
// not exist. Therefore this function is called multiple times.
@@ -1775,6 +1810,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ // Consumes a byte, and emits an error if it does not equal {expected}.
bool expect_u8(const char* name, uint8_t expected) {
const byte* pos = pc();
uint8_t value = consume_u8(name);
@@ -1785,9 +1821,79 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
- WireBytesRef consume_init_expr(WasmModule* module, ValueType expected) {
- FunctionBody body(FunctionSig::Build(&init_expr_zone_, {expected}, {}),
- buffer_offset_, pc_, end_);
+ ConstantExpression consume_init_expr(WasmModule* module, ValueType expected) {
+ uint32_t length;
+
+ // The error message mimics the one generated by the {WasmFullDecoder}.
+#define TYPE_CHECK(found) \
+ if (V8_UNLIKELY(!IsSubtypeOf(found, expected, module_.get()))) { \
+ errorf(pc() + 1, \
+ "type error in init. expression[0] (expected %s, got %s)", \
+ expected.name().c_str(), found.name().c_str()); \
+ return {}; \
+ }
+
+ // To avoid initializing a {WasmFullDecoder} for the most common
+ // expressions, we replicate their decoding and validation here. The
+ // manually handled cases correspond to {ConstantExpression}'s kinds.
+ // We need to make sure to check that the expression ends in {kExprEnd};
+ // otherwise, it is just the first operand of a composite expression, and we
+ // fall back to the default case.
+ if (!more()) {
+ error("Beyond end of code");
+ return {};
+ }
+ switch (static_cast<WasmOpcode>(*pc())) {
+ case kExprI32Const: {
+ int32_t value =
+ read_i32v<kFullValidation>(pc() + 1, &length, "i32.const");
+ if (V8_UNLIKELY(failed())) return {};
+ if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
+ TYPE_CHECK(kWasmI32)
+ consume_bytes(length + 2);
+ return ConstantExpression::I32Const(value);
+ }
+ break;
+ }
+ case kExprRefFunc: {
+ uint32_t index =
+ read_u32v<kFullValidation>(pc() + 1, &length, "ref.func");
+ if (V8_UNLIKELY(failed())) return {};
+ if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
+ if (V8_UNLIKELY(index >= module_->functions.size())) {
+ errorf(pc() + 1, "function index %u out of bounds", index);
+ return {};
+ }
+ ValueType type =
+ enabled_features_.has_typed_funcref()
+ ? ValueType::Ref(module_->functions[index].sig_index,
+ kNonNullable)
+ : kWasmFuncRef;
+ TYPE_CHECK(type)
+ module_->functions[index].declared = true;
+ consume_bytes(length + 2);
+ return ConstantExpression::RefFunc(index);
+ }
+ break;
+ }
+ case kExprRefNull: {
+ HeapType type = value_type_reader::read_heap_type<kFullValidation>(
+ this, pc() + 1, &length, module_.get(), enabled_features_);
+ if (V8_UNLIKELY(failed())) return {};
+ if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
+ TYPE_CHECK(ValueType::Ref(type, kNullable))
+ consume_bytes(length + 2);
+ return ConstantExpression::RefNull(type.representation());
+ }
+ break;
+ }
+ default:
+ break;
+ }
+#undef TYPE_CHECK
+
+ auto sig = FixedSizeSignature<ValueType>::Returns(expected);
+ FunctionBody body(&sig, buffer_offset_, pc_, end_);
WasmFeatures detected;
WasmFullDecoder<Decoder::kFullValidation, InitExprInterface,
kInitExpression>
@@ -1810,7 +1916,8 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- return {offset, static_cast<uint32_t>(decoder.end() - decoder.start())};
+ return ConstantExpression::WireBytes(
+ offset, static_cast<uint32_t>(decoder.end() - decoder.start()));
}
// Read a mutability flag
@@ -1830,12 +1937,8 @@ class ModuleDecoderImpl : public Decoder {
}
HeapType consume_super_type() {
- uint32_t type_length;
- HeapType result = value_type_reader::read_heap_type<kFullValidation>(
- this, this->pc(), &type_length, module_.get(),
- origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
- consume_bytes(type_length, "supertype");
- return result;
+ return value_type_reader::consume_heap_type(this, module_.get(),
+ enabled_features_);
}
ValueType consume_storage_type() {
@@ -1854,27 +1957,13 @@ class ModuleDecoderImpl : public Decoder {
}
// Reads a reference type for tables and element segment headers.
- // Unless extensions are enabled, only funcref is allowed.
- // TODO(manoskouk): Replace this with consume_value_type (and checks against
- // the returned type at callsites as needed) once the
- // 'reftypes' proposal is standardized.
ValueType consume_reference_type() {
- if (!enabled_features_.has_reftypes()) {
- uint8_t ref_type = consume_u8("reference type");
- if (ref_type != kFuncRefCode) {
- error(pc_ - 1,
- "invalid table type. Consider using experimental flags.");
- return kWasmBottom;
- }
- return kWasmFuncRef;
- } else {
- const byte* position = pc();
- ValueType result = consume_value_type();
- if (!result.is_reference()) {
- error(position, "expected reference type");
- }
- return result;
+ const byte* position = pc();
+ ValueType result = consume_value_type();
+ if (!result.is_reference()) {
+ error(position, "expected reference type");
}
+ return result;
}
const FunctionSig* consume_sig(Zone* zone) {
@@ -1912,10 +2001,8 @@ class ModuleDecoderImpl : public Decoder {
ValueType* fields = zone->NewArray<ValueType>(field_count);
bool* mutabilities = zone->NewArray<bool>(field_count);
for (uint32_t i = 0; ok() && i < field_count; ++i) {
- ValueType field = consume_storage_type();
- fields[i] = field;
- bool mutability = consume_mutability();
- mutabilities[i] = mutability;
+ fields[i] = consume_storage_type();
+ mutabilities[i] = consume_mutability();
}
if (failed()) return nullptr;
uint32_t* offsets = zone->NewArray<uint32_t>(field_count);
@@ -1923,10 +2010,10 @@ class ModuleDecoderImpl : public Decoder {
}
const ArrayType* consume_array(Zone* zone) {
- ValueType field = consume_storage_type();
- if (failed()) return nullptr;
+ ValueType element_type = consume_storage_type();
bool mutability = consume_mutability();
- return zone->New<ArrayType>(field, mutability);
+ if (failed()) return nullptr;
+ return zone->New<ArrayType>(element_type, mutability);
}
// Consume the attribute field of an exception.
@@ -1940,8 +2027,7 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- WasmElemSegment consume_element_segment_header(
- bool* expressions_as_elements) {
+ WasmElemSegment consume_element_segment_header() {
const byte* pos = pc();
// The mask for the bit in the flag which indicates if the segment is
@@ -1969,15 +2055,12 @@ class ModuleDecoderImpl : public Decoder {
? WasmElemSegment::kStatusDeclarative
: WasmElemSegment::kStatusPassive
: WasmElemSegment::kStatusActive;
- if (status == WasmElemSegment::kStatusDeclarative &&
- !enabled_features_.has_reftypes()) {
- error(
- "Declarative element segments require --experimental-wasm-reftypes");
- return {};
- }
const bool is_active = status == WasmElemSegment::kStatusActive;
- *expressions_as_elements = flag & kExpressionsAsElementsMask;
+ WasmElemSegment::ElementType element_type =
+ flag & kExpressionsAsElementsMask
+ ? WasmElemSegment::kExpressionElements
+ : WasmElemSegment::kFunctionIndexElements;
const bool has_table_index =
is_active && (flag & kHasTableIndexOrIsDeclarativeMask);
@@ -1990,7 +2073,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType table_type =
is_active ? module_->tables[table_index].type : kWasmBottom;
- WireBytesRef offset;
+ ConstantExpression offset;
if (is_active) {
offset = consume_init_expr(module_.get(), kWasmI32);
// Failed to parse offset initializer, return early.
@@ -2001,7 +2084,7 @@ class ModuleDecoderImpl : public Decoder {
const bool backwards_compatible_mode =
is_active && !(flag & kHasTableIndexOrIsDeclarativeMask);
ValueType type;
- if (*expressions_as_elements) {
+ if (element_type == WasmElemSegment::kExpressionElements) {
type =
backwards_compatible_mode ? kWasmFuncRef : consume_reference_type();
if (is_active && !IsSubtypeOf(type, table_type, this->module_.get())) {
@@ -2044,14 +2127,14 @@ class ModuleDecoderImpl : public Decoder {
}
if (is_active) {
- return {type, table_index, std::move(offset)};
+ return {type, table_index, std::move(offset), element_type};
} else {
- return {type, status == WasmElemSegment::kStatusDeclarative};
+ return {type, status, element_type};
}
}
void consume_data_segment_header(bool* is_active, uint32_t* index,
- WireBytesRef* offset) {
+ ConstantExpression* offset) {
const byte* pos = pc();
uint32_t flag = consume_u32v("flag");
@@ -2082,59 +2165,23 @@ class ModuleDecoderImpl : public Decoder {
}
}
- uint32_t consume_element_func_index() {
+ uint32_t consume_element_func_index(ValueType expected) {
WasmFunction* func = nullptr;
+ const byte* initial_pc = pc();
uint32_t index =
consume_func_index(module_.get(), &func, "element function index");
if (failed()) return index;
- func->declared = true;
- DCHECK_NE(func, nullptr);
+ DCHECK_NOT_NULL(func);
DCHECK_EQ(index, func->func_index);
- return index;
- }
-
- // TODO(manoskouk): When reftypes lands, consider if we can implement this
- // with consume_init_expr(). It will require changes in module-instantiate.cc,
- // in {LoadElemSegmentImpl}.
- WasmElemSegment::Entry consume_element_expr() {
- uint8_t opcode = consume_u8("element opcode");
- if (failed()) return {};
- switch (opcode) {
- case kExprRefNull: {
- HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
- this->pc(), module_.get());
- consume_bytes(imm.length, "ref.null immediate");
- expect_u8("end opcode", kExprEnd);
- return {WasmElemSegment::Entry::kRefNullEntry,
- static_cast<uint32_t>(imm.type.representation())};
- }
- case kExprRefFunc: {
- uint32_t index = consume_element_func_index();
- if (failed()) return {};
- expect_u8("end opcode", kExprEnd);
- return {WasmElemSegment::Entry::kRefFuncEntry, index};
- }
- case kExprGlobalGet: {
- if (!enabled_features_.has_reftypes()) {
- errorf(
- "Unexpected opcode 0x%x in element. Enable with "
- "--experimental-wasm-reftypes",
- kExprGlobalGet);
- return {};
- }
- uint32_t index = this->consume_u32v("global index");
- if (failed()) return {};
- if (index >= module_->globals.size()) {
- errorf("Out-of-bounds global index %d", index);
- return {};
- }
- expect_u8("end opcode", kExprEnd);
- return {WasmElemSegment::Entry::kGlobalGetEntry, index};
- }
- default:
- error("invalid opcode in element");
- return {};
+ ValueType entry_type = ValueType::Ref(func->sig_index, kNonNullable);
+ if (V8_UNLIKELY(!IsSubtypeOf(entry_type, expected, module_.get()))) {
+ errorf(initial_pc,
+ "Invalid type in element entry: expected %s, got %s instead.",
+ expected.name().c_str(), entry_type.name().c_str());
+ return index;
}
+ func->declared = true;
+ return index;
}
};
@@ -2258,9 +2305,10 @@ const FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& enabled,
return decoder.DecodeFunctionSignature(zone, start);
}
-WireBytesRef DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
- const byte* start, const byte* end,
- ValueType expected) {
+ConstantExpression DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
+ const byte* start,
+ const byte* end,
+ ValueType expected) {
ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
AccountingAllocator allocator;
decoder.StartDecoding(nullptr, &allocator);
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 0a64326cff..20b9cc9d50 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -171,7 +171,7 @@ V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunctionForTesting(
const WasmModule* module, const byte* function_start,
const byte* function_end, Counters* counters);
-V8_EXPORT_PRIVATE WireBytesRef
+V8_EXPORT_PRIVATE ConstantExpression
DecodeWasmInitExprForTesting(const WasmFeatures& enabled, const byte* start,
const byte* end, ValueType expected);
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index f95d378f96..2dc43b69f4 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -4,7 +4,7 @@
#include "src/wasm/module-instantiate.h"
-#include "src/api/api.h"
+#include "src/api/api-inl.h"
#include "src/asmjs/asm-js.h"
#include "src/base/atomicops.h"
#include "src/base/platform/wrappers.h"
@@ -72,7 +72,7 @@ class CompileImportWrapperJob final : public JobTask {
// TODO(wasm): Batch code publishing, to avoid repeated locking and
// permission switching.
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
- key->expected_arity, cache_scope_);
+ key->expected_arity, key->suspend, cache_scope_);
if (delegate->ShouldYield()) return;
}
}
@@ -223,21 +223,21 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
// map for that supertype is created first, so that the supertypes list
// that's cached on every RTT can be set up correctly.
uint32_t supertype = module->supertype(type_index);
- if (supertype != kNoSuperType && supertype != kGenericSuperType) {
+ if (supertype != kNoSuperType) {
// This recursion is safe, because kV8MaxRttSubtypingDepth limits the
// number of recursive steps, so we won't overflow the stack.
CreateMapForType(isolate, module, supertype, instance, maps);
rtt_parent = handle(Map::cast(maps->get(supertype)), isolate);
}
Handle<Map> map;
- switch (module->type_kinds[type_index]) {
- case kWasmStructTypeCode:
+ switch (module->types[type_index].kind) {
+ case TypeDefinition::kStruct:
map = CreateStructMap(isolate, module, type_index, rtt_parent, instance);
break;
- case kWasmArrayTypeCode:
+ case TypeDefinition::kArray:
map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
break;
- case kWasmFunctionTypeCode:
+ case TypeDefinition::kFunction:
// TODO(7748): Create funcref RTTs lazily?
// TODO(7748): Canonicalize function maps (cross-module)?
map = CreateFuncRefMap(isolate, module, rtt_parent, instance);
@@ -246,73 +246,6 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
maps->set(type_index, *map);
}
-namespace {
-
-// TODO(7748): Consider storing this array in Maps'
-// "transitions_or_prototype_info" slot.
-// Also consider being more memory-efficient, e.g. use inline storage for
-// single entries, and/or adapt the growth strategy.
-class RttSubtypes : public ArrayList {
- public:
- static Handle<ArrayList> Insert(Isolate* isolate, Handle<ArrayList> array,
- uint32_t type_index, Handle<Map> sub_rtt) {
- Handle<Smi> key = handle(Smi::FromInt(type_index), isolate);
- return Add(isolate, array, key, sub_rtt);
- }
-
- static Map SearchSubtype(Handle<ArrayList> array, uint32_t type_index) {
- // Linear search for now.
- // TODO(7748): Consider keeping the array sorted and using binary search
- // here, if empirical data indicates that that would be worthwhile.
- int count = array->Length();
- for (int i = 0; i < count; i += 2) {
- if (Smi::cast(array->Get(i)).value() == static_cast<int>(type_index)) {
- return Map::cast(array->Get(i + 1));
- }
- }
- return {};
- }
-};
-
-} // namespace
-
-Handle<Map> AllocateSubRtt(Isolate* isolate,
- Handle<WasmInstanceObject> instance, uint32_t type,
- Handle<Map> parent, WasmRttSubMode mode) {
- DCHECK(parent->IsWasmStructMap() || parent->IsWasmArrayMap() ||
- parent->IsWasmInternalFunctionMap());
-
- const wasm::WasmModule* module = instance->module();
- if (module->has_signature(type)) {
- // Function references are implicitly allocated with their canonical rtt,
- // and type checks against sub-rtts will always fail. Therefore, we simply
- // create a fresh function map here.
- return CreateFuncRefMap(isolate, module, Handle<Map>(), instance);
- }
- // If canonicalization is requested, check for an existing RTT first.
- Handle<ArrayList> cache;
- if (mode == WasmRttSubMode::kCanonicalize) {
- cache = handle(parent->wasm_type_info().subtypes(), isolate);
- Map maybe_cached = RttSubtypes::SearchSubtype(cache, type);
- if (!maybe_cached.is_null()) return handle(maybe_cached, isolate);
- }
-
- // Allocate a fresh RTT otherwise.
- Handle<Map> rtt;
- if (module->has_struct(type)) {
- rtt = wasm::CreateStructMap(isolate, module, type, parent, instance);
- } else {
- DCHECK(module->has_array(type));
- rtt = wasm::CreateArrayMap(isolate, module, type, parent, instance);
- }
-
- if (mode == WasmRttSubMode::kCanonicalize) {
- cache = RttSubtypes::Insert(isolate, cache, type, rtt);
- parent->wasm_type_info().set_subtypes(*cache);
- }
- return rtt;
-}
-
// A helper class to simplify instantiating a module from a module object.
// It closes over the {Isolate}, the {ErrorThrower}, etc.
class InstanceBuilder {
@@ -349,6 +282,9 @@ class InstanceBuilder {
std::vector<Handle<WasmTagObject>> tags_wrappers_;
Handle<WasmExportedFunction> start_function_;
std::vector<SanitizedImport> sanitized_imports_;
+ // We pass this {Zone} to the temporary {WasmFullDecoder} we allocate during
+ // each call to {EvaluateInitExpression}. This has been found to improve
+ // performance a bit over allocating a new {Zone} each time.
Zone init_expr_zone_;
// Helper routines to print out errors with imports.
@@ -448,14 +384,11 @@ class InstanceBuilder {
// Process initialization of globals.
void InitGlobals(Handle<WasmInstanceObject> instance);
- WasmValue EvaluateInitExpression(WireBytesRef init, ValueType expected,
- Handle<WasmInstanceObject> instance);
-
// Process the exports, creating wrappers for functions, tables, memories,
// and globals.
void ProcessExports(Handle<WasmInstanceObject> instance);
- void InitializeIndirectFunctionTables(Handle<WasmInstanceObject> instance);
+ void InitializeNonDefaultableTables(Handle<WasmInstanceObject> instance);
void LoadTableSegments(Handle<WasmInstanceObject> instance);
@@ -562,7 +495,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
static_cast<int>(RoundUp(buffer->byte_length(), wasm::kWasmPageSize) /
wasm::kWasmPageSize);
memory_object_ =
- WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages);
+ WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages)
+ .ToHandleChecked();
} else {
// Actual wasm module must have either imported or created memory.
CHECK(memory_buffer_.is_null());
@@ -668,7 +602,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (int i = module_->num_imported_tables; i < table_count; i++) {
const WasmTable& table = module_->tables[i];
// Initialize tables with null for now. We will initialize non-defaultable
- // tables later, in {InitializeIndirectFunctionTables}.
+ // tables later, in {InitializeNonDefaultableTables}.
Handle<WasmTableObject> table_obj = WasmTableObject::New(
isolate_, instance, table.type, table.initial_size,
table.has_maximum_size, table.maximum_size, nullptr,
@@ -710,8 +644,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (enabled_.has_gc()) {
Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
- static_cast<int>(module_->type_kinds.size()));
- for (uint32_t index = 0; index < module_->type_kinds.size(); index++) {
+ static_cast<int>(module_->types.size()));
+ for (uint32_t index = 0; index < module_->types.size(); index++) {
CreateMapForType(isolate_, module_, index, instance, maps);
}
instance->set_managed_object_maps(*maps);
@@ -745,11 +679,31 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitGlobals(instance);
//--------------------------------------------------------------------------
- // Initialize the indirect tables.
+ // Initialize the indirect function tables and dispatch tables. We do this
+ // before initializing non-defaultable tables and loading element segments, so
+ // that indirect function tables in this module are included in the updates
+ // when we do so.
//--------------------------------------------------------------------------
- if (table_count > 0) {
- InitializeIndirectFunctionTables(instance);
- if (thrower_->error()) return {};
+ for (int table_index = 0;
+ table_index < static_cast<int>(module_->tables.size()); ++table_index) {
+ const WasmTable& table = module_->tables[table_index];
+
+ if (IsSubtypeOf(table.type, kWasmFuncRef, module_)) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, table.initial_size);
+ if (thrower_->error()) return {};
+ auto table_object = handle(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate_);
+ WasmTableObject::AddDispatchTable(isolate_, table_object, instance,
+ table_index);
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize non-defaultable tables.
+ //--------------------------------------------------------------------------
+ if (FLAG_experimental_wasm_typed_funcref) {
+ InitializeNonDefaultableTables(instance);
}
//--------------------------------------------------------------------------
@@ -766,7 +720,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
//--------------------------------------------------------------------------
- // Initialize the indirect function tables.
+ // Load element segments into tables.
//--------------------------------------------------------------------------
if (table_count > 0) {
LoadTableSegments(instance);
@@ -787,9 +741,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code =
- JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
- isolate_, function.sig, module_, function.imported);
+ Handle<CodeT> wrapper_code =
+ ToCodeT(JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate_, function.sig, module_, function.imported),
+ isolate_);
// TODO(clemensb): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
@@ -836,7 +791,7 @@ bool InstanceBuilder::ExecuteStartFunction() {
// v8::Context::Enter() and must happen in addition to the function call
// sequence doing the compiled version of "isolate->set_context(...)".
HandleScopeImplementer* hsi = isolate_->handle_scope_implementer();
- hsi->EnterContext(start_function_->context());
+ hsi->EnterContext(start_function_->context().native_context());
// Call the JS function.
Handle<Object> undefined = isolate_->factory()->undefined_value();
@@ -913,6 +868,57 @@ bool HasDefaultToNumberBehaviour(Isolate* isolate,
// Just a default function, which will convert to "Nan". Accept this.
return true;
}
+
+V8_INLINE WasmValue EvaluateInitExpression(Zone* zone, ConstantExpression expr,
+ ValueType expected, Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ ErrorThrower* thrower) {
+ switch (expr.kind()) {
+ case ConstantExpression::kEmpty:
+ UNREACHABLE();
+ case ConstantExpression::kI32Const:
+ return WasmValue(expr.i32_value());
+ case ConstantExpression::kRefNull:
+ return WasmValue(isolate->factory()->null_value(),
+ ValueType::Ref(expr.repr(), kNullable));
+ case ConstantExpression::kRefFunc: {
+ uint32_t index = expr.index();
+ Handle<Object> value =
+ WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
+ index);
+ return WasmValue(value, expected);
+ }
+ case ConstantExpression::kWireBytesRef: {
+ WireBytesRef ref = expr.wire_bytes_ref();
+
+ base::Vector<const byte> module_bytes =
+ instance->module_object().native_module()->wire_bytes();
+
+ const byte* start = module_bytes.begin() + ref.offset();
+ const byte* end = module_bytes.begin() + ref.end_offset();
+
+ auto sig = FixedSizeSignature<ValueType>::Returns(expected);
+ FunctionBody body(&sig, ref.offset(), start, end);
+ WasmFeatures detected;
+ // We use kFullValidation so we do not have to create another template
+ // instance of WasmFullDecoder, which would cost us >50Kb binary code
+ // size.
+ WasmFullDecoder<Decoder::kFullValidation, InitExprInterface,
+ kInitExpression>
+ decoder(zone, instance->module(), WasmFeatures::All(), &detected,
+ body, instance->module(), isolate, instance);
+
+ decoder.DecodeFunctionBody();
+
+ if (decoder.interface().runtime_error()) {
+ thrower->RuntimeError("%s", decoder.interface().runtime_error_msg());
+ return {};
+ }
+
+ return decoder.interface().result();
+ }
+ }
+}
} // namespace
// Look up an import value in the {ffi_} object specifically for linking an
@@ -974,8 +980,10 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
size_t dest_offset;
if (module_->is_memory64) {
uint64_t dest_offset_64 =
- EvaluateInitExpression(segment.dest_addr, kWasmI64, instance)
+ EvaluateInitExpression(&init_expr_zone_, segment.dest_addr, kWasmI64,
+ isolate_, instance, thrower_)
.to_u64();
+ if (thrower_->error()) return;
// Clamp to {std::numeric_limits<size_t>::max()}, which is always an
// invalid offset.
DCHECK_GT(std::numeric_limits<size_t>::max(), instance->memory_size());
@@ -983,8 +991,10 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
dest_offset_64, uint64_t{std::numeric_limits<size_t>::max()}));
} else {
dest_offset =
- EvaluateInitExpression(segment.dest_addr, kWasmI32, instance)
+ EvaluateInitExpression(&init_expr_zone_, segment.dest_addr, kWasmI32,
+ isolate_, instance, thrower_)
.to_u32();
+ if (thrower_->error()) return;
}
if (!base::IsInBounds<size_t>(dest_offset, size, instance->memory_size())) {
@@ -1081,8 +1091,8 @@ bool InstanceBuilder::ProcessImportedFunction(
const FunctionSig* expected_sig = module_->functions[func_index].sig;
auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
module_, enabled_);
- compiler::WasmImportCallKind kind = resolved.first;
- js_receiver = resolved.second;
+ compiler::WasmImportCallKind kind = resolved.kind;
+ js_receiver = resolved.callable;
switch (kind) {
case compiler::WasmImportCallKind::kLinkError:
ReportLinkError("imported function does not match the expected type",
@@ -1105,14 +1115,15 @@ bool InstanceBuilder::ProcessImportedFunction(
WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
// TODO(jkummerow): Consider precompiling CapiCallWrappers in parallel,
// just like other import wrappers.
- WasmCode* wasm_code = cache->MaybeGet(kind, expected_sig, expected_arity);
+ WasmCode* wasm_code =
+ cache->MaybeGet(kind, expected_sig, expected_arity, kNoSuspend);
if (wasm_code == nullptr) {
WasmCodeRefScope code_ref_scope;
WasmImportWrapperCache::ModificationScope cache_scope(cache);
wasm_code =
compiler::CompileWasmCapiCallWrapper(native_module, expected_sig);
- WasmImportWrapperCache::CacheKey key(kind, expected_sig,
- expected_arity);
+ WasmImportWrapperCache::CacheKey key(kind, expected_sig, expected_arity,
+ kNoSuspend);
cache_scope[key] = wasm_code;
wasm_code->IncRef();
isolate_->counters()->wasm_generated_code_size()->Increment(
@@ -1124,7 +1135,21 @@ bool InstanceBuilder::ProcessImportedFunction(
ImportedFunctionEntry entry(instance, func_index);
// We re-use the SetWasmToJs infrastructure because it passes the
// callable to the wrapper, which we need to get the function data.
- entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code,
+ isolate_->factory()->undefined_value());
+ break;
+ }
+ case compiler::WasmImportCallKind::kWasmToJSFastApi: {
+ NativeModule* native_module = instance->module_object().native_module();
+ DCHECK(js_receiver->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
+
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* wasm_code = compiler::CompileWasmJSFastCallWrapper(
+ native_module, expected_sig, function);
+ ImportedFunctionEntry entry(instance, func_index);
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code,
+ isolate_->factory()->undefined_value());
break;
}
default: {
@@ -1139,13 +1164,17 @@ bool InstanceBuilder::ProcessImportedFunction(
}
NativeModule* native_module = instance->module_object().native_module();
+ Suspend suspend =
+ resolved.suspender.is_null() || resolved.suspender->IsUndefined()
+ ? kNoSuspend
+ : kSuspend;
WasmCode* wasm_code = native_module->import_wrapper_cache()->Get(
- kind, expected_sig, expected_arity);
+ kind, expected_sig, expected_arity, suspend);
DCHECK_NOT_NULL(wasm_code);
ImportedFunctionEntry entry(instance, func_index);
if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
// Wasm to JS wrappers are treated specially in the import table.
- entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code, resolved.suspender);
} else {
// Wasm math intrinsics are compiled as regular Wasm functions.
DCHECK(kind >= compiler::WasmImportCallKind::kFirstMathIntrinsic &&
@@ -1392,7 +1421,6 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
value = WasmValue(global_object->GetF64());
break;
case kRtt:
- case kRttWithDepth:
case kRef:
case kOptRef:
value = WasmValue(global_object->GetRef(), global_object->type());
@@ -1534,23 +1562,28 @@ void InstanceBuilder::CompileImportWrappers(
const FunctionSig* sig = module_->functions[func_index].sig;
auto resolved =
compiler::ResolveWasmImportCall(js_receiver, sig, module_, enabled_);
- compiler::WasmImportCallKind kind = resolved.first;
+ compiler::WasmImportCallKind kind = resolved.kind;
if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
kind == compiler::WasmImportCallKind::kLinkError ||
- kind == compiler::WasmImportCallKind::kWasmToCapi) {
+ kind == compiler::WasmImportCallKind::kWasmToCapi ||
+ kind == compiler::WasmImportCallKind::kWasmToJSFastApi) {
continue;
}
int expected_arity = static_cast<int>(sig->parameter_count());
- if (resolved.first ==
+ if (resolved.kind ==
compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.callable);
SharedFunctionInfo shared = function->shared();
expected_arity =
shared.internal_formal_parameter_count_without_receiver();
}
- WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
+ Suspend suspend =
+ resolved.suspender.is_null() || resolved.suspender->IsUndefined()
+ ? kNoSuspend
+ : kSuspend;
+ WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity, suspend);
if (cache_scope[key] != nullptr) {
// Cache entry already exists, no need to compile it again.
continue;
@@ -1650,26 +1683,6 @@ T* InstanceBuilder::GetRawUntaggedGlobalPtr(const WasmGlobal& global) {
return reinterpret_cast<T*>(raw_buffer_ptr(untagged_globals_, global.offset));
}
-WasmValue InstanceBuilder::EvaluateInitExpression(
- WireBytesRef init, ValueType expected,
- Handle<WasmInstanceObject> instance) {
- base::Vector<const byte> module_bytes =
- instance->module_object().native_module()->wire_bytes();
- FunctionBody body(FunctionSig::Build(&init_expr_zone_, {expected}, {}),
- init.offset(), module_bytes.begin() + init.offset(),
- module_bytes.begin() + init.end_offset());
- WasmFeatures detected;
- // We use kFullValidation so we do not have to create another instance of
- // WasmFullDecoder, which would cost us >50Kb binary code size.
- WasmFullDecoder<Decoder::kFullValidation, InitExprInterface, kInitExpression>
- decoder(&init_expr_zone_, module_, WasmFeatures::All(), &detected, body,
- module_, isolate_, instance, tagged_globals_, untagged_globals_);
-
- decoder.DecodeFunctionBody();
-
- return decoder.interface().result();
-}
-
// Process initialization of globals.
void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
for (const WasmGlobal& global : module_->globals) {
@@ -1678,7 +1691,9 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
if (!global.init.is_set()) continue;
WasmValue value =
- EvaluateInitExpression(global.init, global.type, instance);
+ EvaluateInitExpression(&init_expr_zone_, global.init, global.type,
+ isolate_, instance, thrower_);
+ if (thrower_->error()) return;
if (global.type.is_reference()) {
tagged_globals_->set(global.offset, *value.to_ref());
@@ -1887,113 +1902,78 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
}
-void SetNullTableEntry(Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<WasmTableObject> table_object,
- uint32_t table_index, uint32_t entry_index) {
- const WasmModule* module = instance->module();
- if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- instance->GetIndirectFunctionTable(isolate, table_index)
- ->Clear(entry_index);
- }
- WasmTableObject::Set(isolate, table_object, entry_index,
- isolate->factory()->null_value());
-}
-
-void SetFunctionTableEntry(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Handle<WasmTableObject> table_object,
- uint32_t table_index, uint32_t entry_index,
- uint32_t func_index) {
+namespace {
+V8_INLINE void SetFunctionTablePlaceholder(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmTableObject> table_object,
+ uint32_t entry_index,
+ uint32_t func_index) {
const WasmModule* module = instance->module();
const WasmFunction* function = &module->functions[func_index];
-
- // For externref tables, we have to generate the WasmExternalFunction eagerly.
- // Later we cannot know if an entry is a placeholder or not.
- if (table_object->type().is_reference_to(HeapType::kExtern)) {
- Handle<WasmInternalFunction> wasm_internal_function =
- WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
- func_index);
- WasmTableObject::Set(isolate, table_object, entry_index,
- wasm_internal_function);
+ MaybeHandle<WasmInternalFunction> wasm_internal_function =
+ WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
+ func_index);
+ if (wasm_internal_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create a {Tuple2}
+ // holding the information to lazily allocate one.
+ WasmTableObject::SetFunctionTablePlaceholder(
+ isolate, table_object, entry_index, instance, func_index);
} else {
- DCHECK(IsSubtypeOf(table_object->type(), kWasmFuncRef, module));
-
- // Update the local dispatch table first if necessary.
- uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
- FunctionTargetAndRef entry(instance, func_index);
- instance->GetIndirectFunctionTable(isolate, table_index)
- ->Set(entry_index, sig_id, entry.call_target(), *entry.ref());
-
- // Update the table object's other dispatch tables.
- MaybeHandle<WasmInternalFunction> wasm_internal_function =
- WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
- func_index);
- if (wasm_internal_function.is_null()) {
- // No JSFunction entry yet exists for this function. Create a
- // {Tuple2} holding the information to lazily allocate one.
- WasmTableObject::SetFunctionTablePlaceholder(
- isolate, table_object, entry_index, instance, func_index);
- } else {
- table_object->entries().set(entry_index,
- *wasm_internal_function.ToHandleChecked());
- }
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
- function->sig, instance, func_index);
+ table_object->entries().set(entry_index,
+ *wasm_internal_function.ToHandleChecked());
}
+ WasmTableObject::UpdateDispatchTables(isolate, *table_object, entry_index,
+ function, *instance);
+}
+
+V8_INLINE void SetFunctionTableNullEntry(Isolate* isolate,
+ Handle<WasmTableObject> table_object,
+ uint32_t entry_index) {
+ table_object->entries().set(entry_index, *isolate->factory()->null_value());
+ WasmTableObject::ClearDispatchTables(isolate, table_object, entry_index);
}
+} // namespace
-void InstanceBuilder::InitializeIndirectFunctionTables(
+void InstanceBuilder::InitializeNonDefaultableTables(
Handle<WasmInstanceObject> instance) {
for (int table_index = 0;
table_index < static_cast<int>(module_->tables.size()); ++table_index) {
const WasmTable& table = module_->tables[table_index];
-
- if (IsSubtypeOf(table.type, kWasmFuncRef, module_)) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table_index, table.initial_size);
- }
-
if (!table.type.is_defaultable()) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(table_index)), isolate_);
- Handle<Object> value =
- EvaluateInitExpression(table.initial_value, table.type, instance)
- .to_ref();
- if (value.is_null()) {
+ bool is_function_table = IsSubtypeOf(table.type, kWasmFuncRef, module_);
+ if (is_function_table &&
+ table.initial_value.kind() == ConstantExpression::kRefFunc) {
for (uint32_t entry_index = 0; entry_index < table.initial_size;
entry_index++) {
- SetNullTableEntry(isolate_, instance, table_object, table_index,
- entry_index);
- }
- } else if (value->IsWasmInternalFunction()) {
- Handle<Object> external = handle(
- Handle<WasmInternalFunction>::cast(value)->external(), isolate_);
- // TODO(manoskouk): Support WasmJSFunction/WasmCapiFunction.
- if (!WasmExportedFunction::IsWasmExportedFunction(*external)) {
- thrower_->TypeError(
- "Initializing a table with a Webassembly.Function object is not "
- "supported yet");
+ SetFunctionTablePlaceholder(isolate_, instance, table_object,
+ entry_index, table.initial_value.index());
}
- uint32_t function_index =
- Handle<WasmExportedFunction>::cast(external)->function_index();
+ } else if (is_function_table &&
+ table.initial_value.kind() == ConstantExpression::kRefNull) {
for (uint32_t entry_index = 0; entry_index < table.initial_size;
entry_index++) {
- SetFunctionTableEntry(isolate_, instance, table_object, table_index,
- entry_index, function_index);
+ SetFunctionTableNullEntry(isolate_, table_object, entry_index);
}
} else {
+ WasmValue value =
+ EvaluateInitExpression(&init_expr_zone_, table.initial_value,
+ table.type, isolate_, instance, thrower_);
+ if (thrower_->error()) return;
for (uint32_t entry_index = 0; entry_index < table.initial_size;
entry_index++) {
- WasmTableObject::Set(isolate_, table_object, entry_index, value);
+ WasmTableObject::Set(isolate_, table_object, entry_index,
+ value.to_ref());
}
}
- }
}
}
+}
-bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
+namespace {
+bool LoadElemSegmentImpl(Zone* zone, Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
Handle<WasmTableObject> table_object,
uint32_t table_index, uint32_t segment_index,
uint32_t dst, uint32_t src, size_t count) {
@@ -2011,43 +1991,30 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
return false;
}
+ bool is_function_table =
+ IsSubtypeOf(table_object->type(), kWasmFuncRef, instance->module());
+
+ ErrorThrower thrower(isolate, "LoadElemSegment");
+
for (size_t i = 0; i < count; ++i) {
- WasmElemSegment::Entry init = elem_segment.entries[src + i];
+ ConstantExpression entry = elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);
- switch (init.kind) {
- case WasmElemSegment::Entry::kRefNullEntry:
- SetNullTableEntry(isolate, instance, table_object, table_index,
- entry_index);
- break;
- case WasmElemSegment::Entry::kRefFuncEntry:
- SetFunctionTableEntry(isolate, instance, table_object, table_index,
- entry_index, init.index);
- break;
- case WasmElemSegment::Entry::kGlobalGetEntry: {
- Handle<Object> value =
- WasmInstanceObject::GetGlobalValue(
- instance, instance->module()->globals[init.index])
- .to_ref();
- if (value.is_null()) {
- SetNullTableEntry(isolate, instance, table_object, table_index,
- entry_index);
- } else if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- uint32_t function_index =
- Handle<WasmExportedFunction>::cast(value)->function_index();
- SetFunctionTableEntry(isolate, instance, table_object, table_index,
- entry_index, function_index);
- } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
- // TODO(manoskouk): Support WasmJSFunction.
- return false;
- } else {
- WasmTableObject::Set(isolate, table_object, entry_index, value);
- }
- break;
- }
+ if (is_function_table && entry.kind() == ConstantExpression::kRefFunc) {
+ SetFunctionTablePlaceholder(isolate, instance, table_object, entry_index,
+ entry.index());
+ } else if (is_function_table &&
+ entry.kind() == ConstantExpression::kRefNull) {
+ SetFunctionTableNullEntry(isolate, table_object, entry_index);
+ } else {
+ WasmValue value = EvaluateInitExpression(zone, entry, elem_segment.type,
+ isolate, instance, &thrower);
+ if (thrower.error()) return false;
+ WasmTableObject::Set(isolate, table_object, entry_index, value.to_ref());
}
}
return true;
}
+} // namespace
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
for (uint32_t segment_index = 0;
@@ -2058,39 +2025,25 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
uint32_t table_index = elem_segment.table_index;
uint32_t dst =
- EvaluateInitExpression(elem_segment.offset, kWasmI32, instance)
+ EvaluateInitExpression(&init_expr_zone_, elem_segment.offset, kWasmI32,
+ isolate_, instance, thrower_)
.to_u32();
+ if (thrower_->error()) return;
uint32_t src = 0;
size_t count = elem_segment.entries.size();
bool success = LoadElemSegmentImpl(
- isolate_, instance,
+ &init_expr_zone_, isolate_, instance,
handle(WasmTableObject::cast(
instance->tables().get(elem_segment.table_index)),
isolate_),
table_index, segment_index, dst, src, count);
- // Set the active segments to being already dropped, since memory.init on
- // a dropped passive segment and an active segment have the same
- // behavior.
+ // Set the active segments to being already dropped, since table.init on
+ // a dropped passive segment and an active segment have the same behavior.
instance->dropped_elem_segments()[segment_index] = 1;
if (!success) {
thrower_->RuntimeError("table initializer is out of bounds");
- // Break out instead of returning; we don't want to continue to
- // initialize any further element segments, but still need to add
- // dispatch tables below.
- break;
- }
- }
-
- int table_count = static_cast<int>(module_->tables.size());
- for (int index = 0; index < table_count; ++index) {
- if (IsSubtypeOf(module_->tables[index].type, kWasmFuncRef, module_)) {
- auto table_object = handle(
- WasmTableObject::cast(instance->tables().get(index)), isolate_);
-
- // Add the new dispatch table at the end to avoid redundant lookups.
- WasmTableObject::AddDispatchTable(isolate_, table_object, instance,
- index);
+ return;
}
}
}
@@ -2107,8 +2060,13 @@ void InstanceBuilder::InitializeTags(Handle<WasmInstanceObject> instance) {
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
+ AccountingAllocator allocator;
+ // This {Zone} will be used only by the temporary WasmFullDecoder allocated
+ // down the line from this call. Therefore it is safe to stack-allocate it
+ // here.
+ Zone zone(&allocator, "LoadElemSegment");
return LoadElemSegmentImpl(
- isolate, instance,
+ &zone, isolate, instance,
handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
table_index, segment_index, dst, src, count);
diff --git a/deps/v8/src/wasm/module-instantiate.h b/deps/v8/src/wasm/module-instantiate.h
index baa064f20d..35f63c4a6f 100644
--- a/deps/v8/src/wasm/module-instantiate.h
+++ b/deps/v8/src/wasm/module-instantiate.h
@@ -11,7 +11,6 @@
#include <stdint.h>
-#include "include/v8-metrics.h"
#include "include/v8config.h"
namespace v8 {
@@ -20,9 +19,9 @@ namespace internal {
class Isolate;
class JSArrayBuffer;
class JSReceiver;
-class WasmInitExpr;
class WasmModuleObject;
class WasmInstanceObject;
+class Zone;
template <typename T>
class Handle;
@@ -42,9 +41,6 @@ bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) V8_WARN_UNUSED_RESULT;
-uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
- const WasmInitExpr& expr);
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/stacks.h b/deps/v8/src/wasm/stacks.h
index 6e7c7f49d7..f29a673dbc 100644
--- a/deps/v8/src/wasm/stacks.h
+++ b/deps/v8/src/wasm/stacks.h
@@ -21,12 +21,14 @@ namespace wasm {
struct JumpBuffer {
Address sp;
Address fp;
+ Address pc;
void* stack_limit;
// TODO(thibaudm/fgm): Add general-purpose registers.
};
constexpr int kJmpBufSpOffset = offsetof(JumpBuffer, sp);
constexpr int kJmpBufFpOffset = offsetof(JumpBuffer, fp);
+constexpr int kJmpBufPcOffset = offsetof(JumpBuffer, pc);
constexpr int kJmpBufStackLimitOffset = offsetof(JumpBuffer, stack_limit);
class StackMemory {
@@ -42,7 +44,7 @@ class StackMemory {
~StackMemory() {
if (FLAG_trace_wasm_stack_switching) {
- PrintF("Delete stack (sp: %p)\n", reinterpret_cast<void*>(jmpbuf_.sp));
+ PrintF("Delete stack #%d\n", id_);
}
PageAllocator* allocator = GetPlatformPageAllocator();
if (owned_) allocator->DecommitPages(limit_, size_);
@@ -57,6 +59,7 @@ class StackMemory {
void* jslimit() const { return limit_ + kJSLimitOffsetKB; }
Address base() const { return reinterpret_cast<Address>(limit_ + size_); }
JumpBuffer* jmpbuf() { return &jmpbuf_; }
+ int id() { return id_; }
// Insert a stack in the linked list after this stack.
void Add(StackMemory* stack) {
@@ -76,10 +79,16 @@ class StackMemory {
}
private:
+#ifdef DEBUG
+ static constexpr int kJSLimitOffsetKB = 80;
+#else
static constexpr int kJSLimitOffsetKB = 40;
+#endif
// This constructor allocates a new stack segment.
explicit StackMemory(Isolate* isolate) : isolate_(isolate), owned_(true) {
+ static std::atomic<int> next_id(1);
+ id_ = next_id.fetch_add(1);
PageAllocator* allocator = GetPlatformPageAllocator();
int kJsStackSizeKB = 4;
size_ = (kJsStackSizeKB + kJSLimitOffsetKB) * KB;
@@ -87,8 +96,9 @@ class StackMemory {
limit_ = static_cast<byte*>(
allocator->AllocatePages(nullptr, size_, allocator->AllocatePageSize(),
PageAllocator::kReadWrite));
- if (FLAG_trace_wasm_stack_switching)
- PrintF("Allocate stack (sp: %p, limit: %p)\n", limit_ + size_, limit_);
+ if (FLAG_trace_wasm_stack_switching) {
+ PrintF("Allocate stack #%d\n", id_);
+ }
}
// Overload to represent a view of the libc stack.
@@ -96,13 +106,16 @@ class StackMemory {
: isolate_(isolate),
limit_(limit),
size_(reinterpret_cast<size_t>(limit)),
- owned_(false) {}
+ owned_(false) {
+ id_ = 0;
+ }
Isolate* isolate_;
byte* limit_;
size_t size_;
bool owned_;
JumpBuffer jmpbuf_;
+ int id_;
// Stacks form a circular doubly linked list per isolate.
StackMemory* next_ = this;
StackMemory* prev_ = this;
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index d182c87dbb..bf386ac6af 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -77,6 +77,8 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
offset_in_code_buffer + ref.length());
}
+ base::Optional<ModuleWireBytes> GetModuleBytes() const final { return {}; }
+
uint32_t module_offset() const { return module_offset_; }
base::Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
base::Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 6f4601b9f4..463d29f4d0 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -10,7 +10,6 @@
#define V8_WASM_STREAMING_DECODER_H_
#include <memory>
-#include <vector>
#include "src/base/macros.h"
#include "src/base/vector.h"
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 29482d007b..84929b85f7 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -41,13 +41,12 @@ class Simd128;
V(I8, 0, I8, Int8, 'b', "i8") \
V(I16, 1, I16, Int16, 'h', "i16")
-#define FOREACH_VALUE_TYPE(V) \
- V(Void, -1, Void, None, 'v', "<void>") \
- FOREACH_NUMERIC_VALUE_TYPE(V) \
- V(Rtt, kTaggedSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
- V(RttWithDepth, kTaggedSizeLog2, RttWithDepth, TaggedPointer, 'k', "rtt") \
- V(Ref, kTaggedSizeLog2, Ref, AnyTagged, 'r', "ref") \
- V(OptRef, kTaggedSizeLog2, OptRef, AnyTagged, 'n', "ref null") \
+#define FOREACH_VALUE_TYPE(V) \
+ V(Void, -1, Void, None, 'v', "<void>") \
+ FOREACH_NUMERIC_VALUE_TYPE(V) \
+ V(Rtt, kTaggedSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
+ V(Ref, kTaggedSizeLog2, Ref, AnyTagged, 'r', "ref") \
+ V(OptRef, kTaggedSizeLog2, OptRef, AnyTagged, 'n', "ref null") \
V(Bottom, -1, Void, None, '*', "<bot>")
constexpr int kMaxValueTypeSize = 16; // bytes
@@ -62,34 +61,31 @@ class HeapType {
public:
enum Representation : uint32_t {
kFunc = kV8MaxWasmTypes, // shorthand: c
- kExtern, // shorthand: e
kEq, // shorthand: q
kI31, // shorthand: j
kData, // shorthand: o
- kAny, // shorthand: a
+ kArray, // shorthand: g
+ kAny, // shorthand: a. Aka kExtern.
// This value is used to represent failures in the parsing of heap types and
- // does not correspond to a wasm heap type.
+ // does not correspond to a wasm heap type. It has to be last in this list.
kBottom
};
- // Internal use only; defined in the public section to make it easy to
- // check that they are defined correctly:
- static constexpr Representation kFirstSentinel = kFunc;
- static constexpr Representation kLastSentinel = kAny;
static constexpr HeapType from_code(uint8_t code) {
switch (code) {
case ValueTypeCode::kFuncRefCode:
return HeapType(kFunc);
- case ValueTypeCode::kExternRefCode:
- return HeapType(kExtern);
case ValueTypeCode::kEqRefCode:
return HeapType(kEq);
case ValueTypeCode::kI31RefCode:
return HeapType(kI31);
case ValueTypeCode::kAnyRefCode:
+ case ValueTypeCode::kAnyRefCodeAlias:
return HeapType(kAny);
case ValueTypeCode::kDataRefCode:
return HeapType(kData);
+ case ValueTypeCode::kArrayRefCode:
+ return HeapType(kArray);
default:
return HeapType(kBottom);
}
@@ -134,16 +130,16 @@ class HeapType {
switch (representation_) {
case kFunc:
return std::string("func");
- case kExtern:
- return std::string("extern");
case kEq:
return std::string("eq");
case kI31:
return std::string("i31");
case kData:
return std::string("data");
+ case kArray:
+ return std::string("array");
case kAny:
- return std::string("any");
+ return std::string(FLAG_experimental_wasm_gc ? "any" : "extern");
default:
return std::to_string(representation_);
}
@@ -157,14 +153,14 @@ class HeapType {
switch (representation_) {
case kFunc:
return mask | kFuncRefCode;
- case kExtern:
- return mask | kExternRefCode;
case kEq:
return mask | kEqRefCode;
case kI31:
return mask | kI31RefCode;
case kData:
return mask | kDataRefCode;
+ case kArray:
+ return mask | kArrayRefCode;
case kAny:
return mask | kAnyRefCode;
default:
@@ -174,8 +170,14 @@ class HeapType {
private:
friend class ValueType;
- Representation representation_;
+
constexpr bool is_valid() const { return representation_ <= kLastSentinel; }
+
+ static constexpr Representation kFirstSentinel =
+ static_cast<Representation>(kV8MaxWasmTypes);
+ static constexpr Representation kLastSentinel =
+ static_cast<Representation>(kBottom - 1);
+ Representation representation_;
};
enum Nullability : bool { kNonNullable, kNullable };
@@ -199,8 +201,7 @@ constexpr bool is_numeric(ValueKind kind) {
}
constexpr bool is_reference(ValueKind kind) {
- return kind == kRef || kind == kOptRef || kind == kRtt ||
- kind == kRttWithDepth;
+ return kind == kRef || kind == kOptRef || kind == kRtt;
}
constexpr bool is_object_reference(ValueKind kind) {
@@ -271,9 +272,7 @@ constexpr ValueKind unpacked(ValueKind kind) {
return is_packed(kind) ? kI32 : kind;
}
-constexpr bool is_rtt(ValueKind kind) {
- return kind == kRtt || kind == kRttWithDepth;
-}
+constexpr bool is_rtt(ValueKind kind) { return kind == kRtt; }
constexpr bool is_defaultable(ValueKind kind) {
DCHECK(kind != kBottom && kind != kVoid);
@@ -310,14 +309,6 @@ class ValueType {
HeapTypeField::encode(type_index));
}
- static constexpr ValueType Rtt(uint32_t type_index,
- uint8_t inheritance_depth) {
- DCHECK(HeapType(type_index).is_index());
- return ValueType(KindField::encode(kRttWithDepth) |
- HeapTypeField::encode(type_index) |
- DepthField::encode(inheritance_depth));
- }
-
// Useful when deserializing a type stored in a runtime object.
static constexpr ValueType FromRawBitField(uint32_t bit_field) {
return ValueType(bit_field);
@@ -340,7 +331,6 @@ class ValueType {
}
constexpr bool is_rtt() const { return wasm::is_rtt(kind()); }
- constexpr bool has_depth() const { return kind() == kRttWithDepth; }
constexpr bool has_index() const {
return is_rtt() || (is_object_reference() && heap_type().is_index());
@@ -374,10 +364,6 @@ class ValueType {
DCHECK(is_object_reference());
return HeapType(heap_representation());
}
- constexpr uint8_t depth() const {
- DCHECK(has_depth());
- return DepthField::decode(bit_field_);
- }
constexpr uint32_t ref_index() const {
DCHECK(has_index());
return HeapTypeField::decode(bit_field_);
@@ -432,7 +418,7 @@ class ValueType {
case MachineRepresentation::kFloat64:
return Primitive(kF64);
case MachineRepresentation::kTaggedPointer:
- return Ref(HeapType::kExtern, kNullable);
+ return Ref(HeapType::kAny, kNullable);
case MachineRepresentation::kSimd128:
return Primitive(kS128);
default:
@@ -455,8 +441,6 @@ class ValueType {
switch (heap_representation()) {
case HeapType::kFunc:
return kFuncRefCode;
- case HeapType::kExtern:
- return kExternRefCode;
case HeapType::kEq:
return kEqRefCode;
case HeapType::kAny:
@@ -470,6 +454,8 @@ class ValueType {
return kI31RefCode;
case HeapType::kData:
return kDataRefCode;
+ case HeapType::kArray:
+ return kArrayRefCode;
default:
return kRefCode;
}
@@ -477,8 +463,6 @@ class ValueType {
return kVoidCode;
case kRtt:
return kRttCode;
- case kRttWithDepth:
- return kRttWithDepthCode;
#define NUMERIC_TYPE_CASE(kind, ...) \
case k##kind: \
return k##kind##Code;
@@ -493,14 +477,21 @@ class ValueType {
// Returns true iff the heap type is needed to encode this type in the wasm
// binary format, taking into account available type shorthands.
constexpr bool encoding_needs_heap_type() const {
- return (kind() == kRef && heap_representation() != HeapType::kI31 &&
- heap_representation() != HeapType::kData) ||
- (kind() == kOptRef && (heap_type().is_index() ||
- heap_representation() == HeapType::kI31 ||
- heap_representation() == HeapType::kData));
+ switch (kind()) {
+ case kRef:
+ return heap_representation() != HeapType::kI31 &&
+ heap_representation() != HeapType::kArray &&
+ heap_representation() != HeapType::kData;
+ case kOptRef:
+ return heap_representation() != HeapType::kFunc &&
+ heap_representation() != HeapType::kEq &&
+ heap_representation() != HeapType::kAny;
+ default:
+ return false;
+ }
}
- static constexpr int kLastUsedBit = 30;
+ static constexpr int kLastUsedBit = 24;
/****************************** Pretty-printing *****************************/
constexpr char short_name() const { return wasm::short_name(kind()); }
@@ -517,10 +508,6 @@ class ValueType {
buf << heap_type().name() << "ref";
}
break;
- case kRttWithDepth:
- buf << "(rtt " << static_cast<uint32_t>(depth()) << " " << ref_index()
- << ")";
- break;
case kRtt:
buf << "(rtt " << ref_index() << ")";
break;
@@ -530,24 +517,23 @@ class ValueType {
return buf.str();
}
- private:
// We only use 31 bits so ValueType fits in a Smi. This can be changed if
// needed.
static constexpr int kKindBits = 5;
static constexpr int kHeapTypeBits = 20;
- static constexpr int kDepthBits = 6;
+
+ private:
STATIC_ASSERT(kV8MaxWasmTypes < (1u << kHeapTypeBits));
- // Note: we currently conservatively allow only 5 bits, but have room to
- // store 6, so we can raise the limit if needed.
- STATIC_ASSERT(kV8MaxRttSubtypingDepth < (1u << kDepthBits));
+
+ // {hash_value} directly reads {bit_field_}.
+ friend size_t hash_value(ValueType type);
+
using KindField = base::BitField<ValueKind, 0, kKindBits>;
using HeapTypeField = KindField::Next<uint32_t, kHeapTypeBits>;
- using DepthField = HeapTypeField::Next<uint8_t, kDepthBits>;
// This is implemented defensively against field order changes.
- STATIC_ASSERT(kLastUsedBit == std::max(KindField::kLastUsedBit,
- std::max(HeapTypeField::kLastUsedBit,
- DepthField::kLastUsedBit)));
+ STATIC_ASSERT(kLastUsedBit ==
+ std::max(KindField::kLastUsedBit, HeapTypeField::kLastUsedBit));
constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
@@ -562,7 +548,8 @@ static_assert(ValueType::kLastUsedBit < 8 * sizeof(ValueType) - kSmiTagSize,
"ValueType has space to be encoded in a Smi");
inline size_t hash_value(ValueType type) {
- return static_cast<size_t>(type.kind());
+ // Just use the whole encoded bit field, similar to {operator==}.
+ return static_cast<size_t>(type.bit_field_);
}
// Output operator, useful for DCHECKS and others.
@@ -580,19 +567,22 @@ constexpr ValueType kWasmI8 = ValueType::Primitive(kI8);
constexpr ValueType kWasmI16 = ValueType::Primitive(kI16);
constexpr ValueType kWasmVoid = ValueType::Primitive(kVoid);
constexpr ValueType kWasmBottom = ValueType::Primitive(kBottom);
-// Established reference-type proposal shorthands.
+// Established reference-type and wasm-gc proposal shorthands.
constexpr ValueType kWasmFuncRef = ValueType::Ref(HeapType::kFunc, kNullable);
-constexpr ValueType kWasmExternRef =
- ValueType::Ref(HeapType::kExtern, kNullable);
+constexpr ValueType kWasmAnyRef = ValueType::Ref(HeapType::kAny, kNullable);
constexpr ValueType kWasmEqRef = ValueType::Ref(HeapType::kEq, kNullable);
constexpr ValueType kWasmI31Ref = ValueType::Ref(HeapType::kI31, kNonNullable);
constexpr ValueType kWasmDataRef =
ValueType::Ref(HeapType::kData, kNonNullable);
-constexpr ValueType kWasmAnyRef = ValueType::Ref(HeapType::kAny, kNullable);
+constexpr ValueType kWasmArrayRef =
+ ValueType::Ref(HeapType::kArray, kNonNullable);
+
+// Constants used by the generic js-to-wasm wrapper.
+constexpr int kWasmValueKindBitsMask = (1u << ValueType::kKindBits) - 1;
// This is used in wasm.tq.
-constexpr ValueType kWasmExternNonNullableRef =
- ValueType::Ref(HeapType::kExtern, kNonNullable);
+constexpr ValueType kWasmAnyNonNullableRef =
+ ValueType::Ref(HeapType::kAny, kNonNullable);
#define FOREACH_WASMVALUE_CTYPES(V) \
V(kI32, int32_t) \
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 0ad2c15df5..eb5edb877d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -599,28 +599,39 @@ size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
return overhead;
}
-// Returns both the minimum size to reserve, and an estimate how much should be
-// reserved.
-std::pair<size_t, size_t> ReservationSize(size_t code_size_estimate,
- int num_declared_functions,
- size_t total_reserved) {
+// Returns an estimate how much code space should be reserved.
+size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
+ size_t total_reserved) {
size_t overhead = OverheadPerCodeSpace(num_declared_functions);
- // Reserve a power of two at least as big as any of
+ // Reserve the maximum of
// a) needed size + overhead (this is the minimum needed)
// b) 2 * overhead (to not waste too much space by overhead)
// c) 1/4 of current total reservation size (to grow exponentially)
size_t minimum_size = 2 * overhead;
- size_t suggested_size = base::bits::RoundUpToPowerOfTwo(
+ size_t suggested_size =
std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
minimum_size),
- total_reserved / 4));
+ total_reserved / 4);
+
+ if (V8_UNLIKELY(minimum_size > WasmCodeAllocator::kMaxCodeSpaceSize)) {
+ constexpr auto format = base::StaticCharVector(
+ "wasm code reservation: required minimum (%zu) is bigger than "
+ "supported maximum (%zu)");
+ constexpr int kMaxMessageLength =
+ format.size() - 6 + 2 * std::numeric_limits<size_t>::digits10;
+ base::EmbeddedVector<char, kMaxMessageLength + 1> message;
+ SNPrintF(message, format.begin(), minimum_size,
+ WasmCodeAllocator::kMaxCodeSpaceSize);
+ V8::FatalProcessOutOfMemory(nullptr, message.begin());
+ UNREACHABLE();
+ }
// Limit by the maximum supported code space size.
size_t reserve_size =
std::min(WasmCodeAllocator::kMaxCodeSpaceSize, suggested_size);
- return {minimum_size, reserve_size};
+ return reserve_size;
}
#ifdef DEBUG
@@ -709,14 +720,18 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
size_t total_reserved = 0;
for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
- size_t min_reservation;
- size_t reserve_size;
- std::tie(min_reservation, reserve_size) = ReservationSize(
+ size_t reserve_size = ReservationSize(
size, native_module->module()->num_declared_functions, total_reserved);
VirtualMemory new_mem =
code_manager->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
- if (!new_mem.IsReserved() || new_mem.size() < min_reservation) {
- V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
+ if (!new_mem.IsReserved()) {
+ constexpr auto format = base::StaticCharVector(
+ "Cannot allocate more code space (%zu bytes, currently %zu)");
+ constexpr int kMaxMessageLength =
+ format.size() - 6 + 2 * std::numeric_limits<size_t>::digits10;
+ base::EmbeddedVector<char, kMaxMessageLength + 1> message;
+ SNPrintF(message, format.begin(), total_reserved, reserve_size);
+ V8::FatalProcessOutOfMemory(nullptr, message.begin());
UNREACHABLE();
}
@@ -893,7 +908,8 @@ void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
writable_memory_.erase(previous);
}
}
- if (region.end() == insert_pos->begin()) {
+ if (insert_pos != writable_memory_.end() &&
+ region.end() == insert_pos->begin()) {
region = {region.begin(), insert_pos->size() + region.size()};
insert_pos = writable_memory_.erase(insert_pos);
}
@@ -1060,7 +1076,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
base::Vector<const byte> instructions(
reinterpret_cast<byte*>(code->raw_body_start()),
static_cast<size_t>(code->raw_body_size()));
- const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ const int stack_slots = code->stack_slots();
// Metadata offsets in Code objects are relative to the start of the metadata
// section, whereas WasmCode expects offsets relative to InstructionStart.
@@ -1135,9 +1151,11 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ // Avoid opening a new write scope per function. The caller should hold the
+ // scope instead.
+ DCHECK(CodeSpaceWriteScope::IsInScope());
base::RecursiveMutexGuard guard(&allocation_mutex_);
- CodeSpaceWriteScope code_space_write_scope(this);
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
@@ -1264,14 +1282,14 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
std::vector<WasmCode*> NativeModule::PublishCode(
base::Vector<std::unique_ptr<WasmCode>> codes) {
+ // Publishing often happens in a loop, so the caller should hold the
+ // {CodeSpaceWriteScope} outside of such a loop.
+ DCHECK(CodeSpaceWriteScope::IsInScope());
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
base::RecursiveMutexGuard lock(&allocation_mutex_);
- // Get writable permission already here (and not inside the loop in
- // {PatchJumpTablesLocked}), to avoid switching for each {code} individually.
- CodeSpaceWriteScope code_space_write_scope(this);
// The published code is put into the top-most surrounding {WasmCodeRefScope}.
for (auto& code : codes) {
published_code.push_back(PublishCodeLocked(std::move(code)));
@@ -1517,10 +1535,21 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
- code_allocator_.MakeWritable(
- AddressRegionOf(code_space_data.jump_table->instructions()));
- code_allocator_.MakeWritable(
- AddressRegionOf(code_space_data.far_jump_table->instructions()));
+ // Jump tables are often allocated next to each other, so we can switch
+ // permissions on both at the same time.
+ if (code_space_data.jump_table->instructions().end() ==
+ code_space_data.far_jump_table->instructions().begin()) {
+ base::Vector<uint8_t> jump_tables_space = base::VectorOf(
+ code_space_data.jump_table->instructions().begin(),
+ code_space_data.jump_table->instructions().size() +
+ code_space_data.far_jump_table->instructions().size());
+ code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space));
+ } else {
+ code_allocator_.MakeWritable(
+ AddressRegionOf(code_space_data.jump_table->instructions()));
+ code_allocator_.MakeWritable(
+ AddressRegionOf(code_space_data.far_jump_table->instructions()));
+ }
DCHECK_LT(slot_index, module_->num_declared_functions);
Address jump_table_slot =
@@ -1653,6 +1682,11 @@ class NativeModuleWireBytesStorage final : public WireBytesStorage {
.SubVector(ref.offset(), ref.end_offset());
}
+ base::Optional<ModuleWireBytes> GetModuleBytes() const final {
+ return base::Optional<ModuleWireBytes>(
+ std::atomic_load(&wire_bytes_)->as_vector());
+ }
+
private:
const std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
};
@@ -1672,13 +1706,12 @@ void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
if (tier == WasmCompilationUnit::GetBaselineExecutionTier(this->module())) {
if (!compilation_state_->baseline_compilation_finished()) {
- baseline_compilation_cpu_duration_.fetch_add(
- cpu_duration, std::memory_order::memory_order_relaxed);
+ baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
+ std::memory_order_relaxed);
}
} else if (tier == ExecutionTier::kTurbofan) {
if (!compilation_state_->top_tier_compilation_finished()) {
- tier_up_cpu_duration_.fetch_add(cpu_duration,
- std::memory_order::memory_order_relaxed);
+ tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
}
}
}
@@ -1937,15 +1970,10 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
DCHECK_LE(region.size(), old_committed);
USE(old_committed);
- TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
+ TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
region.begin(), region.end());
- if (MemoryProtectionKeysEnabled()) {
- CHECK(SetPermissionsAndMemoryProtectionKey(
- allocator, region, PageAllocator::kNoAccess, kNoMemoryProtectionKey));
- } else {
- CHECK(SetPermissions(allocator, region.begin(), region.size(),
- PageAllocator::kNoAccess));
- }
+ CHECK(allocator->DecommitPages(reinterpret_cast<void*>(region.begin()),
+ region.size()));
}
void WasmCodeManager::AssignRange(base::AddressRegion region,
@@ -1987,47 +2015,43 @@ namespace {
// separate code spaces being allocated (compile time and runtime overhead),
// choosing them too large results in over-reservation (virtual address space
// only).
-// The current numbers have been determined on 2019-11-11 by clemensb@, based
-// on one small and one large module compiled from C++ by Emscripten. If in
-// doubt, they where chosen slightly larger than required, as over-reservation
-// is not a big issue currently.
-// Numbers will change when Liftoff or TurboFan evolve, other toolchains are
-// used to produce the wasm code, or characteristics of wasm modules on the
-// web change. They might require occasional tuning.
-// This patch might help to find reasonable numbers for any future adaptation:
-// https://crrev.com/c/1910945
+// In doubt, choose the numbers slightly too large, because over-reservation is
+// less critical than multiple separate code spaces (especially on 64-bit).
+// Numbers can be determined by running benchmarks with
+// --trace-wasm-compilation-times, and piping the output through
+// tools/wasm/code-size-factors.py.
#if V8_TARGET_ARCH_X64
-constexpr size_t kTurbofanFunctionOverhead = 20;
+constexpr size_t kTurbofanFunctionOverhead = 24;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
-constexpr size_t kLiftoffFunctionOverhead = 60;
+constexpr size_t kLiftoffFunctionOverhead = 56;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
-constexpr size_t kImportSize = 350;
+constexpr size_t kImportSize = 640;
#elif V8_TARGET_ARCH_IA32
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
-constexpr size_t kLiftoffFunctionOverhead = 60;
+constexpr size_t kLiftoffFunctionOverhead = 48;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
-constexpr size_t kImportSize = 480;
+constexpr size_t kImportSize = 320;
#elif V8_TARGET_ARCH_ARM
-constexpr size_t kTurbofanFunctionOverhead = 40;
+constexpr size_t kTurbofanFunctionOverhead = 44;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
-constexpr size_t kLiftoffFunctionOverhead = 108;
-constexpr size_t kLiftoffCodeSizeMultiplier = 7;
-constexpr size_t kImportSize = 750;
+constexpr size_t kLiftoffFunctionOverhead = 96;
+constexpr size_t kLiftoffCodeSizeMultiplier = 5;
+constexpr size_t kImportSize = 550;
#elif V8_TARGET_ARCH_ARM64
-constexpr size_t kTurbofanFunctionOverhead = 60;
-constexpr size_t kTurbofanCodeSizeMultiplier = 4;
-constexpr size_t kLiftoffFunctionOverhead = 80;
-constexpr size_t kLiftoffCodeSizeMultiplier = 7;
-constexpr size_t kImportSize = 750;
-#else
-// Other platforms should add their own estimates if needed. Numbers below are
-// the minimum of other architectures.
-constexpr size_t kTurbofanFunctionOverhead = 20;
+constexpr size_t kTurbofanFunctionOverhead = 40;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
-constexpr size_t kLiftoffFunctionOverhead = 60;
+constexpr size_t kLiftoffFunctionOverhead = 68;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
-constexpr size_t kImportSize = 350;
+constexpr size_t kImportSize = 750;
+#else
+// Other platforms should add their own estimates for best performance. Numbers
+// below are the maximum of other architectures.
+constexpr size_t kTurbofanFunctionOverhead = 44;
+constexpr size_t kTurbofanCodeSizeMultiplier = 4;
+constexpr size_t kLiftoffFunctionOverhead = 96;
+constexpr size_t kLiftoffCodeSizeMultiplier = 5;
+constexpr size_t kImportSize = 750;
#endif
} // namespace
@@ -2038,8 +2062,9 @@ size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
}
// static
-size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module,
- bool include_liftoff) {
+size_t WasmCodeManager::EstimateNativeModuleCodeSize(
+ const WasmModule* module, bool include_liftoff,
+ DynamicTiering dynamic_tiering) {
int num_functions = static_cast<int>(module->num_declared_functions);
int num_imported_functions = static_cast<int>(module->num_imported_functions);
int code_section_length = 0;
@@ -2051,31 +2076,40 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module,
static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
}
return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
- code_section_length, include_liftoff);
+ code_section_length, include_liftoff,
+ dynamic_tiering);
}
// static
-size_t WasmCodeManager::EstimateNativeModuleCodeSize(int num_functions,
- int num_imported_functions,
- int code_section_length,
- bool include_liftoff) {
- const size_t overhead_per_function =
- kTurbofanFunctionOverhead + kCodeAlignment / 2 +
- (include_liftoff ? kLiftoffFunctionOverhead + kCodeAlignment / 2 : 0);
- const size_t overhead_per_code_byte =
- kTurbofanCodeSizeMultiplier +
- (include_liftoff ? kLiftoffCodeSizeMultiplier : 0);
- const size_t jump_table_size = RoundUp<kCodeAlignment>(
- JumpTableAssembler::SizeForNumberOfSlots(num_functions));
- const size_t far_jump_table_size =
- RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
- WasmCode::kRuntimeStubCount,
- NumWasmFunctionsInFarJumpTable(num_functions)));
- return jump_table_size // jump table
- + far_jump_table_size // far jump table
- + overhead_per_function * num_functions // per function
- + overhead_per_code_byte * code_section_length // per code byte
- + kImportSize * num_imported_functions; // per import
+size_t WasmCodeManager::EstimateNativeModuleCodeSize(
+ int num_functions, int num_imported_functions, int code_section_length,
+ bool include_liftoff, DynamicTiering dynamic_tiering) {
+ // Note that the size for jump tables is added later, in {ReservationSize} /
+ // {OverheadPerCodeSpace}.
+
+ const size_t size_of_imports = kImportSize * num_imported_functions;
+
+ const size_t overhead_per_function_turbofan =
+ kTurbofanFunctionOverhead + kCodeAlignment / 2;
+ size_t size_of_turbofan = overhead_per_function_turbofan * num_functions +
+ kTurbofanCodeSizeMultiplier * code_section_length;
+
+ const size_t overhead_per_function_liftoff =
+ kLiftoffFunctionOverhead + kCodeAlignment / 2;
+ size_t size_of_liftoff = overhead_per_function_liftoff * num_functions +
+ kLiftoffCodeSizeMultiplier * code_section_length;
+
+ if (!include_liftoff) {
+ size_of_liftoff = 0;
+ }
+ // With dynamic tiering we don't expect to compile more than 25% with
+ // TurboFan. If there is no liftoff though then all code will get generated
+ // by TurboFan.
+ if (include_liftoff && dynamic_tiering == DynamicTiering::kEnabled) {
+ size_of_turbofan /= 4;
+ }
+
+ return size_of_imports + size_of_liftoff + size_of_turbofan;
}
// static
@@ -2087,11 +2121,19 @@ size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
// TODO(wasm): Include wire bytes size.
size_t native_module_estimate =
- sizeof(NativeModule) + /* NativeModule struct */
- (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
- (sizeof(WasmCode) * num_wasm_functions); /* code object size */
+ sizeof(NativeModule) + // NativeModule struct
+ (sizeof(WasmCode*) * num_wasm_functions) + // code table size
+ (sizeof(WasmCode) * num_wasm_functions); // code object size
+
+ size_t jump_table_size = RoundUp<kCodeAlignment>(
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+ size_t far_jump_table_size =
+ RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount,
+ NumWasmFunctionsInFarJumpTable(num_wasm_functions)));
- return wasm_module_estimate + native_module_estimate;
+ return wasm_module_estimate + native_module_estimate + jump_table_size +
+ far_jump_table_size;
}
void WasmCodeManager::SetThreadWritable(bool writable) {
@@ -2123,12 +2165,6 @@ bool WasmCodeManager::MemoryProtectionKeyWritable() const {
MemoryProtectionKeyPermission::kNoRestrictions;
}
-void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
- if (memory_protection_key_ == kNoMemoryProtectionKey) {
- memory_protection_key_ = AllocateMemoryProtectionKey();
- }
-}
-
void WasmCodeManager::InitializeMemoryProtectionKeyPermissionsIfSupported()
const {
if (!HasMemoryProtectionKeySupport()) return;
@@ -2154,9 +2190,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
committed + (max_committed_code_space_ - committed) / 2);
}
- size_t min_code_size;
- size_t code_vmem_size;
- std::tie(min_code_size, code_vmem_size) =
+ size_t code_vmem_size =
ReservationSize(code_size_estimate, module->num_declared_functions, 0);
// The '--wasm-max-initial-code-space-reservation' testing flag can be used to
@@ -2167,12 +2201,6 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
}
- // If we cannot allocate enough code space, fail with an OOM message.
- if (code_vmem_size < min_code_size) {
- V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
- UNREACHABLE();
- }
-
// Try up to two times; getting rid of dead JSArrayBuffer allocations might
// require two GCs because the first GC maybe incremental and may have
// floating garbage.
@@ -2182,7 +2210,13 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
code_space = TryAllocate(code_vmem_size);
if (code_space.IsReserved()) break;
if (retries == kAllocationRetries) {
- V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
+ constexpr auto format = base::StaticCharVector(
+ "NewNativeModule cannot allocate code space of %zu bytes");
+ constexpr int kMaxMessageLength =
+ format.size() - 3 + std::numeric_limits<size_t>::digits10;
+ base::EmbeddedVector<char, kMaxMessageLength + 1> message;
+ SNPrintF(message, format.begin(), code_vmem_size);
+ V8::FatalProcessOutOfMemory(isolate, message.begin());
UNREACHABLE();
}
// Run one GC, then try the allocation again.
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index f8c3db2cf4..137c3074d5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -120,11 +120,11 @@ struct WasmModule;
V(WasmAllocateArray_InitZero) \
V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
- V(WasmAllocateRtt) \
- V(WasmAllocateFreshRtt) \
+ V(WasmArrayInitFromData) \
V(WasmAllocateStructWithRtt) \
V(WasmSubtypeCheck) \
- V(WasmOnStackReplace)
+ V(WasmOnStackReplace) \
+ V(WasmSuspend)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
@@ -292,7 +292,11 @@ class V8_EXPORT_PRIVATE WasmCode final {
uint32_t raw_tagged_parameter_slots_for_serialization() const {
return tagged_parameter_slots_;
}
+
bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
+
+ bool is_turbofan() const { return tier() == ExecutionTier::kTurbofan; }
+
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_) <= pc &&
pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
@@ -491,7 +495,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
+#ifndef V8_GC_MOLE
STATIC_ASSERT(sizeof(WasmCode) <= 88);
+#endif
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
@@ -781,8 +787,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
void AddLiftoffBailout() {
- liftoff_bailout_count_.fetch_add(1,
- std::memory_order::memory_order_relaxed);
+ liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
}
WasmCode* Lookup(Address) const;
@@ -1027,13 +1032,15 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
static size_t EstimateLiftoffCodeSize(int body_size);
// Estimate the needed code space from a completely decoded module.
static size_t EstimateNativeModuleCodeSize(const WasmModule* module,
- bool include_liftoff);
+ bool include_liftoff,
+ DynamicTiering dynamic_tiering);
// Estimate the needed code space from the number of functions and total code
// section length.
static size_t EstimateNativeModuleCodeSize(int num_functions,
int num_imported_functions,
int code_section_length,
- bool include_liftoff);
+ bool include_liftoff,
+ DynamicTiering dynamic_tiering);
// Estimate the size of meta data needed for the NativeModule, excluding
// generated code. This data still be stored on the C++ heap.
static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);
@@ -1058,10 +1065,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
bool MemoryProtectionKeyWritable() const;
- // This allocates a memory protection key (if none was allocated before),
- // independent of the --wasm-memory-protection-keys flag.
- void InitializeMemoryProtectionKeyForTesting();
-
// Initialize the current thread's permissions for the memory protection key,
// if we have support.
void InitializeMemoryProtectionKeyPermissionsIfSupported() const;
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index f5bce0a5b4..61e4bbe435 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -24,7 +24,7 @@ constexpr uint32_t kWasmVersion = 0x01;
// Binary encoding of value and heap types.
enum ValueTypeCode : uint8_t {
- // Current wasm types
+ // Current value types
kVoidCode = 0x40,
kI32Code = 0x7f,
kI64Code = 0x7e,
@@ -32,27 +32,35 @@ enum ValueTypeCode : uint8_t {
kF64Code = 0x7c,
// Simd proposal
kS128Code = 0x7b,
- // reftypes, typed-funcref, and GC proposals
+ // GC proposal packed types
kI8Code = 0x7a,
kI16Code = 0x79,
+ // Current reference types
kFuncRefCode = 0x70,
- kExternRefCode = 0x6f,
- kAnyRefCode = 0x6e,
+ kAnyRefCode = 0x6f, // aka externref
+ // typed-funcref and GC proposal types
+ // TODO(7748): For backwards compatibility only, remove when able.
+ kAnyRefCodeAlias = 0x6e,
kEqRefCode = 0x6d,
kOptRefCode = 0x6c,
kRefCode = 0x6b,
kI31RefCode = 0x6a,
+ // TODO(7748): Only here for backwards compatibility, remove when able.
kRttWithDepthCode = 0x69,
kRttCode = 0x68,
kDataRefCode = 0x67,
+ kArrayRefCode = 0x66
};
-// Binary encoding of other types.
+
+// Binary encoding of type definitions.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
-constexpr uint8_t kWasmFunctionSubtypeCode = 0x5d;
-constexpr uint8_t kWasmStructSubtypeCode = 0x5c;
-constexpr uint8_t kWasmArraySubtypeCode = 0x5b;
+constexpr uint8_t kWasmFunctionNominalCode = 0x5d;
+constexpr uint8_t kWasmStructNominalCode = 0x5c;
+constexpr uint8_t kWasmArrayNominalCode = 0x5b;
+constexpr uint8_t kWasmSubtypeCode = 0x50;
+constexpr uint8_t kWasmRecursiveTypeGroupCode = 0x4f;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -168,6 +176,9 @@ constexpr uint32_t kMinimumSupertypeArraySize = 3;
constexpr int32_t kOSRTargetOffset = 5 * kSystemPointerSize;
#endif
+constexpr Tagged_t kArrayInitFromDataArrayTooLargeErrorCode = 0;
+constexpr Tagged_t kArrayInitFromDataSegmentOutOfBoundsErrorCode = 1;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index a0ecab9596..4750d9fafe 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -657,8 +657,7 @@ class DebugInfoImpl {
return WasmValue(Simd128(ReadUnalignedValue<int16>(stack_address)));
case kRef:
case kOptRef:
- case kRtt:
- case kRttWithDepth: {
+ case kRtt: {
Handle<Object> obj(Object(ReadUnalignedValue<Address>(stack_address)),
isolate);
return WasmValue(obj, value->type);
@@ -898,6 +897,19 @@ int FindNextBreakablePosition(wasm::NativeModule* native_module, int func_index,
return 0;
}
+void SetBreakOnEntryFlag(Script script, bool enabled) {
+ if (script.break_on_entry() == enabled) return;
+
+ script.set_break_on_entry(enabled);
+ // Update the "break_on_entry" flag on all live instances.
+ i::WeakArrayList weak_instance_list = script.wasm_weak_instance_list();
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsCleared()) continue;
+ i::WasmInstanceObject instance =
+ i::WasmInstanceObject::cast(weak_instance_list.Get(i)->GetHeapObject());
+ instance.set_break_on_entry(enabled);
+ }
+}
} // namespace
// static
@@ -922,20 +934,13 @@ bool WasmScript::SetBreakPoint(Handle<Script> script, int* position,
}
// static
-void WasmScript::SetBreakPointOnEntry(Handle<Script> script,
- Handle<BreakPoint> break_point) {
+void WasmScript::SetInstrumentationBreakpoint(Handle<Script> script,
+ Handle<BreakPoint> break_point) {
// Special handling for on-entry breakpoints.
AddBreakpointToInfo(script, kOnEntryBreakpointPosition, break_point);
- script->set_break_on_entry(true);
// Update the "break_on_entry" flag on all live instances.
- i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
- for (int i = 0; i < weak_instance_list.length(); ++i) {
- if (weak_instance_list.Get(i)->IsCleared()) continue;
- i::WasmInstanceObject instance =
- i::WasmInstanceObject::cast(weak_instance_list.Get(i)->GetHeapObject());
- instance.set_break_on_entry(true);
- }
+ SetBreakOnEntryFlag(*script, true);
}
// static
@@ -1035,12 +1040,17 @@ bool WasmScript::ClearBreakPoint(Handle<Script> script, int position,
breakpoint_infos->set_undefined(breakpoint_infos->length() - 1);
}
- // Remove the breakpoint from DebugInfo and recompile.
- wasm::NativeModule* native_module = script->wasm_native_module();
- const wasm::WasmModule* module = native_module->module();
- int func_index = GetContainingWasmFunction(module, position);
- native_module->GetDebugInfo()->RemoveBreakpoint(func_index, position,
- isolate);
+ if (break_point->id() == v8::internal::Debug::kInstrumentationId) {
+ // Special handling for instrumentation breakpoints.
+ SetBreakOnEntryFlag(*script, false);
+ } else {
+ // Remove the breakpoint from DebugInfo and recompile.
+ wasm::NativeModule* native_module = script->wasm_native_module();
+ const wasm::WasmModule* module = native_module->module();
+ int func_index = GetContainingWasmFunction(module, position);
+ native_module->GetDebugInfo()->RemoveBreakpoint(func_index, position,
+ isolate);
+ }
return true;
}
@@ -1077,6 +1087,7 @@ bool WasmScript::ClearBreakPointById(Handle<Script> script, int breakpoint_id) {
void WasmScript::ClearAllBreakpoints(Script script) {
script.set_wasm_breakpoint_infos(
ReadOnlyRoots(script.GetIsolate()).empty_fixed_array());
+ SetBreakOnEntryFlag(script, false);
}
// static
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index eb39d7910a..a0cbbccacb 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -1365,13 +1365,11 @@ void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
PotentiallyFinishCurrentGC();
}
-void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
- wasm::WasmCodeRefScope code_ref_scope;
- std::unordered_set<wasm::WasmCode*> live_wasm_code;
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- StackFrame* const frame = it.frame();
- if (frame->type() != StackFrame::WASM) continue;
- live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
+namespace {
+void ReportLiveCodeFromFrameForGC(
+ StackFrame* frame, std::unordered_set<wasm::WasmCode*>& live_wasm_code) {
+ if (frame->type() != StackFrame::WASM) return;
+ live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
#if V8_TARGET_ARCH_X64
if (WasmFrame::cast(frame)->wasm_code()->for_debugging()) {
Address osr_target = base::Memory<Address>(WasmFrame::cast(frame)->fp() -
@@ -1383,6 +1381,32 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
}
}
#endif
+}
+} // namespace
+
+void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
+ wasm::WasmCodeRefScope code_ref_scope;
+ std::unordered_set<wasm::WasmCode*> live_wasm_code;
+ if (FLAG_experimental_wasm_stack_switching) {
+ wasm::StackMemory* current = isolate->wasm_stacks();
+ DCHECK_NOT_NULL(current);
+ do {
+ if (current->IsActive()) {
+ // The active stack's jump buffer does not match the current state, use
+ // the thread info below instead.
+ current = current->next();
+ continue;
+ }
+ for (StackFrameIterator it(isolate, current); !it.done(); it.Advance()) {
+ StackFrame* const frame = it.frame();
+ ReportLiveCodeFromFrameForGC(frame, live_wasm_code);
+ }
+ current = current->next();
+ } while (current != isolate->wasm_stacks());
+ }
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ StackFrame* const frame = it.frame();
+ ReportLiveCodeFromFrameForGC(frame, live_wasm_code);
}
CheckNoArchivedThreads(isolate);
@@ -1630,6 +1654,7 @@ GlobalWasmState* global_wasm_state = nullptr;
// static
void WasmEngine::InitializeOncePerProcess() {
+ InitializeMemoryProtectionKeySupport();
DCHECK_NULL(global_wasm_state);
global_wasm_state = new GlobalWasmState();
}
@@ -1655,6 +1680,9 @@ WasmCodeManager* GetWasmCodeManager() {
// {max_mem_pages} is declared in wasm-limits.h.
uint32_t max_mem_pages() {
+ static_assert(
+ kV8MaxWasmMemoryPages * kWasmPageSize <= JSArrayBuffer::kMaxByteLength,
+ "Wasm memories must not be bigger than JSArrayBuffers");
STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index d1ae05c570..27e022bdb8 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -460,31 +460,28 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
-inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
+inline byte* EffectiveAddress(WasmInstanceObject instance, uintptr_t index) {
return instance.memory_start() + index;
}
-inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
- return base + index;
-}
-
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
*offset += sizeof(V);
return result;
}
+
+constexpr int32_t kSuccess = 1;
+constexpr int32_t kOutOfBounds = 0;
} // namespace
int32_t memory_init_wrapper(Address data) {
- constexpr int32_t kSuccess = 1;
- constexpr int32_t kOutOfBounds = 0;
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
- uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
@@ -497,22 +494,19 @@ int32_t memory_init_wrapper(Address data) {
byte* seg_start =
reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
- std::memcpy(EffectiveAddress(instance, dst),
- EffectiveAddress(seg_start, seg_size, src), size);
+ std::memcpy(EffectiveAddress(instance, dst), seg_start + src, size);
return kSuccess;
}
int32_t memory_copy_wrapper(Address data) {
- constexpr int32_t kSuccess = 1;
- constexpr int32_t kOutOfBounds = 0;
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
- uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
- uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
- uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
+ uintptr_t src = ReadAndIncrementOffset<uintptr_t>(data, &offset);
+ uintptr_t size = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint64_t mem_size = instance.memory_size();
if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
@@ -525,19 +519,16 @@ int32_t memory_copy_wrapper(Address data) {
}
int32_t memory_fill_wrapper(Address data) {
- constexpr int32_t kSuccess = 1;
- constexpr int32_t kOutOfBounds = 0;
-
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
- uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint8_t value =
static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
- uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uintptr_t size = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint64_t mem_size = instance.memory_size();
if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index d35b0d14f2..e40df6aee2 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -64,7 +64,12 @@
/* Stack Switching proposal. */ \
/* https://github.com/WebAssembly/stack-switching */ \
/* V8 side owner: thibaudm, fgm */ \
- V(stack_switching, "stack switching", false)
+ V(stack_switching, "stack switching", false) \
+ \
+ /* Extended Constant Expressions Proposal. */ \
+ /* https://github.com/WebAssembly/extended-const */ \
+ /* V8 side owner: manoskouk */ \
+ V(extended_const, "extended constant expressions", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -98,13 +103,6 @@
/* Shipped in v9.1 * */ \
V(simd, "SIMD opcodes", true) \
\
- /* Reference Types, a.k.a. reftypes proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- /* Shipped in v9.6 * */ \
- V(reftypes, "reference type opcodes", true) \
- \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index ebc04766fc..bedafd1d25 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -16,8 +16,11 @@ WasmFeatures WasmFeatures::FromFlags() {
WasmFeatures features = WasmFeatures::None();
#define FLAG_REF(feat, ...) \
if (FLAG_experimental_wasm_##feat) features.Add(kFeature_##feat);
- FOREACH_WASM_FEATURE(FLAG_REF)
+ FOREACH_WASM_FEATURE_FLAG(FLAG_REF)
#undef FLAG_REF
+#define NON_FLAG_REF(feat, ...) features.Add(kFeature_##feat);
+ FOREACH_WASM_NON_FLAG_FEATURE(NON_FLAG_REF)
+#undef NON_FLAG_REF
return features;
}
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
index 8a72c593ed..12162db1fe 100644
--- a/deps/v8/src/wasm/wasm-features.h
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -14,8 +14,14 @@
#include "src/base/macros.h"
#include "src/wasm/wasm-feature-flags.h"
+// Features that are always enabled and do not have a flag.
+#define FOREACH_WASM_NON_FLAG_FEATURE(V) \
+ V(reftypes, "reference type opcodes", true)
+
// All features, including features that do not have flags.
-#define FOREACH_WASM_FEATURE FOREACH_WASM_FEATURE_FLAG
+#define FOREACH_WASM_FEATURE(V) \
+ FOREACH_WASM_FEATURE_FLAG(V) \
+ FOREACH_WASM_NON_FLAG_FEATURE(V)
namespace v8 {
namespace internal {
@@ -58,6 +64,8 @@ class WasmFeatures : public base::EnumSet<WasmFeature> {
static inline constexpr WasmFeatures All();
static inline constexpr WasmFeatures None();
static inline constexpr WasmFeatures ForAsmjs();
+ // Retuns optional features that are enabled by flags, plus features that are
+ // not enabled by a flag and are always on.
static WasmFeatures FromFlags();
static V8_EXPORT_PRIVATE WasmFeatures FromIsolate(Isolate*);
static V8_EXPORT_PRIVATE WasmFeatures FromContext(Isolate*,
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index c760634a76..e1a99a3ea5 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -25,20 +25,22 @@ WasmCode*& WasmImportWrapperCache::operator[](
WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
const FunctionSig* sig,
- int expected_arity) const {
+ int expected_arity,
+ Suspend suspend) const {
base::MutexGuard lock(&mutex_);
- auto it = entry_map_.find({kind, sig, expected_arity});
+ auto it = entry_map_.find({kind, sig, expected_arity, suspend});
DCHECK(it != entry_map_.end());
return it->second;
}
WasmCode* WasmImportWrapperCache::MaybeGet(compiler::WasmImportCallKind kind,
const FunctionSig* sig,
- int expected_arity) const {
+ int expected_arity,
+ Suspend suspend) const {
base::MutexGuard lock(&mutex_);
- auto it = entry_map_.find({kind, sig, expected_arity});
+ auto it = entry_map_.find({kind, sig, expected_arity, suspend});
if (it == entry_map_.end()) return nullptr;
return it->second;
}
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 57c92bc6bb..f12b07477f 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -29,21 +29,23 @@ class WasmImportWrapperCache {
public:
struct CacheKey {
CacheKey(const compiler::WasmImportCallKind& _kind, const FunctionSig* _sig,
- int _expected_arity)
+ int _expected_arity, Suspend _suspend)
: kind(_kind),
signature(_sig),
expected_arity(_expected_arity == kDontAdaptArgumentsSentinel
? 0
- : _expected_arity) {}
+ : _expected_arity),
+ suspend(_suspend) {}
bool operator==(const CacheKey& rhs) const {
return kind == rhs.kind && signature == rhs.signature &&
- expected_arity == rhs.expected_arity;
+ expected_arity == rhs.expected_arity && suspend == rhs.suspend;
}
compiler::WasmImportCallKind kind;
const FunctionSig* signature;
int expected_arity;
+ Suspend suspend;
};
class CacheKeyHash {
@@ -73,11 +75,11 @@ class WasmImportWrapperCache {
// Thread-safe. Assumes the key exists in the map.
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
- const FunctionSig* sig,
- int expected_arity) const;
+ const FunctionSig* sig, int expected_arity,
+ Suspend suspend) const;
// Thread-safe. Returns nullptr if the key doesn't exist in the map.
WasmCode* MaybeGet(compiler::WasmImportCallKind kind, const FunctionSig* sig,
- int expected_arity) const;
+ int expected_arity, Suspend suspend) const;
~WasmImportWrapperCache();
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
index db7e003e95..3f22daeec5 100644
--- a/deps/v8/src/wasm/wasm-init-expr.cc
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -46,17 +46,7 @@ ValueType WasmInitExpr::type(const WasmModule* module,
case kArrayInitStatic:
return ValueType::Ref(immediate().index, kNonNullable);
case kRttCanon:
- return ValueType::Rtt(immediate().heap_type, 0);
- case kRttSub:
- case kRttFreshSub: {
- ValueType operand_type = (*operands())[0].type(module, enabled_features);
- if (!operand_type.is_rtt()) return kWasmBottom;
- if (operand_type.has_depth()) {
- return ValueType::Rtt(immediate().heap_type, operand_type.depth() + 1);
- } else {
- return ValueType::Rtt(immediate().heap_type);
- }
- }
+ return ValueType::Rtt(immediate().heap_type);
}
}
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
index 1673062f0f..44fadd4525 100644
--- a/deps/v8/src/wasm/wasm-init-expr.h
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -21,7 +21,9 @@ namespace wasm {
struct WasmModule;
class WasmFeatures;
-// Representation of an initializer expression.
+// Representation of an initializer expression. Unlike {ConstantExpression} in
+// wasm-module.h, this does not use {WireBytesRef}, i.e., it does not depend on
+// a wasm module's bytecode representation.
class WasmInitExpr : public ZoneObject {
public:
enum Operator {
@@ -41,8 +43,6 @@ class WasmInitExpr : public ZoneObject {
kArrayInit,
kArrayInitStatic,
kRttCanon,
- kRttSub,
- kRttFreshSub,
};
union Immediate {
@@ -147,25 +147,6 @@ class WasmInitExpr : public ZoneObject {
return expr;
}
- static WasmInitExpr RttSub(Zone* zone, uint32_t index,
- WasmInitExpr supertype) {
- WasmInitExpr expr(
- kRttSub, zone->New<ZoneVector<WasmInitExpr>>(
- std::initializer_list<WasmInitExpr>{supertype}, zone));
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RttFreshSub(Zone* zone, uint32_t index,
- WasmInitExpr supertype) {
- WasmInitExpr expr(
- kRttFreshSub,
- zone->New<ZoneVector<WasmInitExpr>>(
- std::initializer_list<WasmInitExpr>{supertype}, zone));
- expr.immediate_.index = index;
- return expr;
- }
-
Immediate immediate() const { return immediate_; }
Operator kind() const { return kind_; }
const ZoneVector<WasmInitExpr>* operands() const { return operands_; }
@@ -209,10 +190,6 @@ class WasmInitExpr : public ZoneObject {
if (operands()[i] != other.operands()[i]) return false;
}
return true;
- case kRttSub:
- case kRttFreshSub:
- return immediate().index == other.immediate().index &&
- operands()[0] == other.operands()[0];
}
}
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 936bf14301..7d9a5593ae 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -25,9 +25,11 @@
#include "src/init/v8.h"
#include "src/objects/fixed-array.h"
#include "src/objects/instance-type.h"
+#include "src/objects/js-function.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/tasks/task-utils.h"
@@ -671,9 +673,9 @@ bool TransferPrototype(i::Isolate* isolate, i::Handle<i::JSObject> destination,
i::JSObject::GetPrototype(isolate, source);
i::Handle<i::HeapObject> prototype;
if (maybe_prototype.ToHandle(&prototype)) {
- Maybe<bool> result = i::JSObject::SetPrototype(destination, prototype,
- /*from_javascript=*/false,
- internal::kThrowOnError);
+ Maybe<bool> result = i::JSObject::SetPrototype(
+ isolate, destination, prototype,
+ /*from_javascript=*/false, internal::kThrowOnError);
if (!result.FromJust()) {
DCHECK(isolate->has_pending_exception());
return false;
@@ -1153,9 +1155,14 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
// The JS api uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_reftypes() &&
- string->StringEquals(v8_str(isolate, "externref"))) {
- type = i::wasm::kWasmExternRef;
+ } else if (enabled_features.has_type_reflection() &&
+ string->StringEquals(v8_str(isolate, "funcref"))) {
+ // With the type reflection proposal, "funcref" replaces "anyfunc",
+ // and anyfunc just becomes an alias for "funcref".
+ type = i::wasm::kWasmFuncRef;
+ } else if (string->StringEquals(v8_str(isolate, "externref"))) {
+ // externref is known as anyref as of wasm-gc.
+ type = i::wasm::kWasmAnyRef;
} else {
thrower.TypeError(
"Descriptor property 'element' must be a WebAssembly reference type");
@@ -1326,11 +1333,14 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
*type = i::wasm::kWasmF64;
- } else if (enabled_features.has_reftypes() &&
- string->StringEquals(v8_str(isolate, "externref"))) {
- *type = i::wasm::kWasmExternRef;
- } else if (enabled_features.has_reftypes() &&
- string->StringEquals(v8_str(isolate, "anyfunc"))) {
+ } else if (string->StringEquals(v8_str(isolate, "externref"))) {
+ *type = i::wasm::kWasmAnyRef;
+ } else if (enabled_features.has_type_reflection() &&
+ string->StringEquals(v8_str(isolate, "funcref"))) {
+ // The type reflection proposal renames "anyfunc" to "funcref", and makes
+ // "anyfunc" an alias of "funcref".
+ *type = i::wasm::kWasmFuncRef;
+ } else if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
// The JS api spec uses 'anyfunc' instead of 'funcref'.
*type = i::wasm::kWasmFuncRef;
} else if (enabled_features.has_gc() &&
@@ -1487,7 +1497,6 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
case i::wasm::kRef:
case i::wasm::kOptRef: {
switch (type.heap_representation()) {
- case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kAny: {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
@@ -1519,6 +1528,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
case i::wasm::HeapType::kEq:
case internal::wasm::HeapType::kI31:
case internal::wasm::HeapType::kData:
+ case internal::wasm::HeapType::kArray:
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
@@ -1526,7 +1536,6 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
break;
}
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
// TODO(7748): Implement.
UNIMPLEMENTED();
case i::wasm::kI8:
@@ -1659,7 +1668,7 @@ uint32_t GetEncodedSize(i::Handle<i::WasmTagObject> tag_object) {
}
void EncodeExceptionValues(v8::Isolate* isolate,
- i::PodArray<i::wasm::ValueType> signature,
+ i::Handle<i::PodArray<i::wasm::ValueType>> signature,
const Local<Value>& arg,
ScheduledErrorThrower* thrower,
i::Handle<i::FixedArray> values_out) {
@@ -1670,10 +1679,10 @@ void EncodeExceptionValues(v8::Isolate* isolate,
return;
}
auto values = arg.As<Object>();
- for (int i = 0; i < signature.length(); ++i) {
+ for (int i = 0; i < signature->length(); ++i) {
MaybeLocal<Value> maybe_value = values->Get(context, i);
Local<Value> value = maybe_value.ToLocalChecked();
- i::wasm::ValueType type = signature.get(i);
+ i::wasm::ValueType type = signature->get(i);
switch (type.kind()) {
case i::wasm::kI32: {
int32_t i32 = 0;
@@ -1704,12 +1713,12 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::kRef:
case i::wasm::kOptRef:
switch (type.heap_representation()) {
- case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kFunc:
case i::wasm::HeapType::kAny:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kArray:
values_out->set(index++, *Utils::OpenHandle(*value));
break;
case internal::wasm::HeapType::kBottom:
@@ -1720,7 +1729,6 @@ void EncodeExceptionValues(v8::Isolate* isolate,
}
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kVoid:
@@ -1752,17 +1760,19 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("Argument 0 must be a WebAssembly tag");
return;
}
- auto tag_object = i::Handle<i::WasmTagObject>::cast(arg0);
- auto tag = i::Handle<i::WasmExceptionTag>(
+ i::Handle<i::WasmTagObject> tag_object =
+ i::Handle<i::WasmTagObject>::cast(arg0);
+ i::Handle<i::WasmExceptionTag> tag(
i::WasmExceptionTag::cast(tag_object->tag()), i_isolate);
uint32_t size = GetEncodedSize(tag_object);
i::Handle<i::WasmExceptionPackage> runtime_exception =
i::WasmExceptionPackage::New(i_isolate, tag, size);
// The constructor above should guarantee that the cast below succeeds.
- auto values = i::Handle<i::FixedArray>::cast(
+ i::Handle<i::FixedArray> values = i::Handle<i::FixedArray>::cast(
i::WasmExceptionPackage::GetExceptionValues(i_isolate,
runtime_exception));
- auto signature = tag_object->serialized_signature();
+ i::Handle<i::PodArray<i::wasm::ValueType>> signature(
+ tag_object->serialized_signature(), i_isolate);
EncodeExceptionValues(isolate, signature, args[1], &thrower, values);
if (thrower.error()) return;
args.GetReturnValue().Set(
@@ -1887,8 +1897,8 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::JSFunction> result =
- i::WasmJSFunction::New(i_isolate, sig, callable);
+ i::Handle<i::JSFunction> result = i::WasmJSFunction::New(
+ i_isolate, sig, callable, i::Handle<i::HeapObject>());
args.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -1903,7 +1913,23 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
- sig = i::Handle<i::WasmExportedFunction>::cast(arg0)->sig();
+ auto wasm_exported_function =
+ i::Handle<i::WasmExportedFunction>::cast(arg0);
+ auto sfi = handle(wasm_exported_function->shared(), i_isolate);
+ i::Handle<i::WasmExportedFunctionData> data =
+ handle(sfi->wasm_exported_function_data(), i_isolate);
+ sig = wasm_exported_function->sig();
+ if (!data->suspender().IsUndefined()) {
+ // If this export is wrapped by a Suspender, the function returns a
+ // promise as an externref instead of the original return type.
+ size_t param_count = sig->parameter_count();
+ i::wasm::FunctionSig::Builder builder(&zone, 1, param_count);
+ for (size_t i = 0; i < param_count; ++i) {
+ builder.AddParam(sig->GetParam(0));
+ }
+ builder.AddReturn(i::wasm::kWasmAnyRef);
+ sig = builder.Build();
+ }
} else if (i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
sig = i::Handle<i::WasmJSFunction>::cast(arg0)->GetSignature(&zone);
} else {
@@ -1918,6 +1944,7 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
+constexpr const char* kName_WasmSuspenderObject = "WebAssembly.Suspender";
constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
constexpr const char* kName_WasmTagObject = "WebAssembly.Tag";
constexpr const char* kName_WasmExceptionPackage = "WebAssembly.Exception";
@@ -2000,7 +2027,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(old_size);
}
-// WebAssembly.Table.get(num) -> JSFunction
+// WebAssembly.Table.get(num) -> any
void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -2030,7 +2057,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Utils::ToLocal(result));
}
-// WebAssembly.Table.set(num, JSFunction)
+// WebAssembly.Table.set(num, any)
void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -2049,28 +2076,23 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> element;
- if (args.Length() >= 2) {
- element = Utils::OpenHandle(*args[1]);
- } else {
- element = DefaultReferenceValue(i_isolate, table_object->type());
- }
+ i::Handle<i::Object> element =
+ args.Length() >= 2
+ ? Utils::OpenHandle(*args[1])
+ : DefaultReferenceValue(i_isolate, table_object->type());
+
if (!i::WasmTableObject::IsValidElement(i_isolate, table_object, element)) {
- thrower.TypeError(
- "Argument 1 must be null or a WebAssembly function of type compatible "
- "to 'this'");
+ thrower.TypeError("Argument 1 is invalid for table of type %s",
+ table_object->type().name().c_str());
return;
}
- // TODO(7748): Generalize this if other table types are allowed.
- bool has_function_type = table_object->type() == i::wasm::kWasmFuncRef ||
- table_object->type().has_index();
- if (has_function_type && !element->IsNull()) {
- element = i::WasmInternalFunction::FromExternal(element, i_isolate)
- .ToHandleChecked();
- }
+ i::Handle<i::Object> external_element;
+ bool is_external = i::WasmInternalFunction::FromExternal(element, i_isolate)
+ .ToHandle(&external_element);
- i::WasmTableObject::Set(i_isolate, table_object, index, element);
+ i::WasmTableObject::Set(i_isolate, table_object, index,
+ is_external ? external_element : element);
}
// WebAssembly.Table.type() -> TableType
@@ -2254,12 +2276,12 @@ void WebAssemblyExceptionGetArg(
case i::wasm::kRef:
case i::wasm::kOptRef:
switch (signature.get(i).heap_representation()) {
- case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kFunc:
case i::wasm::HeapType::kAny:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kArray:
decode_index++;
break;
case i::wasm::HeapType::kBottom:
@@ -2270,7 +2292,6 @@ void WebAssemblyExceptionGetArg(
}
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kVoid:
@@ -2313,11 +2334,11 @@ void WebAssemblyExceptionGetArg(
case i::wasm::kRef:
case i::wasm::kOptRef:
switch (signature.get(index).heap_representation()) {
- case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kFunc:
case i::wasm::HeapType::kAny:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kData: {
auto obj = values->get(decode_index);
result = Utils::ToLocal(i::Handle<i::Object>(obj, i_isolate));
@@ -2331,7 +2352,6 @@ void WebAssemblyExceptionGetArg(
}
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kVoid:
@@ -2397,11 +2417,10 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::kRef:
case i::wasm::kOptRef:
switch (receiver->type().heap_representation()) {
- case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kAny:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
- case i::wasm::HeapType::kFunc:
- case i::wasm::HeapType::kAny: {
+ case i::wasm::HeapType::kFunc: {
i::Handle<i::Object> result = receiver->GetRef();
if (result->IsWasmInternalFunction()) {
result = handle(
@@ -2411,10 +2430,11 @@ void WebAssemblyGlobalGetValueCommon(
return_value.Set(Utils::ToLocal(result));
break;
}
- case internal::wasm::HeapType::kBottom:
+ case i::wasm::HeapType::kBottom:
UNREACHABLE();
- case internal::wasm::HeapType::kI31:
- case internal::wasm::HeapType::kData:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kEq:
default:
// TODO(7748): Implement these.
@@ -2422,7 +2442,6 @@ void WebAssemblyGlobalGetValueCommon(
}
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
UNIMPLEMENTED(); // TODO(7748): Implement.
case i::wasm::kI8:
case i::wasm::kI16:
@@ -2493,7 +2512,6 @@ void WebAssemblyGlobalSetValue(
case i::wasm::kRef:
case i::wasm::kOptRef:
switch (receiver->type().heap_representation()) {
- case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kAny:
receiver->SetExternRef(Utils::OpenHandle(*args[0]));
break;
@@ -2505,10 +2523,11 @@ void WebAssemblyGlobalSetValue(
}
break;
}
- case internal::wasm::HeapType::kBottom:
+ case i::wasm::HeapType::kBottom:
UNREACHABLE();
- case internal::wasm::HeapType::kI31:
- case internal::wasm::HeapType::kData:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kEq:
default:
// TODO(7748): Implement these.
@@ -2516,7 +2535,6 @@ void WebAssemblyGlobalSetValue(
}
break;
case i::wasm::kRtt:
- case i::wasm::kRttWithDepth:
// TODO(7748): Implement.
UNIMPLEMENTED();
case i::wasm::kI8:
@@ -2561,18 +2579,63 @@ void WebAssemblySuspenderReturnPromiseOnSuspend(
thrower.TypeError("Argument 0 must be a wasm function");
}
i::WasmExportedFunctionData data = sfi.wasm_exported_function_data();
+ if (data.sig()->return_count() != 1) {
+ thrower.TypeError(
+ "Expected a WebAssembly.Function with exactly one return type");
+ }
int index = data.function_index();
i::Handle<i::WasmInstanceObject> instance(
i::WasmInstanceObject::cast(data.internal().ref()), i_isolate);
- i::Handle<i::Code> wrapper = i_isolate->builtins()->code_handle(
- i::Builtin::kWasmReturnPromiseOnSuspend);
- i::Handle<i::JSObject> result =
+ i::Handle<i::CodeT> wrapper =
+ BUILTIN_CODE(i_isolate, WasmReturnPromiseOnSuspend);
+ // Upcast to JSFunction to re-use the existing ToLocal helper below.
+ i::Handle<i::JSFunction> result =
i::Handle<i::WasmExternalFunction>::cast(i::WasmExportedFunction::New(
i_isolate, instance, index,
static_cast<int>(data.sig()->parameter_count()), wrapper));
+ EXTRACT_THIS(suspender, WasmSuspenderObject);
+ auto function_data = i::WasmExportedFunctionData::cast(
+ result->shared().function_data(kAcquireLoad));
+ function_data.set_suspender(*suspender);
args.GetReturnValue().Set(Utils::ToLocal(result));
}
+// WebAssembly.Suspender.suspendOnReturnedPromise(Function) -> Function
+void WebAssemblySuspenderSuspendOnReturnedPromise(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(
+ i_isolate, "WebAssembly.Suspender.suspendOnReturnedPromise()");
+ if (!args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a WebAssembly.Function");
+ return;
+ }
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
+ const i::wasm::FunctionSig* sig;
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
+
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
+ // TODO(thibaudm): Suspend on wrapped wasm-to-wasm calls too.
+ UNIMPLEMENTED();
+ } else if (!i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
+ thrower.TypeError("Argument 0 must be a WebAssembly.Function");
+ return;
+ }
+ sig = i::Handle<i::WasmJSFunction>::cast(arg0)->GetSignature(&zone);
+ if (sig->return_count() != 1 || sig->GetReturn(0) != i::wasm::kWasmAnyRef) {
+ thrower.TypeError("Expected a WebAssembly.Function with return type %s",
+ i::wasm::kWasmAnyRef.name().c_str());
+ }
+
+ auto callable = handle(
+ i::Handle<i::WasmJSFunction>::cast(arg0)->GetCallable(), i_isolate);
+ EXTRACT_THIS(suspender, WasmSuspenderObject);
+ i::Handle<i::JSFunction> result =
+ i::WasmJSFunction::New(i_isolate, sig, callable, suspender);
+ args.GetReturnValue().Set(Utils::ToLocal(result));
+}
} // namespace
// TODO(titzer): we use the API to create the function template because the
@@ -2869,6 +2932,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
WasmSuspenderObject::kHeaderSize, "WebAssembly.Suspender");
InstallFunc(isolate, suspender_proto, "returnPromiseOnSuspend",
WebAssemblySuspenderReturnPromiseOnSuspend, 1);
+ InstallFunc(isolate, suspender_proto, "suspendOnReturnedPromise",
+ WebAssemblySuspenderSuspendOnReturnedPromise, 1);
}
// Setup Function
@@ -2882,7 +2947,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<Map> function_map = isolate->factory()->CreateSloppyFunctionMap(
FUNCTION_WITHOUT_PROTOTYPE, MaybeHandle<JSFunction>());
CHECK(JSObject::SetPrototype(
- function_proto,
+ isolate, function_proto,
handle(context->function_function().prototype(), isolate), false,
kDontThrow)
.FromJust());
@@ -2940,7 +3005,8 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
// Setup Exception
Handle<String> tag_name = v8_str(isolate, "Tag");
- if (JSObject::HasOwnProperty(webassembly, tag_name).FromMaybe(true)) {
+ if (JSObject::HasOwnProperty(isolate, webassembly, tag_name)
+ .FromMaybe(true)) {
// The {Exception} constructor already exists, there is nothing more to
// do.
return;
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index fcafb69395..c04a431004 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -40,7 +40,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
// Also, do not use this limit to validate declared memory, use
// kSpecMaxMemoryPages for that.
constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
- ? 32768 // = 2 GiB
+ ? 32767 // = 2 GiB
: 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -52,7 +52,7 @@ constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
constexpr size_t kV8MaxWasmTableInitEntries = 10000000;
-constexpr size_t kV8MaxWasmTables = 1;
+constexpr size_t kV8MaxWasmTables = 100000;
constexpr size_t kV8MaxWasmMemories = 1;
// GC proposal. These limits are not standardized yet.
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index ecf59f9ed5..77ed549c90 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -122,7 +122,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {d0, d2};
// ===========================================================================
// Note that kGpParamRegisters and kFpParamRegisters are used in
// Builtins::Generate_WasmCompileLazy (builtins-riscv64.cc)
-constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6};
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
constexpr Register kGpReturnRegisters[] = {a0, a1};
constexpr DoubleRegister kFpParamRegisters[] = {fa0, fa1, fa2, fa3,
fa4, fa5, fa6};
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index ab7262ed74..4d4a487485 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -58,12 +58,19 @@ void WasmFunctionBuilder::EmitI32V(int32_t val) { body_.write_i32v(val); }
void WasmFunctionBuilder::EmitU32V(uint32_t val) { body_.write_u32v(val); }
-void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
+void WasmFunctionBuilder::SetSignature(const FunctionSig* sig) {
DCHECK(!locals_.has_sig());
locals_.set_sig(sig);
signature_index_ = builder_->AddSignature(sig);
}
+void WasmFunctionBuilder::SetSignature(uint32_t sig_index) {
+ DCHECK(!locals_.has_sig());
+ DCHECK_EQ(builder_->types_[sig_index].kind, TypeDefinition::kFunction);
+ signature_index_ = sig_index;
+ locals_.set_sig(builder_->types_[sig_index].function_sig);
+}
+
uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
DCHECK(locals_.has_sig());
return locals_.AddLocals(1, type);
@@ -127,7 +134,6 @@ void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
buffer->write_i32v(type.heap_type().code());
}
if (type.is_rtt()) {
- if (type.has_depth()) buffer->write_u32v(type.depth());
buffer->write_u32v(type.ref_index());
}
}
@@ -274,13 +280,19 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
has_max_memory_size_(false),
has_shared_memory_(false) {}
-WasmFunctionBuilder* WasmModuleBuilder::AddFunction(FunctionSig* sig) {
+WasmFunctionBuilder* WasmModuleBuilder::AddFunction(const FunctionSig* sig) {
functions_.push_back(zone_->New<WasmFunctionBuilder>(this));
// Add the signature if one was provided here.
if (sig) functions_.back()->SetSignature(sig);
return functions_.back();
}
+WasmFunctionBuilder* WasmModuleBuilder::AddFunction(uint32_t sig_index) {
+ functions_.push_back(zone_->New<WasmFunctionBuilder>(this));
+ functions_.back()->SetSignature(sig_index);
+ return functions_.back();
+}
+
void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
uint32_t dest) {
data_segments_.push_back({ZoneVector<byte>(zone()), dest});
@@ -290,21 +302,22 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
}
-uint32_t WasmModuleBuilder::ForceAddSignature(FunctionSig* sig,
+uint32_t WasmModuleBuilder::ForceAddSignature(const FunctionSig* sig,
uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- types_.push_back(Type(sig, supertype));
+ types_.emplace_back(sig, supertype);
return index;
}
-uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig, uint32_t supertype) {
+uint32_t WasmModuleBuilder::AddSignature(const FunctionSig* sig,
+ uint32_t supertype) {
auto sig_entry = signature_map_.find(*sig);
if (sig_entry != signature_map_.end()) return sig_entry->second;
return ForceAddSignature(sig, supertype);
}
-uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
+uint32_t WasmModuleBuilder::AddException(const FunctionSig* type) {
DCHECK_EQ(0, type->return_count());
int type_index = AddSignature(type);
uint32_t except_index = static_cast<uint32_t>(exceptions_.size());
@@ -315,20 +328,16 @@ uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
uint32_t WasmModuleBuilder::AddStructType(StructType* type,
uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type, supertype));
+ types_.emplace_back(type, supertype);
return index;
}
uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type, supertype));
+ types_.emplace_back(type, supertype);
return index;
}
-// static
-const uint32_t WasmModuleBuilder::kNullIndex =
- std::numeric_limits<uint32_t>::max();
-
uint32_t WasmModuleBuilder::IncreaseTableMinSize(uint32_t table_index,
uint32_t count) {
DCHECK_LT(table_index, tables_.size());
@@ -510,7 +519,6 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
case kBottom:
case kRef:
case kRtt:
- case kRttWithDepth:
UNREACHABLE();
}
break;
@@ -567,19 +575,6 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
buffer->write_u8(static_cast<uint8_t>(kExprRttCanon));
buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
break;
- case WasmInitExpr::kRttSub:
- case WasmInitExpr::kRttFreshSub:
- // The operand to rtt.sub must be emitted first.
- WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
- kWasmBottom);
- STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
- STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
- buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(init.kind() == WasmInitExpr::kRttSub
- ? kExprRttSub
- : kExprRttFreshSub));
- buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
- break;
}
}
@@ -600,13 +595,17 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kTypeSectionCode, buffer);
buffer->write_size(types_.size());
- for (const Type& type : types_) {
- bool has_super = type.supertype != kNoSuperType;
+ // TODO(7748): Add support for recursive groups.
+ for (const TypeDefinition& type : types_) {
+ if (type.supertype != kNoSuperType) {
+ buffer->write_u8(kWasmSubtypeCode);
+ buffer->write_u8(1); // The supertype count is always 1.
+ buffer->write_u32v(type.supertype);
+ }
switch (type.kind) {
- case Type::kFunctionSig: {
- FunctionSig* sig = type.sig;
- buffer->write_u8(has_super ? kWasmFunctionSubtypeCode
- : kWasmFunctionTypeCode);
+ case TypeDefinition::kFunction: {
+ const FunctionSig* sig = type.function_sig;
+ buffer->write_u8(kWasmFunctionTypeCode);
buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
WriteValueType(buffer, param);
@@ -615,40 +614,23 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
for (auto ret : sig->returns()) {
WriteValueType(buffer, ret);
}
- if (type.supertype == kGenericSuperType) {
- buffer->write_u8(kFuncRefCode);
- } else if (has_super) {
- buffer->write_i32v(type.supertype);
- }
break;
}
- case Type::kStructType: {
- StructType* struct_type = type.struct_type;
- buffer->write_u8(has_super ? kWasmStructSubtypeCode
- : kWasmStructTypeCode);
+ case TypeDefinition::kStruct: {
+ const StructType* struct_type = type.struct_type;
+ buffer->write_u8(kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
for (uint32_t i = 0; i < struct_type->field_count(); i++) {
WriteValueType(buffer, struct_type->field(i));
buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
- if (type.supertype == kGenericSuperType) {
- buffer->write_u8(kDataRefCode);
- } else if (has_super) {
- buffer->write_i32v(type.supertype);
- }
break;
}
- case Type::kArrayType: {
- ArrayType* array_type = type.array_type;
- buffer->write_u8(has_super ? kWasmArraySubtypeCode
- : kWasmArrayTypeCode);
+ case TypeDefinition::kArray: {
+ const ArrayType* array_type = type.array_type;
+ buffer->write_u8(kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
buffer->write_u8(array_type->mutability() ? 1 : 0);
- if (type.supertype == kGenericSuperType) {
- buffer->write_u8(kDataRefCode);
- } else if (has_super) {
- buffer->write_i32v(type.supertype);
- }
break;
}
}
@@ -785,59 +767,40 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_size(element_segments_.size());
for (const WasmElemSegment& segment : element_segments_) {
bool is_active = segment.status == WasmElemSegment::kStatusActive;
- // If this segment is expressible in the backwards-compatible syntax
- // (before reftypes proposal), we should emit it in that syntax.
- // This is the case if the segment is active and all entries are function
- // references. Note that this is currently the only path that allows
- // kRelativeToImports function indexing mode.
- // TODO(manoskouk): Remove this logic once reftypes has shipped.
- bool backwards_compatible =
- is_active && segment.table_index == 0 &&
- std::all_of(
- segment.entries.begin(), segment.entries.end(), [](auto& entry) {
- return entry.kind ==
- WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry;
- });
- if (backwards_compatible) {
- buffer->write_u8(0);
+ // We pick the most general syntax, i.e., we always explicitly emit the
+ // table index and the type, and use the expressions-as-elements syntax.
+ // The initial byte is one of 0x05, 0x06, and 0x07.
+ uint8_t kind_mask =
+ segment.status == WasmElemSegment::kStatusActive
+ ? 0b10
+ : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
+ : 0b01;
+ uint8_t expressions_as_elements_mask = 0b100;
+ buffer->write_u8(kind_mask | expressions_as_elements_mask);
+ if (is_active) {
+ buffer->write_u32v(segment.table_index);
WriteInitializerExpression(buffer, segment.offset, segment.type);
- buffer->write_size(segment.entries.size());
- for (const WasmElemSegment::Entry entry : segment.entries) {
- buffer->write_u32v(
- segment.indexing_mode == WasmElemSegment::kRelativeToImports
- ? entry.index
- : entry.index +
- static_cast<uint32_t>(function_imports_.size()));
- }
- } else {
- DCHECK_EQ(segment.indexing_mode, WasmElemSegment::kRelativeToImports);
- // If we pick the general syntax, we always explicitly emit the table
- // index and the type, and use the expressions-as-elements syntax. I.e.
- // the initial byte is one of 0x05, 0x06, and 0x07.
- uint8_t kind_mask =
- segment.status == WasmElemSegment::kStatusActive
- ? 0b10
- : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
- : 0b01;
- uint8_t expressions_as_elements_mask = 0b100;
- buffer->write_u8(kind_mask | expressions_as_elements_mask);
- if (is_active) {
- buffer->write_u32v(segment.table_index);
- WriteInitializerExpression(buffer, segment.offset, segment.type);
- }
- WriteValueType(buffer, segment.type);
- buffer->write_size(segment.entries.size());
- for (const WasmElemSegment::Entry entry : segment.entries) {
- uint8_t opcode =
- entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
- ? kExprGlobalGet
- : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
- ? kExprRefFunc
- : kExprRefNull;
- buffer->write_u8(opcode);
- buffer->write_u32v(entry.index);
- buffer->write_u8(kExprEnd);
- }
+ }
+ WriteValueType(buffer, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ uint8_t opcode =
+ entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
+ ? kExprGlobalGet
+ : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
+ ? kExprRefFunc
+ : kExprRefNull;
+ bool needs_function_offset =
+ segment.indexing_mode ==
+ WasmElemSegment::kRelativeToDeclaredFunctions &&
+ entry.kind == WasmElemSegment::Entry::kRefFuncEntry;
+ uint32_t index =
+ entry.index + (needs_function_offset
+ ? static_cast<uint32_t>(function_imports_.size())
+ : 0);
+ buffer->write_u8(opcode);
+ buffer->write_u32v(index);
+ buffer->write_u8(kExprEnd);
}
}
FixupSection(buffer, start);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index ca4ed582df..9ac13891fc 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -166,7 +166,8 @@ class WasmModuleBuilder;
class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
public:
// Building methods.
- void SetSignature(FunctionSig* sig);
+ void SetSignature(const FunctionSig* sig);
+ void SetSignature(uint32_t sig_index);
uint32_t AddLocal(ValueType type);
void EmitByte(byte b);
void EmitI32V(int32_t val);
@@ -208,7 +209,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
uint32_t sig_index() { return signature_index_; }
- inline FunctionSig* signature();
+ inline const FunctionSig* signature();
private:
explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
@@ -311,7 +312,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Building methods.
uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
base::Vector<const char> module = {});
- WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
+ WasmFunctionBuilder* AddFunction(const FunctionSig* sig = nullptr);
+ WasmFunctionBuilder* AddFunction(uint32_t sig_index);
uint32_t AddGlobal(ValueType type, bool mutability = true,
WasmInitExpr init = WasmInitExpr());
uint32_t AddGlobalImport(base::Vector<const char> name, ValueType type,
@@ -332,11 +334,12 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// exceeded.
uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
// Adds the signature to the module if it does not already exist.
- uint32_t AddSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType);
+ uint32_t AddSignature(const FunctionSig* sig,
+ uint32_t supertype = kNoSuperType);
// Does not deduplicate function signatures.
- uint32_t ForceAddSignature(FunctionSig* sig,
+ uint32_t ForceAddSignature(const FunctionSig* sig,
uint32_t supertype = kNoSuperType);
- uint32_t AddException(FunctionSig* type);
+ uint32_t AddException(const FunctionSig* type);
uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType);
uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType);
uint32_t AddTable(ValueType type, uint32_t min_size);
@@ -365,25 +368,27 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ValueType GetTableType(uint32_t index) { return tables_[index].type; }
bool IsSignature(uint32_t index) {
- return types_[index].kind == Type::kFunctionSig;
+ return types_[index].kind == TypeDefinition::kFunction;
}
- FunctionSig* GetSignature(uint32_t index) {
- DCHECK(types_[index].kind == Type::kFunctionSig);
- return types_[index].sig;
+ const FunctionSig* GetSignature(uint32_t index) {
+ DCHECK(types_[index].kind == TypeDefinition::kFunction);
+ return types_[index].function_sig;
}
bool IsStructType(uint32_t index) {
- return types_[index].kind == Type::kStructType;
+ return types_[index].kind == TypeDefinition::kStruct;
}
- StructType* GetStructType(uint32_t index) {
+ const StructType* GetStructType(uint32_t index) {
return types_[index].struct_type;
}
bool IsArrayType(uint32_t index) {
- return types_[index].kind == Type::kArrayType;
+ return types_[index].kind == TypeDefinition::kArray;
+ }
+ const ArrayType* GetArrayType(uint32_t index) {
+ return types_[index].array_type;
}
- ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
WasmFunctionBuilder* GetFunction(uint32_t index) { return functions_[index]; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
@@ -394,30 +399,11 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
int NumFunctions() { return static_cast<int>(functions_.size()); }
- FunctionSig* GetExceptionType(int index) {
- return types_[exceptions_[index]].sig;
+ const FunctionSig* GetExceptionType(int index) {
+ return types_[exceptions_[index]].function_sig;
}
- static const uint32_t kNullIndex;
-
private:
- struct Type {
- enum Kind { kFunctionSig, kStructType, kArrayType };
- explicit Type(FunctionSig* signature, uint32_t supertype)
- : kind(kFunctionSig), supertype(supertype), sig(signature) {}
- explicit Type(StructType* struct_type, uint32_t supertype)
- : kind(kStructType), supertype(supertype), struct_type(struct_type) {}
- explicit Type(ArrayType* array_type, uint32_t supertype)
- : kind(kArrayType), supertype(supertype), array_type(array_type) {}
- Kind kind;
- uint32_t supertype;
- union {
- FunctionSig* sig;
- StructType* struct_type;
- ArrayType* array_type;
- };
- };
-
struct WasmFunctionImport {
base::Vector<const char> module;
base::Vector<const char> name;
@@ -438,8 +424,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
};
struct WasmGlobal {
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmGlobal);
-
ValueType type;
bool mutability;
WasmInitExpr init;
@@ -460,7 +444,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
friend class WasmFunctionBuilder;
Zone* zone_;
- ZoneVector<Type> types_;
+ ZoneVector<TypeDefinition> types_;
ZoneVector<WasmFunctionImport> function_imports_;
ZoneVector<WasmGlobalImport> global_imports_;
ZoneVector<WasmExport> exports_;
@@ -482,8 +466,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#endif
};
-FunctionSig* WasmFunctionBuilder::signature() {
- return builder_->types_[signature_index_].sig;
+const FunctionSig* WasmFunctionBuilder::signature() {
+ return builder_->types_[signature_index_].function_sig;
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 0035c00bf2..ea9a891fcf 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -119,11 +119,8 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
int GetSubtypingDepth(const WasmModule* module, uint32_t type_index) {
uint32_t starting_point = type_index;
int depth = 0;
- while ((type_index = module->supertype(type_index)) != kGenericSuperType) {
+ while ((type_index = module->supertype(type_index)) != kNoSuperType) {
if (type_index == starting_point) return -1; // Cycle detected.
- // This is disallowed and will be rejected by validation, but might occur
- // when this function is called.
- if (type_index == kNoSuperType) break;
depth++;
if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) break;
}
@@ -224,8 +221,6 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
: signature_zone(std::move(signature_zone)) {}
-WasmModule::~WasmModule() { DeleteCachedTypeJudgementsForModule(this); }
-
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
@@ -248,9 +243,7 @@ namespace {
// Converts the given {type} into a string representation that can be used in
// reflective functions. Should be kept in sync with the {GetValueType} helper.
Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
- return isolate->factory()->InternalizeUtf8String(
- type == kWasmFuncRef ? base::CStrVector("anyfunc")
- : base::VectorOf(type.name()));
+ return isolate->factory()->InternalizeUtf8String(base::VectorOf(type.name()));
}
} // namespace
@@ -336,14 +329,8 @@ Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
base::Optional<uint32_t> max_size) {
Factory* factory = isolate->factory();
- Handle<String> element;
- if (type.is_reference_to(HeapType::kFunc)) {
- // TODO(wasm): We should define the "anyfunc" string in one central
- // place and then use that constant everywhere.
- element = factory->InternalizeUtf8String("anyfunc");
- } else {
- element = factory->InternalizeUtf8String(base::VectorOf(type.name()));
- }
+ Handle<String> element =
+ factory->InternalizeUtf8String(base::VectorOf(type.name()));
Handle<JSFunction> object_function = isolate->object_function();
Handle<JSObject> object = factory->NewJSObject(object_function);
@@ -637,7 +624,7 @@ size_t EstimateStoredSize(const WasmModule* module) {
return sizeof(WasmModule) + VectorSize(module->globals) +
(module->signature_zone ? module->signature_zone->allocation_size()
: 0) +
- VectorSize(module->types) + VectorSize(module->type_kinds) +
+ VectorSize(module->types) +
VectorSize(module->canonicalized_type_ids) +
VectorSize(module->functions) + VectorSize(module->data_segments) +
VectorSize(module->tables) + VectorSize(module->import_table) +
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index e67940a2b5..868f50bd8f 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -22,6 +22,7 @@
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-init-expr.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
@@ -72,14 +73,110 @@ struct WasmFunction {
bool declared;
};
+// A representation of a constant expression. The most common expression types
+// are hard-coded, while the rest are represented as a {WireBytesRef}.
+class ConstantExpression {
+ public:
+ enum Kind {
+ kEmpty,
+ kI32Const,
+ kRefNull,
+ kRefFunc,
+ kWireBytesRef,
+ kLastKind = kWireBytesRef
+ };
+
+ union Value {
+ int32_t i32_value;
+ uint32_t index_or_offset;
+ HeapType::Representation repr;
+ };
+
+ ConstantExpression() : bit_field_(KindField::encode(kEmpty)) {}
+
+ static ConstantExpression I32Const(int32_t value) {
+ return ConstantExpression(ValueField::encode(value) |
+ KindField::encode(kI32Const));
+ }
+ static ConstantExpression RefFunc(uint32_t index) {
+ return ConstantExpression(ValueField::encode(index) |
+ KindField::encode(kRefFunc));
+ }
+ static ConstantExpression RefNull(HeapType::Representation repr) {
+ return ConstantExpression(ValueField::encode(repr) |
+ KindField::encode(kRefNull));
+ }
+ static ConstantExpression WireBytes(uint32_t offset, uint32_t length) {
+ return ConstantExpression(OffsetField::encode(offset) |
+ LengthField::encode(length) |
+ KindField::encode(kWireBytesRef));
+ }
+
+ Kind kind() const { return KindField::decode(bit_field_); }
+
+ bool is_set() const { return kind() != kEmpty; }
+
+ uint32_t index() const {
+ DCHECK_EQ(kind(), kRefFunc);
+ return ValueField::decode(bit_field_);
+ }
+
+ HeapType::Representation repr() const {
+ DCHECK_EQ(kind(), kRefNull);
+ return static_cast<HeapType::Representation>(
+ ValueField::decode(bit_field_));
+ }
+
+ int32_t i32_value() const {
+ DCHECK_EQ(kind(), kI32Const);
+ return ValueField::decode(bit_field_);
+ }
+
+ WireBytesRef wire_bytes_ref() const {
+ DCHECK_EQ(kind(), kWireBytesRef);
+ return WireBytesRef(OffsetField::decode(bit_field_),
+ LengthField::decode(bit_field_));
+ }
+
+ private:
+ static constexpr int kValueBits = 32;
+ static constexpr int kLengthBits = 30;
+ static constexpr int kOffsetBits = 30;
+ static constexpr int kKindBits = 3;
+
+ // There are two possible combinations of fields: offset + length + kind if
+ // kind = kWireBytesRef, or value + kind for anything else.
+ using ValueField = base::BitField<uint32_t, 0, kValueBits, uint64_t>;
+ using OffsetField = base::BitField<uint32_t, 0, kOffsetBits, uint64_t>;
+ using LengthField = OffsetField::Next<uint32_t, kLengthBits>;
+ using KindField = LengthField::Next<Kind, kKindBits>;
+
+ // Make sure we reserve enough bits for a {WireBytesRef}'s length and offset.
+ STATIC_ASSERT(kV8MaxWasmModuleSize <= LengthField::kMax + 1);
+ STATIC_ASSERT(kV8MaxWasmModuleSize <= OffsetField::kMax + 1);
+ // Make sure kind fits in kKindBits.
+ STATIC_ASSERT(kLastKind <= KindField::kMax + 1);
+
+ explicit ConstantExpression(uint64_t bit_field) : bit_field_(bit_field) {}
+
+ uint64_t bit_field_;
+};
+
+// We want to keep {ConstantExpression} small to reduce memory usage during
+// compilation/instantiation.
+STATIC_ASSERT(sizeof(ConstantExpression) <= 8);
+
// Static representation of a wasm global variable.
struct WasmGlobal {
- ValueType type; // type of the global.
- bool mutability; // {true} if mutable.
- WireBytesRef init; // the initialization expression of the global.
+ ValueType type; // type of the global.
+ bool mutability; // {true} if mutable.
+ ConstantExpression init; // the initialization expression of the global.
union {
- uint32_t index; // index of imported mutable global.
- uint32_t offset; // offset into global memory (if not imported & mutable).
+ // Index of imported mutable global.
+ uint32_t index;
+ // Offset into global memory (if not imported & mutable). Expressed in bytes
+ // for value-typed globals, and in tagged words for reference-typed globals.
+ uint32_t offset;
};
bool imported; // true if imported.
bool exported; // true if exported.
@@ -100,58 +197,60 @@ struct WasmTag {
// Static representation of a wasm data segment.
struct WasmDataSegment {
// Construct an active segment.
- explicit WasmDataSegment(WireBytesRef dest_addr)
- : dest_addr(std::move(dest_addr)), active(true) {}
+ explicit WasmDataSegment(ConstantExpression dest_addr)
+ : dest_addr(dest_addr), active(true) {}
// Construct a passive segment, which has no dest_addr.
WasmDataSegment() : active(false) {}
- WireBytesRef dest_addr; // destination memory address of the data.
- WireBytesRef source; // start offset in the module bytes.
- bool active = true; // true if copied automatically during instantiation.
+ ConstantExpression dest_addr; // destination memory address of the data.
+ WireBytesRef source; // start offset in the module bytes.
+ bool active = true; // true if copied automatically during instantiation.
};
// Static representation of wasm element segment (table initializer).
struct WasmElemSegment {
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ };
+ enum ElementType { kFunctionIndexElements, kExpressionElements };
+
// Construct an active segment.
- WasmElemSegment(ValueType type, uint32_t table_index, WireBytesRef offset)
- : type(type),
+ WasmElemSegment(ValueType type, uint32_t table_index,
+ ConstantExpression offset, ElementType element_type)
+ : status(kStatusActive),
+ type(type),
table_index(table_index),
offset(std::move(offset)),
- status(kStatusActive) {}
+ element_type(element_type) {}
// Construct a passive or declarative segment, which has no table index or
// offset.
- WasmElemSegment(ValueType type, bool declarative)
- : type(type),
- table_index(0),
- status(declarative ? kStatusDeclarative : kStatusPassive) {}
+ WasmElemSegment(ValueType type, Status status, ElementType element_type)
+ : status(status), type(type), table_index(0), element_type(element_type) {
+ DCHECK_NE(status, kStatusActive);
+ }
- // Construct a passive or declarative segment, which has no table index or
- // offset.
+ // Default constructor. Constucts an invalid segment.
WasmElemSegment()
- : type(kWasmBottom), table_index(0), status(kStatusActive) {}
+ : status(kStatusActive),
+ type(kWasmBottom),
+ table_index(0),
+ element_type(kFunctionIndexElements) {}
WasmElemSegment(const WasmElemSegment&) = delete;
WasmElemSegment(WasmElemSegment&&) V8_NOEXCEPT = default;
WasmElemSegment& operator=(const WasmElemSegment&) = delete;
WasmElemSegment& operator=(WasmElemSegment&&) V8_NOEXCEPT = default;
+ Status status;
ValueType type;
uint32_t table_index;
- WireBytesRef offset;
- struct Entry {
- enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
- uint32_t index;
- Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
- Entry() : kind(kRefNullEntry), index(0) {}
- };
- std::vector<Entry> entries;
- enum Status {
- kStatusActive, // copied automatically during instantiation.
- kStatusPassive, // copied explicitly after instantiation.
- kStatusDeclarative // purely declarative and never copied.
- } status;
+ ConstantExpression offset;
+ ElementType element_type;
+ std::vector<ConstantExpression> entries;
};
// Static representation of a wasm import.
@@ -245,15 +344,28 @@ class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
std::unique_ptr<AsmJsOffsets> decoded_offsets_;
};
+// Used as the supertype for a type at the top of the type hierarchy.
+constexpr uint32_t kNoSuperType = std::numeric_limits<uint32_t>::max();
+
struct TypeDefinition {
- explicit TypeDefinition(const FunctionSig* sig) : function_sig(sig) {}
- explicit TypeDefinition(const StructType* type) : struct_type(type) {}
- explicit TypeDefinition(const ArrayType* type) : array_type(type) {}
+ enum Kind { kFunction, kStruct, kArray };
+
+ TypeDefinition(const FunctionSig* sig, uint32_t supertype)
+ : function_sig(sig), supertype(supertype), kind(kFunction) {}
+ TypeDefinition(const StructType* type, uint32_t supertype)
+ : struct_type(type), supertype(supertype), kind(kStruct) {}
+ TypeDefinition(const ArrayType* type, uint32_t supertype)
+ : array_type(type), supertype(supertype), kind(kArray) {}
+ TypeDefinition()
+ : function_sig(nullptr), supertype(kNoSuperType), kind(kFunction) {}
+
union {
const FunctionSig* function_sig;
const StructType* struct_type;
const ArrayType* array_type;
};
+ uint32_t supertype;
+ Kind kind;
};
struct V8_EXPORT_PRIVATE WasmDebugSymbols {
@@ -279,11 +391,6 @@ struct TypeFeedbackStorage {
struct WasmTable;
-// End of a chain of explicit supertypes.
-constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
-// Used for types that have no explicit supertype.
-constexpr uint32_t kNoSuperType = 0xFFFFFFFF;
-
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<Zone> signature_zone;
@@ -296,7 +403,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool mem_export = false; // true if the memory is exported
int start_function_index = -1; // start function, >= 0 if any
- std::vector<WasmGlobal> globals;
// Size of the buffer required for all globals that are not imported and
// mutable.
uint32_t untagged_globals_buffer_size = 0;
@@ -311,26 +417,26 @@ struct V8_EXPORT_PRIVATE WasmModule {
// ID and length).
WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<TypeDefinition> types; // by type index
- std::vector<uint8_t> type_kinds; // by type index
- std::vector<uint32_t> supertypes; // by type index
- // Map from each type index to the index of its corresponding canonical index.
- // Canonical indices do not correspond to types.
- // Note: right now, only functions are canonicalized, and arrays and structs
- // map to 0.
- std::vector<uint32_t> canonicalized_type_ids;
+
+ void add_type(TypeDefinition type) {
+ types.push_back(type);
+ uint32_t canonical_id = type.kind == TypeDefinition::kFunction
+ ? signature_map.FindOrInsert(*type.function_sig)
+ : 0;
+ canonicalized_type_ids.push_back(canonical_id);
+ }
bool has_type(uint32_t index) const { return index < types.size(); }
void add_signature(const FunctionSig* sig, uint32_t supertype) {
- types.push_back(TypeDefinition(sig));
- type_kinds.push_back(kWasmFunctionTypeCode);
- supertypes.push_back(supertype);
- uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
+ types.push_back(TypeDefinition(sig, supertype));
+ DCHECK_NOT_NULL(sig);
+ uint32_t canonical_id = signature_map.FindOrInsert(*sig);
canonicalized_type_ids.push_back(canonical_id);
}
bool has_signature(uint32_t index) const {
- return index < types.size() && type_kinds[index] == kWasmFunctionTypeCode;
+ return index < types.size() &&
+ types[index].kind == TypeDefinition::kFunction;
}
const FunctionSig* signature(uint32_t index) const {
DCHECK(has_signature(index));
@@ -338,14 +444,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
void add_struct_type(const StructType* type, uint32_t supertype) {
- types.push_back(TypeDefinition(type));
- type_kinds.push_back(kWasmStructTypeCode);
- supertypes.push_back(supertype);
+ types.push_back(TypeDefinition(type, supertype));
// No canonicalization for structs.
canonicalized_type_ids.push_back(0);
}
bool has_struct(uint32_t index) const {
- return index < types.size() && type_kinds[index] == kWasmStructTypeCode;
+ return index < types.size() && types[index].kind == TypeDefinition::kStruct;
}
const StructType* struct_type(uint32_t index) const {
DCHECK(has_struct(index));
@@ -353,14 +457,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
void add_array_type(const ArrayType* type, uint32_t supertype) {
- types.push_back(TypeDefinition(type));
- type_kinds.push_back(kWasmArrayTypeCode);
- supertypes.push_back(supertype);
+ types.push_back(TypeDefinition(type, supertype));
// No canonicalization for arrays.
canonicalized_type_ids.push_back(0);
}
bool has_array(uint32_t index) const {
- return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
+ return index < types.size() && types[index].kind == TypeDefinition::kArray;
}
const ArrayType* array_type(uint32_t index) const {
DCHECK(has_array(index));
@@ -368,14 +470,23 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
uint32_t supertype(uint32_t index) const {
- DCHECK(index < supertypes.size());
- return supertypes[index];
+ DCHECK(index < types.size());
+ return types[index].supertype;
}
bool has_supertype(uint32_t index) const {
return supertype(index) != kNoSuperType;
}
+ std::vector<TypeDefinition> types; // by type index
+ // Map from each type index to the index of its corresponding canonical index.
+ // Canonical indices do not correspond to types.
+ // Note: right now, only functions are canonicalized, and arrays and structs
+ // map to 0.
+ std::vector<uint32_t> canonicalized_type_ids;
+ // Canonicalizing map for signature indexes.
+ SignatureMap signature_map;
std::vector<WasmFunction> functions;
+ std::vector<WasmGlobal> globals;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
std::vector<WasmImport> import_table;
@@ -384,9 +495,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmElemSegment> elem_segments;
std::vector<WasmCompilationHint> compilation_hints;
BranchHintInfo branch_hints;
- SignatureMap signature_map; // canonicalizing map for signature indexes.
- // Entries in this storage are short-lived: when tier-up of a function is
- // scheduled, an entry is placed; the Turbofan graph builder consumes it.
mutable TypeFeedbackStorage type_feedback;
ModuleOrigin origin = kWasmOrigin; // origin of the module
@@ -399,7 +507,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
WasmModule(const WasmModule&) = delete;
- ~WasmModule();
WasmModule& operator=(const WasmModule&) = delete;
};
@@ -413,7 +520,7 @@ struct WasmTable {
static bool IsValidTableType(ValueType type, const WasmModule* module) {
if (!type.is_object_reference()) return false;
HeapType heap_type = type.heap_type();
- return heap_type == HeapType::kFunc || heap_type == HeapType::kExtern ||
+ return heap_type == HeapType::kFunc || heap_type == HeapType::kAny ||
(module != nullptr && heap_type.is_index() &&
module->has_signature(heap_type.ref_index()));
}
@@ -424,7 +531,7 @@ struct WasmTable {
bool has_maximum_size = false; // true if there is a maximum size.
bool imported = false; // true if imported.
bool exported = false; // true if exported.
- WireBytesRef initial_value;
+ ConstantExpression initial_value;
};
inline bool is_asmjs_module(const WasmModule* module) {
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 6f33696e7d..caaeafbffb 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -12,6 +12,7 @@
#include <type_traits>
#include "src/base/memory.h"
+#include "src/common/ptr-compr.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/foreign.h"
@@ -54,6 +55,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmContinuationObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmSuspenderObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmOnFulfilledData)
CAST_ACCESSOR(WasmInstanceObject)
@@ -65,30 +67,24 @@ CAST_ACCESSOR(WasmInstanceObject)
ACCESSORS_CHECKED2(holder, name, type, offset, \
!value.IsUndefined(GetReadOnlyRoots(cage_base)), true)
-#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
- type holder::name() const { \
- if (COMPRESS_POINTERS_BOOL && alignof(type) > kTaggedSize) { \
- /* TODO(ishell, v8:8875): When pointer compression is enabled 8-byte */ \
- /* size fields (external pointers, doubles and BigInt data) are only */ \
- /* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
- /* way of accessing them in order to avoid undefined behavior in C++ */ \
- /* code. */ \
- return base::ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
- } else { \
- return *reinterpret_cast<type const*>(FIELD_ADDR(*this, offset)); \
- } \
- } \
- void holder::set_##name(type value) { \
- if (COMPRESS_POINTERS_BOOL && alignof(type) > kTaggedSize) { \
- /* TODO(ishell, v8:8875): When pointer compression is enabled 8-byte */ \
- /* size fields (external pointers, doubles and BigInt data) are only */ \
- /* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
- /* way of accessing them in order to avoid undefined behavior in C++ */ \
- /* code. */ \
- base::WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
- } else { \
- *reinterpret_cast<type*>(FIELD_ADDR(*this, offset)) = value; \
- } \
+#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
+ type holder::name() const { \
+ return ReadMaybeUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
+ } \
+ void holder::set_##name(type value) { \
+ WriteMaybeUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
+ }
+
+#define SANDBOXED_POINTER_ACCESSORS(holder, name, type, offset) \
+ type holder::name() const { \
+ PtrComprCageBase sandbox_base = GetPtrComprCageBase(*this); \
+ Address value = ReadSandboxedPointerField(offset, sandbox_base); \
+ return reinterpret_cast<type>(value); \
+ } \
+ void holder::set_##name(type value) { \
+ PtrComprCageBase sandbox_base = GetPtrComprCageBase(*this); \
+ Address addr = reinterpret_cast<Address>(value); \
+ WriteSandboxedPointerField(offset, sandbox_base, addr); \
}
// WasmModuleObject
@@ -127,7 +123,7 @@ void WasmGlobalObject::set_type(wasm::ValueType value) {
int WasmGlobalObject::type_size() const { return type().element_size_bytes(); }
Address WasmGlobalObject::address() const {
- DCHECK_NE(type(), wasm::kWasmExternRef);
+ DCHECK_NE(type(), wasm::kWasmAnyRef);
DCHECK_LE(offset() + type_size(), untagged_buffer().byte_length());
return Address(untagged_buffer().backing_store()) + offset();
}
@@ -171,8 +167,7 @@ void WasmGlobalObject::SetF64(double value) {
}
void WasmGlobalObject::SetExternRef(Handle<Object> value) {
- DCHECK(type().is_reference_to(wasm::HeapType::kExtern) ||
- type().is_reference_to(wasm::HeapType::kAny));
+ DCHECK(type().is_reference_to(wasm::HeapType::kAny));
tagged_buffer().set(offset(), *value);
}
@@ -187,7 +182,8 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
}
// WasmInstanceObject
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
+SANDBOXED_POINTER_ACCESSORS(WasmInstanceObject, memory_start, byte*,
+ kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
kIsolateRootOffset)
@@ -287,13 +283,6 @@ CAST_ACCESSOR(WasmExportedFunction)
// WasmFunctionData
ACCESSORS(WasmFunctionData, internal, WasmInternalFunction, kInternalOffset)
-DEF_GETTER(WasmFunctionData, wrapper_code, Code) {
- return FromCodeT(TorqueGeneratedClass::wrapper_code(cage_base));
-}
-void WasmFunctionData::set_wrapper_code(Code code, WriteBarrierMode mode) {
- TorqueGeneratedClass::set_wrapper_code(ToCodeT(code), mode);
-}
-
wasm::FunctionSig* WasmExportedFunctionData::sig() const {
return reinterpret_cast<wasm::FunctionSig*>(signature().foreign_address());
}
@@ -307,16 +296,6 @@ CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData)
-// WasmInternalFunction
-ACCESSORS(WasmInternalFunction, raw_code, CodeT, kCodeOffset)
-
-DEF_GETTER(WasmInternalFunction, code, Code) {
- return FromCodeT(raw_code(cage_base));
-}
-void WasmInternalFunction::set_code(Code code, WriteBarrierMode mode) {
- set_raw_code(ToCodeT(code), mode);
-}
-
// WasmCapiFunction
WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmCapiFunction(*this));
@@ -389,7 +368,6 @@ Handle<Object> WasmObject::ReadValueAt(Isolate* isolate, Handle<HeapObject> obj,
}
case wasm::kRtt:
- case wasm::kRttWithDepth:
// Rtt values are not supposed to be made available to JavaScript side.
UNREACHABLE();
@@ -425,7 +403,6 @@ MaybeHandle<Object> WasmObject::ToWasmValue(Isolate* isolate,
UNREACHABLE();
case wasm::kRtt:
- case wasm::kRttWithDepth:
// Rtt values are not supposed to be made available to JavaScript side.
UNREACHABLE();
@@ -503,7 +480,6 @@ void WasmObject::WriteValueAt(Isolate* isolate, Handle<HeapObject> obj,
UNREACHABLE();
case wasm::kRtt:
- case wasm::kRttWithDepth:
// Rtt values are not supposed to be made available to JavaScript side.
UNREACHABLE();
@@ -647,7 +623,7 @@ void WasmArray::EncodeElementSizeInMap(int element_size, Map map) {
int WasmArray::DecodeElementSizeFromMap(Map map) { return map.WasmByte1(); }
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
// Due to the type-specific pointer tags for external pointers, we need to
// allocate an entry in the table here even though it will just store nullptr.
AllocateExternalPointerEntries(isolate);
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index a69dc4f173..deeab21028 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -50,47 +50,31 @@ namespace {
// an instance finalizer is not guaranteed to run upon isolate shutdown,
// we must use a Managed<WasmInstanceNativeAllocations> to guarantee
// it is freed.
-// Native allocations are the signature ids and targets for indirect call
-// targets, as well as the call targets for imported functions.
class WasmInstanceNativeAllocations {
public:
-// Helper macro to set an internal field and the corresponding field
-// on an instance.
-#define SET(instance, field, value) \
- instance->set_##field((this->field##_ = value).get());
-
- // Allocates initial native storage for a given instance.
WasmInstanceNativeAllocations(Handle<WasmInstanceObject> instance,
size_t num_imported_functions,
size_t num_imported_mutable_globals,
size_t num_data_segments,
- size_t num_elem_segments) {
- SET(instance, imported_function_targets,
- std::make_unique<Address[]>(num_imported_functions));
- SET(instance, imported_mutable_globals,
- std::make_unique<Address[]>(num_imported_mutable_globals));
- SET(instance, data_segment_starts,
- std::make_unique<Address[]>(num_data_segments));
- SET(instance, data_segment_sizes,
- std::make_unique<uint32_t[]>(num_data_segments));
- SET(instance, dropped_elem_segments,
- std::make_unique<uint8_t[]>(num_elem_segments));
+ size_t num_elem_segments)
+ : imported_function_targets_(new Address[num_imported_functions]),
+ imported_mutable_globals_(new Address[num_imported_mutable_globals]),
+ data_segment_starts_(new Address[num_data_segments]),
+ data_segment_sizes_(new uint32_t[num_data_segments]),
+ dropped_elem_segments_(new uint8_t[num_elem_segments]) {
+ instance->set_imported_function_targets(imported_function_targets_.get());
+ instance->set_imported_mutable_globals(imported_mutable_globals_.get());
+ instance->set_data_segment_starts(data_segment_starts_.get());
+ instance->set_data_segment_sizes(data_segment_sizes_.get());
+ instance->set_dropped_elem_segments(dropped_elem_segments_.get());
}
private:
- template <typename T>
- std::unique_ptr<T[]> grow(T* old_arr, size_t old_size, size_t new_size) {
- std::unique_ptr<T[]> new_arr = std::make_unique<T[]>(new_size);
- std::copy_n(old_arr, old_size, new_arr.get());
- return new_arr;
- }
-
- std::unique_ptr<Address[]> imported_function_targets_;
- std::unique_ptr<Address[]> imported_mutable_globals_;
- std::unique_ptr<Address[]> data_segment_starts_;
- std::unique_ptr<uint32_t[]> data_segment_sizes_;
- std::unique_ptr<uint8_t[]> dropped_elem_segments_;
-#undef SET
+ const std::unique_ptr<Address[]> imported_function_targets_;
+ const std::unique_ptr<Address[]> imported_mutable_globals_;
+ const std::unique_ptr<Address[]> data_segment_starts_;
+ const std::unique_ptr<uint32_t[]> data_segment_sizes_;
+ const std::unique_ptr<uint8_t[]> dropped_elem_segments_;
};
size_t EstimateNativeAllocationsSize(const WasmModule* module) {
@@ -106,7 +90,6 @@ size_t EstimateNativeAllocationsSize(const WasmModule* module) {
enum DispatchTableElements : int {
kDispatchTableInstanceOffset,
kDispatchTableIndexOffset,
- kDispatchTableFunctionTableOffset,
// Marker:
kDispatchTableNumElements
};
@@ -373,10 +356,8 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate,
isolate);
int func_index = exported_function->function_index();
auto* wasm_function = &target_instance->module()->functions[func_index];
- DCHECK_NOT_NULL(wasm_function);
- DCHECK_NOT_NULL(wasm_function->sig);
- UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
- target_instance, func_index);
+ UpdateDispatchTables(isolate, *table, entry_index, wasm_function,
+ *target_instance);
} else if (WasmJSFunction::IsWasmJSFunction(*external)) {
UpdateDispatchTables(isolate, table, entry_index,
Handle<WasmJSFunction>::cast(external));
@@ -399,7 +380,6 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
int entry_index = static_cast<int>(index);
switch (table->type().heap_representation()) {
- case wasm::HeapType::kExtern:
case wasm::HeapType::kAny:
entries->set(entry_index, *entry);
return;
@@ -408,6 +388,7 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
return;
case wasm::HeapType::kEq:
case wasm::HeapType::kData:
+ case wasm::HeapType::kArray:
case wasm::HeapType::kI31:
// TODO(7748): Implement once we have struct/arrays/i31ref tables.
UNREACHABLE();
@@ -441,7 +422,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
}
switch (table->type().heap_representation()) {
- case wasm::HeapType::kExtern:
+ case wasm::HeapType::kAny:
return entry;
case wasm::HeapType::kFunc:
if (entry->IsWasmInternalFunction()) return entry;
@@ -449,7 +430,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
case wasm::HeapType::kData:
- case wasm::HeapType::kAny:
+ case wasm::HeapType::kArray:
// TODO(7748): Implement once we have a story for struct/arrays/i31ref in
// JS.
UNIMPLEMENTED();
@@ -493,29 +474,53 @@ void WasmTableObject::Fill(Isolate* isolate, Handle<WasmTableObject> table,
}
}
-void WasmTableObject::UpdateDispatchTables(
- Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
- const wasm::FunctionSig* sig, Handle<WasmInstanceObject> target_instance,
- int target_func_index) {
+void WasmTableObject::UpdateDispatchTables(Isolate* isolate,
+ WasmTableObject table,
+ int entry_index,
+ const wasm::WasmFunction* func,
+ WasmInstanceObject target_instance) {
+ DisallowGarbageCollection no_gc;
+
// We simply need to update the IFTs for each instance that imports
// this table.
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+ FixedArray dispatch_tables = table.dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables.length() % kDispatchTableNumElements);
- for (int i = 0; i < dispatch_tables->length();
+ Object call_ref =
+ func->imported
+ // The function in the target instance was imported. Use its imports
+ // table, which contains a tuple needed by the import wrapper.
+ ? target_instance.imported_function_refs().get(func->func_index)
+ // For wasm functions, just pass the target instance.
+ : target_instance;
+ Address call_target = target_instance.GetCallTarget(func->func_index);
+
+ int original_sig_id = func->sig_index;
+
+ for (int i = 0, len = dispatch_tables.length(); i < len;
i += kDispatchTableNumElements) {
int table_index =
- Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset)),
- isolate);
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_id = instance->module()->signature_map.Find(*sig);
- FunctionTargetAndRef entry(target_instance, target_func_index);
- instance->GetIndirectFunctionTable(isolate, table_index)
- ->Set(entry_index, sig_id, entry.call_target(), *entry.ref());
+ Smi::cast(dispatch_tables.get(i + kDispatchTableIndexOffset)).value();
+ WasmInstanceObject instance = WasmInstanceObject::cast(
+ dispatch_tables.get(i + kDispatchTableInstanceOffset));
+ const WasmModule* module = instance.module();
+ // Try to avoid the signature map lookup by checking if the signature in
+ // {module} at {original_sig_id} matches {func->sig}.
+ int sig_id;
+ // TODO(7748): wasm-gc signatures cannot be canonicalized this way because
+ // references could wrongly be detected as identical.
+ if (module->has_signature(original_sig_id) &&
+ *module->signature(original_sig_id) == *func->sig) {
+ sig_id = module->canonicalized_type_ids[original_sig_id];
+ DCHECK_EQ(sig_id, module->signature_map.Find(*func->sig));
+ } else {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ sig_id = module->signature_map.Find(*func->sig);
+ }
+ WasmIndirectFunctionTable ift = WasmIndirectFunctionTable::cast(
+ instance.indirect_function_tables().get(table_index));
+ ift.Set(entry_index, sig_id, call_target, call_ref);
}
}
@@ -579,12 +584,14 @@ void WasmTableObject::UpdateDispatchTables(
instance->module_object().native_module();
wasm::WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
auto kind = compiler::WasmImportCallKind::kWasmToCapi;
- wasm::WasmCode* wasm_code = cache->MaybeGet(kind, &sig, param_count);
+ wasm::WasmCode* wasm_code =
+ cache->MaybeGet(kind, &sig, param_count, wasm::kNoSuspend);
if (wasm_code == nullptr) {
wasm::WasmCodeRefScope code_ref_scope;
wasm::WasmImportWrapperCache::ModificationScope cache_scope(cache);
wasm_code = compiler::CompileWasmCapiCallWrapper(native_module, &sig);
- wasm::WasmImportWrapperCache::CacheKey key(kind, &sig, param_count);
+ wasm::WasmImportWrapperCache::CacheKey key(kind, &sig, param_count,
+ wasm::kNoSuspend);
cache_scope[key] = wasm_code;
wasm_code->IncRef();
isolate->counters()->wasm_generated_code_size()->Increment(
@@ -629,9 +636,12 @@ void WasmTableObject::SetFunctionTablePlaceholder(
Handle<WasmInstanceObject> instance, int func_index) {
// Put (instance, func_index) as a Tuple2 into the entry_index.
// The {WasmExportedFunction} will be created lazily.
+ // Allocate directly in old space as the tuples are typically long-lived, and
+ // we create many of them, which would result in lots of GC when initializing
+ // large tables.
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
instance, Handle<Smi>(Smi::FromInt(func_index), isolate),
- AllocationType::kYoung);
+ AllocationType::kOld);
table->entries().set(entry_index, *tuple);
}
@@ -797,13 +807,14 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
}
} // namespace
-Handle<WasmMemoryObject> WasmMemoryObject::New(
+MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer, int maximum) {
Handle<JSArrayBuffer> buffer;
if (!maybe_buffer.ToHandle(&buffer)) {
// If no buffer was provided, create a zero-length one.
auto backing_store =
BackingStore::AllocateWasmMemory(isolate, 0, 0, SharedFlag::kNotShared);
+ if (!backing_store) return {};
buffer = isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
}
@@ -984,8 +995,14 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
size_t new_pages = old_pages + pages;
DCHECK_LT(old_pages, new_pages);
// Try allocating a new backing store and copying.
+ // To avoid overall quadratic complexity of many small grow operations, we
+ // grow by at least 0.5 MB + 12.5% of the existing memory size.
+ // These numbers are kept small because we must be careful about address
+ // space consumption on 32-bit platforms.
+ size_t min_growth = old_pages + 8 + (old_pages >> 3);
+ size_t new_capacity = std::max(new_pages, min_growth);
std::unique_ptr<BackingStore> new_backing_store =
- backing_store->CopyWasmMemory(isolate, new_pages);
+ backing_store->CopyWasmMemory(isolate, new_pages, new_capacity);
if (!new_backing_store) {
// Crash on out-of-memory if the correctness fuzzer is running.
if (FLAG_correctness_fuzzer_suppressions) {
@@ -1076,7 +1093,7 @@ FunctionTargetAndRef::FunctionTargetAndRef(
void ImportedFunctionEntry::SetWasmToJs(
Isolate* isolate, Handle<JSReceiver> callable,
- const wasm::WasmCode* wasm_to_js_wrapper) {
+ const wasm::WasmCode* wasm_to_js_wrapper, Handle<HeapObject> suspender) {
TRACE_IFT("Import callable 0x%" PRIxPTR "[%d] = {callable=0x%" PRIxPTR
", target=%p}\n",
instance_->ptr(), index_, callable->ptr(),
@@ -1084,7 +1101,7 @@ void ImportedFunctionEntry::SetWasmToJs(
DCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper ||
wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToCapiWrapper);
Handle<WasmApiFunctionRef> ref =
- isolate->factory()->NewWasmApiFunctionRef(callable);
+ isolate->factory()->NewWasmApiFunctionRef(callable, suspender);
instance_->imported_function_refs().set(index_, *ref);
instance_->imported_function_targets()[index_] =
wasm_to_js_wrapper->instruction_start();
@@ -1181,7 +1198,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->factory()->NewFixedArray(num_imported_functions);
instance->set_imported_function_refs(*imported_function_refs);
- instance->SetRawMemory(nullptr, 0);
+ instance->SetRawMemory(reinterpret_cast<byte*>(EmptyBackingStoreBuffer()), 0);
instance->set_isolate_root(isolate->isolate_root());
instance->set_stack_limit_address(
isolate->stack_guard()->address_of_jslimit());
@@ -1385,16 +1402,18 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction(
Handle<Object> entry =
FixedArray::get(module_object->export_wrappers(), wrapper_index, isolate);
- Handle<Code> wrapper;
- if (entry->IsCode()) {
- wrapper = Handle<Code>::cast(entry);
+ Handle<CodeT> wrapper;
+ if (entry->IsCodeT()) {
+ wrapper = Handle<CodeT>::cast(entry);
} else {
// The wrapper may not exist yet if no function in the exports section has
// this signature. We compile it and store the wrapper in the module for
// later use.
- wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
- isolate, function.sig, instance->module(), function.imported);
- module_object->export_wrappers().set(wrapper_index, ToCodeT(*wrapper));
+ wrapper = ToCodeT(
+ wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate, function.sig, instance->module(), function.imported),
+ isolate);
+ module_object->export_wrappers().set(wrapper_index, *wrapper);
}
auto external = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
isolate, instance, function_index,
@@ -1446,8 +1465,8 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
const wasm::WasmFeatures enabled = native_module->enabled_features();
auto resolved = compiler::ResolveWasmImportCall(
callable, sig, instance->module(), enabled);
- compiler::WasmImportCallKind kind = resolved.first;
- callable = resolved.second; // Update to ultimate target.
+ compiler::WasmImportCallKind kind = resolved.kind;
+ callable = resolved.callable; // Update to ultimate target.
DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
wasm::CompilationEnv env = native_module->CreateCompilationEnv();
// {expected_arity} should only be used if kind != kJSFunctionArityMismatch.
@@ -1457,9 +1476,13 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
->shared()
.internal_formal_parameter_count_without_receiver();
}
+ wasm::Suspend suspend =
+ resolved.suspender.is_null() || resolved.suspender->IsUndefined()
+ ? wasm::kNoSuspend
+ : wasm::kSuspend;
// TODO(manoskouk): Reuse js_function->wasm_to_js_wrapper_code().
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- &env, kind, sig, false, expected_arity);
+ &env, kind, sig, false, expected_arity, suspend);
wasm::CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
@@ -1477,8 +1500,9 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
}
// Update the dispatch table.
+ Handle<HeapObject> suspender = handle(js_function->GetSuspender(), isolate);
Handle<WasmApiFunctionRef> ref =
- isolate->factory()->NewWasmApiFunctionRef(callable);
+ isolate->factory()->NewWasmApiFunctionRef(callable, suspender);
WasmIndirectFunctionTable::cast(
instance->indirect_function_tables().get(table_index))
.Set(entry_index, sig_id, call_target, *ref);
@@ -1559,7 +1583,6 @@ wasm::WasmValue WasmStruct::GetFieldValue(uint32_t index) {
return wasm::WasmValue(ref, field_type);
}
case wasm::kRtt:
- case wasm::kRttWithDepth:
// TODO(7748): Expose RTTs to DevTools.
UNIMPLEMENTED();
case wasm::kVoid:
@@ -1589,7 +1612,6 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
return wasm::WasmValue(ref, element_type);
}
case wasm::kRtt:
- case wasm::kRttWithDepth:
// TODO(7748): Expose RTTs to DevTools.
UNIMPLEMENTED();
case wasm::kVoid:
@@ -1744,19 +1766,21 @@ void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
// static
Handle<WasmContinuationObject> WasmContinuationObject::New(
Isolate* isolate, std::unique_ptr<wasm::StackMemory> stack,
- HeapObject parent) {
- Handle<WasmContinuationObject> result = Handle<WasmContinuationObject>::cast(
- isolate->factory()->NewStruct(WASM_CONTINUATION_OBJECT_TYPE));
+ Handle<HeapObject> parent) {
stack->jmpbuf()->stack_limit = stack->jslimit();
stack->jmpbuf()->sp = stack->base();
stack->jmpbuf()->fp = kNullAddress;
- result->set_jmpbuf(*isolate->factory()->NewForeign(
- reinterpret_cast<Address>(stack->jmpbuf())));
+ wasm::JumpBuffer* jmpbuf = stack->jmpbuf();
size_t external_size = stack->owned_size();
Handle<Foreign> managed_stack = Managed<wasm::StackMemory>::FromUniquePtr(
isolate, external_size, std::move(stack));
+ Handle<Foreign> foreign_jmpbuf =
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(jmpbuf));
+ Handle<WasmContinuationObject> result = Handle<WasmContinuationObject>::cast(
+ isolate->factory()->NewStruct(WASM_CONTINUATION_OBJECT_TYPE));
+ result->set_jmpbuf(*foreign_jmpbuf);
result->set_stack(*managed_stack);
- result->set_parent(parent);
+ result->set_parent(*parent);
return result;
}
@@ -1764,12 +1788,12 @@ Handle<WasmContinuationObject> WasmContinuationObject::New(
Handle<WasmContinuationObject> WasmContinuationObject::New(
Isolate* isolate, std::unique_ptr<wasm::StackMemory> stack) {
auto parent = ReadOnlyRoots(isolate).undefined_value();
- return New(isolate, std::move(stack), parent);
+ return New(isolate, std::move(stack), handle(parent, isolate));
}
// static
Handle<WasmContinuationObject> WasmContinuationObject::New(
- Isolate* isolate, WasmContinuationObject parent) {
+ Isolate* isolate, Handle<WasmContinuationObject> parent) {
auto stack =
std::unique_ptr<wasm::StackMemory>(wasm::StackMemory::New(isolate));
return New(isolate, std::move(stack), parent);
@@ -1784,6 +1808,8 @@ Handle<WasmSuspenderObject> WasmSuspenderObject::New(Isolate* isolate) {
auto suspender = Handle<WasmSuspenderObject>::cast(
isolate->factory()->NewJSObject(suspender_cons, AllocationType::kOld));
suspender->set_continuation(ReadOnlyRoots(isolate).undefined_value());
+ suspender->set_parent(ReadOnlyRoots(isolate).undefined_value());
+ suspender->set_state(Inactive);
return suspender;
}
@@ -1829,7 +1855,6 @@ uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) {
encoded_size += 1;
break;
case wasm::kRtt:
- case wasm::kRttWithDepth:
case wasm::kVoid:
case wasm::kBottom:
case wasm::kI8:
@@ -1843,7 +1868,7 @@ uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) {
bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object.IsJSFunction()) return false;
JSFunction js_function = JSFunction::cast(object);
- Code code = js_function.code();
+ CodeT code = js_function.code();
if (CodeKind::JS_TO_WASM_FUNCTION != code.kind() &&
code.builtin_id() != Builtin::kGenericJSToWasmWrapper &&
code.builtin_id() != Builtin::kWasmReturnPromiseOnSuspend) {
@@ -1882,8 +1907,7 @@ Handle<WasmCapiFunction> WasmCapiFunction::New(
Handle<Map> rtt = isolate->factory()->wasm_internal_function_map();
Handle<WasmCapiFunctionData> fun_data =
isolate->factory()->NewWasmCapiFunctionData(
- call_target, embedder_data,
- isolate->builtins()->code_handle(Builtin::kIllegal), rtt,
+ call_target, embedder_data, BUILTIN_CODE(isolate, Illegal), rtt,
serialized_signature);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
@@ -1904,7 +1928,7 @@ int WasmExportedFunction::function_index() {
Handle<WasmExportedFunction> WasmExportedFunction::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
- int arity, Handle<Code> export_wrapper) {
+ int arity, Handle<CodeT> export_wrapper) {
DCHECK(
CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() ||
(export_wrapper->is_builtin() &&
@@ -1920,7 +1944,9 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
Address call_target = instance->GetCallTarget(func_index);
Handle<Map> rtt;
- if (FLAG_experimental_wasm_gc) {
+ bool has_gc =
+ instance->module_object().native_module()->enabled_features().has_gc();
+ if (has_gc) {
int sig_index = instance->module()->functions[func_index].sig_index;
// TODO(7748): Create funcref RTTs lazily?
rtt = handle(Map::cast(instance->managed_object_maps().get(sig_index)),
@@ -2028,7 +2054,8 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
const wasm::FunctionSig* sig,
- Handle<JSReceiver> callable) {
+ Handle<JSReceiver> callable,
+ Handle<HeapObject> suspender) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
int return_count = static_cast<int>(sig->return_count());
@@ -2040,8 +2067,9 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
// TODO(wasm): Think about caching and sharing the JS-to-JS wrappers per
// signature instead of compiling a new one for every instantiation.
- Handle<Code> wrapper_code =
- compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+ Handle<CodeT> wrapper_code = ToCodeT(
+ compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked(),
+ isolate);
// WasmJSFunctions use on-heap Code objects as call targets, so we can't
// cache the target address, unless the WasmJSFunction wraps a
@@ -2057,7 +2085,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<Map> rtt = factory->wasm_internal_function_map();
Handle<WasmJSFunctionData> function_data = factory->NewWasmJSFunctionData(
call_target, callable, return_count, parameter_count, serialized_sig,
- wrapper_code, rtt);
+ wrapper_code, rtt, suspender);
if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
using CK = compiler::WasmImportCallKind;
@@ -2073,9 +2101,14 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
// TODO(wasm): Think about caching and sharing the wasm-to-JS wrappers per
// signature instead of compiling a new one for every instantiation.
- Handle<Code> wasm_to_js_wrapper_code =
- compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity)
- .ToHandleChecked();
+ wasm::Suspend suspend =
+ suspender.is_null() ? wasm::kNoSuspend : wasm::kSuspend;
+ DCHECK_IMPLIES(!suspender.is_null(), !suspender->IsUndefined());
+ Handle<CodeT> wasm_to_js_wrapper_code =
+ ToCodeT(compiler::CompileWasmToJSWrapper(isolate, sig, kind,
+ expected_arity, suspend)
+ .ToHandleChecked(),
+ isolate);
function_data->internal().set_code(*wasm_to_js_wrapper_code);
}
@@ -2103,6 +2136,12 @@ JSReceiver WasmJSFunction::GetCallable() const {
.callable());
}
+HeapObject WasmJSFunction::GetSuspender() const {
+ return WasmApiFunctionRef::cast(
+ shared().wasm_js_function_data().internal().ref())
+ .suspender();
+}
+
const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
WasmJSFunctionData function_data = shared().wasm_js_function_data();
int sig_size = function_data.serialized_signature().length();
@@ -2115,12 +2154,33 @@ const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return zone->New<wasm::FunctionSig>(return_count, parameter_count, types);
}
+bool WasmJSFunction::MatchesSignatureForSuspend(const wasm::FunctionSig* sig) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ int return_count = static_cast<int>(sig->return_count());
+ DisallowHeapAllocation no_alloc;
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ if (parameter_count != function_data.serialized_parameter_count()) {
+ return false;
+ }
+ if (sig_size == 0) return true; // Prevent undefined behavior.
+ // This function is only called for functions wrapped by a
+ // WebAssembly.Suspender object, so the return type has to be externref.
+ CHECK_EQ(function_data.serialized_return_count(), 1);
+ CHECK_EQ(function_data.serialized_signature().get(0), wasm::kWasmAnyRef);
+ const wasm::ValueType* expected = sig->all().begin();
+ return function_data.serialized_signature().matches(
+ 1, expected + return_count, parameter_count);
+}
+
// TODO(9495): Update this if function type variance is introduced.
bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
int return_count = static_cast<int>(sig->return_count());
int parameter_count = static_cast<int>(sig->parameter_count());
+ DisallowHeapAllocation no_alloc;
WasmJSFunctionData function_data = shared().wasm_js_function_data();
if (return_count != function_data.serialized_return_count() ||
parameter_count != function_data.serialized_parameter_count()) {
@@ -2143,9 +2203,6 @@ bool WasmExternalFunction::IsWasmExternalFunction(Object object) {
// static
MaybeHandle<WasmInternalFunction> WasmInternalFunction::FromExternal(
Handle<Object> external, Isolate* isolate) {
- if (external->IsNull(isolate)) {
- return MaybeHandle<WasmInternalFunction>();
- }
if (WasmExportedFunction::IsWasmExportedFunction(*external) ||
WasmJSFunction::IsWasmJSFunction(*external) ||
WasmCapiFunction::IsWasmCapiFunction(*external)) {
@@ -2154,7 +2211,6 @@ MaybeHandle<WasmInternalFunction> WasmInternalFunction::FromExternal(
kAcquireLoad));
return handle(data.internal(), isolate);
}
- // {external} is not null or a wasm external function.
return MaybeHandle<WasmInternalFunction>();
}
@@ -2172,8 +2228,8 @@ Handle<AsmWasmData> AsmWasmData::New(
const WasmModule* module = native_module->module();
const bool kUsesLiftoff = false;
size_t memory_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module,
- kUsesLiftoff) +
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ module, kUsesLiftoff, wasm::DynamicTiering::kDisabled) +
wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
Handle<Managed<wasm::NativeModule>> managed_native_module =
Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
@@ -2196,8 +2252,9 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
case kOptRef:
if (value->IsNull(isolate)) return true;
V8_FALLTHROUGH;
- case kRef:
- switch (expected.heap_representation()) {
+ case kRef: {
+ HeapType::Representation repr = expected.heap_representation();
+ switch (repr) {
case HeapType::kFunc: {
if (!(WasmExternalFunction::IsWasmExternalFunction(*value) ||
WasmCapiFunction::IsWasmCapiFunction(*value))) {
@@ -2208,10 +2265,10 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
}
return true;
}
- case HeapType::kExtern:
case HeapType::kAny:
return true;
case HeapType::kData:
+ case HeapType::kArray:
case HeapType::kEq:
case HeapType::kI31: {
// TODO(7748): Change this when we have a decision on the JS API for
@@ -2229,22 +2286,21 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
value = it.GetDataValue();
}
- if (expected.is_reference_to(HeapType::kEq)) return true;
-
- if (expected.is_reference_to(HeapType::kData)) {
- if (value->IsSmi()) {
- *error_message = "dataref-typed object must be a heap object";
- return false;
- }
- return true;
- } else {
- DCHECK(expected.is_reference_to(HeapType::kI31));
+ if (repr == HeapType::kI31) {
if (!value->IsSmi()) {
*error_message = "i31ref-typed object cannot be a heap object";
return false;
}
return true;
}
+
+ if (!((repr == HeapType::kEq && value->IsSmi()) ||
+ (repr != HeapType::kArray && value->IsWasmStruct()) ||
+ value->IsWasmArray())) {
+ *error_message = "object incompatible with wasm type";
+ return false;
+ }
+ return true;
}
default:
if (module == nullptr) {
@@ -2314,8 +2370,8 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
"Javascript is not supported yet.";
return false;
}
+ }
case kRtt:
- case kRttWithDepth:
// TODO(7748): Implement when the JS API for rtts is decided on.
*error_message =
"passing rtts between Webassembly and Javascript is not supported "
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index bf07fd2bb3..5cff97a24a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -32,6 +32,7 @@ namespace wasm {
class InterpretedFrame;
class NativeModule;
class WasmCode;
+struct WasmFunction;
struct WasmGlobal;
struct WasmModule;
struct WasmTag;
@@ -91,7 +92,8 @@ class ImportedFunctionEntry {
// Initialize this entry as a Wasm to JS call. This accepts the isolate as a
// parameter, since it must allocate a tuple.
V8_EXPORT_PRIVATE void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
- const wasm::WasmCode* wasm_to_js_wrapper);
+ const wasm::WasmCode* wasm_to_js_wrapper,
+ Handle<HeapObject> suspender);
// Initialize this entry as a Wasm to Wasm call.
void SetWasmToWasm(WasmInstanceObject target_instance, Address call_target);
@@ -201,12 +203,10 @@ class WasmTableObject
uint32_t count);
// TODO(wasm): Unify these three methods into one.
- static void UpdateDispatchTables(Isolate* isolate,
- Handle<WasmTableObject> table,
+ static void UpdateDispatchTables(Isolate* isolate, WasmTableObject table,
int entry_index,
- const wasm::FunctionSig* sig,
- Handle<WasmInstanceObject> target_instance,
- int target_func_index);
+ const wasm::WasmFunction* func,
+ WasmInstanceObject target_instance);
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index,
@@ -254,7 +254,7 @@ class WasmMemoryObject
Handle<WasmInstanceObject> object);
inline bool has_maximum_pages();
- V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
+ V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int maximum);
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate,
@@ -334,7 +334,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(wasm_internal_functions, FixedArray)
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_ACCESSORS(feedback_vectors, FixedArray)
- DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
+ DECL_SANDBOXED_POINTER_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
@@ -597,7 +597,7 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE static Handle<WasmExportedFunction> New(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
- int arity, Handle<Code> export_wrapper);
+ int arity, Handle<CodeT> export_wrapper);
Address GetWasmCallTarget();
@@ -622,13 +622,17 @@ class WasmJSFunction : public JSFunction {
static Handle<WasmJSFunction> New(Isolate* isolate,
const wasm::FunctionSig* sig,
- Handle<JSReceiver> callable);
+ Handle<JSReceiver> callable,
+ Handle<HeapObject> suspender);
JSReceiver GetCallable() const;
+ HeapObject GetSuspender() const;
// Deserializes the signature of this function using the provided zone. Note
// that lifetime of the signature is hence directly coupled to the zone.
const wasm::FunctionSig* GetSignature(Zone* zone);
bool MatchesSignature(const wasm::FunctionSig* sig);
+ // Special typing rule for imports wrapped by a Suspender.
+ bool MatchesSignatureForSuspend(const wasm::FunctionSig* sig);
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
@@ -693,7 +697,6 @@ class WasmFunctionData
: public TorqueGeneratedWasmFunctionData<WasmFunctionData, HeapObject> {
public:
DECL_ACCESSORS(internal, WasmInternalFunction)
- DECL_ACCESSORS(wrapper_code, Code)
DECL_PRINTER(WasmFunctionData)
@@ -736,8 +739,6 @@ class WasmInternalFunction
: public TorqueGeneratedWasmInternalFunction<WasmInternalFunction,
Foreign> {
public:
- DECL_ACCESSORS(code, Code)
-
// Returns a handle to the corresponding WasmInternalFunction if {external} is
// a WasmExternalFunction, or an empty handle otherwise.
static MaybeHandle<WasmInternalFunction> FromExternal(Handle<Object> external,
@@ -749,9 +750,6 @@ class WasmInternalFunction
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(WasmInternalFunction)
-
- private:
- DECL_ACCESSORS(raw_code, CodeT)
};
// Information for a WasmJSFunction which is referenced as the function data of
@@ -761,7 +759,7 @@ class WasmJSFunctionData
: public TorqueGeneratedWasmJSFunctionData<WasmJSFunctionData,
WasmFunctionData> {
public:
- DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
+ DECL_ACCESSORS(wasm_to_js_wrapper_code, CodeT)
// Dispatched behavior.
DECL_PRINTER(WasmJSFunctionData)
@@ -785,6 +783,16 @@ class WasmCapiFunctionData
TQ_OBJECT_CONSTRUCTORS(WasmCapiFunctionData)
};
+class WasmOnFulfilledData
+ : public TorqueGeneratedWasmOnFulfilledData<WasmOnFulfilledData,
+ HeapObject> {
+ public:
+ using BodyDescriptor =
+ FlexibleBodyDescriptor<WasmOnFulfilledData::kStartOfStrongFieldsOffset>;
+ DECL_PRINTER(WasmOnFulfilledData)
+ TQ_OBJECT_CONSTRUCTORS(WasmOnFulfilledData)
+};
+
class WasmScript : public AllStatic {
public:
// Position used for storing "on entry" breakpoints (a.k.a. instrumentation
@@ -803,7 +811,7 @@ class WasmScript : public AllStatic {
// Set an "on entry" breakpoint (a.k.a. instrumentation breakpoint) inside
// the given module. This will affect all live and future instances of the
// module.
- V8_EXPORT_PRIVATE static void SetBreakPointOnEntry(
+ V8_EXPORT_PRIVATE static void SetInstrumentationBreakpoint(
Handle<Script>, Handle<BreakPoint> break_point);
// Set a breakpoint on first breakable position of the given function index
@@ -970,12 +978,15 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
inline uint32_t element_offset(uint32_t index);
inline Address ElementAddress(uint32_t index);
- static int MaxLength(const wasm::ArrayType* type) {
+ static int MaxLength(uint32_t element_size_bytes) {
// The total object size must fit into a Smi, for filler objects. To make
// the behavior of Wasm programs independent from the Smi configuration,
// we hard-code the smaller of the two supported ranges.
- int element_shift = type->element_type().element_size_log2();
- return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) >> element_shift;
+ return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) / element_size_bytes;
+ }
+
+ static int MaxLength(const wasm::ArrayType* type) {
+ return MaxLength(type->element_type().element_size_bytes());
}
static inline void EncodeElementSizeInMap(int element_size, Map map);
@@ -995,8 +1006,8 @@ class WasmContinuationObject
public:
static Handle<WasmContinuationObject> New(
Isolate* isolate, std::unique_ptr<wasm::StackMemory> stack);
- static Handle<WasmContinuationObject> New(Isolate* isolate,
- WasmContinuationObject parent);
+ static Handle<WasmContinuationObject> New(
+ Isolate* isolate, Handle<WasmContinuationObject> parent);
DECL_PRINTER(WasmContinuationObject)
@@ -1005,7 +1016,7 @@ class WasmContinuationObject
private:
static Handle<WasmContinuationObject> New(
Isolate* isolate, std::unique_ptr<wasm::StackMemory> stack,
- HeapObject parent);
+ Handle<HeapObject> parent);
TQ_OBJECT_CONSTRUCTORS(WasmContinuationObject)
};
@@ -1015,6 +1026,7 @@ class WasmContinuationObject
class WasmSuspenderObject
: public TorqueGeneratedWasmSuspenderObject<WasmSuspenderObject, JSObject> {
public:
+ enum State : int { Inactive = 0, Active, Suspended };
static Handle<WasmSuspenderObject> New(Isolate* isolate);
// TODO(thibaudm): returnPromiseOnSuspend & suspendOnReturnedPromise.
DECL_PRINTER(WasmSuspenderObject)
@@ -1031,9 +1043,6 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
int array_index, MaybeHandle<Map> rtt_parent,
Handle<WasmInstanceObject> instance);
-Handle<Map> AllocateSubRtt(Isolate* isolate,
- Handle<WasmInstanceObject> instance, uint32_t type,
- Handle<Map> parent, WasmRttSubMode mode);
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 8525d530fd..9fa8f0fb2e 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -14,13 +14,14 @@ extern class WasmInstanceObject extends JSObject;
// Represents the context of a function that is defined through the JS or C
// APIs. Corresponds to the WasmInstanceObject passed to a Wasm function
// reference.
-// TODO(manoskouk): If V8_HEAP_SANDBOX, we cannot encode the isolate_root as a
-// sandboxed pointer, because that would require having access to the isolate
-// root in the first place.
+// TODO(manoskouk): If V8_SANDBOXED_EXTERNAL_POINTERS, we cannot encode the
+// isolate_root as a sandboxed pointer, because that would require having access
+// to the isolate root in the first place.
extern class WasmApiFunctionRef extends HeapObject {
isolate_root: RawPtr;
native_context: NativeContext;
callable: JSReceiver|Undefined;
+ suspender: WasmSuspenderObject|Undefined;
}
// This is the representation that is used internally by wasm to represent
@@ -41,9 +42,6 @@ extern class WasmInternalFunction extends Foreign {
@if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
}
-// WasmInternalFunction is safely comparable for pointer equality.
-extern operator '==' macro TaggedEqual(WasmInternalFunction, Object): bool;
-extern operator '==' macro TaggedEqual(Object, WasmInternalFunction): bool;
extern class WasmFunctionData extends HeapObject {
// The wasm-internal representation of this function object.
@@ -66,6 +64,9 @@ extern class WasmExportedFunctionData extends WasmFunctionData {
@if(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: Code;
packed_args_size: Smi;
+ // Functions returned by suspender.returnPromiseOnSuspend() have this field
+ // set to the host suspender object.
+ suspender: WasmSuspenderObject|Undefined;
}
extern class WasmJSFunctionData extends WasmFunctionData {
@@ -79,6 +80,10 @@ extern class WasmCapiFunctionData extends WasmFunctionData {
serialized_signature: PodArrayOfWasmValueType;
}
+extern class WasmOnFulfilledData extends HeapObject {
+ suspender: WasmSuspenderObject;
+}
+
extern class WasmIndirectFunctionTable extends Struct {
size: uint32;
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@@ -97,6 +102,8 @@ extern class WasmContinuationObject extends Struct {
extern class WasmSuspenderObject extends JSObject {
continuation: WasmContinuationObject|Undefined;
+ parent: WasmSuspenderObject|Undefined;
+ state: Smi; // 0: Inactive, 1: Active, 2: Suspended.
}
extern class WasmExceptionTag extends Struct {
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index 4ec290f836..50504213dd 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -414,12 +414,12 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ArrayCopy, "array.copy")
CASE_OP(ArrayInit, "array.init")
CASE_OP(ArrayInitStatic, "array.init_static")
+ CASE_OP(ArrayInitFromData, "array.init_from_data")
+ CASE_OP(ArrayInitFromDataStatic, "array.init_from_data_static")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
CASE_OP(RttCanon, "rtt.canon")
- CASE_OP(RttSub, "rtt.sub")
- CASE_OP(RttFreshSub, "rtt.fresh_sub")
CASE_OP(RefTest, "ref.test")
CASE_OP(RefTestStatic, "ref.test_static")
CASE_OP(RefCast, "ref.cast")
@@ -431,15 +431,19 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
+ CASE_OP(RefIsArray, "ref.is_array")
CASE_OP(RefAsFunc, "ref.as_func")
CASE_OP(RefAsData, "ref.as_data")
CASE_OP(RefAsI31, "ref.as_i31")
+ CASE_OP(RefAsArray, "ref.as_array")
CASE_OP(BrOnFunc, "br_on_func")
CASE_OP(BrOnData, "br_on_data")
CASE_OP(BrOnI31, "br_on_i31")
+ CASE_OP(BrOnArray, "br_on_array")
CASE_OP(BrOnNonFunc, "br_on_non_func")
CASE_OP(BrOnNonData, "br_on_non_data")
CASE_OP(BrOnNonI31, "br_on_non_i31")
+ CASE_OP(BrOnNonArray, "br_on_non_array")
case kNumericPrefix:
case kSimdPrefix:
@@ -629,9 +633,11 @@ constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
}
constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
-#undef CASE
+#define CASE_SIG(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+#define CASE_VARIADIC(name, opc)
+ return FOREACH_NUMERIC_OPCODE(CASE_SIG, CASE_VARIADIC) kSigEnum_None;
+#undef CASE_SIG
+#undef CASE_VARIADIC
}
constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 0e1927d088..c758a119a1 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -116,136 +116,145 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(MemoryGrow, 0x40, i_i)
// Expressions with signatures.
-#define FOREACH_SIMPLE_OPCODE(V) \
- V(I32Eqz, 0x45, i_i) \
- V(I32Eq, 0x46, i_ii) \
- V(I32Ne, 0x47, i_ii) \
- V(I32LtS, 0x48, i_ii) \
- V(I32LtU, 0x49, i_ii) \
- V(I32GtS, 0x4a, i_ii) \
- V(I32GtU, 0x4b, i_ii) \
- V(I32LeS, 0x4c, i_ii) \
- V(I32LeU, 0x4d, i_ii) \
- V(I32GeS, 0x4e, i_ii) \
- V(I32GeU, 0x4f, i_ii) \
- V(I64Eqz, 0x50, i_l) \
- V(I64Eq, 0x51, i_ll) \
- V(I64Ne, 0x52, i_ll) \
- V(I64LtS, 0x53, i_ll) \
- V(I64LtU, 0x54, i_ll) \
- V(I64GtS, 0x55, i_ll) \
- V(I64GtU, 0x56, i_ll) \
- V(I64LeS, 0x57, i_ll) \
- V(I64LeU, 0x58, i_ll) \
- V(I64GeS, 0x59, i_ll) \
- V(I64GeU, 0x5a, i_ll) \
- V(F32Eq, 0x5b, i_ff) \
- V(F32Ne, 0x5c, i_ff) \
- V(F32Lt, 0x5d, i_ff) \
- V(F32Gt, 0x5e, i_ff) \
- V(F32Le, 0x5f, i_ff) \
- V(F32Ge, 0x60, i_ff) \
- V(F64Eq, 0x61, i_dd) \
- V(F64Ne, 0x62, i_dd) \
- V(F64Lt, 0x63, i_dd) \
- V(F64Gt, 0x64, i_dd) \
- V(F64Le, 0x65, i_dd) \
- V(F64Ge, 0x66, i_dd) \
- V(I32Clz, 0x67, i_i) \
- V(I32Ctz, 0x68, i_i) \
- V(I32Popcnt, 0x69, i_i) \
- V(I32Add, 0x6a, i_ii) \
- V(I32Sub, 0x6b, i_ii) \
- V(I32Mul, 0x6c, i_ii) \
- V(I32DivS, 0x6d, i_ii) \
- V(I32DivU, 0x6e, i_ii) \
- V(I32RemS, 0x6f, i_ii) \
- V(I32RemU, 0x70, i_ii) \
- V(I32And, 0x71, i_ii) \
- V(I32Ior, 0x72, i_ii) \
- V(I32Xor, 0x73, i_ii) \
- V(I32Shl, 0x74, i_ii) \
- V(I32ShrS, 0x75, i_ii) \
- V(I32ShrU, 0x76, i_ii) \
- V(I32Rol, 0x77, i_ii) \
- V(I32Ror, 0x78, i_ii) \
- V(I64Clz, 0x79, l_l) \
- V(I64Ctz, 0x7a, l_l) \
- V(I64Popcnt, 0x7b, l_l) \
- V(I64Add, 0x7c, l_ll) \
- V(I64Sub, 0x7d, l_ll) \
- V(I64Mul, 0x7e, l_ll) \
- V(I64DivS, 0x7f, l_ll) \
- V(I64DivU, 0x80, l_ll) \
- V(I64RemS, 0x81, l_ll) \
- V(I64RemU, 0x82, l_ll) \
- V(I64And, 0x83, l_ll) \
- V(I64Ior, 0x84, l_ll) \
- V(I64Xor, 0x85, l_ll) \
- V(I64Shl, 0x86, l_ll) \
- V(I64ShrS, 0x87, l_ll) \
- V(I64ShrU, 0x88, l_ll) \
- V(I64Rol, 0x89, l_ll) \
- V(I64Ror, 0x8a, l_ll) \
- V(F32Abs, 0x8b, f_f) \
- V(F32Neg, 0x8c, f_f) \
- V(F32Ceil, 0x8d, f_f) \
- V(F32Floor, 0x8e, f_f) \
- V(F32Trunc, 0x8f, f_f) \
- V(F32NearestInt, 0x90, f_f) \
- V(F32Sqrt, 0x91, f_f) \
- V(F32Add, 0x92, f_ff) \
- V(F32Sub, 0x93, f_ff) \
- V(F32Mul, 0x94, f_ff) \
- V(F32Div, 0x95, f_ff) \
- V(F32Min, 0x96, f_ff) \
- V(F32Max, 0x97, f_ff) \
- V(F32CopySign, 0x98, f_ff) \
- V(F64Abs, 0x99, d_d) \
- V(F64Neg, 0x9a, d_d) \
- V(F64Ceil, 0x9b, d_d) \
- V(F64Floor, 0x9c, d_d) \
- V(F64Trunc, 0x9d, d_d) \
- V(F64NearestInt, 0x9e, d_d) \
- V(F64Sqrt, 0x9f, d_d) \
- V(F64Add, 0xa0, d_dd) \
- V(F64Sub, 0xa1, d_dd) \
- V(F64Mul, 0xa2, d_dd) \
- V(F64Div, 0xa3, d_dd) \
- V(F64Min, 0xa4, d_dd) \
- V(F64Max, 0xa5, d_dd) \
- V(F64CopySign, 0xa6, d_dd) \
- V(I32ConvertI64, 0xa7, i_l) \
- V(I32SConvertF32, 0xa8, i_f) \
- V(I32UConvertF32, 0xa9, i_f) \
- V(I32SConvertF64, 0xaa, i_d) \
- V(I32UConvertF64, 0xab, i_d) \
- V(I64SConvertI32, 0xac, l_i) \
- V(I64UConvertI32, 0xad, l_i) \
- V(I64SConvertF32, 0xae, l_f) \
- V(I64UConvertF32, 0xaf, l_f) \
- V(I64SConvertF64, 0xb0, l_d) \
- V(I64UConvertF64, 0xb1, l_d) \
- V(F32SConvertI32, 0xb2, f_i) \
- V(F32UConvertI32, 0xb3, f_i) \
- V(F32SConvertI64, 0xb4, f_l) \
- V(F32UConvertI64, 0xb5, f_l) \
- V(F32ConvertF64, 0xb6, f_d) \
- V(F64SConvertI32, 0xb7, d_i) \
- V(F64UConvertI32, 0xb8, d_i) \
- V(F64SConvertI64, 0xb9, d_l) \
- V(F64UConvertI64, 0xba, d_l) \
- V(F64ConvertF32, 0xbb, d_f) \
- V(I32ReinterpretF32, 0xbc, i_f) \
- V(I64ReinterpretF64, 0xbd, l_d) \
- V(F32ReinterpretI32, 0xbe, f_i) \
- V(F64ReinterpretI64, 0xbf, d_l) \
- V(I32SExtendI8, 0xc0, i_i) \
- V(I32SExtendI16, 0xc1, i_i) \
- V(I64SExtendI8, 0xc2, l_l) \
- V(I64SExtendI16, 0xc3, l_l) \
+
+// The following opcodes can be used as constant expressions under
+// --experimental-wasm-extended-const.
+#define FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
+ V(I32Add, 0x6a, i_ii) \
+ V(I32Sub, 0x6b, i_ii) \
+ V(I32Mul, 0x6c, i_ii) \
+ V(I64Add, 0x7c, l_ll) \
+ V(I64Sub, 0x7d, l_ll) \
+ V(I64Mul, 0x7e, l_ll)
+
+#define FOREACH_SIMPLE_NON_CONST_OPCODE(V) \
+ V(I32Eqz, 0x45, i_i) \
+ V(I32Eq, 0x46, i_ii) \
+ V(I32Ne, 0x47, i_ii) \
+ V(I32LtS, 0x48, i_ii) \
+ V(I32LtU, 0x49, i_ii) \
+ V(I32GtS, 0x4a, i_ii) \
+ V(I32GtU, 0x4b, i_ii) \
+ V(I32LeS, 0x4c, i_ii) \
+ V(I32LeU, 0x4d, i_ii) \
+ V(I32GeS, 0x4e, i_ii) \
+ V(I32GeU, 0x4f, i_ii) \
+ V(I64Eqz, 0x50, i_l) \
+ V(I64Eq, 0x51, i_ll) \
+ V(I64Ne, 0x52, i_ll) \
+ V(I64LtS, 0x53, i_ll) \
+ V(I64LtU, 0x54, i_ll) \
+ V(I64GtS, 0x55, i_ll) \
+ V(I64GtU, 0x56, i_ll) \
+ V(I64LeS, 0x57, i_ll) \
+ V(I64LeU, 0x58, i_ll) \
+ V(I64GeS, 0x59, i_ll) \
+ V(I64GeU, 0x5a, i_ll) \
+ V(F32Eq, 0x5b, i_ff) \
+ V(F32Ne, 0x5c, i_ff) \
+ V(F32Lt, 0x5d, i_ff) \
+ V(F32Gt, 0x5e, i_ff) \
+ V(F32Le, 0x5f, i_ff) \
+ V(F32Ge, 0x60, i_ff) \
+ V(F64Eq, 0x61, i_dd) \
+ V(F64Ne, 0x62, i_dd) \
+ V(F64Lt, 0x63, i_dd) \
+ V(F64Gt, 0x64, i_dd) \
+ V(F64Le, 0x65, i_dd) \
+ V(F64Ge, 0x66, i_dd) \
+ V(I32Clz, 0x67, i_i) \
+ V(I32Ctz, 0x68, i_i) \
+ V(I32Popcnt, 0x69, i_i) \
+ V(I32DivS, 0x6d, i_ii) \
+ V(I32DivU, 0x6e, i_ii) \
+ V(I32RemS, 0x6f, i_ii) \
+ V(I32RemU, 0x70, i_ii) \
+ V(I32And, 0x71, i_ii) \
+ V(I32Ior, 0x72, i_ii) \
+ V(I32Xor, 0x73, i_ii) \
+ V(I32Shl, 0x74, i_ii) \
+ V(I32ShrS, 0x75, i_ii) \
+ V(I32ShrU, 0x76, i_ii) \
+ V(I32Rol, 0x77, i_ii) \
+ V(I32Ror, 0x78, i_ii) \
+ V(I64Clz, 0x79, l_l) \
+ V(I64Ctz, 0x7a, l_l) \
+ V(I64Popcnt, 0x7b, l_l) \
+ V(I64DivS, 0x7f, l_ll) \
+ V(I64DivU, 0x80, l_ll) \
+ V(I64RemS, 0x81, l_ll) \
+ V(I64RemU, 0x82, l_ll) \
+ V(I64And, 0x83, l_ll) \
+ V(I64Ior, 0x84, l_ll) \
+ V(I64Xor, 0x85, l_ll) \
+ V(I64Shl, 0x86, l_ll) \
+ V(I64ShrS, 0x87, l_ll) \
+ V(I64ShrU, 0x88, l_ll) \
+ V(I64Rol, 0x89, l_ll) \
+ V(I64Ror, 0x8a, l_ll) \
+ V(F32Abs, 0x8b, f_f) \
+ V(F32Neg, 0x8c, f_f) \
+ V(F32Ceil, 0x8d, f_f) \
+ V(F32Floor, 0x8e, f_f) \
+ V(F32Trunc, 0x8f, f_f) \
+ V(F32NearestInt, 0x90, f_f) \
+ V(F32Sqrt, 0x91, f_f) \
+ V(F32Add, 0x92, f_ff) \
+ V(F32Sub, 0x93, f_ff) \
+ V(F32Mul, 0x94, f_ff) \
+ V(F32Div, 0x95, f_ff) \
+ V(F32Min, 0x96, f_ff) \
+ V(F32Max, 0x97, f_ff) \
+ V(F32CopySign, 0x98, f_ff) \
+ V(F64Abs, 0x99, d_d) \
+ V(F64Neg, 0x9a, d_d) \
+ V(F64Ceil, 0x9b, d_d) \
+ V(F64Floor, 0x9c, d_d) \
+ V(F64Trunc, 0x9d, d_d) \
+ V(F64NearestInt, 0x9e, d_d) \
+ V(F64Sqrt, 0x9f, d_d) \
+ V(F64Add, 0xa0, d_dd) \
+ V(F64Sub, 0xa1, d_dd) \
+ V(F64Mul, 0xa2, d_dd) \
+ V(F64Div, 0xa3, d_dd) \
+ V(F64Min, 0xa4, d_dd) \
+ V(F64Max, 0xa5, d_dd) \
+ V(F64CopySign, 0xa6, d_dd) \
+ V(I32ConvertI64, 0xa7, i_l) \
+ V(I32SConvertF32, 0xa8, i_f) \
+ V(I32UConvertF32, 0xa9, i_f) \
+ V(I32SConvertF64, 0xaa, i_d) \
+ V(I32UConvertF64, 0xab, i_d) \
+ V(I64SConvertI32, 0xac, l_i) \
+ V(I64UConvertI32, 0xad, l_i) \
+ V(I64SConvertF32, 0xae, l_f) \
+ V(I64UConvertF32, 0xaf, l_f) \
+ V(I64SConvertF64, 0xb0, l_d) \
+ V(I64UConvertF64, 0xb1, l_d) \
+ V(F32SConvertI32, 0xb2, f_i) \
+ V(F32UConvertI32, 0xb3, f_i) \
+ V(F32SConvertI64, 0xb4, f_l) \
+ V(F32UConvertI64, 0xb5, f_l) \
+ V(F32ConvertF64, 0xb6, f_d) \
+ V(F64SConvertI32, 0xb7, d_i) \
+ V(F64UConvertI32, 0xb8, d_i) \
+ V(F64SConvertI64, 0xb9, d_l) \
+ V(F64UConvertI64, 0xba, d_l) \
+ V(F64ConvertF32, 0xbb, d_f) \
+ V(I32ReinterpretF32, 0xbc, i_f) \
+ V(I64ReinterpretF64, 0xbd, l_d) \
+ V(F32ReinterpretI32, 0xbe, f_i) \
+ V(F64ReinterpretI64, 0xbf, d_l) \
+ V(I32SExtendI8, 0xc0, i_i) \
+ V(I32SExtendI16, 0xc1, i_i) \
+ V(I64SExtendI8, 0xc2, l_l) \
+ V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
+#define FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
+ FOREACH_SIMPLE_NON_CONST_OPCODE(V)
+
#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefEq, 0xd5, i_qq)
// For compatibility with Asm.js.
@@ -569,29 +578,29 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_CONST_OPCODE(V)
-#define FOREACH_NUMERIC_OPCODE(V) \
- V(I32SConvertSatF32, 0xfc00, i_f) \
- V(I32UConvertSatF32, 0xfc01, i_f) \
- V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d) \
- V(I64SConvertSatF32, 0xfc04, l_f) \
- V(I64UConvertSatF32, 0xfc05, l_f) \
- V(I64SConvertSatF64, 0xfc06, l_d) \
- V(I64UConvertSatF64, 0xfc07, l_d) \
- V(MemoryInit, 0xfc08, v_iii) \
- V(DataDrop, 0xfc09, v_v) \
- V(MemoryCopy, 0xfc0a, v_iii) \
- V(MemoryFill, 0xfc0b, v_iii) \
- V(TableInit, 0xfc0c, v_iii) \
- V(ElemDrop, 0xfc0d, v_v) \
- V(TableCopy, 0xfc0e, v_iii) \
+#define FOREACH_NUMERIC_OPCODE(V_SIG, V_VARIADIC) \
+ V_SIG(I32SConvertSatF32, 0xfc00, i_f) \
+ V_SIG(I32UConvertSatF32, 0xfc01, i_f) \
+ V_SIG(I32SConvertSatF64, 0xfc02, i_d) \
+ V_SIG(I32UConvertSatF64, 0xfc03, i_d) \
+ V_SIG(I64SConvertSatF32, 0xfc04, l_f) \
+ V_SIG(I64UConvertSatF32, 0xfc05, l_f) \
+ V_SIG(I64SConvertSatF64, 0xfc06, l_d) \
+ V_SIG(I64UConvertSatF64, 0xfc07, l_d) \
+ V_VARIADIC(MemoryInit, 0xfc08) \
+ V_SIG(DataDrop, 0xfc09, v_v) \
+ V_VARIADIC(MemoryCopy, 0xfc0a) \
+ V_VARIADIC(MemoryFill, 0xfc0b) \
+ V_SIG(TableInit, 0xfc0c, v_iii) \
+ V_SIG(ElemDrop, 0xfc0d, v_v) \
+ V_SIG(TableCopy, 0xfc0e, v_iii) \
/* TableGrow is polymorphic in the first parameter. */ \
/* It's whatever the table type is. */ \
- V(TableGrow, 0xfc0f, i_ci) \
- V(TableSize, 0xfc10, i_v) \
+ V_VARIADIC(TableGrow, 0xfc0f) \
+ V_SIG(TableSize, 0xfc10, i_v) \
/* TableFill is polymorphic in the second parameter. */ \
/* It's whatever the table type is. */ \
- V(TableFill, 0xfc11, v_iii)
+ V_VARIADIC(TableFill, 0xfc11)
#define FOREACH_ATOMIC_OPCODE(V) \
V(AtomicNotify, 0xfe00, i_ii) \
@@ -661,58 +670,62 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
-#define FOREACH_GC_OPCODE(V) \
- V(StructNewWithRtt, 0xfb01, _) \
- V(StructNewDefaultWithRtt, 0xfb02, _) \
- V(StructGet, 0xfb03, _) \
- V(StructGetS, 0xfb04, _) \
- V(StructGetU, 0xfb05, _) \
- V(StructSet, 0xfb06, _) \
- V(StructNew, 0xfb07, _) \
- V(StructNewDefault, 0xfb08, _) \
- V(ArrayNewWithRtt, 0xfb11, _) \
- V(ArrayNewDefaultWithRtt, 0xfb12, _) \
- V(ArrayGet, 0xfb13, _) \
- V(ArrayGetS, 0xfb14, _) \
- V(ArrayGetU, 0xfb15, _) \
- V(ArraySet, 0xfb16, _) \
- V(ArrayLen, 0xfb17, _) \
- V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
- V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
- V(ArrayInitStatic, 0xfb1a, _) \
- V(ArrayNew, 0xfb1b, _) \
- V(ArrayNewDefault, 0xfb1c, _) \
- V(I31New, 0xfb20, _) \
- V(I31GetS, 0xfb21, _) \
- V(I31GetU, 0xfb22, _) \
- V(RttCanon, 0xfb30, _) \
- V(RttSub, 0xfb31, _) \
- V(RttFreshSub, 0xfb32, _) /* not standardized - V8 experimental */ \
- V(RefTest, 0xfb40, _) \
- V(RefCast, 0xfb41, _) \
- V(BrOnCast, 0xfb42, _) \
- V(BrOnCastFail, 0xfb43, _) \
- V(RefTestStatic, 0xfb44, _) \
- V(RefCastStatic, 0xfb45, _) \
- V(BrOnCastStatic, 0xfb46, _) \
- V(BrOnCastStaticFail, 0xfb47, _) \
- V(RefIsFunc, 0xfb50, _) \
- V(RefIsData, 0xfb51, _) \
- V(RefIsI31, 0xfb52, _) \
- V(RefAsFunc, 0xfb58, _) \
- V(RefAsData, 0xfb59, _) \
- V(RefAsI31, 0xfb5a, _) \
- V(BrOnFunc, 0xfb60, _) \
- V(BrOnData, 0xfb61, _) \
- V(BrOnI31, 0xfb62, _) \
- V(BrOnNonFunc, 0xfb63, _) \
- V(BrOnNonData, 0xfb64, _) \
- V(BrOnNonI31, 0xfb65, _)
-
#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
/* AtomicFence does not target a particular linear memory. */ \
V(AtomicFence, 0xfe03, v_v)
+#define FOREACH_GC_OPCODE(V) /* Force 80 columns */ \
+ V(StructNewWithRtt, 0xfb01, _) \
+ V(StructNewDefaultWithRtt, 0xfb02, _) \
+ V(StructGet, 0xfb03, _) \
+ V(StructGetS, 0xfb04, _) \
+ V(StructGetU, 0xfb05, _) \
+ V(StructSet, 0xfb06, _) \
+ V(StructNew, 0xfb07, _) \
+ V(StructNewDefault, 0xfb08, _) \
+ V(ArrayNewWithRtt, 0xfb11, _) \
+ V(ArrayNewDefaultWithRtt, 0xfb12, _) \
+ V(ArrayGet, 0xfb13, _) \
+ V(ArrayGetS, 0xfb14, _) \
+ V(ArrayGetU, 0xfb15, _) \
+ V(ArraySet, 0xfb16, _) \
+ V(ArrayLen, 0xfb17, _) \
+ V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
+ V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
+ V(ArrayInitStatic, 0xfb1a, _) /* not standardized - V8 experimental */ \
+ V(ArrayNew, 0xfb1b, _) \
+ V(ArrayNewDefault, 0xfb1c, _) \
+ V(ArrayInitFromData, 0xfb1e, _) /* not stand. - V8 experimental */ \
+ V(ArrayInitFromDataStatic, 0xfb1d, _) /* not stand. - V8 experimental */ \
+ V(I31New, 0xfb20, _) \
+ V(I31GetS, 0xfb21, _) \
+ V(I31GetU, 0xfb22, _) \
+ V(RttCanon, 0xfb30, _) \
+ V(RefTest, 0xfb40, _) \
+ V(RefCast, 0xfb41, _) \
+ V(BrOnCast, 0xfb42, _) \
+ V(BrOnCastFail, 0xfb43, _) \
+ V(RefTestStatic, 0xfb44, _) \
+ V(RefCastStatic, 0xfb45, _) \
+ V(BrOnCastStatic, 0xfb46, _) \
+ V(BrOnCastStaticFail, 0xfb47, _) \
+ V(RefIsFunc, 0xfb50, _) \
+ V(RefIsData, 0xfb51, _) \
+ V(RefIsI31, 0xfb52, _) \
+ V(RefIsArray, 0xfb53, _) \
+ V(RefAsFunc, 0xfb58, _) \
+ V(RefAsData, 0xfb59, _) \
+ V(RefAsI31, 0xfb5a, _) \
+ V(RefAsArray, 0xfb5b, _) \
+ V(BrOnFunc, 0xfb60, _) \
+ V(BrOnData, 0xfb61, _) \
+ V(BrOnI31, 0xfb62, _) \
+ V(BrOnArray, 0xfb66, _) \
+ V(BrOnNonFunc, 0xfb63, _) \
+ V(BrOnNonData, 0xfb64, _) \
+ V(BrOnNonI31, 0xfb65, _) \
+ V(BrOnNonArray, 0xfb67, _)
+
// All opcodes.
#define FOREACH_OPCODE(V) \
FOREACH_CONTROL_OPCODE(V) \
@@ -726,7 +739,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
- FOREACH_NUMERIC_OPCODE(V) \
+ FOREACH_NUMERIC_OPCODE(V, V) \
FOREACH_GC_OPCODE(V)
// All signatures.
@@ -770,7 +783,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
- V(i_e, kWasmI32, kWasmExternRef) \
+ V(i_a, kWasmI32, kWasmAnyRef) \
V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
@@ -795,7 +808,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
enum WasmOpcode {
// Declare expression opcodes.
-#define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
+#define DECLARE_NAMED_ENUM(name, opcode, ...) kExpr##name = opcode,
FOREACH_OPCODE(DECLARE_NAMED_ENUM)
#undef DECLARE_NAMED_ENUM
#define DECLARE_PREFIX(name, opcode) k##name##Prefix = opcode,
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 08dfce0f65..88a7a285a4 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -30,6 +30,9 @@ namespace internal {
namespace wasm {
namespace {
+constexpr uint8_t kLazyFunction = 2;
+constexpr uint8_t kLiftoffFunction = 3;
+constexpr uint8_t kTurboFanFunction = 4;
// TODO(bbudge) Try to unify the various implementations of readers and writers
// in Wasm, e.g. StreamProcessor and ZoneBuffer, with these.
@@ -189,17 +192,17 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
constexpr size_t kHeaderSize = sizeof(size_t); // total code size
-constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
- sizeof(int) + // offset of constant pool
- sizeof(int) + // offset of safepoint table
- sizeof(int) + // offset of handler table
- sizeof(int) + // offset of code comments
- sizeof(int) + // unpadded binary size
- sizeof(int) + // stack slots
- sizeof(int) + // tagged parameter slots
- sizeof(int) + // code size
- sizeof(int) + // reloc size
- sizeof(int) + // source positions size
+constexpr size_t kCodeHeaderSize = sizeof(uint8_t) + // code kind
+ sizeof(int) + // offset of constant pool
+ sizeof(int) + // offset of safepoint table
+ sizeof(int) + // offset of handler table
+ sizeof(int) + // offset of code comments
+ sizeof(int) + // unpadded binary size
+ sizeof(int) + // stack slots
+ sizeof(int) + // tagged parameter slots
+ sizeof(int) + // code size
+ sizeof(int) + // reloc size
+ sizeof(int) + // source positions size
sizeof(int) + // protected instructions size
sizeof(WasmCode::Kind) + // code kind
sizeof(ExecutionTier); // tier
@@ -285,7 +288,7 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
private:
size_t MeasureCode(const WasmCode*) const;
void WriteHeader(Writer*, size_t total_code_size);
- bool WriteCode(const WasmCode*, Writer*);
+ void WriteCode(const WasmCode*, Writer*);
const NativeModule* const native_module_;
const base::Vector<WasmCode* const> code_table_;
@@ -303,10 +306,10 @@ NativeModuleSerializer::NativeModuleSerializer(
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- if (code == nullptr) return sizeof(bool);
+ if (code == nullptr) return sizeof(uint8_t);
DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
if (code->tier() != ExecutionTier::kTurbofan) {
- return sizeof(bool);
+ return sizeof(uint8_t);
}
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
@@ -329,21 +332,33 @@ void NativeModuleSerializer::WriteHeader(Writer* writer,
writer->Write(total_code_size);
}
-bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
- DCHECK_IMPLIES(!FLAG_wasm_lazy_compilation, code != nullptr);
+void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
if (code == nullptr) {
- writer->Write(false);
- return true;
+ writer->Write(kLazyFunction);
+ return;
}
+
DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
// Only serialize TurboFan code, as Liftoff code can contain breakpoints or
// non-relocatable constants.
if (code->tier() != ExecutionTier::kTurbofan) {
- writer->Write(false);
- return true;
+ // We check if the function has been executed already. If so, we serialize
+ // it as {kLiftoffFunction} so that upon deserialization the function will
+ // get compiled with Liftoff eagerly. If the function has not been executed
+ // yet, we serialize it as {kLazyFunction}, and the function will not get
+ // compiled upon deserialization.
+ NativeModule* native_module = code->native_module();
+ uint32_t budget =
+ native_module->tiering_budget_array()[declared_function_index(
+ native_module->module(), code->index())];
+ writer->Write(budget == static_cast<uint32_t>(FLAG_wasm_tiering_budget)
+ ? kLazyFunction
+ : kLiftoffFunction);
+ return;
}
+
++num_turbofan_functions_;
- writer->Write(true);
+ writer->Write(kTurboFanFunction);
// Write the size of the entire code section, followed by the code header.
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
@@ -432,7 +447,6 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
memcpy(serialized_code_start, code_start, code_size);
}
total_written_code_ += code_size;
- return true;
}
bool NativeModuleSerializer::Write(Writer* writer) {
@@ -449,7 +463,7 @@ bool NativeModuleSerializer::Write(Writer* writer) {
WriteHeader(writer, total_code_size);
for (WasmCode* code : code_table_) {
- if (!WriteCode(code, writer)) return false;
+ WriteCode(code, writer);
}
// If not a single function was written, serialization was not successful.
if (num_turbofan_functions_ == 0) return false;
@@ -519,13 +533,13 @@ class DeserializationQueue {
return units;
}
- size_t NumBatches() {
+ size_t NumBatches() const {
base::MutexGuard guard(&mutex_);
return queue_.size();
}
private:
- base::Mutex mutex_;
+ mutable base::Mutex mutex_;
std::queue<std::vector<DeserializationUnit>> queue_;
};
@@ -537,13 +551,16 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
bool Read(Reader* reader);
- base::Vector<const int> missing_functions() {
- return base::VectorOf(missing_functions_);
+ base::Vector<const int> lazy_functions() {
+ return base::VectorOf(lazy_functions_);
+ }
+
+ base::Vector<const int> liftoff_functions() {
+ return base::VectorOf(liftoff_functions_);
}
private:
- friend class CopyAndRelocTask;
- friend class PublishTask;
+ friend class DeserializeCodeTask;
void ReadHeader(Reader* reader);
DeserializationUnit ReadCode(int fn_index, Reader* reader);
@@ -559,69 +576,68 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
size_t remaining_code_size_ = 0;
base::Vector<byte> current_code_space_;
NativeModule::JumpTablesRef current_jump_tables_;
- std::vector<int> missing_functions_;
+ std::vector<int> lazy_functions_;
+ std::vector<int> liftoff_functions_;
};
-class CopyAndRelocTask : public JobTask {
+class DeserializeCodeTask : public JobTask {
public:
- CopyAndRelocTask(NativeModuleDeserializer* deserializer,
- DeserializationQueue* from_queue,
- DeserializationQueue* to_queue,
- std::shared_ptr<JobHandle> publish_handle)
- : deserializer_(deserializer),
- from_queue_(from_queue),
- to_queue_(to_queue),
- publish_handle_(std::move(publish_handle)) {}
+ DeserializeCodeTask(NativeModuleDeserializer* deserializer,
+ DeserializationQueue* reloc_queue)
+ : deserializer_(deserializer), reloc_queue_(reloc_queue) {}
void Run(JobDelegate* delegate) override {
CodeSpaceWriteScope code_space_write_scope(deserializer_->native_module_);
do {
- auto batch = from_queue_->Pop();
+ // Repeatedly publish everything that was copied already.
+ TryPublishing(delegate);
+
+ auto batch = reloc_queue_->Pop();
if (batch.empty()) break;
for (const auto& unit : batch) {
deserializer_->CopyAndRelocate(unit);
}
- to_queue_->Add(std::move(batch));
- publish_handle_->NotifyConcurrencyIncrease();
+ publish_queue_.Add(std::move(batch));
+ delegate->NotifyConcurrencyIncrease();
} while (!delegate->ShouldYield());
}
size_t GetMaxConcurrency(size_t /* worker_count */) const override {
- return from_queue_->NumBatches();
+ // Number of copy&reloc batches, plus 1 if there is also something to
+ // publish.
+ bool publish = publishing_.load(std::memory_order_relaxed) == false &&
+ publish_queue_.NumBatches() > 0;
+ return reloc_queue_->NumBatches() + (publish ? 1 : 0);
}
private:
- NativeModuleDeserializer* const deserializer_;
- DeserializationQueue* const from_queue_;
- DeserializationQueue* const to_queue_;
- std::shared_ptr<JobHandle> const publish_handle_;
-};
+ void TryPublishing(JobDelegate* delegate) {
+ // Publishing is sequential, so only start publishing if no one else is.
+ if (publishing_.exchange(true, std::memory_order_relaxed)) return;
-class PublishTask : public JobTask {
- public:
- PublishTask(NativeModuleDeserializer* deserializer,
- DeserializationQueue* from_queue)
- : deserializer_(deserializer), from_queue_(from_queue) {}
-
- void Run(JobDelegate* delegate) override {
WasmCodeRefScope code_scope;
- do {
- auto to_publish = from_queue_->PopAll();
- if (to_publish.empty()) break;
- deserializer_->Publish(std::move(to_publish));
- } while (!delegate->ShouldYield());
- }
-
- size_t GetMaxConcurrency(size_t worker_count) const override {
- // Publishing is sequential anyway, so never return more than 1. If a
- // worker is already running, don't spawn a second one.
- if (worker_count > 0) return 0;
- return std::min(size_t{1}, from_queue_->NumBatches());
+ while (true) {
+ bool yield = false;
+ while (!yield) {
+ auto to_publish = publish_queue_.PopAll();
+ if (to_publish.empty()) break;
+ deserializer_->Publish(std::move(to_publish));
+ yield = delegate->ShouldYield();
+ }
+ publishing_.store(false, std::memory_order_relaxed);
+ if (yield) break;
+ // After finishing publishing, check again if new work arrived in the mean
+ // time. If so, continue publishing.
+ if (publish_queue_.NumBatches() == 0) break;
+ if (publishing_.exchange(true, std::memory_order_relaxed)) break;
+ // We successfully reset {publishing_} from {false} to {true}.
+ }
}
- private:
NativeModuleDeserializer* const deserializer_;
- DeserializationQueue* const from_queue_;
+ DeserializationQueue* const reloc_queue_;
+ DeserializationQueue publish_queue_;
+ std::atomic<bool> publishing_{false};
};
NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
@@ -640,32 +656,30 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
WasmCodeRefScope wasm_code_ref_scope;
DeserializationQueue reloc_queue;
- DeserializationQueue publish_queue;
- std::shared_ptr<JobHandle> publish_handle = V8::GetCurrentPlatform()->PostJob(
+ std::unique_ptr<JobHandle> job_handle = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible,
- std::make_unique<PublishTask>(this, &publish_queue));
+ std::make_unique<DeserializeCodeTask>(this, &reloc_queue));
- std::unique_ptr<JobHandle> copy_and_reloc_handle =
- V8::GetCurrentPlatform()->PostJob(
- TaskPriority::kUserVisible,
- std::make_unique<CopyAndRelocTask>(this, &reloc_queue, &publish_queue,
- publish_handle));
+ // Choose a batch size such that we do not create too small batches (>=100k
+ // code bytes), but also not too many (<=100 batches).
+ constexpr size_t kMinBatchSizeInBytes = 100000;
+ size_t batch_limit =
+ std::max(kMinBatchSizeInBytes, remaining_code_size_ / 100);
std::vector<DeserializationUnit> batch;
- const byte* batch_start = reader->current_location();
+ size_t batch_size = 0;
CodeSpaceWriteScope code_space_write_scope(native_module_);
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
DeserializationUnit unit = ReadCode(i, reader);
if (!unit.code) continue;
+ batch_size += unit.code->instructions().size();
batch.emplace_back(std::move(unit));
- uint64_t batch_size_in_bytes = reader->current_location() - batch_start;
- constexpr int kMinBatchSizeInBytes = 100000;
- if (batch_size_in_bytes >= kMinBatchSizeInBytes) {
+ if (batch_size >= batch_limit) {
reloc_queue.Add(std::move(batch));
DCHECK(batch.empty());
- batch_start = reader->current_location();
- copy_and_reloc_handle->NotifyConcurrencyIncrease();
+ batch_size = 0;
+ job_handle->NotifyConcurrencyIncrease();
}
}
@@ -676,12 +690,11 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
if (!batch.empty()) {
reloc_queue.Add(std::move(batch));
- copy_and_reloc_handle->NotifyConcurrencyIncrease();
+ job_handle->NotifyConcurrencyIncrease();
}
// Wait for all tasks to finish, while participating in their work.
- copy_and_reloc_handle->Join();
- publish_handle->Join();
+ job_handle->Join();
return reader->current_size() == 0;
}
@@ -692,11 +705,16 @@ void NativeModuleDeserializer::ReadHeader(Reader* reader) {
DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
Reader* reader) {
- bool has_code = reader->Read<bool>();
- if (!has_code) {
- missing_functions_.push_back(fn_index);
+ uint8_t code_kind = reader->Read<uint8_t>();
+ if (code_kind == kLazyFunction) {
+ lazy_functions_.push_back(fn_index);
return {};
}
+ if (code_kind == kLiftoffFunction) {
+ liftoff_functions_.push_back(fn_index);
+ return {};
+ }
+
int constant_pool_offset = reader->Read<int>();
int safepoint_table_offset = reader->Read<int>();
int handler_table_offset = reader->Read<int>();
@@ -850,10 +868,13 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
auto shared_native_module = wasm_engine->MaybeGetNativeModule(
module->origin, owned_wire_bytes.as_vector(), isolate);
if (shared_native_module == nullptr) {
- const bool kIncludeLiftoff = false;
+ DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
+ const bool kIncludeLiftoff = dynamic_tiering == DynamicTiering::kDisabled;
size_t code_size_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(),
- kIncludeLiftoff);
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ module.get(), kIncludeLiftoff, dynamic_tiering);
shared_native_module = wasm_engine->NewNativeModule(
isolate, enabled_features, std::move(module), code_size_estimate);
// We have to assign a compilation ID here, as it is required for a
@@ -873,7 +894,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
return {};
}
shared_native_module->compilation_state()->InitializeAfterDeserialization(
- deserializer.missing_functions());
+ deserializer.lazy_functions(), deserializer.liftoff_functions());
wasm_engine->UpdateNativeModuleCache(error, &shared_native_module, isolate);
}
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index 83b1bbe462..bbd512296e 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -14,220 +14,21 @@ namespace wasm {
namespace {
-using CacheKey =
- std::tuple<uint32_t, uint32_t, const WasmModule*, const WasmModule*>;
-
-struct CacheKeyHasher {
- size_t operator()(CacheKey key) const {
- static constexpr size_t large_prime = 14887;
- return std::get<0>(key) + (std::get<1>(key) * large_prime) +
- (reinterpret_cast<size_t>(std::get<2>(key)) * large_prime *
- large_prime) +
- (reinterpret_cast<size_t>(std::get<3>(key)) * large_prime *
- large_prime * large_prime);
- }
-};
-
-class TypeJudgementCache {
- public:
- TypeJudgementCache()
- : zone_(new AccountingAllocator(), "type judgement zone"),
- subtyping_cache_(&zone_),
- type_equivalence_cache_(&zone_) {}
-
- static TypeJudgementCache* instance() {
- static base::LazyInstance<TypeJudgementCache>::type instance_ =
- LAZY_INSTANCE_INITIALIZER;
- return instance_.Pointer();
- }
-
- base::RecursiveMutex* type_cache_mutex() { return &type_cache_mutex_; }
- bool is_cached_subtype(uint32_t subtype, uint32_t supertype,
- const WasmModule* sub_module,
- const WasmModule* super_module) const {
- return subtyping_cache_.count(std::make_tuple(
- subtype, supertype, sub_module, super_module)) == 1;
- }
- void cache_subtype(uint32_t subtype, uint32_t supertype,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
- subtyping_cache_.emplace(subtype, supertype, sub_module, super_module);
- }
- void uncache_subtype(uint32_t subtype, uint32_t supertype,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
- subtyping_cache_.erase(
- std::make_tuple(subtype, supertype, sub_module, super_module));
- }
- bool is_cached_equivalent_type(uint32_t type1, uint32_t type2,
- const WasmModule* module1,
- const WasmModule* module2) const {
- if (type1 > type2) std::swap(type1, type2);
- if (reinterpret_cast<uintptr_t>(module1) >
- reinterpret_cast<uintptr_t>(module2)) {
- std::swap(module1, module2);
- }
- return type_equivalence_cache_.count(
- std::make_tuple(type1, type2, module1, module2)) == 1;
- }
- void cache_type_equivalence(uint32_t type1, uint32_t type2,
- const WasmModule* module1,
- const WasmModule* module2) {
- if (type1 > type2) std::swap(type1, type2);
- if (reinterpret_cast<uintptr_t>(module1) >
- reinterpret_cast<uintptr_t>(module2)) {
- std::swap(module1, module2);
- }
- type_equivalence_cache_.emplace(type1, type2, module1, module2);
- }
- void uncache_type_equivalence(uint32_t type1, uint32_t type2,
- const WasmModule* module1,
- const WasmModule* module2) {
- if (type1 > type2) std::swap(type1, type2);
- if (reinterpret_cast<uintptr_t>(module1) >
- reinterpret_cast<uintptr_t>(module2)) {
- std::swap(module1, module2);
- }
- type_equivalence_cache_.erase(
- std::make_tuple(type1, type2, module1, module2));
- }
- void delete_module(const WasmModule* module) {
- for (auto iterator = type_equivalence_cache_.begin();
- iterator != type_equivalence_cache_.end();) {
- if (std::get<2>(*iterator) == module ||
- std::get<3>(*iterator) == module) {
- iterator = type_equivalence_cache_.erase(iterator);
- } else {
- iterator++;
- }
- }
- for (auto iterator = subtyping_cache_.begin();
- iterator != subtyping_cache_.end();) {
- if (std::get<2>(*iterator) == module ||
- std::get<3>(*iterator) == module) {
- iterator = subtyping_cache_.erase(iterator);
- } else {
- iterator++;
- }
- }
- }
-
- private:
- Zone zone_;
- ZoneUnorderedSet<CacheKey, CacheKeyHasher>
- // Cache for discovered subtyping pairs.
- subtyping_cache_,
- // Cache for discovered equivalent type pairs.
- // Indexes and modules are stored in increasing order.
- type_equivalence_cache_;
- // The above two caches are used from background compile jobs, so they
- // must be protected from concurrent modifications:
- base::RecursiveMutex type_cache_mutex_;
-};
-
-bool ArrayEquivalentIndices(uint32_t type_index_1, uint32_t type_index_2,
- const WasmModule* module1,
- const WasmModule* module2) {
- const ArrayType* sub_array = module1->types[type_index_1].array_type;
- const ArrayType* super_array = module2->types[type_index_2].array_type;
- if (sub_array->mutability() != super_array->mutability()) return false;
-
- // Temporarily cache type equivalence for the recursive call.
- TypeJudgementCache::instance()->cache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- if (EquivalentTypes(sub_array->element_type(), super_array->element_type(),
- module1, module2)) {
- return true;
- } else {
- TypeJudgementCache::instance()->uncache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- // TODO(7748): Consider caching negative results as well.
- return false;
- }
-}
-
-bool StructEquivalentIndices(uint32_t type_index_1, uint32_t type_index_2,
- const WasmModule* module1,
- const WasmModule* module2) {
- const StructType* sub_struct = module1->types[type_index_1].struct_type;
- const StructType* super_struct = module2->types[type_index_2].struct_type;
-
- if (sub_struct->field_count() != super_struct->field_count()) {
- return false;
- }
-
- // Temporarily cache type equivalence for the recursive call.
- TypeJudgementCache::instance()->cache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- for (uint32_t i = 0; i < sub_struct->field_count(); i++) {
- if (sub_struct->mutability(i) != super_struct->mutability(i) ||
- !EquivalentTypes(sub_struct->field(i), super_struct->field(i), module1,
- module2)) {
- TypeJudgementCache::instance()->uncache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- return false;
- }
- }
- return true;
-}
-
-bool FunctionEquivalentIndices(uint32_t type_index_1, uint32_t type_index_2,
- const WasmModule* module1,
- const WasmModule* module2) {
- const FunctionSig* sig1 = module1->types[type_index_1].function_sig;
- const FunctionSig* sig2 = module2->types[type_index_2].function_sig;
-
- if (sig1->parameter_count() != sig2->parameter_count() ||
- sig1->return_count() != sig2->return_count()) {
- return false;
- }
-
- auto iter1 = sig1->all();
- auto iter2 = sig2->all();
-
- // Temporarily cache type equivalence for the recursive call.
- TypeJudgementCache::instance()->cache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- for (int i = 0; i < iter1.size(); i++) {
- if (!EquivalentTypes(iter1[i], iter2[i], module1, module2)) {
- TypeJudgementCache::instance()->uncache_type_equivalence(
- type_index_1, type_index_2, module1, module2);
- return false;
- }
- }
- return true;
-}
-
V8_INLINE bool EquivalentIndices(uint32_t index1, uint32_t index2,
const WasmModule* module1,
const WasmModule* module2) {
DCHECK(index1 != index2 || module1 != module2);
- uint8_t kind1 = module1->type_kinds[index1];
-
- if (kind1 != module2->type_kinds[index2]) return false;
-
- base::RecursiveMutexGuard type_cache_access(
- TypeJudgementCache::instance()->type_cache_mutex());
- if (TypeJudgementCache::instance()->is_cached_equivalent_type(
- index1, index2, module1, module2)) {
- return true;
- }
-
- if (kind1 == kWasmStructTypeCode) {
- return StructEquivalentIndices(index1, index2, module1, module2);
- } else if (kind1 == kWasmArrayTypeCode) {
- return ArrayEquivalentIndices(index1, index2, module1, module2);
- } else {
- DCHECK_EQ(kind1, kWasmFunctionTypeCode);
- return FunctionEquivalentIndices(index1, index2, module1, module2);
- }
+ // TODO(7748): Canonicalize types.
+ return false;
}
-} // namespace
+bool ValidStructSubtypeDefinition(uint32_t subtype_index,
+ uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
+ // TODO(7748): Figure out the cross-module story.
+ if (sub_module != super_module) return false;
-bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
const StructType* sub_struct = sub_module->types[subtype_index].struct_type;
const StructType* super_struct =
super_module->types[supertype_index].struct_type;
@@ -236,10 +37,6 @@ bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- if (!sub_module->has_supertype(subtype_index)) {
- TypeJudgementCache::instance()->cache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
- }
for (uint32_t i = 0; i < super_struct->field_count(); i++) {
bool sub_mut = sub_struct->mutability(i);
bool super_mut = super_struct->mutability(i);
@@ -249,48 +46,41 @@ bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
sub_module, super_module)) ||
(!sub_mut && !IsSubtypeOf(sub_struct->field(i), super_struct->field(i),
sub_module, super_module))) {
- TypeJudgementCache::instance()->uncache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
return false;
}
}
return true;
}
-bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
+bool ValidArraySubtypeDefinition(uint32_t subtype_index,
+ uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
+ // TODO(7748): Figure out the cross-module story.
+ if (sub_module != super_module) return false;
+
const ArrayType* sub_array = sub_module->types[subtype_index].array_type;
const ArrayType* super_array =
super_module->types[supertype_index].array_type;
bool sub_mut = sub_array->mutability();
bool super_mut = super_array->mutability();
- if (!sub_module->has_supertype(subtype_index)) {
- TypeJudgementCache::instance()->cache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
- }
- if (sub_mut != super_mut ||
- (sub_mut &&
- !EquivalentTypes(sub_array->element_type(), super_array->element_type(),
- sub_module, super_module)) ||
- (!sub_mut &&
- !IsSubtypeOf(sub_array->element_type(), super_array->element_type(),
- sub_module, super_module))) {
- TypeJudgementCache::instance()->uncache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
- return false;
- } else {
- return true;
- }
+
+ return (sub_mut && super_mut &&
+ EquivalentTypes(sub_array->element_type(),
+ super_array->element_type(), sub_module,
+ super_module)) ||
+ (!sub_mut && !super_mut &&
+ IsSubtypeOf(sub_array->element_type(), super_array->element_type(),
+ sub_module, super_module));
}
-bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
- if (!FLAG_experimental_wasm_gc) {
- return FunctionEquivalentIndices(subtype_index, supertype_index, sub_module,
- super_module);
- }
+bool ValidFunctionSubtypeDefinition(uint32_t subtype_index,
+ uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
+ // TODO(7748): Figure out the cross-module story.
+ if (sub_module != super_module) return false;
+
const FunctionSig* sub_func = sub_module->types[subtype_index].function_sig;
const FunctionSig* super_func =
super_module->types[supertype_index].function_sig;
@@ -300,17 +90,10 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- if (!sub_module->has_supertype(subtype_index)) {
- TypeJudgementCache::instance()->cache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
- }
-
for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
// Contravariance for params.
if (!IsSubtypeOf(super_func->parameters()[i], sub_func->parameters()[i],
super_module, sub_module)) {
- TypeJudgementCache::instance()->uncache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
return false;
}
}
@@ -318,8 +101,6 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
// Covariance for returns.
if (!IsSubtypeOf(sub_func->returns()[i], super_func->returns()[i],
sub_module, super_module)) {
- TypeJudgementCache::instance()->uncache_subtype(
- subtype_index, supertype_index, sub_module, super_module);
return false;
}
}
@@ -327,6 +108,27 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return true;
}
+} // namespace
+
+bool ValidSubtypeDefinition(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
+ TypeDefinition::Kind sub_kind = sub_module->types[subtype_index].kind;
+ TypeDefinition::Kind super_kind = super_module->types[supertype_index].kind;
+ if (sub_kind != super_kind) return false;
+ switch (sub_kind) {
+ case TypeDefinition::kFunction:
+ return ValidFunctionSubtypeDefinition(subtype_index, supertype_index,
+ sub_module, super_module);
+ case TypeDefinition::kStruct:
+ return ValidStructSubtypeDefinition(subtype_index, supertype_index,
+ sub_module, super_module);
+ case TypeDefinition::kArray:
+ return ValidArraySubtypeDefinition(subtype_index, supertype_index,
+ sub_module, super_module);
+ }
+}
+
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
ValueType subtype, ValueType supertype, const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -347,16 +149,6 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
return supertype.kind() == kRtt &&
EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
sub_module, super_module);
- case kRttWithDepth:
- return (supertype.kind() == kRtt &&
- ((sub_module == super_module &&
- subtype.ref_index() == supertype.ref_index()) ||
- EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
- sub_module, super_module))) ||
- (supertype.kind() == kRttWithDepth &&
- supertype.depth() == subtype.depth() &&
- EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
- sub_module, super_module));
case kRef:
case kOptRef:
break;
@@ -378,7 +170,9 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
switch (sub_heap.representation()) {
case HeapType::kFunc:
- case HeapType::kExtern:
+ // funcref is a subtype of anyref (aka externref) under wasm-gc.
+ return sub_heap == super_heap ||
+ (FLAG_experimental_wasm_gc && super_heap == HeapType::kAny);
case HeapType::kEq:
return sub_heap == super_heap || super_heap == HeapType::kAny;
case HeapType::kAny:
@@ -387,6 +181,9 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
case HeapType::kData:
return super_heap == sub_heap || super_heap == HeapType::kEq ||
super_heap == HeapType::kAny;
+ case HeapType::kArray:
+ return super_heap == HeapType::kArray || super_heap == HeapType::kData ||
+ super_heap == HeapType::kEq || super_heap == HeapType::kAny;
case HeapType::kBottom:
UNREACHABLE();
default:
@@ -403,7 +200,8 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
case HeapType::kEq:
case HeapType::kData:
return !sub_module->has_signature(sub_index);
- case HeapType::kExtern:
+ case HeapType::kArray:
+ return sub_module->has_array(sub_index);
case HeapType::kI31:
return false;
case HeapType::kAny:
@@ -421,48 +219,15 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
// equality; here we catch (ref $x) being a subtype of (ref null $x).
if (sub_module == super_module && sub_index == super_index) return true;
- uint8_t sub_kind = sub_module->type_kinds[sub_index];
-
- if (sub_kind != super_module->type_kinds[super_index]) return false;
+ // TODO(7748): Figure out cross-module story.
+ if (sub_module != super_module) return false;
- // Types with explicit supertypes just check those.
- if (sub_module->has_supertype(sub_index)) {
- // TODO(7748): Figure out cross-module story.
- if (sub_module != super_module) return false;
-
- uint32_t explicit_super = sub_module->supertype(sub_index);
- while (true) {
- if (explicit_super == super_index) return true;
- // Reached the end of the explicitly defined inheritance chain.
- if (explicit_super == kGenericSuperType) return false;
- // Types without explicit supertype can't occur here, they would have
- // failed validation.
- DCHECK_NE(explicit_super, kNoSuperType);
- explicit_super = sub_module->supertype(explicit_super);
- }
- } else {
- // A structural type (without explicit supertype) is never a subtype of
- // a nominal type (with explicit supertype).
- if (super_module->has_supertype(super_index)) return false;
- }
-
- // Accessing the caches for subtyping and equivalence from multiple background
- // threads is protected by a lock.
- base::RecursiveMutexGuard type_cache_access(
- TypeJudgementCache::instance()->type_cache_mutex());
- if (TypeJudgementCache::instance()->is_cached_subtype(
- sub_index, super_index, sub_module, super_module)) {
- return true;
- }
-
- if (sub_kind == kWasmStructTypeCode) {
- return StructIsSubtypeOf(sub_index, super_index, sub_module, super_module);
- } else if (sub_kind == kWasmArrayTypeCode) {
- return ArrayIsSubtypeOf(sub_index, super_index, sub_module, super_module);
- } else {
- DCHECK_EQ(sub_kind, kWasmFunctionTypeCode);
- return FunctionIsSubtypeOf(sub_index, super_index, sub_module,
- super_module);
+ uint32_t explicit_super = sub_module->supertype(sub_index);
+ while (true) {
+ if (explicit_super == super_index) return true;
+ // Reached the end of the explicitly defined inheritance chain.
+ if (explicit_super == kNoSuperType) return false;
+ explicit_super = sub_module->supertype(explicit_super);
}
}
@@ -476,9 +241,6 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
DCHECK(type1.has_index() && type2.has_index() &&
(type1 != type2 || module1 != module2));
- DCHECK_IMPLIES(type1.has_depth(), type2.has_depth()); // Due to 'if' above.
- if (type1.has_depth() && type1.depth() != type2.depth()) return false;
-
DCHECK(type1.has_index() && module1->has_type(type1.ref_index()) &&
type2.has_index() && module2->has_type(type2.ref_index()));
@@ -486,14 +248,6 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
module2);
}
-void DeleteCachedTypeJudgementsForModule(const WasmModule* module) {
- // Accessing the caches for subtyping and equivalence from multiple background
- // threads is protected by a lock.
- base::RecursiveMutexGuard type_cache_access(
- TypeJudgementCache::instance()->type_cache_mutex());
- TypeJudgementCache::instance()->delete_module(module);
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 53232ca2c2..76dff87e24 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -23,19 +23,12 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
// Checks if type1, defined in module1, is equivalent with type2, defined in
// module2.
-// Type equivalence (~) is described by the following rules (structural
-// equivalence):
+// Type equivalence (~) is described by the following rules:
// - Two numeric types are equivalent iff they are equal.
-// - optref(ht1) ~ optref(ht2) iff ht1 ~ ht2.
-// - ref(ht1) ~ ref(ht2) iff ht1 ~ ht2.
-// - rtt(d1, ht1) ~ rtt(d2, ht2) iff (d1 = d2 and ht1 ~ ht2).
-// For heap types, the following rules hold:
-// - Two generic heap types are equivalent iff they are equal.
-// - Two structs are equivalent iff they contain the same number of fields and
-// these are pairwise equivalent.
-// - Two functions are equivalent iff they contain the same number of parameters
-// and returns and these are pairwise equivalent.
-// - Two arrays are equivalent iff their underlying types are equivalent.
+// - T(ht1) ~ T(ht2) iff ht1 ~ ht2 for T in {ref, optref, rtt}.
+// Equivalence of heap types ht1 ~ ht2 is defined as follows:
+// - Two heap types are equivalent iff they are equal.
+// - TODO(7748): Implement iso-recursive canonicalization.
V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
const WasmModule* module1,
const WasmModule* module2);
@@ -55,15 +48,13 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
// eq func extern
// / \
// i31 data
-// - All structs and arrays are subtypes of data.
+// |
+// array
// - All functions are subtypes of func.
-// - Struct subtyping: Subtype must have at least as many fields as supertype,
-// covariance for immutable fields, equivalence for mutable fields.
-// - Array subtyping (mutable only) is the equivalence relation.
-// - Function subtyping depends on the enabled wasm features: if
-// --experimental-wasm-gc is enabled, then subtyping is computed
-// contravariantly for parameter types and covariantly for return types.
-// Otherwise, the subtyping relation is the equivalence relation.
+// - All structs are subtypes of data.
+// - All arrays are subtypes of array.
+// - An indexed heap type h1 is a subtype of indexed heap type h2 if h2 is
+// transitively an explicit supertype of h1.
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -80,37 +71,28 @@ V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
}
// We have this function call IsSubtypeOf instead of the opposite because type
-// checks are much more common than heap type checks.}
-V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index,
+// checks are much more common than heap type checks.
+V8_INLINE bool IsHeapSubtypeOf(HeapType::Representation subtype,
HeapType::Representation supertype,
const WasmModule* module) {
- return IsSubtypeOf(ValueType::Ref(subtype_index, kNonNullable),
+ return IsSubtypeOf(ValueType::Ref(subtype, kNonNullable),
ValueType::Ref(supertype, kNonNullable), module);
}
-V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* module) {
- return IsSubtypeOf(ValueType::Ref(subtype_index, kNonNullable),
- ValueType::Ref(supertype_index, kNonNullable), module);
-}
-
-// Call this function in {module}'s destructor to avoid spurious cache hits in
-// case another WasmModule gets allocated in the same address later.
-void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
-
-// Checks whether {subtype_index} is a legal subtype of {supertype_index}.
-// These are the same checks that {IsSubtypeOf} uses for comparing types without
-// explicitly given supertypes; for validating such explicit supertypes they
-// can be called directly.
-bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module);
-bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module);
-bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
- const WasmModule* sub_module,
- const WasmModule* super_module);
+// Checks whether {subtype_index} is valid as a declared subtype of
+// {supertype_index}.
+// - Both type must be of the same kind (function, struct, or array).
+// - Structs: Subtype must have at least as many fields as supertype,
+// covariance for respective immutable fields, equivalence for respective
+// mutable fields.
+// - Arrays: subtyping of respective element types for immutable arrays,
+// equivalence of element types for mutable arrays.
+// - Functions: equal number of parameter and return types. Contravariance for
+// respective parameter types, covariance for respective return types.
+V8_EXPORT_PRIVATE bool ValidSubtypeDefinition(uint32_t subtype_index,
+ uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index b2e5aca74d..1192da1bea 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -202,7 +202,6 @@ class WasmValue {
case kOptRef:
case kRef:
case kRtt:
- case kRttWithDepth:
return "Handle [" + std::to_string(to_ref().address()) + "]";
case kVoid:
case kBottom:
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 2e52583a1f..09aaf7fcc6 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -41,9 +41,9 @@ void WebSnapshotSerializerDeserializer::Throw(const char* message) {
}
error_message_ = message;
if (!isolate_->has_pending_exception()) {
- isolate_->Throw(*isolate_->factory()->NewError(
+ isolate_->Throw(*factory()->NewError(
MessageTemplate::kWebSnapshotError,
- isolate_->factory()->NewStringFromAsciiChecked(error_message_)));
+ factory()->NewStringFromAsciiChecked(error_message_)));
}
}
@@ -184,15 +184,18 @@ uint32_t WebSnapshotSerializerDeserializer::AttributesToFlags(
PropertyAttributes WebSnapshotSerializerDeserializer::FlagsToAttributes(
uint32_t flags) {
- uint32_t attributes = ReadOnlyBitField::decode(flags) * READ_ONLY +
- !ConfigurableBitField::decode(flags) * DONT_DELETE +
- !EnumerableBitField::decode(flags) * DONT_ENUM;
- return static_cast<PropertyAttributes>(attributes);
+ int attributes = ReadOnlyBitField::decode(flags) * READ_ONLY +
+ !ConfigurableBitField::decode(flags) * DONT_DELETE +
+ !EnumerableBitField::decode(flags) * DONT_ENUM;
+ return PropertyAttributesFromInt(attributes);
}
WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
- : WebSnapshotSerializerDeserializer(
- reinterpret_cast<v8::internal::Isolate*>(isolate)),
+ : WebSnapshotSerializer(reinterpret_cast<v8::internal::Isolate*>(isolate)) {
+}
+
+WebSnapshotSerializer::WebSnapshotSerializer(Isolate* isolate)
+ : WebSnapshotSerializerDeserializer(isolate),
string_serializer_(isolate_, nullptr),
map_serializer_(isolate_, nullptr),
context_serializer_(isolate_, nullptr),
@@ -201,16 +204,55 @@ WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
array_serializer_(isolate_, nullptr),
object_serializer_(isolate_, nullptr),
export_serializer_(isolate_, nullptr),
+ external_objects_ids_(isolate_->heap()),
string_ids_(isolate_->heap()),
map_ids_(isolate_->heap()),
context_ids_(isolate_->heap()),
function_ids_(isolate_->heap()),
class_ids_(isolate_->heap()),
array_ids_(isolate_->heap()),
- object_ids_(isolate_->heap()) {}
+ object_ids_(isolate_->heap()),
+ all_strings_(isolate_->heap()) {
+ auto empty_array_list = factory()->empty_array_list();
+ contexts_ = empty_array_list;
+ functions_ = empty_array_list;
+ classes_ = empty_array_list;
+ arrays_ = empty_array_list;
+ objects_ = empty_array_list;
+ strings_ = empty_array_list;
+ maps_ = empty_array_list;
+}
WebSnapshotSerializer::~WebSnapshotSerializer() {}
+bool WebSnapshotSerializer::TakeSnapshot(
+ Handle<Object> object, MaybeHandle<FixedArray> maybe_externals,
+ WebSnapshotData& data_out) {
+ if (string_ids_.size() > 0) {
+ Throw("Can't reuse WebSnapshotSerializer");
+ return false;
+ }
+ if (!maybe_externals.is_null()) {
+ ShallowDiscoverExternals(*maybe_externals.ToHandleChecked());
+ }
+
+ if (object->IsHeapObject()) Discover(Handle<HeapObject>::cast(object));
+
+ ConstructSource();
+ // The export is serialized with the empty string as name; we need to
+ // "discover" the name here.
+ DiscoverString(factory()->empty_string());
+ SerializeExport(object, factory()->empty_string());
+
+ WriteSnapshot(data_out.buffer, data_out.buffer_size);
+
+ if (has_error()) {
+ isolate_->ReportPendingMessages();
+ return false;
+ }
+ return true;
+}
+
bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
v8::Local<v8::PrimitiveArray> exports,
WebSnapshotData& data_out) {
@@ -220,20 +262,16 @@ bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
}
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- contexts_ = ArrayList::New(isolate_, 30);
- functions_ = ArrayList::New(isolate_, 30);
- classes_ = ArrayList::New(isolate_, 30);
- arrays_ = ArrayList::New(isolate_, 30);
- objects_ = ArrayList::New(isolate_, 30);
-
std::unique_ptr<Handle<JSObject>[]> export_objects(
new Handle<JSObject>[exports->Length()]);
for (int i = 0, length = exports->Length(); i < length; ++i) {
v8::Local<v8::String> str =
exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
- if (str.IsEmpty()) {
+ if (str->Length() == 0) {
continue;
}
+ // Discover the export name.
+ DiscoverString(Handle<String>::cast(Utils::OpenHandle(*str)));
v8::ScriptCompiler::Source source(str);
auto script = ScriptCompiler::Compile(context, &source).ToLocalChecked();
v8::MaybeLocal<v8::Value> script_result = script->Run(context);
@@ -244,14 +282,18 @@ bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
Throw("Exported object not found");
return false;
}
-
export_objects[i] = Handle<JSObject>::cast(Utils::OpenHandle(*v8_object));
- Discovery(export_objects[i]);
+ Discover(export_objects[i]);
}
+ ConstructSource();
+
for (int i = 0, length = exports->Length(); i < length; ++i) {
v8::Local<v8::String> str =
exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
+ if (str->Length() == 0) {
+ continue;
+ }
SerializeExport(export_objects[i],
Handle<String>::cast(Utils::OpenHandle(*str)));
}
@@ -266,32 +308,46 @@ bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
}
void WebSnapshotSerializer::SerializePendingItems() {
- for (int i = 0; i < contexts_->Length(); ++i) {
+ // The information about string reference counts is now complete. The strings
+ // in strings_ are not in place and can be serialized now. The in-place
+ // strings will be serialized as part of their respective objects.
+ for (int i = 0; i < strings_->Length(); ++i) {
+ Handle<String> string = handle(String::cast(strings_->Get(i)), isolate_);
+ SerializeString(string, string_serializer_);
+ }
+
+ for (int i = 0; i < maps_->Length(); ++i) {
+ Handle<Map> map = handle(Map::cast(maps_->Get(i)), isolate_);
+ SerializeMap(map);
+ }
+
+ // Serialize the items in the reverse order. The items at the end of the
+ // contexts_ etc get lower IDs and vice versa. IDs which items use for
+ // referring to each other are reversed by Get<item>Id functions().
+ for (int i = contexts_->Length() - 1; i >= 0; --i) {
Handle<Context> context =
handle(Context::cast(contexts_->Get(i)), isolate_);
SerializeContext(context);
}
- for (int i = 0; i < functions_->Length(); ++i) {
+ for (int i = functions_->Length() - 1; i >= 0; --i) {
Handle<JSFunction> function =
handle(JSFunction::cast(functions_->Get(i)), isolate_);
SerializeFunction(function);
}
- for (int i = 0; i < classes_->Length(); ++i) {
+ for (int i = classes_->Length() - 1; i >= 0; --i) {
Handle<JSFunction> function =
handle(JSFunction::cast(classes_->Get(i)), isolate_);
SerializeClass(function);
}
- for (int i = 0; i < arrays_->Length(); ++i) {
+ for (int i = arrays_->Length() - 1; i >= 0; --i) {
Handle<JSArray> array = handle(JSArray::cast(arrays_->Get(i)), isolate_);
SerializeArray(array);
}
- for (int i = 0; i < objects_->Length(); ++i) {
+ for (int i = objects_->Length() - 1; i >= 0; --i) {
Handle<JSObject> object =
handle(JSObject::cast(objects_->Get(i)), isolate_);
SerializeObject(object);
}
- // Maps and strings get serialized when they're encountered; we don't need to
- // serialize them explicitly.
}
// Format (full snapshot):
@@ -316,6 +372,9 @@ void WebSnapshotSerializer::SerializePendingItems() {
// - Serialized export
void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
size_t& buffer_size) {
+ if (has_error()) {
+ return;
+ }
SerializePendingItems();
ValueSerializer total_serializer(isolate_, nullptr);
@@ -329,31 +388,18 @@ void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
Throw("Out of memory");
return;
}
+
total_serializer.WriteRawBytes(kMagicNumber, 4);
- total_serializer.WriteUint32(static_cast<uint32_t>(string_count()));
- total_serializer.WriteRawBytes(string_serializer_.buffer_,
- string_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(map_count()));
- total_serializer.WriteRawBytes(map_serializer_.buffer_,
- map_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(context_count()));
- total_serializer.WriteRawBytes(context_serializer_.buffer_,
- context_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(function_count()));
- total_serializer.WriteRawBytes(function_serializer_.buffer_,
- function_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(array_count()));
- total_serializer.WriteRawBytes(array_serializer_.buffer_,
- array_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(object_count()));
- total_serializer.WriteRawBytes(object_serializer_.buffer_,
- object_serializer_.buffer_size_);
- total_serializer.WriteUint32(static_cast<uint32_t>(class_count()));
- total_serializer.WriteRawBytes(class_serializer_.buffer_,
- class_serializer_.buffer_size_);
- total_serializer.WriteUint32(export_count_);
- total_serializer.WriteRawBytes(export_serializer_.buffer_,
- export_serializer_.buffer_size_);
+ WriteObjects(total_serializer, string_count(), string_serializer_, "strings");
+ WriteObjects(total_serializer, map_count(), map_serializer_, "maps");
+ WriteObjects(total_serializer, context_count(), context_serializer_,
+ "contexts");
+ WriteObjects(total_serializer, function_count(), function_serializer_,
+ "functions");
+ WriteObjects(total_serializer, array_count(), array_serializer_, "arrays");
+ WriteObjects(total_serializer, object_count(), object_serializer_, "objects");
+ WriteObjects(total_serializer, class_count(), class_serializer_, "classes");
+ WriteObjects(total_serializer, export_count_, export_serializer_, "exports");
if (has_error()) {
return;
@@ -363,17 +409,26 @@ void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
buffer = result.first;
buffer_size = result.second;
}
+void WebSnapshotSerializer::WriteObjects(ValueSerializer& destination,
+ size_t count, ValueSerializer& source,
+ const char* name) {
+ if (count > std::numeric_limits<uint32_t>::max()) {
+ Throw("Too many objects");
+ return;
+ }
+ destination.WriteUint32(static_cast<uint32_t>(count));
+ destination.WriteRawBytes(source.buffer_, source.buffer_size_);
+}
bool WebSnapshotSerializer::InsertIntoIndexMap(ObjectCacheIndexMap& map,
- Handle<HeapObject> object,
+ HeapObject heap_object,
uint32_t& id) {
- if (static_cast<uint32_t>(map.size()) >=
- std::numeric_limits<uint32_t>::max()) {
- Throw("Too many objects");
+ DisallowGarbageCollection no_gc;
+ int index_out;
+ if (external_objects_ids_.Lookup(heap_object, &index_out)) {
return true;
}
- int index_out;
- bool found = map.LookupOrInsert(object, &index_out);
+ bool found = map.LookupOrInsert(heap_object, &index_out);
id = static_cast<uint32_t>(index_out);
return found;
}
@@ -382,55 +437,45 @@ bool WebSnapshotSerializer::InsertIntoIndexMap(ObjectCacheIndexMap& map,
// - Length
// - Raw bytes (data)
void WebSnapshotSerializer::SerializeString(Handle<String> string,
- uint32_t& id) {
- if (InsertIntoIndexMap(string_ids_, string, id)) {
- return;
- }
-
- // TODO(v8:11525): Always write strings as UTF-8.
- string = String::Flatten(isolate_, string);
+ ValueSerializer& serializer) {
DisallowGarbageCollection no_gc;
String::FlatContent flat = string->GetFlatContent(no_gc);
DCHECK(flat.IsFlat());
if (flat.IsOneByte()) {
base::Vector<const uint8_t> chars = flat.ToOneByteVector();
- string_serializer_.WriteUint32(chars.length());
- string_serializer_.WriteRawBytes(chars.begin(),
- chars.length() * sizeof(uint8_t));
+ serializer.WriteUint32(chars.length());
+ serializer.WriteRawBytes(chars.begin(), chars.length() * sizeof(uint8_t));
} else if (flat.IsTwoByte()) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
v8::Local<v8::String> api_string = Utils::ToLocal(string);
int length = api_string->Utf8Length(v8_isolate);
std::unique_ptr<char[]> buffer(new char[length]);
api_string->WriteUtf8(v8_isolate, buffer.get(), length);
- string_serializer_.WriteUint32(length);
- string_serializer_.WriteRawBytes(buffer.get(), length * sizeof(uint8_t));
+ serializer.WriteUint32(length);
+ serializer.WriteRawBytes(buffer.get(), length * sizeof(uint8_t));
} else {
UNREACHABLE();
}
}
// Format (serialized shape):
+// - PropertyAttributesType
+// - 0 if the __proto__ is Object.prototype, 1 + object id for the __proto__
+// otherwise
// - Property count
// - For each property
// - String id (name)
-void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
- if (InsertIntoIndexMap(map_ids_, map, id)) {
- return;
- }
-
+// - If the PropertyAttributesType is CUSTOM: attributes
+void WebSnapshotSerializer::SerializeMap(Handle<Map> map) {
int first_custom_index = -1;
- std::vector<uint32_t> string_ids;
+ std::vector<Handle<String>> keys;
std::vector<uint32_t> attributes;
- string_ids.reserve(map->NumberOfOwnDescriptors());
+ keys.reserve(map->NumberOfOwnDescriptors());
attributes.reserve(map->NumberOfOwnDescriptors());
for (InternalIndex i : map->IterateOwnDescriptors()) {
Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
isolate_);
- if (!key->IsString()) {
- Throw("Key is not a string");
- return;
- }
+ keys.push_back(Handle<String>::cast(key));
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
@@ -444,19 +489,30 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
if (first_custom_index == -1) first_custom_index = i.as_int();
attributes.push_back(AttributesToFlags(details));
}
-
- uint32_t string_id = 0;
- SerializeString(Handle<String>::cast(key), string_id);
- string_ids.push_back(string_id);
}
map_serializer_.WriteUint32(first_custom_index == -1
? PropertyAttributesType::DEFAULT
: PropertyAttributesType::CUSTOM);
- map_serializer_.WriteUint32(static_cast<uint32_t>(string_ids.size()));
+
+ if (map->prototype() ==
+ isolate_->native_context()->initial_object_prototype()) {
+ map_serializer_.WriteUint32(0);
+ } else {
+ // TODO(v8:11525): Support non-JSObject prototypes, at least null. Recognize
+ // well-known objects to that we don't end up encoding them in the snapshot.
+ if (!map->prototype().IsJSObject()) {
+ Throw("Non-JSObject __proto__s not supported");
+ return;
+ }
+ uint32_t prototype_id = GetObjectId(JSObject::cast(map->prototype()));
+ map_serializer_.WriteUint32(prototype_id + 1);
+ }
+
+ map_serializer_.WriteUint32(static_cast<uint32_t>(keys.size()));
uint32_t default_flags = GetDefaultAttributeFlags();
- for (size_t i = 0; i < string_ids.size(); ++i) {
+ for (size_t i = 0; i < keys.size(); ++i) {
if (first_custom_index >= 0) {
if (static_cast<int>(i) < first_custom_index) {
map_serializer_.WriteUint32(default_flags);
@@ -464,25 +520,59 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
map_serializer_.WriteUint32(attributes[i - first_custom_index]);
}
}
- map_serializer_.WriteUint32(string_ids[i]);
+ WriteStringId(keys[i], map_serializer_);
}
}
-void WebSnapshotSerializer::SerializeSource(ValueSerializer* serializer,
- Handle<JSFunction> function) {
- // TODO(v8:11525): Don't write the full source but instead, a set of minimal
- // snippets which cover the serialized functions.
- Handle<String> full_source(
- String::cast(Script::cast(function->shared().script()).source()),
- isolate_);
- uint32_t source_id = 0;
- SerializeString(full_source, source_id);
- serializer->WriteUint32(source_id);
+// Construct the minimal source string to be included in the snapshot. Maintain
+// the "inner function is textually inside its outer function" relationship.
+// Example:
+// Input:
+// Full source: abcdefghijklmnopqrstuvwxyzåäö
+// Functions: 11111111 22222222 3
+// Inner functions: 44 55 666
+// Output:
+// Constructed source: defghijkstuvwxyzö
+// Functions: 11111111222222223
+// Inner functions 44 55 666
+void WebSnapshotSerializer::ConstructSource() {
+ if (source_intervals_.empty()) {
+ return;
+ }
- int start = function->shared().StartPosition();
- serializer->WriteUint32(start);
- int end = function->shared().EndPosition();
- serializer->WriteUint32(end - start);
+ Handle<String> source_string = factory()->empty_string();
+ int current_interval_start = 0;
+ int current_interval_end = 0;
+ for (const auto& interval : source_intervals_) {
+ DCHECK_LE(current_interval_start, interval.first); // Iterated in order.
+ DCHECK_LE(interval.first, interval.second);
+ if (interval.second <= current_interval_end) {
+ // This interval is fully within the current interval. We don't need to
+ // include any new source code, just record the position conversion.
+ auto offset_within_parent = interval.first - current_interval_start;
+ source_offset_to_compacted_source_offset_[interval.first] =
+ source_offset_to_compacted_source_offset_[current_interval_start] +
+ offset_within_parent;
+ continue;
+ }
+ // Start a new interval.
+ current_interval_start = interval.first;
+ current_interval_end = interval.second;
+ source_offset_to_compacted_source_offset_[current_interval_start] =
+ source_string->length();
+ MaybeHandle<String> new_source_string = factory()->NewConsString(
+ source_string,
+ factory()->NewSubString(full_source_, current_interval_start,
+ current_interval_end));
+ if (!new_source_string.ToHandle(&source_string)) {
+ Throw("Cannot construct source string");
+ return;
+ }
+ }
+ DiscoverString(source_string);
+ bool in_place = false;
+ source_id_ = GetStringId(source_string, in_place);
+ DCHECK(!in_place);
}
void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
@@ -504,7 +594,12 @@ void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
}
}
- SerializeSource(serializer, function);
+ serializer->WriteUint32(source_id_);
+ int start = function->shared().StartPosition();
+ int end = function->shared().EndPosition();
+ serializer->WriteUint32(source_offset_to_compacted_source_offset_[start]);
+ serializer->WriteUint32(end - start);
+
serializer->WriteUint32(
function->shared().internal_formal_parameter_count_without_receiver());
serializer->WriteUint32(
@@ -520,54 +615,126 @@ void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
}
}
-void WebSnapshotSerializer::Discovery(Handle<Object> start_object) {
+void WebSnapshotSerializer::ShallowDiscoverExternals(FixedArray externals) {
+ DisallowGarbageCollection no_gc;
+ for (int i = 0; i < externals.length(); i++) {
+ Object object = externals.get(i);
+ if (!object.IsHeapObject()) continue;
+ uint32_t unused_id = 0;
+ InsertIntoIndexMap(external_objects_ids_, HeapObject::cast(object),
+ unused_id);
+ }
+}
+
+void WebSnapshotSerializer::Discover(Handle<HeapObject> start_object) {
// The object discovery phase assigns IDs for objects / functions / classes /
// arrays and discovers outgoing references from them. This is needed so that
// e.g., we know all functions upfront and can construct the source code that
// covers them before serializing the functions.
- // TODO(v8:11525): Serialize leaf objects first.
-
discovery_queue_.push(start_object);
while (!discovery_queue_.empty()) {
- const Handle<Object>& object = discovery_queue_.front();
- if (object->IsHeapObject()) {
- switch (HeapObject::cast(*object).map().instance_type()) {
- case JS_FUNCTION_TYPE:
- DiscoverFunction(Handle<JSFunction>::cast(object));
- break;
- case JS_CLASS_CONSTRUCTOR_TYPE:
- DiscoverClass(Handle<JSFunction>::cast(object));
- break;
- case JS_OBJECT_TYPE:
- DiscoverObject(Handle<JSObject>::cast(object));
- break;
- case JS_ARRAY_TYPE:
- DiscoverArray(Handle<JSArray>::cast(object));
- break;
- case ODDBALL_TYPE:
- case HEAP_NUMBER_TYPE:
- case JS_PRIMITIVE_WRAPPER_TYPE:
- case JS_REG_EXP_TYPE:
- // Can't contain references to other objects.
- break;
- default:
- if (object->IsString()) {
- // Can't contain references to other objects.
- break;
- } else {
- Throw("Unsupported object");
- }
+ const Handle<HeapObject>& object = discovery_queue_.front();
+ switch (object->map().instance_type()) {
+ case JS_FUNCTION_TYPE:
+ DiscoverFunction(Handle<JSFunction>::cast(object));
+ break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
+ DiscoverClass(Handle<JSFunction>::cast(object));
+ break;
+ case JS_OBJECT_TYPE:
+ DiscoverObject(Handle<JSObject>::cast(object));
+ break;
+ case JS_ARRAY_TYPE:
+ DiscoverArray(Handle<JSArray>::cast(object));
+ break;
+ case ODDBALL_TYPE:
+ case HEAP_NUMBER_TYPE:
+ // Can't contain references to other objects.
+ break;
+ case JS_PRIMITIVE_WRAPPER_TYPE: {
+ Handle<JSPrimitiveWrapper> wrapper =
+ Handle<JSPrimitiveWrapper>::cast(object);
+ Handle<Object> value = handle(wrapper->value(), isolate_);
+ if (value->IsHeapObject()) {
+ discovery_queue_.push(Handle<HeapObject>::cast(value));
+ }
+ break;
+ }
+ case JS_REG_EXP_TYPE: {
+ Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
+ Handle<String> pattern = handle(regexp->source(), isolate_);
+ DiscoverString(pattern);
+ Handle<String> flags_string =
+ JSRegExp::StringFromFlags(isolate_, regexp->flags());
+ DiscoverString(flags_string);
+ break;
}
+ default:
+ if (object->IsString()) {
+ // These are array elements / object properties -> allow in place
+ // strings.
+ DiscoverString(Handle<String>::cast(object), AllowInPlace::Yes);
+ break;
+ } else if (external_objects_ids_.size() > 0) {
+ int unused_id;
+ external_objects_ids_.LookupOrInsert(*object, &unused_id);
+ } else {
+ Throw("Unsupported object");
+ }
}
discovery_queue_.pop();
}
}
+void WebSnapshotSerializer::DiscoverMap(Handle<Map> map) {
+ uint32_t id;
+ if (InsertIntoIndexMap(map_ids_, *map, id)) {
+ return;
+ }
+ DCHECK_EQ(id, maps_->Length());
+ maps_ = ArrayList::Add(isolate_, maps_, map);
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
+ if (!key->IsString()) {
+ Throw("Key is not a string");
+ return;
+ }
+ DiscoverString(Handle<String>::cast(key));
+ }
+}
+
+void WebSnapshotSerializer::DiscoverString(Handle<String> string,
+ AllowInPlace can_be_in_place) {
+ // Can't contain references to other objects. We only log the existence of the
+ // string itself. Internalize the strings so that we can properly track which
+ // String objects are the same string.
+ string = factory()->InternalizeString(string);
+ auto result = all_strings_.FindOrInsert(string);
+ if (can_be_in_place == AllowInPlace::Yes && !result.already_exists) {
+ // This is the only reference to the string so far. Don't generate and
+ // ID for it yet; only generate it when another reference to the string is
+ // found.
+ return;
+ }
+ // The string is referred to more than two places, or in-placing not allowed
+ // -> not a candidate for writing it in-place. Generate an ID for it.
+
+ // TODO(v8:11525): Allow in-place strings in more places. Heuristics for
+ // when to make them in place?
+ uint32_t id;
+ if (InsertIntoIndexMap(string_ids_, *string, id)) {
+ return;
+ }
+ DCHECK_EQ(id, strings_->Length());
+ strings_ = ArrayList::Add(isolate_, strings_, string);
+}
+
void WebSnapshotSerializer::DiscoverFunction(Handle<JSFunction> function) {
uint32_t id;
- if (InsertIntoIndexMap(function_ids_, function, id)) {
+ if (InsertIntoIndexMap(function_ids_, *function, id)) {
return;
}
@@ -575,11 +742,12 @@ void WebSnapshotSerializer::DiscoverFunction(Handle<JSFunction> function) {
functions_ = ArrayList::Add(isolate_, functions_, function);
DiscoverContextAndPrototype(function);
// TODO(v8:11525): Support properties in functions.
+ DiscoverSource(function);
}
void WebSnapshotSerializer::DiscoverClass(Handle<JSFunction> function) {
uint32_t id;
- if (InsertIntoIndexMap(class_ids_, function, id)) {
+ if (InsertIntoIndexMap(class_ids_, *function, id)) {
return;
}
@@ -589,6 +757,7 @@ void WebSnapshotSerializer::DiscoverClass(Handle<JSFunction> function) {
DiscoverContextAndPrototype(function);
// TODO(v8:11525): Support properties in classes.
// TODO(v8:11525): Support class members.
+ DiscoverSource(function);
}
void WebSnapshotSerializer::DiscoverContextAndPrototype(
@@ -612,38 +781,49 @@ void WebSnapshotSerializer::DiscoverContextAndPrototype(
}
void WebSnapshotSerializer::DiscoverContext(Handle<Context> context) {
- // Ensure that parent contexts get a lower ID.
- if (!context->previous().IsNativeContext() &&
- !context->previous().IsScriptContext()) {
- DiscoverContext(handle(context->previous(), isolate_));
- }
-
uint32_t id;
- if (InsertIntoIndexMap(context_ids_, context, id)) {
- return;
- }
+ if (InsertIntoIndexMap(context_ids_, *context, id)) return;
DCHECK_EQ(id, contexts_->Length());
contexts_ = ArrayList::Add(isolate_, contexts_, context);
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ Handle<ScopeInfo> scope_info = handle(context->scope_info(), isolate_);
int count = scope_info->ContextLocalCount();
for (int i = 0; i < count; ++i) {
// TODO(v8:11525): support parameters
// TODO(v8:11525): distinguish variable modes
- Handle<Object> value(context->get(scope_info->ContextHeaderLength() + i),
- isolate_);
- discovery_queue_.push(value);
+ Handle<String> name(scope_info->context_local_names(i), isolate_);
+ DiscoverString(name);
+ Object value = context->get(scope_info->ContextHeaderLength() + i);
+ if (!value.IsHeapObject()) continue;
+ discovery_queue_.push(handle(HeapObject::cast(value), isolate_));
+ }
+
+ if (!context->previous().IsNativeContext() &&
+ !context->previous().IsScriptContext()) {
+ DiscoverContext(handle(context->previous(), isolate_));
+ }
+}
+
+void WebSnapshotSerializer::DiscoverSource(Handle<JSFunction> function) {
+ source_intervals_.emplace(function->shared().StartPosition(),
+ function->shared().EndPosition());
+ Handle<String> function_script_source =
+ handle(String::cast(Script::cast(function->shared().script()).source()),
+ isolate_);
+ if (full_source_.is_null()) {
+ full_source_ = function_script_source;
+ } else if (!full_source_->Equals(*function_script_source)) {
+ Throw("Cannot include functions from multiple scripts");
}
}
void WebSnapshotSerializer::DiscoverArray(Handle<JSArray> array) {
uint32_t id;
- if (InsertIntoIndexMap(array_ids_, array, id)) {
+ if (InsertIntoIndexMap(array_ids_, *array, id)) {
return;
}
-
DCHECK_EQ(id, arrays_->Length());
arrays_ = ArrayList::Add(isolate_, arrays_, array);
@@ -654,19 +834,18 @@ void WebSnapshotSerializer::DiscoverArray(Handle<JSArray> array) {
return;
}
// TODO(v8:11525): Support sparse arrays & arrays with holes.
- uint32_t length = static_cast<uint32_t>(array->length().ToSmi().value());
- Handle<FixedArray> elements =
- handle(FixedArray::cast(array->elements()), isolate_);
- for (uint32_t i = 0; i < length; ++i) {
- discovery_queue_.push(handle(elements->get(i), isolate_));
+ DisallowGarbageCollection no_gc;
+ FixedArray elements = FixedArray::cast(array->elements());
+ for (int i = 0; i < elements.length(); ++i) {
+ Object object = elements.get(i);
+ if (!object.IsHeapObject()) continue;
+ discovery_queue_.push(handle(HeapObject::cast(object), isolate_));
}
}
void WebSnapshotSerializer::DiscoverObject(Handle<JSObject> object) {
uint32_t id;
- if (InsertIntoIndexMap(object_ids_, object, id)) {
- return;
- }
+ if (InsertIntoIndexMap(object_ids_, *object, id)) return;
DCHECK_EQ(id, objects_->Length());
objects_ = ArrayList::Add(isolate_, objects_, object);
@@ -674,15 +853,37 @@ void WebSnapshotSerializer::DiscoverObject(Handle<JSObject> object) {
// TODO(v8:11525): Support objects with so many properties that they can't be
// in fast mode.
JSObject::MigrateSlowToFast(object, 0, "Web snapshot");
+ if (!object->HasFastProperties()) {
+ Throw("Dictionary mode objects not supported");
+ }
Handle<Map> map(object->map(), isolate_);
+ DiscoverMap(map);
+
+ // Discover __proto__.
+ if (map->prototype() !=
+ isolate_->native_context()->initial_object_prototype()) {
+ discovery_queue_.push(handle(map->prototype(), isolate_));
+ }
+
+ // Discover property values.
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value =
- JSObject::FastPropertyAt(object, details.representation(), field_index);
- discovery_queue_.push(value);
+ Handle<Object> value = JSObject::FastPropertyAt(
+ isolate_, object, details.representation(), field_index);
+ if (!value->IsHeapObject()) continue;
+ discovery_queue_.push(Handle<HeapObject>::cast(value));
+ }
+
+ // Discover elements.
+ Handle<FixedArray> elements =
+ handle(FixedArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < elements->length(); ++i) {
+ Object object = elements->get(i);
+ if (!object.IsHeapObject()) continue;
+ discovery_queue_.push(handle(HeapObject::cast(object), isolate_));
}
}
@@ -748,9 +949,7 @@ void WebSnapshotSerializer::SerializeContext(Handle<Context> context) {
// TODO(v8:11525): support parameters
// TODO(v8:11525): distinguish variable modes
Handle<String> name(scope_info->context_local_names(i), isolate_);
- uint32_t string_id = 0;
- SerializeString(name, string_id);
- context_serializer_.WriteUint32(string_id);
+ WriteStringId(name, context_serializer_);
Handle<Object> value(context->get(scope_info->ContextHeaderLength() + i),
isolate_);
WriteValue(value, context_serializer_);
@@ -761,26 +960,53 @@ void WebSnapshotSerializer::SerializeContext(Handle<Context> context) {
// - Shape id
// - For each property:
// - Serialized value
+// - Max element index + 1 (or 0 if there are no elements)
+// - For each element:
+// - Index
+// - Serialized value
+// TODO(v8:11525): Support packed elements with a denser format.
void WebSnapshotSerializer::SerializeObject(Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate_);
- uint32_t map_id = 0;
- SerializeMap(map, map_id);
-
- if (*map != object->map()) {
- Throw("Map changed");
- return;
- }
-
+ uint32_t map_id = GetMapId(*map);
object_serializer_.WriteUint32(map_id);
+ // Properties.
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value =
- JSObject::FastPropertyAt(object, details.representation(), field_index);
+ Handle<Object> value = JSObject::FastPropertyAt(
+ isolate_, object, details.representation(), field_index);
WriteValue(value, object_serializer_);
}
+
+ // Elements.
+ ReadOnlyRoots roots(isolate_);
+ Handle<FixedArray> elements =
+ handle(FixedArray::cast(object->elements()), isolate_);
+ uint32_t max_element_index = 0;
+ for (int i = 0; i < elements->length(); ++i) {
+ DisallowGarbageCollection no_gc;
+ Object value = elements->get(i);
+ if (value != roots.the_hole_value()) {
+ if (i > static_cast<int>(max_element_index)) {
+ max_element_index = i;
+ }
+ }
+ }
+ if (max_element_index == 0) {
+ object_serializer_.WriteUint32(0);
+ } else {
+ object_serializer_.WriteUint32(max_element_index + 1);
+ }
+ for (int i = 0; i < elements->length(); ++i) {
+ Handle<Object> value = handle(elements->get(i), isolate_);
+ if (*value != roots.the_hole_value()) {
+ DCHECK_LE(i, max_element_index);
+ object_serializer_.WriteUint32(i);
+ WriteValue(value, object_serializer_);
+ }
+ }
}
// Format (serialized array):
@@ -807,17 +1033,14 @@ void WebSnapshotSerializer::SerializeArray(Handle<JSArray> array) {
// Format (serialized export):
// - String id (export name)
// - Serialized value (export value)
-void WebSnapshotSerializer::SerializeExport(Handle<JSObject> object,
+void WebSnapshotSerializer::SerializeExport(Handle<Object> object,
Handle<String> export_name) {
++export_count_;
- uint32_t string_id = 0;
- SerializeString(export_name, string_id);
- export_serializer_.WriteUint32(string_id);
+ WriteStringId(export_name, export_serializer_);
if (object->IsJSPrimitiveWrapper()) {
Handle<JSPrimitiveWrapper> wrapper =
Handle<JSPrimitiveWrapper>::cast(object);
- Handle<Object> export_value =
- handle(JSPrimitiveWrapper::cast(*wrapper).value(), isolate_);
+ Handle<Object> export_value = handle(wrapper->value(), isolate_);
WriteValue(export_value, export_serializer_);
} else {
WriteValue(object, export_serializer_);
@@ -829,17 +1052,24 @@ void WebSnapshotSerializer::SerializeExport(Handle<JSObject> object,
// - Value or id (interpretation depends on the type)
void WebSnapshotSerializer::WriteValue(Handle<Object> object,
ValueSerializer& serializer) {
- uint32_t id = 0;
if (object->IsSmi()) {
serializer.WriteUint32(ValueType::INTEGER);
serializer.WriteZigZag<int32_t>(Smi::cast(*object).value());
return;
}
+ int external_id;
+ if (external_objects_ids_.Lookup(HeapObject::cast(*object), &external_id)) {
+ serializer.WriteUint32(ValueType::EXTERNAL_ID);
+ serializer.WriteUint32(static_cast<uint32_t>(external_id));
+ return;
+ }
+
DCHECK(object->IsHeapObject());
- switch (HeapObject::cast(*object).map().instance_type()) {
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ switch ((*heap_object).map().instance_type()) {
case ODDBALL_TYPE:
- switch (Oddball::cast(*object).kind()) {
+ switch (Oddball::cast(*heap_object).kind()) {
case Oddball::kFalse:
serializer.WriteUint32(ValueType::FALSE_CONSTANT);
return;
@@ -858,46 +1088,42 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
case HEAP_NUMBER_TYPE:
// TODO(v8:11525): Handle possible endianness mismatch.
serializer.WriteUint32(ValueType::DOUBLE);
- serializer.WriteDouble(HeapNumber::cast(*object).value());
+ serializer.WriteDouble(HeapNumber::cast(*heap_object).value());
break;
case JS_FUNCTION_TYPE:
serializer.WriteUint32(ValueType::FUNCTION_ID);
- serializer.WriteUint32(GetFunctionId(JSFunction::cast(*object)));
+ serializer.WriteUint32(GetFunctionId(JSFunction::cast(*heap_object)));
break;
case JS_CLASS_CONSTRUCTOR_TYPE:
serializer.WriteUint32(ValueType::CLASS_ID);
- serializer.WriteUint32(GetClassId(JSFunction::cast(*object)));
+ serializer.WriteUint32(GetClassId(JSFunction::cast(*heap_object)));
break;
case JS_OBJECT_TYPE:
serializer.WriteUint32(ValueType::OBJECT_ID);
- serializer.WriteUint32(GetObjectId(JSObject::cast(*object)));
+ serializer.WriteUint32(GetObjectId(JSObject::cast(*heap_object)));
break;
case JS_ARRAY_TYPE:
serializer.WriteUint32(ValueType::ARRAY_ID);
- serializer.WriteUint32(GetArrayId(JSArray::cast(*object)));
+ serializer.WriteUint32(GetArrayId(JSArray::cast(*heap_object)));
break;
case JS_REG_EXP_TYPE: {
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
+ Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(heap_object);
if (regexp->map() != isolate_->regexp_function()->initial_map()) {
Throw("Unsupported RegExp map");
return;
}
- uint32_t pattern_id, flags_id;
+ serializer.WriteUint32(ValueType::REGEXP);
Handle<String> pattern = handle(regexp->source(), isolate_);
+ WriteStringId(pattern, serializer);
Handle<String> flags_string =
JSRegExp::StringFromFlags(isolate_, regexp->flags());
- SerializeString(pattern, pattern_id);
- SerializeString(flags_string, flags_id);
- serializer.WriteUint32(ValueType::REGEXP);
- serializer.WriteUint32(pattern_id);
- serializer.WriteUint32(flags_id);
+ WriteStringId(flags_string, serializer);
break;
}
default:
- if (object->IsString()) {
- SerializeString(Handle<String>::cast(object), id);
- serializer.WriteUint32(ValueType::STRING_ID);
- serializer.WriteUint32(id);
+ if (heap_object->IsString()) {
+ // Write strings which are referred to only once as in-place strings.
+ WriteStringMaybeInPlace(Handle<String>::cast(heap_object), serializer);
} else {
Throw("Unsupported object");
}
@@ -905,12 +1131,58 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
// TODO(v8:11525): Support more types.
}
+void WebSnapshotSerializer::WriteStringMaybeInPlace(
+ Handle<String> string, ValueSerializer& serializer) {
+ // If the string is only referred to by one location, write it in-place.
+ bool in_place = false;
+ uint32_t id = GetStringId(string, in_place);
+ if (in_place) {
+ serializer.WriteUint32(ValueType::IN_PLACE_STRING_ID);
+ SerializeString(string, serializer);
+ } else {
+ serializer.WriteUint32(ValueType::STRING_ID);
+ serializer.WriteUint32(id);
+ }
+}
+
+void WebSnapshotSerializer::WriteStringId(Handle<String> string,
+ ValueSerializer& serializer) {
+ bool in_place = false;
+ uint32_t id = GetStringId(string, in_place);
+ CHECK(!in_place); // The string must have an ID.
+ serializer.WriteUint32(id);
+}
+
+uint32_t WebSnapshotSerializer::GetStringId(Handle<String> string,
+ bool& in_place) {
+ // Internalize strings so that they're unique.
+ string = factory()->InternalizeString(string);
+
+ // Strings referred to more than one places are inserted in string_ids_.
+ // Strings referred to by only one place aren't.
+#ifdef DEBUG
+ auto result = all_strings_.FindOrInsert(string);
+ DCHECK(result.already_exists);
+#endif
+ int id = 0;
+ in_place = !string_ids_.Lookup(*string, &id);
+ return static_cast<uint32_t>(id);
+}
+
+uint32_t WebSnapshotSerializer::GetMapId(Map map) {
+ int id;
+ bool return_value = map_ids_.Lookup(map, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
uint32_t WebSnapshotSerializer::GetFunctionId(JSFunction function) {
int id;
bool return_value = function_ids_.Lookup(function, &id);
DCHECK(return_value);
USE(return_value);
- return static_cast<uint32_t>(id);
+ return static_cast<uint32_t>(function_ids_.size() - 1 - id);
}
uint32_t WebSnapshotSerializer::GetClassId(JSFunction function) {
@@ -918,7 +1190,7 @@ uint32_t WebSnapshotSerializer::GetClassId(JSFunction function) {
bool return_value = class_ids_.Lookup(function, &id);
DCHECK(return_value);
USE(return_value);
- return static_cast<uint32_t>(id);
+ return static_cast<uint32_t>(class_ids_.size() - 1 - id);
}
uint32_t WebSnapshotSerializer::GetContextId(Context context) {
@@ -926,7 +1198,7 @@ uint32_t WebSnapshotSerializer::GetContextId(Context context) {
bool return_value = context_ids_.Lookup(context, &id);
DCHECK(return_value);
USE(return_value);
- return static_cast<uint32_t>(id);
+ return static_cast<uint32_t>(context_ids_.size() - 1 - id);
}
uint32_t WebSnapshotSerializer::GetArrayId(JSArray array) {
@@ -934,7 +1206,7 @@ uint32_t WebSnapshotSerializer::GetArrayId(JSArray array) {
bool return_value = array_ids_.Lookup(array, &id);
DCHECK(return_value);
USE(return_value);
- return static_cast<uint32_t>(id);
+ return static_cast<uint32_t>(array_ids_.size() - 1 - id);
}
uint32_t WebSnapshotSerializer::GetObjectId(JSObject object) {
@@ -942,65 +1214,94 @@ uint32_t WebSnapshotSerializer::GetObjectId(JSObject object) {
bool return_value = object_ids_.Lookup(object, &id);
DCHECK(return_value);
USE(return_value);
- return static_cast<uint32_t>(id);
+ return static_cast<uint32_t>(object_ids_.size() - 1 - id);
}
-WebSnapshotDeserializer::WebSnapshotDeserializer(v8::Isolate* isolate)
- : WebSnapshotSerializerDeserializer(
- reinterpret_cast<v8::internal::Isolate*>(isolate)) {}
-
-WebSnapshotDeserializer::~WebSnapshotDeserializer() {}
+uint32_t WebSnapshotSerializer::GetExternalId(HeapObject object) {
+ int id;
+ bool return_value = external_objects_ids_.Lookup(object, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
-void WebSnapshotDeserializer::Throw(const char* message) {
- string_count_ = 0;
- map_count_ = 0;
- context_count_ = 0;
- class_count_ = 0;
- function_count_ = 0;
- object_count_ = 0;
- deferred_references_->SetLength(0);
+Handle<FixedArray> WebSnapshotSerializer::GetExternals() {
+ return external_objects_ids_.Values(isolate_);
+}
- // Make sure we don't read any more data
- deserializer_->position_ = deserializer_->end_;
+WebSnapshotDeserializer::WebSnapshotDeserializer(v8::Isolate* isolate,
+ const uint8_t* data,
+ size_t buffer_size)
+ : WebSnapshotDeserializer(reinterpret_cast<i::Isolate*>(isolate),
+ Handle<Object>(), {data, buffer_size}) {}
+
+WebSnapshotDeserializer::WebSnapshotDeserializer(
+ Isolate* isolate, Handle<Script> snapshot_as_script)
+ : WebSnapshotDeserializer(
+ isolate, handle(snapshot_as_script->name(), isolate),
+ ExtractScriptBuffer(isolate, snapshot_as_script)) {}
+
+WebSnapshotDeserializer::WebSnapshotDeserializer(
+ Isolate* isolate, Handle<Object> script_name,
+ base::Vector<const uint8_t> buffer)
+ : WebSnapshotSerializerDeserializer(isolate),
+ script_name_(script_name),
+ deserializer_(isolate_, buffer.data(), buffer.length()),
+ roots_(isolate) {
+ Handle<FixedArray> empty_array = factory()->empty_fixed_array();
+ strings_handle_ = empty_array;
+ maps_handle_ = empty_array;
+ contexts_handle_ = empty_array;
+ functions_handle_ = empty_array;
+ classes_handle_ = empty_array;
+ arrays_handle_ = empty_array;
+ objects_handle_ = empty_array;
+ external_references_handle_ = empty_array;
+ isolate_->heap()->AddGCEpilogueCallback(UpdatePointersCallback,
+ v8::kGCTypeAll, this);
+}
- WebSnapshotSerializerDeserializer::Throw(message);
+WebSnapshotDeserializer::~WebSnapshotDeserializer() {
+ isolate_->heap()->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
}
-bool WebSnapshotDeserializer::UseWebSnapshot(const uint8_t* data,
- size_t buffer_size) {
- deserializer_.reset(new ValueDeserializer(isolate_, data, buffer_size));
- return Deserialize();
+void WebSnapshotDeserializer::UpdatePointers() {
+ strings_ = *strings_handle_;
+ maps_ = *maps_handle_;
+ contexts_ = *contexts_handle_;
+ functions_ = *functions_handle_;
+ classes_ = *classes_handle_;
+ arrays_ = *arrays_handle_;
+ objects_ = *objects_handle_;
+ external_references_ = *external_references_handle_;
}
-bool WebSnapshotDeserializer::UseWebSnapshot(
- Handle<Script> snapshot_as_script) {
+// static
+base::Vector<const uint8_t> WebSnapshotDeserializer::ExtractScriptBuffer(
+ Isolate* isolate, Handle<Script> snapshot_as_script) {
Handle<String> source =
- handle(String::cast(snapshot_as_script->source()), isolate_);
+ handle(String::cast(snapshot_as_script->source()), isolate);
if (source->IsExternalOneByteString()) {
const v8::String::ExternalOneByteStringResource* resource =
ExternalOneByteString::cast(*source).resource();
- deserializer_.reset(new ValueDeserializer(
- isolate_, reinterpret_cast<const uint8_t*>(resource->data()),
- resource->length()));
- return Deserialize();
+ return {reinterpret_cast<const uint8_t*>(resource->data()),
+ resource->length()};
} else if (source->IsSeqOneByteString()) {
SeqOneByteString source_as_seq = SeqOneByteString::cast(*source);
- auto length = source_as_seq.length();
+ size_t length = source_as_seq.length();
std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
{
DisallowGarbageCollection no_gc;
uint8_t* data = source_as_seq.GetChars(no_gc);
memcpy(data_copy.get(), data, length);
}
- deserializer_.reset(
- new ValueDeserializer(isolate_, data_copy.get(), length));
- return Deserialize();
+ return {data_copy.get(), length};
} else if (source->IsExternalTwoByteString()) {
// TODO(v8:11525): Implement end-to-end snapshot processing which gets rid
// of the need to copy the data here.
const v8::String::ExternalStringResource* resource =
ExternalTwoByteString::cast(*source).resource();
- auto length = resource->length();
+ size_t length = resource->length();
std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
{
DisallowGarbageCollection no_gc;
@@ -1010,48 +1311,66 @@ bool WebSnapshotDeserializer::UseWebSnapshot(
data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
}
}
- deserializer_.reset(
- new ValueDeserializer(isolate_, data_copy.get(), length));
- return Deserialize();
+ return {data_copy.get(), length};
} else if (source->IsSeqTwoByteString()) {
SeqTwoByteString source_as_seq = SeqTwoByteString::cast(*source);
- auto length = source_as_seq.length();
+ size_t length = source_as_seq.length();
std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
{
DisallowGarbageCollection no_gc;
uint16_t* data = source_as_seq.GetChars(no_gc);
uint8_t* data_copy_ptr = data_copy.get();
- for (int i = 0; i < length; ++i) {
+ for (size_t i = 0; i < length; ++i) {
data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
}
}
- deserializer_.reset(
- new ValueDeserializer(isolate_, data_copy.get(), length));
- return Deserialize();
+ return {data_copy.get(), length};
}
UNREACHABLE();
}
-bool WebSnapshotDeserializer::Deserialize() {
+void WebSnapshotDeserializer::Throw(const char* message) {
+ string_count_ = 0;
+ map_count_ = 0;
+ context_count_ = 0;
+ class_count_ = 0;
+ function_count_ = 0;
+ object_count_ = 0;
+ deferred_references_->SetLength(0);
+
+ // Make sure we don't read any more data
+ deserializer_.position_ = deserializer_.end_;
+
+ WebSnapshotSerializerDeserializer::Throw(message);
+}
+
+bool WebSnapshotDeserializer::Deserialize(
+ MaybeHandle<FixedArray> external_references) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize);
+ if (external_references.ToHandle(&external_references_handle_)) {
+ external_references_ = *external_references_handle_;
+ } else {
+ external_references_handle_ = roots_.empty_fixed_array_handle();
+ }
+
if (deserialized_) {
Throw("Can't reuse WebSnapshotDeserializer");
return false;
}
deserialized_ = true;
- auto buffer_size = deserializer_->end_ - deserializer_->position_;
+ auto buffer_size = deserializer_.end_ - deserializer_.position_;
base::ElapsedTimer timer;
if (FLAG_trace_web_snapshot) {
timer.Start();
}
if (!DeserializeSnapshot()) {
- isolate_->ReportPendingMessages();
return false;
}
if (!DeserializeScript()) {
return false;
}
+
if (FLAG_trace_web_snapshot) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Deserializing snapshot (%zu bytes) took %0.3f ms]\n", buffer_size,
@@ -1064,7 +1383,7 @@ bool WebSnapshotDeserializer::DeserializeSnapshot() {
deferred_references_ = ArrayList::New(isolate_, 30);
const void* magic_bytes;
- if (!deserializer_->ReadRawBytes(sizeof(kMagicNumber), &magic_bytes) ||
+ if (!deserializer_.ReadRawBytes(sizeof(kMagicNumber), &magic_bytes) ||
memcmp(magic_bytes, kMagicNumber, sizeof(kMagicNumber)) != 0) {
Throw("Invalid magic number");
return false;
@@ -1086,19 +1405,17 @@ bool WebSnapshotDeserializer::DeserializeSnapshot() {
bool WebSnapshotDeserializer::DeserializeScript() {
// If there is more data, treat it as normal JavaScript.
- DCHECK_LE(deserializer_->position_, deserializer_->end_);
- auto remaining_bytes = deserializer_->end_ - deserializer_->position_;
+ DCHECK_LE(deserializer_.position_, deserializer_.end_);
+ auto remaining_bytes = deserializer_.end_ - deserializer_.position_;
if (remaining_bytes > 0 && remaining_bytes < v8::String::kMaxLength) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
v8::Local<v8::String> source =
v8::String::NewFromUtf8(
- v8_isolate, reinterpret_cast<const char*>(deserializer_->position_),
+ v8_isolate, reinterpret_cast<const char*>(deserializer_.position_),
NewStringType::kNormal, static_cast<int>(remaining_bytes))
.ToLocalChecked();
- ScriptOrigin origin(v8_isolate, v8::String::NewFromUtf8Literal(
- v8_isolate, "(web snapshot)",
- NewStringType::kInternalized));
+ ScriptOrigin origin(v8_isolate, Utils::ToLocal(script_name_));
ScriptCompiler::Source script_source(source, origin);
Local<UnboundScript> script;
@@ -1124,51 +1441,67 @@ bool WebSnapshotDeserializer::DeserializeScript() {
void WebSnapshotDeserializer::DeserializeStrings() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Strings);
- if (!deserializer_->ReadUint32(&string_count_) ||
+ if (!deserializer_.ReadUint32(&string_count_) ||
string_count_ > kMaxItemCount) {
Throw("Malformed string table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
- strings_ = isolate_->factory()->NewFixedArray(string_count_);
+ strings_handle_ = factory()->NewFixedArray(string_count_);
+ strings_ = *strings_handle_;
for (uint32_t i = 0; i < string_count_; ++i) {
- MaybeHandle<String> maybe_string = deserializer_->ReadUtf8String();
+ MaybeHandle<String> maybe_string =
+ deserializer_.ReadUtf8String(AllocationType::kOld);
Handle<String> string;
if (!maybe_string.ToHandle(&string)) {
Throw("Malformed string");
return;
}
- strings_->set(i, *string);
+ strings_.set(i, *string);
}
}
-Handle<String> WebSnapshotDeserializer::ReadString(bool internalize) {
- DCHECK(!strings_->is_null());
+String WebSnapshotDeserializer::ReadString(bool internalize) {
+ DCHECK(!strings_handle_->is_null());
uint32_t string_id;
- if (!deserializer_->ReadUint32(&string_id) || string_id >= string_count_) {
+ if (!deserializer_.ReadUint32(&string_id) || string_id >= string_count_) {
Throw("malformed string id\n");
- return isolate_->factory()->empty_string();
+ return roots_.empty_string();
}
- Handle<String> string =
- handle(String::cast(strings_->get(string_id)), isolate_);
- if (internalize && !string->IsInternalizedString()) {
- string = isolate_->factory()->InternalizeString(string);
- strings_->set(string_id, *string);
+ String string = String::cast(strings_.get(string_id));
+ if (internalize && !string.IsInternalizedString(isolate_)) {
+ string = *factory()->InternalizeString(handle(string, isolate_));
+ strings_.set(string_id, string);
}
return string;
}
+String WebSnapshotDeserializer::ReadInPlaceString(bool internalize) {
+ MaybeHandle<String> maybe_string =
+ deserializer_.ReadUtf8String(AllocationType::kOld);
+ Handle<String> string;
+ if (!maybe_string.ToHandle(&string)) {
+ Throw("Malformed string");
+ return roots_.empty_string();
+ }
+ if (internalize) {
+ string = factory()->InternalizeString(string);
+ }
+ return *string;
+}
+
void WebSnapshotDeserializer::DeserializeMaps() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Maps);
- if (!deserializer_->ReadUint32(&map_count_) || map_count_ > kMaxItemCount) {
+ if (!deserializer_.ReadUint32(&map_count_) || map_count_ > kMaxItemCount) {
Throw("Malformed shape table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
- maps_ = isolate_->factory()->NewFixedArray(map_count_);
+ maps_handle_ = factory()->NewFixedArray(map_count_);
+ maps_ = *maps_handle_;
for (uint32_t i = 0; i < map_count_; ++i) {
uint32_t map_type;
- if (!deserializer_->ReadUint32(&map_type)) {
+ if (!deserializer_.ReadUint32(&map_type)) {
Throw("Malformed shape");
return;
}
@@ -1185,8 +1518,15 @@ void WebSnapshotDeserializer::DeserializeMaps() {
return;
}
+ uint32_t prototype_id;
+ if (!deserializer_.ReadUint32(&prototype_id) ||
+ prototype_id > kMaxItemCount) {
+ Throw("Malformed shape");
+ return;
+ }
+
uint32_t property_count;
- if (!deserializer_->ReadUint32(&property_count)) {
+ if (!deserializer_.ReadUint32(&property_count)) {
Throw("Malformed shape");
return;
}
@@ -1201,55 +1541,71 @@ void WebSnapshotDeserializer::DeserializeMaps() {
DisallowGarbageCollection no_gc;
Map empty_map =
isolate_->native_context()->object_function().initial_map();
- maps_->set(i, empty_map);
- return;
+ maps_.set(i, empty_map);
+ continue;
}
Handle<DescriptorArray> descriptors =
- isolate_->factory()->NewDescriptorArray(0, property_count);
- for (uint32_t p = 0; p < property_count; ++p) {
+ factory()->NewDescriptorArray(property_count, 0);
+ for (InternalIndex i : InternalIndex::Range(property_count)) {
PropertyAttributes attributes = PropertyAttributes::NONE;
if (has_custom_property_attributes) {
uint32_t flags;
- if (!deserializer_->ReadUint32(&flags)) {
+ if (!deserializer_.ReadUint32(&flags)) {
Throw("Malformed shape");
return;
}
attributes = FlagsToAttributes(flags);
}
- Handle<String> key = ReadString(true);
+ Handle<String> key(ReadString(true), isolate_);
// Use the "none" representation until we see the first object having this
// map. At that point, modify the representation.
- Descriptor desc =
- Descriptor::DataField(isolate_, key, static_cast<int>(p), attributes,
- Representation::None());
- descriptors->Append(&desc);
+ Descriptor desc = Descriptor::DataField(
+ isolate_, key, i.as_int(), attributes, Representation::None());
+ descriptors->Set(i, &desc);
}
+ DCHECK_EQ(descriptors->number_of_descriptors(), property_count);
+ descriptors->Sort();
- Handle<Map> map = isolate_->factory()->NewMap(
- JS_OBJECT_TYPE, JSObject::kHeaderSize * kTaggedSize, HOLEY_ELEMENTS, 0);
+ Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ HOLEY_ELEMENTS, 0);
map->InitializeDescriptors(isolate_, *descriptors);
// TODO(v8:11525): Set 'constructor'.
- // TODO(v8:11525): Set the correct prototype.
- maps_->set(i, *map);
+ if (prototype_id == 0) {
+ // Use Object.prototype as the prototype.
+ map->set_prototype(isolate_->native_context()->initial_object_prototype(),
+ UPDATE_WRITE_BARRIER);
+ } else {
+ // TODO(v8::11525): Implement stricter checks, e.g., disallow cycles.
+ --prototype_id;
+ if (prototype_id < current_object_count_) {
+ map->set_prototype(HeapObject::cast(objects_.get(prototype_id)),
+ UPDATE_WRITE_BARRIER);
+ } else {
+ // The object hasn't been deserialized yet.
+ AddDeferredReference(map, 0, OBJECT_ID, prototype_id);
+ }
+ }
+ maps_.set(i, *map);
}
}
void WebSnapshotDeserializer::DeserializeContexts() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Contexts);
- if (!deserializer_->ReadUint32(&context_count_) ||
+ if (!deserializer_.ReadUint32(&context_count_) ||
context_count_ > kMaxItemCount) {
Throw("Malformed context table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
- contexts_ = isolate_->factory()->NewFixedArray(context_count_);
+ contexts_handle_ = factory()->NewFixedArray(context_count_);
+ contexts_ = *contexts_handle_;
for (uint32_t i = 0; i < context_count_; ++i) {
uint32_t context_type;
- if (!deserializer_->ReadUint32(&context_type)) {
+ if (!deserializer_.ReadUint32(&context_type)) {
Throw("Malformed context type");
return;
}
@@ -1257,14 +1613,14 @@ void WebSnapshotDeserializer::DeserializeContexts() {
uint32_t parent_context_id;
// Parent context is serialized before child context. Note: not >= on
// purpose, we're going to subtract 1 later.
- if (!deserializer_->ReadUint32(&parent_context_id) ||
+ if (!deserializer_.ReadUint32(&parent_context_id) ||
parent_context_id > i) {
Throw("Malformed context");
return;
}
uint32_t variable_count;
- if (!deserializer_->ReadUint32(&variable_count)) {
+ if (!deserializer_.ReadUint32(&variable_count)) {
Throw("Malformed context");
return;
}
@@ -1275,8 +1631,8 @@ void WebSnapshotDeserializer::DeserializeContexts() {
Handle<Context> parent_context;
if (parent_context_id > 0) {
- parent_context = handle(
- Context::cast(contexts_->get(parent_context_id - 1)), isolate_);
+ parent_context =
+ handle(Context::cast(contexts_.get(parent_context_id - 1)), isolate_);
scope_info->set_outer_scope_info(parent_context->scope_info());
} else {
parent_context = handle(isolate_->context(), isolate_);
@@ -1286,8 +1642,10 @@ void WebSnapshotDeserializer::DeserializeContexts() {
const int context_local_info_base = context_local_base + variable_count;
for (int variable_index = 0;
variable_index < static_cast<int>(variable_count); ++variable_index) {
- Handle<String> name = ReadString(true);
- scope_info->set(context_local_base + variable_index, *name);
+ {
+ String name = ReadString(true);
+ scope_info->set(context_local_base + variable_index, name);
+ }
// TODO(v8:11525): Support variable modes etc.
uint32_t info =
@@ -1308,26 +1666,23 @@ void WebSnapshotDeserializer::DeserializeContexts() {
Handle<Context> context;
switch (context_type) {
case ContextType::FUNCTION:
- context =
- isolate_->factory()->NewFunctionContext(parent_context, scope_info);
+ context = factory()->NewFunctionContext(parent_context, scope_info);
break;
case ContextType::BLOCK:
- context =
- isolate_->factory()->NewBlockContext(parent_context, scope_info);
+ context = factory()->NewBlockContext(parent_context, scope_info);
break;
default:
Throw("Unsupported context type");
return;
}
+ int context_header_length = scope_info->ContextHeaderLength();
for (int variable_index = 0;
variable_index < static_cast<int>(variable_count); ++variable_index) {
- Handle<Object> value;
- Representation representation;
- ReadValue(value, representation, context,
- scope_info->ContextHeaderLength() + variable_index);
- context->set(scope_info->ContextHeaderLength() + variable_index, *value);
+ int context_index = context_header_length + variable_index;
+ Object value = ReadValue(context, context_index);
+ context->set(context_index, value);
}
- contexts_->set(i, *context);
+ contexts_.set(i, *context);
}
}
@@ -1345,8 +1700,8 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
ScopeInfo::LanguageModeBit::encode(LanguageMode::kStrict) |
ScopeInfo::DeclarationScopeBit::encode(false) |
ScopeInfo::ReceiverVariableBits::encode(VariableAllocationInfo::NONE) |
- ScopeInfo::HasClassBrandBit::encode(false) |
- ScopeInfo::HasSavedClassVariableIndexBit::encode(false) |
+ ScopeInfo::ClassScopeHasPrivateBrandBit::encode(false) |
+ ScopeInfo::HasSavedClassVariableBit::encode(false) |
ScopeInfo::HasNewTargetBit::encode(false) |
ScopeInfo::FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
ScopeInfo::HasInferredFunctionNameBit::encode(false) |
@@ -1382,16 +1737,20 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
? ScopeInfo::kPositionInfoEntries
: 0) +
(has_parent ? 1 : 0) + 2 * variable_count;
- Handle<ScopeInfo> scope_info = isolate_->factory()->NewScopeInfo(length);
+ Handle<ScopeInfo> scope_info = factory()->NewScopeInfo(length);
+ {
+ DisallowGarbageCollection no_gc;
+ ScopeInfo raw = *scope_info;
- scope_info->set_flags(flags);
- DCHECK(!scope_info->IsEmpty());
+ raw.set_flags(flags);
+ DCHECK(!raw.IsEmpty());
- scope_info->set_context_local_count(variable_count);
- // TODO(v8:11525): Support parameters.
- scope_info->set_parameter_count(0);
- if (scope_info->HasPositionInfo()) {
- scope_info->SetPositionInfo(0, 0);
+ raw.set_context_local_count(variable_count);
+ // TODO(v8:11525): Support parameters.
+ raw.set_parameter_count(0);
+ if (raw.HasPositionInfo()) {
+ raw.SetPositionInfo(0, 0);
+ }
}
return scope_info;
}
@@ -1401,26 +1760,29 @@ Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
uint32_t parameter_count, uint32_t flags, uint32_t context_id) {
// TODO(v8:11525): Deduplicate the SFIs for class methods.
FunctionKind kind = FunctionFlagsToFunctionKind(flags);
- Handle<SharedFunctionInfo> shared =
- isolate_->factory()->NewSharedFunctionInfo(
- isolate_->factory()->empty_string(), MaybeHandle<Code>(),
- Builtin::kCompileLazy, kind);
- if (IsConciseMethod(kind)) {
- shared->set_syntax_kind(FunctionSyntaxKind::kAccessorOrMethod);
- }
- shared->set_script(*script_);
- shared->set_function_literal_id(shared_function_info_index);
- shared->set_internal_formal_parameter_count(
- JSParameterCount(parameter_count));
- // TODO(v8:11525): Decide how to handle language modes.
- shared->set_language_mode(LanguageMode::kStrict);
- shared->set_uncompiled_data(
- *isolate_->factory()->NewUncompiledDataWithoutPreparseData(
- ReadOnlyRoots(isolate_).empty_string_handle(), start_position,
- start_position + length));
- shared->set_allows_lazy_compilation(true);
- shared_function_infos_->Set(shared_function_info_index,
- HeapObjectReference::Weak(*shared));
+ Handle<SharedFunctionInfo> shared = factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), MaybeHandle<Code>(), Builtin::kCompileLazy,
+ kind);
+ Handle<UncompiledData> uncompiled_data =
+ factory()->NewUncompiledDataWithoutPreparseData(
+ roots_.empty_string_handle(), start_position,
+ start_position + length);
+ {
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo raw = *shared;
+ if (IsConciseMethod(kind)) {
+ raw.set_syntax_kind(FunctionSyntaxKind::kAccessorOrMethod);
+ }
+ raw.set_script(*script_);
+ raw.set_function_literal_id(shared_function_info_index);
+ raw.set_internal_formal_parameter_count(JSParameterCount(parameter_count));
+ // TODO(v8:11525): Decide how to handle language modes.
+ raw.set_language_mode(LanguageMode::kStrict);
+ raw.set_uncompiled_data(*uncompiled_data);
+ raw.set_allows_lazy_compilation(true);
+ shared_function_infos_.Set(shared_function_info_index,
+ HeapObjectReference::Weak(raw));
+ }
shared_function_info_table_ = ObjectHashTable::Put(
shared_function_info_table_,
handle(Smi::FromInt(start_position), isolate_),
@@ -1433,7 +1795,7 @@ Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
DCHECK_LT(context_id - 1, context_count_);
// Guards raw pointer "context" below.
DisallowHeapAllocation no_heap_access;
- Context context = Context::cast(contexts_->get(context_id - 1));
+ Context context = Context::cast(contexts_.get(context_id - 1));
function->set_context(context);
shared->set_outer_scope_info(context.scope_info());
}
@@ -1442,50 +1804,57 @@ Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
void WebSnapshotDeserializer::DeserializeFunctions() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Functions);
- if (!deserializer_->ReadUint32(&function_count_) ||
+ if (!deserializer_.ReadUint32(&function_count_) ||
function_count_ > kMaxItemCount) {
Throw("Malformed function table");
return;
}
STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
- functions_ = isolate_->factory()->NewFixedArray(function_count_);
+ functions_handle_ = factory()->NewFixedArray(function_count_);
+ functions_ = *functions_handle_;
// Overallocate the array for SharedFunctionInfos; functions which we
// deserialize soon will create more SharedFunctionInfos when called.
- shared_function_infos_ = isolate_->factory()->NewWeakFixedArray(
+ shared_function_infos_handle_ = factory()->NewWeakFixedArray(
WeakArrayList::CapacityForLength(function_count_ + 1),
AllocationType::kOld);
+ shared_function_infos_ = *shared_function_infos_handle_;
shared_function_info_table_ = ObjectHashTable::New(isolate_, function_count_);
- script_ = isolate_->factory()->NewScript(isolate_->factory()->empty_string());
- script_->set_type(Script::TYPE_WEB_SNAPSHOT);
- script_->set_shared_function_infos(*shared_function_infos_);
- script_->set_shared_function_info_table(*shared_function_info_table_);
+ script_ = factory()->NewScript(factory()->empty_string());
+ {
+ DisallowGarbageCollection no_gc;
+ Script raw = *script_;
+ raw.set_type(Script::TYPE_WEB_SNAPSHOT);
+ raw.set_shared_function_infos(shared_function_infos_);
+ raw.set_shared_function_info_table(*shared_function_info_table_);
+ }
for (; current_function_count_ < function_count_; ++current_function_count_) {
uint32_t context_id;
// Note: > (not >= on purpose, we will subtract 1).
- if (!deserializer_->ReadUint32(&context_id) ||
- context_id > context_count_) {
+ if (!deserializer_.ReadUint32(&context_id) || context_id > context_count_) {
Throw("Malformed function");
return;
}
-
- Handle<String> source = ReadString(false);
- if (current_function_count_ == 0) {
- script_->set_source(*source);
- } else {
- // TODO(v8:11525): Support multiple source snippets.
- DCHECK_EQ(script_->source(), *source);
+ {
+ String source = ReadString(false);
+ DisallowGarbageCollection no_gc;
+ if (current_function_count_ == 0) {
+ script_->set_source(source);
+ } else {
+ // TODO(v8:11525): Support multiple source snippets.
+ DCHECK_EQ(script_->source(), source);
+ }
}
uint32_t start_position;
uint32_t length;
uint32_t parameter_count;
uint32_t flags;
- if (!deserializer_->ReadUint32(&start_position) ||
- !deserializer_->ReadUint32(&length) ||
- !deserializer_->ReadUint32(&parameter_count) ||
- !deserializer_->ReadUint32(&flags)) {
+ if (!deserializer_.ReadUint32(&start_position) ||
+ !deserializer_.ReadUint32(&length) ||
+ !deserializer_.ReadUint32(&parameter_count) ||
+ !deserializer_.ReadUint32(&flags)) {
Throw("Malformed function");
return;
}
@@ -1495,7 +1864,7 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
Handle<JSFunction> function =
CreateJSFunction(current_function_count_ + 1, start_position, length,
parameter_count, flags, context_id);
- functions_->set(current_function_count_, *function);
+ functions_.set(current_function_count_, *function);
ReadFunctionPrototype(function);
}
@@ -1503,45 +1872,48 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
void WebSnapshotDeserializer::DeserializeClasses() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Classes);
- if (!deserializer_->ReadUint32(&class_count_) ||
+ if (!deserializer_.ReadUint32(&class_count_) ||
class_count_ > kMaxItemCount) {
Throw("Malformed class table");
return;
}
STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
- classes_ = isolate_->factory()->NewFixedArray(class_count_);
+ classes_handle_ = factory()->NewFixedArray(class_count_);
+ classes_ = *classes_handle_;
// Grow the array for SharedFunctionInfos.
- shared_function_infos_ = WeakFixedArray::EnsureSpace(
- isolate_, shared_function_infos_,
+ shared_function_infos_handle_ = WeakFixedArray::EnsureSpace(
+ isolate_, shared_function_infos_handle_,
WeakArrayList::CapacityForLength(function_count_ + 1 + class_count_));
- script_->set_shared_function_infos(*shared_function_infos_);
+ shared_function_infos_ = *shared_function_infos_handle_;
+ script_->set_shared_function_infos(shared_function_infos_);
for (; current_class_count_ < class_count_; ++current_class_count_) {
uint32_t context_id;
// Note: > (not >= on purpose, we will subtract 1).
- if (!deserializer_->ReadUint32(&context_id) ||
- context_id > context_count_) {
+ if (!deserializer_.ReadUint32(&context_id) || context_id > context_count_) {
Throw("Malformed class");
return;
}
- Handle<String> source = ReadString(false);
- if (current_function_count_ + current_class_count_ == 0) {
- script_->set_source(*source);
- } else {
- // TODO(v8:11525): Support multiple source snippets.
- DCHECK_EQ(script_->source(), *source);
+ {
+ String source = ReadString(false);
+ if (current_function_count_ + current_class_count_ == 0) {
+ script_->set_source(source);
+ } else {
+ // TODO(v8:11525): Support multiple source snippets.
+ DCHECK_EQ(script_->source(), source);
+ }
}
uint32_t start_position;
uint32_t length;
uint32_t parameter_count;
uint32_t flags;
- if (!deserializer_->ReadUint32(&start_position) ||
- !deserializer_->ReadUint32(&length) ||
- !deserializer_->ReadUint32(&parameter_count) ||
- !deserializer_->ReadUint32(&flags)) {
+ if (!deserializer_.ReadUint32(&start_position) ||
+ !deserializer_.ReadUint32(&length) ||
+ !deserializer_.ReadUint32(&parameter_count) ||
+ !deserializer_.ReadUint32(&flags)) {
Throw("Malformed class");
return;
}
@@ -1551,7 +1923,7 @@ void WebSnapshotDeserializer::DeserializeClasses() {
Handle<JSFunction> function = CreateJSFunction(
function_count_ + current_class_count_ + 1, start_position, length,
parameter_count, flags, context_id);
- classes_->set(current_class_count_, *function);
+ classes_.set(current_class_count_, *function);
ReadFunctionPrototype(function);
}
@@ -1559,291 +1931,333 @@ void WebSnapshotDeserializer::DeserializeClasses() {
void WebSnapshotDeserializer::DeserializeObjects() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Objects);
- if (!deserializer_->ReadUint32(&object_count_) ||
+ if (!deserializer_.ReadUint32(&object_count_) ||
object_count_ > kMaxItemCount) {
Throw("Malformed objects table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
- objects_ = isolate_->factory()->NewFixedArray(object_count_);
+ objects_handle_ = factory()->NewFixedArray(object_count_);
+ objects_ = *objects_handle_;
for (; current_object_count_ < object_count_; ++current_object_count_) {
uint32_t map_id;
- if (!deserializer_->ReadUint32(&map_id) || map_id >= map_count_) {
+ if (!deserializer_.ReadUint32(&map_id) || map_id >= map_count_) {
Throw("Malformed object");
return;
}
- Handle<Map> map = handle(Map::cast(maps_->get(map_id)), isolate_);
+ Map raw_map = Map::cast(maps_.get(map_id));
Handle<DescriptorArray> descriptors =
- handle(map->instance_descriptors(kRelaxedLoad), isolate_);
- int no_properties = map->NumberOfOwnDescriptors();
+ handle(raw_map.instance_descriptors(kRelaxedLoad), isolate_);
+ int no_properties = raw_map.NumberOfOwnDescriptors();
// TODO(v8:11525): In-object properties.
+ Handle<Map> map(raw_map, isolate_);
Handle<PropertyArray> property_array =
- isolate_->factory()->NewPropertyArray(no_properties);
+ factory()->NewPropertyArray(no_properties);
for (int i = 0; i < no_properties; ++i) {
- Handle<Object> value;
- Representation wanted_representation = Representation::None();
- ReadValue(value, wanted_representation, property_array, i);
+ Object value = ReadValue(property_array, i);
+ DisallowGarbageCollection no_gc;
// Read the representation from the map.
- PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
+ DescriptorArray raw_descriptors = *descriptors;
+ PropertyDetails details = raw_descriptors.GetDetails(InternalIndex(i));
CHECK_EQ(details.location(), PropertyLocation::kField);
CHECK_EQ(PropertyKind::kData, details.kind());
Representation r = details.representation();
if (r.IsNone()) {
// Switch over to wanted_representation.
- details = details.CopyWithRepresentation(wanted_representation);
- descriptors->SetDetails(InternalIndex(i), details);
- } else if (!r.Equals(wanted_representation)) {
+ details = details.CopyWithRepresentation(Representation::Tagged());
+ raw_descriptors.SetDetails(InternalIndex(i), details);
+ } else if (!r.Equals(Representation::Tagged())) {
// TODO(v8:11525): Support this case too.
UNREACHABLE();
}
- property_array->set(i, *value);
+ property_array->set(i, value);
}
- Handle<JSObject> object = isolate_->factory()->NewJSObjectFromMap(map);
+ Handle<JSObject> object = factory()->NewJSObjectFromMap(map);
object->set_raw_properties_or_hash(*property_array, kRelaxedStore);
- objects_->set(static_cast<int>(current_object_count_), *object);
+
+ uint32_t max_element_index = 0;
+ if (!deserializer_.ReadUint32(&max_element_index) ||
+ max_element_index > kMaxItemCount + 1) {
+ Throw("Malformed object");
+ return;
+ }
+ if (max_element_index > 0) {
+ --max_element_index; // Subtract 1 to get the real max_element_index.
+ Handle<FixedArray> elements =
+ factory()->NewFixedArray(max_element_index + 1);
+ // Read (index, value) pairs until we encounter one where index ==
+ // max_element_index.
+ while (true) {
+ uint32_t index;
+ if (!deserializer_.ReadUint32(&index) || index > max_element_index) {
+ Throw("Malformed object");
+ return;
+ }
+ Object value = ReadValue(elements, index);
+ elements->set(index, value);
+ if (index == max_element_index) {
+ break;
+ }
+ }
+ object->set_elements(*elements);
+ // Objects always get HOLEY_ELEMENTS.
+ DCHECK(!IsSmiElementsKind(object->map().elements_kind()));
+ DCHECK(!IsDoubleElementsKind(object->map().elements_kind()));
+ DCHECK(IsHoleyElementsKind(object->map().elements_kind()));
+ }
+ objects_.set(static_cast<int>(current_object_count_), *object);
}
}
void WebSnapshotDeserializer::DeserializeArrays() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Arrays);
- if (!deserializer_->ReadUint32(&array_count_) ||
+ if (!deserializer_.ReadUint32(&array_count_) ||
object_count_ > kMaxItemCount) {
Throw("Malformed array table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
- arrays_ = isolate_->factory()->NewFixedArray(array_count_);
+ arrays_handle_ = factory()->NewFixedArray(array_count_);
+ arrays_ = *arrays_handle_;
for (; current_array_count_ < array_count_; ++current_array_count_) {
uint32_t length;
- if (!deserializer_->ReadUint32(&length) || length > kMaxItemCount) {
+ if (!deserializer_.ReadUint32(&length) || length > kMaxItemCount) {
Throw("Malformed array");
return;
}
- Handle<FixedArray> elements = isolate_->factory()->NewFixedArray(length);
+ Handle<FixedArray> elements = factory()->NewFixedArray(length);
ElementsKind elements_kind = PACKED_SMI_ELEMENTS;
for (uint32_t i = 0; i < length; ++i) {
- Handle<Object> value;
- Representation wanted_representation = Representation::None();
- ReadValue(value, wanted_representation, elements, i);
- if (!wanted_representation.IsSmi()) {
+ Object value = ReadValue(elements, i);
+ DisallowGarbageCollection no_gc;
+ if (!value.IsSmi()) {
elements_kind = PACKED_ELEMENTS;
}
- DCHECK(!value.is_null());
- elements->set(static_cast<int>(i), *value);
+ elements->set(static_cast<int>(i), value);
}
- Handle<JSArray> array = isolate_->factory()->NewJSArrayWithElements(
- elements, elements_kind, length);
- arrays_->set(static_cast<int>(current_array_count_), *array);
+ Handle<JSArray> array =
+ factory()->NewJSArrayWithElements(elements, elements_kind, length);
+ arrays_.set(static_cast<int>(current_array_count_), *array);
}
}
void WebSnapshotDeserializer::DeserializeExports() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Exports);
uint32_t count;
- if (!deserializer_->ReadUint32(&count) || count > kMaxItemCount) {
+ if (!deserializer_.ReadUint32(&count) || count > kMaxItemCount) {
Throw("Malformed export table");
return;
}
+ // Pre-reserve the space for the properties we're going to add to the global
+ // object.
+ Handle<JSGlobalObject> global = isolate_->global_object();
+ Handle<GlobalDictionary> dictionary(
+ global->global_dictionary(isolate_, kAcquireLoad), isolate_);
+
+ dictionary = GlobalDictionary::EnsureCapacity(
+ isolate_, dictionary, dictionary->NumberOfElements() + count,
+ AllocationType::kYoung);
+ bool has_exported_values = false;
+
+ // TODO(v8:11525): The code below skips checks, in particular
+ // LookupIterator::UpdateProtectors and
+ // LookupIterator::ExtendingNonExtensible.
+ InternalIndex entry = InternalIndex::NotFound();
for (uint32_t i = 0; i < count; ++i) {
- Handle<String> export_name = ReadString(true);
- Handle<Object> export_value;
- Representation representation;
+ Handle<String> export_name(ReadString(true), isolate_);
// No deferred references should occur at this point, since all objects have
// been deserialized.
- ReadValue(export_value, representation);
+ Object export_value = ReadValue();
+
+ if (export_name->length() == 0 && i == 0) {
+ // Hack: treat the first empty-string-named export value as a return value
+ // from the deserializer.
+ CHECK_EQ(i, 0);
+ return_value_ = handle(export_value, isolate_);
+ continue;
+ }
+ DisallowGarbageCollection no_gc;
// Check for the correctness of the snapshot (thus far) before producing
// something observable. TODO(v8:11525): Strictly speaking, we should
// produce observable effects only when we know that the whole snapshot is
// correct.
- if (has_error()) {
- return;
- }
+ if (has_error()) return;
- auto result = Object::SetProperty(isolate_, isolate_->global_object(),
- export_name, export_value);
- if (result.is_null()) {
- Throw("Setting global property failed");
- return;
- }
+ PropertyDetails property_details =
+ PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCell::InitialType(isolate_, export_value));
+ Handle<Object> export_value_handle(export_value, isolate_);
+ AllowGarbageCollection allow_gc;
+ Handle<PropertyCell> transition_cell = factory()->NewPropertyCell(
+ export_name, property_details, export_value_handle);
+ dictionary =
+ GlobalDictionary::Add(isolate_, dictionary, export_name,
+ transition_cell, property_details, &entry);
+ has_exported_values = true;
}
+
+ if (!has_exported_values) return;
+
+ global->set_global_dictionary(*dictionary, kReleaseStore);
+ JSObject::InvalidatePrototypeChains(global->map(isolate_));
}
-void WebSnapshotDeserializer::ReadValue(
- Handle<Object>& value, Representation& representation,
- Handle<Object> object_for_deferred_reference,
- uint32_t index_for_deferred_reference) {
+Object WebSnapshotDeserializer::ReadValue(Handle<HeapObject> container,
+ uint32_t container_index) {
uint32_t value_type;
// TODO(v8:11525): Consider adding a ReadByte.
- if (!deserializer_->ReadUint32(&value_type)) {
+ if (!deserializer_.ReadUint32(&value_type)) {
Throw("Malformed variable");
// Set "value" here so that the "keep on trucking" error handling won't fail
// when dereferencing the handle.
- value = isolate_->factory()->undefined_value();
- representation = Representation::None();
- return;
+ return Smi::zero();
}
switch (value_type) {
- case ValueType::FALSE_CONSTANT: {
- value = handle(ReadOnlyRoots(isolate_).false_value(), isolate_);
- representation = Representation::Tagged();
- break;
- }
- case ValueType::TRUE_CONSTANT: {
- value = handle(ReadOnlyRoots(isolate_).true_value(), isolate_);
- representation = Representation::Tagged();
- break;
- }
- case ValueType::NULL_CONSTANT: {
- value = handle(ReadOnlyRoots(isolate_).null_value(), isolate_);
- representation = Representation::Tagged();
- break;
- }
- case ValueType::UNDEFINED_CONSTANT: {
- value = handle(ReadOnlyRoots(isolate_).undefined_value(), isolate_);
- representation = Representation::Tagged();
- break;
- }
- case ValueType::INTEGER: {
- Maybe<int32_t> number = deserializer_->ReadZigZag<int32_t>();
- if (number.IsNothing()) {
- Throw("Malformed integer");
- return;
- }
- value = isolate_->factory()->NewNumberFromInt(number.FromJust());
- representation = Representation::Tagged();
- break;
- }
- case ValueType::DOUBLE: {
- double number;
- if (!deserializer_->ReadDouble(&number)) {
- Throw("Malformed double");
- return;
- }
- value = isolate_->factory()->NewNumber(number);
- representation = Representation::Tagged();
- break;
- }
- case ValueType::STRING_ID: {
- value = ReadString(false);
- representation = Representation::Tagged();
- break;
- }
+ case ValueType::FALSE_CONSTANT:
+ return roots_.false_value();
+ case ValueType::TRUE_CONSTANT:
+ return roots_.true_value();
+ case ValueType::NULL_CONSTANT:
+ return roots_.null_value();
+ case ValueType::UNDEFINED_CONSTANT:
+ return roots_.undefined_value();
+ case ValueType::INTEGER:
+ return ReadInteger();
+ case ValueType::DOUBLE:
+ return ReadNumber();
+ case ValueType::STRING_ID:
+ return ReadString(false);
case ValueType::ARRAY_ID:
- uint32_t array_id;
- if (!deserializer_->ReadUint32(&array_id) || array_id >= kMaxItemCount) {
- Throw("Malformed variable");
- return;
- }
- if (array_id < current_array_count_) {
- value = handle(arrays_->get(array_id), isolate_);
- } else {
- // The array hasn't been deserialized yet.
- value = isolate_->factory()->undefined_value();
- if (object_for_deferred_reference.is_null()) {
- Throw("Invalid array reference");
- return;
- }
- AddDeferredReference(object_for_deferred_reference,
- index_for_deferred_reference, ARRAY_ID, array_id);
- }
- representation = Representation::Tagged();
- break;
+ return ReadArray(container, container_index);
case ValueType::OBJECT_ID:
- uint32_t object_id;
- if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount) {
- Throw("Malformed variable");
- return;
- }
- if (object_id < current_object_count_) {
- value = handle(objects_->get(object_id), isolate_);
- } else {
- // The object hasn't been deserialized yet.
- value = isolate_->factory()->undefined_value();
- if (object_for_deferred_reference.is_null()) {
- Throw("Invalid object reference");
- return;
- }
- AddDeferredReference(object_for_deferred_reference,
- index_for_deferred_reference, OBJECT_ID,
- object_id);
- }
- representation = Representation::Tagged();
- break;
- case ValueType::FUNCTION_ID: {
- uint32_t function_id;
- if (!deserializer_->ReadUint32(&function_id) ||
- function_id >= function_count_) {
- Throw("Malformed object property");
- return;
- }
- if (function_id < current_function_count_) {
- value = handle(functions_->get(function_id), isolate_);
- } else {
- // The function hasn't been deserialized yet.
- value = isolate_->factory()->undefined_value();
- if (object_for_deferred_reference.is_null()) {
- Throw("Invalid object reference");
- return;
- }
- AddDeferredReference(object_for_deferred_reference,
- index_for_deferred_reference, FUNCTION_ID,
- function_id);
- }
- representation = Representation::Tagged();
- break;
- }
- case ValueType::CLASS_ID: {
- uint32_t class_id;
- if (!deserializer_->ReadUint32(&class_id) || class_id >= kMaxItemCount) {
- Throw("Malformed object property");
- return;
- }
- if (class_id < current_class_count_) {
- value = handle(classes_->get(class_id), isolate_);
- } else {
- // The class hasn't been deserialized yet.
- value = isolate_->factory()->undefined_value();
- if (object_for_deferred_reference.is_null()) {
- Throw("Invalid object reference");
- return;
- }
- AddDeferredReference(object_for_deferred_reference,
- index_for_deferred_reference, CLASS_ID, class_id);
- }
- representation = Representation::Tagged();
- break;
- }
- case ValueType::REGEXP: {
- Handle<String> pattern = ReadString(false);
- Handle<String> flags_string = ReadString(false);
- base::Optional<JSRegExp::Flags> flags =
- JSRegExp::FlagsFromString(isolate_, flags_string);
- if (!flags.has_value()) {
- Throw("Malformed flags in regular expression");
- return;
- }
- MaybeHandle<JSRegExp> maybe_regexp =
- JSRegExp::New(isolate_, pattern, flags.value());
- if (!maybe_regexp.ToHandle(&value)) {
- Throw("Malformed RegExp");
- return;
- }
- representation = Representation::Tagged();
- break;
- }
+ return ReadObject(container, container_index);
+ case ValueType::FUNCTION_ID:
+ return ReadFunction(container, container_index);
+ case ValueType::CLASS_ID:
+ return ReadClass(container, container_index);
+ case ValueType::REGEXP:
+ return ReadRegexp();
+ case ValueType::EXTERNAL_ID:
+ return ReadExternalReference();
+ case ValueType::IN_PLACE_STRING_ID:
+ return ReadInPlaceString(false);
default:
// TODO(v8:11525): Handle other value types.
Throw("Unsupported value type");
- return;
+ return Smi::zero();
}
}
+Object WebSnapshotDeserializer::ReadInteger() {
+ Maybe<int32_t> number = deserializer_.ReadZigZag<int32_t>();
+ if (number.IsNothing()) {
+ Throw("Malformed integer");
+ return Smi::zero();
+ }
+ return *factory()->NewNumberFromInt(number.FromJust());
+}
+
+Object WebSnapshotDeserializer::ReadNumber() {
+ double number;
+ if (!deserializer_.ReadDouble(&number)) {
+ Throw("Malformed double");
+ return Smi::zero();
+ }
+ return *factory()->NewNumber(number);
+}
+
+Object WebSnapshotDeserializer::ReadArray(Handle<HeapObject> container,
+ uint32_t index) {
+ uint32_t array_id;
+ if (!deserializer_.ReadUint32(&array_id) || array_id >= kMaxItemCount) {
+ Throw("Malformed variable");
+ return Smi::zero();
+ }
+ if (array_id < current_array_count_) {
+ return arrays_.get(array_id);
+ }
+ // The array hasn't been deserialized yet.
+ return AddDeferredReference(container, index, ARRAY_ID, array_id);
+}
+
+Object WebSnapshotDeserializer::ReadObject(Handle<HeapObject> container,
+ uint32_t index) {
+ uint32_t object_id;
+ if (!deserializer_.ReadUint32(&object_id) || object_id > kMaxItemCount) {
+ Throw("Malformed variable");
+ return Smi::zero();
+ }
+ if (object_id < current_object_count_) {
+ return objects_.get(object_id);
+ }
+ // The object hasn't been deserialized yet.
+ return AddDeferredReference(container, index, OBJECT_ID, object_id);
+}
+
+Object WebSnapshotDeserializer::ReadFunction(Handle<HeapObject> container,
+ uint32_t index) {
+ uint32_t function_id;
+ if (!deserializer_.ReadUint32(&function_id) ||
+ function_id >= function_count_) {
+ Throw("Malformed object property");
+ return Smi::zero();
+ }
+ if (function_id < current_function_count_) {
+ return functions_.get(function_id);
+ }
+ // The function hasn't been deserialized yet.
+ return AddDeferredReference(container, index, FUNCTION_ID, function_id);
+}
+
+Object WebSnapshotDeserializer::ReadClass(Handle<HeapObject> container,
+ uint32_t index) {
+ uint32_t class_id;
+ if (!deserializer_.ReadUint32(&class_id) || class_id >= kMaxItemCount) {
+ Throw("Malformed object property");
+ return Smi::zero();
+ }
+ if (class_id < current_class_count_) {
+ return classes_.get(class_id);
+ }
+ // The class hasn't been deserialized yet.
+ return AddDeferredReference(container, index, CLASS_ID, class_id);
+}
+
+Object WebSnapshotDeserializer::ReadRegexp() {
+ Handle<String> pattern(ReadString(false), isolate_);
+ Handle<String> flags_string(ReadString(false), isolate_);
+ base::Optional<JSRegExp::Flags> flags =
+ JSRegExp::FlagsFromString(isolate_, flags_string);
+ if (!flags.has_value()) {
+ Throw("Malformed flags in regular expression");
+ return Smi::zero();
+ }
+ MaybeHandle<JSRegExp> maybe_regexp =
+ JSRegExp::New(isolate_, pattern, flags.value());
+ Handle<JSRegExp> regexp;
+ if (!maybe_regexp.ToHandle(&regexp)) {
+ Throw("Malformed RegExp");
+ return Smi::zero();
+ }
+ return *regexp;
+}
+
+Object WebSnapshotDeserializer::ReadExternalReference() {
+ uint32_t ref_id;
+ if (!deserializer_.ReadUint32(&ref_id) ||
+ ref_id >= static_cast<uint32_t>(external_references_.length())) {
+ Throw("Invalid external reference");
+ return Smi::zero();
+ }
+ return external_references_.get(ref_id);
+}
+
void WebSnapshotDeserializer::ReadFunctionPrototype(
Handle<JSFunction> function) {
uint32_t object_id;
- if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount + 1) {
+ if (!deserializer_.ReadUint32(&object_id) || object_id > kMaxItemCount + 1) {
Throw("Malformed class / function");
return;
}
@@ -1854,7 +2268,7 @@ void WebSnapshotDeserializer::ReadFunctionPrototype(
--object_id;
if (object_id < current_object_count_) {
if (!SetFunctionPrototype(*function,
- JSReceiver::cast(objects_->get(object_id)))) {
+ JSReceiver::cast(objects_.get(object_id)))) {
Throw("Can't reuse function prototype");
return;
}
@@ -1866,10 +2280,11 @@ void WebSnapshotDeserializer::ReadFunctionPrototype(
bool WebSnapshotDeserializer::SetFunctionPrototype(JSFunction function,
JSReceiver prototype) {
+ DisallowGarbageCollection no_gc;
// TODO(v8:11525): Enforce the invariant that no two prototypes share a map.
Map map = prototype.map();
map.set_is_prototype_map(true);
- if (!map.constructor_or_back_pointer().IsNullOrUndefined()) {
+ if (!map.constructor_or_back_pointer().IsNullOrUndefined(isolate_)) {
return false;
}
map.set_constructor_or_back_pointer(function);
@@ -1877,38 +2292,55 @@ bool WebSnapshotDeserializer::SetFunctionPrototype(JSFunction function,
return true;
}
-void WebSnapshotDeserializer::AddDeferredReference(Handle<Object> container,
- uint32_t index,
- ValueType target_type,
- uint32_t target_index) {
+HeapObject WebSnapshotDeserializer::AddDeferredReference(
+ Handle<HeapObject> container, uint32_t index, ValueType target_type,
+ uint32_t target_index) {
+ if (container.is_null()) {
+ const char* message = "Invalid reference";
+ switch (target_type) {
+ case ARRAY_ID:
+ message = "Invalid array reference";
+ break;
+ case OBJECT_ID:
+ message = "Invalid object reference";
+ break;
+ case CLASS_ID:
+ message = "Invalid class reference";
+ break;
+ case FUNCTION_ID:
+ message = "Invalid function reference";
+ break;
+ default:
+ break;
+ }
+ Throw(message);
+ return roots_.undefined_value();
+ }
DCHECK(container->IsPropertyArray() || container->IsContext() ||
- container->IsFixedArray() || container->IsJSFunction());
+ container->IsFixedArray() || container->IsJSFunction() ||
+ container->IsMap());
deferred_references_ = ArrayList::Add(
isolate_, deferred_references_, container, Smi::FromInt(index),
Smi::FromInt(target_type), Smi::FromInt(target_index));
+ // Use HeapObject as placeholder since this might break elements kinds.
+ return roots_.undefined_value();
}
void WebSnapshotDeserializer::ProcessDeferredReferences() {
// Check for error now, since the FixedArrays below might not have been
// created if there was an error.
- if (has_error()) {
- return;
- }
+ if (has_error()) return;
DisallowGarbageCollection no_gc;
ArrayList raw_deferred_references = *deferred_references_;
- FixedArray raw_functions = *functions_;
- FixedArray raw_classes = *classes_;
- FixedArray raw_arrays = *arrays_;
- FixedArray raw_objects = *objects_;
// Deferred references is a list of (object, index, target type, target index)
// tuples.
for (int i = 0; i < raw_deferred_references.Length() - 3; i += 4) {
- Object container = raw_deferred_references.Get(i);
+ HeapObject container = HeapObject::cast(raw_deferred_references.Get(i));
int index = raw_deferred_references.Get(i + 1).ToSmi().value();
- ValueType target_type =
- ValueType(raw_deferred_references.Get(i + 2).ToSmi().value());
+ ValueType target_type = static_cast<ValueType>(
+ raw_deferred_references.Get(i + 2).ToSmi().value());
int target_index = raw_deferred_references.Get(i + 3).ToSmi().value();
Object target;
switch (target_type) {
@@ -1920,7 +2352,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Throw("Invalid function reference");
return;
}
- target = raw_functions.get(target_index);
+ target = functions_.get(target_index);
break;
case CLASS_ID:
if (static_cast<uint32_t>(target_index) >= class_count_) {
@@ -1928,7 +2360,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Throw("Invalid class reference");
return;
}
- target = raw_classes.get(target_index);
+ target = classes_.get(target_index);
break;
case ARRAY_ID:
if (static_cast<uint32_t>(target_index) >= array_count_) {
@@ -1936,7 +2368,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Throw("Invalid array reference");
return;
}
- target = raw_arrays.get(target_index);
+ target = arrays_.get(target_index);
break;
case OBJECT_ID:
if (static_cast<uint32_t>(target_index) >= object_count_) {
@@ -1944,18 +2376,19 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Throw("Invalid object reference");
return;
}
- target = raw_objects.get(target_index);
+ target = objects_.get(target_index);
break;
default:
UNREACHABLE();
}
- if (container.IsPropertyArray()) {
+ InstanceType instance_type = container.map().instance_type();
+ if (InstanceTypeChecker::IsPropertyArray(instance_type)) {
PropertyArray::cast(container).set(index, target);
- } else if (container.IsContext()) {
+ } else if (InstanceTypeChecker::IsContext(instance_type)) {
Context::cast(container).set(index, target);
- } else if (container.IsFixedArray()) {
+ } else if (InstanceTypeChecker::IsFixedArray(instance_type)) {
FixedArray::cast(container).set(index, target);
- } else if (container.IsJSFunction()) {
+ } else if (InstanceTypeChecker::IsJSFunction(instance_type)) {
// The only deferred reference allowed for a JSFunction is the function
// prototype.
DCHECK_EQ(index, 0);
@@ -1966,6 +2399,12 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Throw("Can't reuse function prototype");
return;
}
+ } else if (InstanceTypeChecker::IsMap(instance_type)) {
+ // The only deferred reference allowed for a Map is the __proto__.
+ DCHECK_EQ(index, 0);
+ DCHECK(target.IsJSReceiver());
+ Map::cast(container).set_prototype(HeapObject::cast(target),
+ UPDATE_WRITE_BARRIER);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
index 25a76f3572..4dc0f3f091 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.h
+++ b/deps/v8/src/web-snapshot/web-snapshot.h
@@ -6,7 +6,6 @@
#define V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
#include <queue>
-#include <vector>
#include "src/handles/handles.h"
#include "src/objects/value-serializer.h"
@@ -53,7 +52,9 @@ class WebSnapshotSerializerDeserializer {
OBJECT_ID,
FUNCTION_ID,
CLASS_ID,
- REGEXP
+ REGEXP,
+ EXTERNAL_ID,
+ IN_PLACE_STRING_ID
};
static constexpr uint8_t kMagicNumber[4] = {'+', '+', '+', ';'};
@@ -83,6 +84,9 @@ class WebSnapshotSerializerDeserializer {
: isolate_(isolate) {}
// Not virtual, on purpose (because it doesn't need to be).
void Throw(const char* message);
+
+ inline Factory* factory() const { return isolate_->factory(); }
+
Isolate* isolate_;
const char* error_message_ = nullptr;
@@ -112,11 +116,15 @@ class V8_EXPORT WebSnapshotSerializer
: public WebSnapshotSerializerDeserializer {
public:
explicit WebSnapshotSerializer(v8::Isolate* isolate);
+ explicit WebSnapshotSerializer(Isolate* isolate);
+
~WebSnapshotSerializer();
bool TakeSnapshot(v8::Local<v8::Context> context,
v8::Local<v8::PrimitiveArray> exports,
WebSnapshotData& data_out);
+ bool TakeSnapshot(Handle<Object> object, MaybeHandle<FixedArray> block_list,
+ WebSnapshotData& data_out);
// For inspecting the state after taking a snapshot.
uint32_t string_count() const {
@@ -145,47 +153,69 @@ class V8_EXPORT WebSnapshotSerializer
return static_cast<uint32_t>(object_ids_.size());
}
+ uint32_t external_objects_count() const {
+ return static_cast<uint32_t>(external_objects_ids_.size());
+ }
+
+ Handle<FixedArray> GetExternals();
+
private:
WebSnapshotSerializer(const WebSnapshotSerializer&) = delete;
WebSnapshotSerializer& operator=(const WebSnapshotSerializer&) = delete;
+ enum class AllowInPlace {
+ No, // This reference cannot be replace with an in-place item.
+ Yes, // This reference can be replaced with an in-place item.
+ };
+
void SerializePendingItems();
void WriteSnapshot(uint8_t*& buffer, size_t& buffer_size);
+ void WriteObjects(ValueSerializer& destination, size_t count,
+ ValueSerializer& source, const char* name);
// Returns true if the object was already in the map, false if it was added.
- bool InsertIntoIndexMap(ObjectCacheIndexMap& map, Handle<HeapObject> object,
+ bool InsertIntoIndexMap(ObjectCacheIndexMap& map, HeapObject heap_object,
uint32_t& id);
- void Discovery(Handle<Object> object);
+ void ShallowDiscoverExternals(FixedArray externals);
+ void Discover(Handle<HeapObject> object);
+ void DiscoverString(Handle<String> string,
+ AllowInPlace can_be_in_place = AllowInPlace::No);
+ void DiscoverMap(Handle<Map> map);
void DiscoverFunction(Handle<JSFunction> function);
void DiscoverClass(Handle<JSFunction> function);
void DiscoverContextAndPrototype(Handle<JSFunction> function);
void DiscoverContext(Handle<Context> context);
void DiscoverArray(Handle<JSArray> array);
void DiscoverObject(Handle<JSObject> object);
+ void DiscoverSource(Handle<JSFunction> function);
+ void ConstructSource();
- void SerializeSource(ValueSerializer* serializer,
- Handle<JSFunction> function);
void SerializeFunctionInfo(ValueSerializer* serializer,
Handle<JSFunction> function);
- void SerializeString(Handle<String> string, uint32_t& id);
- void SerializeMap(Handle<Map> map, uint32_t& id);
-
+ void SerializeString(Handle<String> string, ValueSerializer& serializer);
+ void SerializeMap(Handle<Map> map);
void SerializeFunction(Handle<JSFunction> function);
void SerializeClass(Handle<JSFunction> function);
void SerializeContext(Handle<Context> context);
void SerializeArray(Handle<JSArray> array);
void SerializeObject(Handle<JSObject> object);
- void SerializeExport(Handle<JSObject> object, Handle<String> export_name);
+ void SerializeExport(Handle<Object> object, Handle<String> export_name);
void WriteValue(Handle<Object> object, ValueSerializer& serializer);
+ void WriteStringMaybeInPlace(Handle<String> string,
+ ValueSerializer& serializer);
+ void WriteStringId(Handle<String> string, ValueSerializer& serializer);
+ uint32_t GetStringId(Handle<String> string, bool& in_place);
+ uint32_t GetMapId(Map map);
uint32_t GetFunctionId(JSFunction function);
uint32_t GetClassId(JSFunction function);
uint32_t GetContextId(Context context);
uint32_t GetArrayId(JSArray array);
uint32_t GetObjectId(JSObject object);
+ uint32_t GetExternalId(HeapObject object);
ValueSerializer string_serializer_;
ValueSerializer map_serializer_;
@@ -202,8 +232,17 @@ class V8_EXPORT WebSnapshotSerializer
Handle<ArrayList> classes_;
Handle<ArrayList> arrays_;
Handle<ArrayList> objects_;
+ Handle<ArrayList> strings_;
+ Handle<ArrayList> maps_;
- // ObjectCacheIndexMap implements fast lookup item -> id.
+ // IndexMap to keep track of explicitly blocked external objects and
+ // non-serializable/not-supported objects (e.g. API Objects).
+ ObjectCacheIndexMap external_objects_ids_;
+
+ // ObjectCacheIndexMap implements fast lookup item -> id. Some items (context,
+ // function, class, array, object) can point to other items and we serialize
+ // them in the reverse order. This ensures that the items this item points to
+ // have a lower ID and will be deserialized first.
ObjectCacheIndexMap string_ids_;
ObjectCacheIndexMap map_ids_;
ObjectCacheIndexMap context_ids_;
@@ -213,16 +252,33 @@ class V8_EXPORT WebSnapshotSerializer
ObjectCacheIndexMap object_ids_;
uint32_t export_count_ = 0;
- std::queue<Handle<Object>> discovery_queue_;
+ std::queue<Handle<HeapObject>> discovery_queue_;
+
+ // For keeping track of which strings have exactly one reference. Strings are
+ // inserted here when the first reference is discovered, and never removed.
+ // Strings which have more than one reference get an ID and are inserted to
+ // strings_.
+ IdentityMap<int, base::DefaultAllocationPolicy> all_strings_;
+
+ // For constructing the minimal, "compacted", source string to cover all
+ // function bodies.
+ Handle<String> full_source_;
+ uint32_t source_id_;
+ // Ordered set of (start, end) pairs of all functions we've discovered.
+ std::set<std::pair<int, int>> source_intervals_;
+ // Maps function positions in the real source code into the function positions
+ // in the constructed source code (which we'll include in the web snapshot).
+ std::unordered_map<int, int> source_offset_to_compacted_source_offset_;
};
class V8_EXPORT WebSnapshotDeserializer
: public WebSnapshotSerializerDeserializer {
public:
- explicit WebSnapshotDeserializer(v8::Isolate* v8_isolate);
+ WebSnapshotDeserializer(v8::Isolate* v8_isolate, const uint8_t* data,
+ size_t buffer_size);
+ WebSnapshotDeserializer(Isolate* isolate, Handle<Script> snapshot_as_script);
~WebSnapshotDeserializer();
- bool UseWebSnapshot(const uint8_t* data, size_t buffer_size);
- bool UseWebSnapshot(Handle<Script> snapshot_as_script);
+ bool Deserialize(MaybeHandle<FixedArray> external_references = {});
// For inspecting the state after deserializing a snapshot.
uint32_t string_count() const { return string_count_; }
@@ -233,8 +289,21 @@ class V8_EXPORT WebSnapshotDeserializer
uint32_t array_count() const { return array_count_; }
uint32_t object_count() const { return object_count_; }
+ static void UpdatePointersCallback(v8::Isolate* isolate, v8::GCType type,
+ v8::GCCallbackFlags flags,
+ void* deserializer) {
+ reinterpret_cast<WebSnapshotDeserializer*>(deserializer)->UpdatePointers();
+ }
+
+ void UpdatePointers();
+
+ MaybeHandle<Object> value() const { return return_value_; }
+
private:
- bool Deserialize();
+ WebSnapshotDeserializer(Isolate* isolate, Handle<Object> script_name,
+ base::Vector<const uint8_t> buffer);
+ base::Vector<const uint8_t> ExtractScriptBuffer(
+ Isolate* isolate, Handle<Script> snapshot_as_script);
bool DeserializeSnapshot();
bool DeserializeScript();
@@ -242,7 +311,6 @@ class V8_EXPORT WebSnapshotDeserializer
WebSnapshotDeserializer& operator=(const WebSnapshotDeserializer&) = delete;
void DeserializeStrings();
- Handle<String> ReadString(bool internalize = false);
void DeserializeMaps();
void DeserializeContexts();
Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent,
@@ -256,32 +324,67 @@ class V8_EXPORT WebSnapshotDeserializer
void DeserializeArrays();
void DeserializeObjects();
void DeserializeExports();
- void ReadValue(
- Handle<Object>& value, Representation& representation,
- Handle<Object> object_for_deferred_reference = Handle<Object>(),
+
+ Object ReadValue(
+ Handle<HeapObject> object_for_deferred_reference = Handle<HeapObject>(),
uint32_t index_for_deferred_reference = 0);
+
+ Object ReadInteger();
+ Object ReadNumber();
+ String ReadString(bool internalize = false);
+ String ReadInPlaceString(bool internalize = false);
+ Object ReadArray(Handle<HeapObject> container, uint32_t container_index);
+ Object ReadObject(Handle<HeapObject> container, uint32_t container_index);
+ Object ReadFunction(Handle<HeapObject> container, uint32_t container_index);
+ Object ReadClass(Handle<HeapObject> container, uint32_t container_index);
+ Object ReadRegexp();
+ Object ReadExternalReference();
+
void ReadFunctionPrototype(Handle<JSFunction> function);
bool SetFunctionPrototype(JSFunction function, JSReceiver prototype);
- void AddDeferredReference(Handle<Object> container, uint32_t index,
- ValueType target_type,
- uint32_t target_object_index);
+ HeapObject AddDeferredReference(Handle<HeapObject> container, uint32_t index,
+ ValueType target_type,
+ uint32_t target_object_index);
void ProcessDeferredReferences();
// Not virtual, on purpose (because it doesn't need to be).
void Throw(const char* message);
- Handle<FixedArray> strings_;
- Handle<FixedArray> maps_;
- Handle<FixedArray> contexts_;
- Handle<FixedArray> functions_;
- Handle<FixedArray> classes_;
- Handle<FixedArray> arrays_;
- Handle<FixedArray> objects_;
+ Handle<FixedArray> strings_handle_;
+ FixedArray strings_;
+
+ Handle<FixedArray> maps_handle_;
+ FixedArray maps_;
+
+ Handle<FixedArray> contexts_handle_;
+ FixedArray contexts_;
+
+ Handle<FixedArray> functions_handle_;
+ FixedArray functions_;
+
+ Handle<FixedArray> classes_handle_;
+ FixedArray classes_;
+
+ Handle<FixedArray> arrays_handle_;
+ FixedArray arrays_;
+
+ Handle<FixedArray> objects_handle_;
+ FixedArray objects_;
+
+ Handle<FixedArray> external_references_handle_;
+ FixedArray external_references_;
+
Handle<ArrayList> deferred_references_;
- Handle<WeakFixedArray> shared_function_infos_;
+ Handle<WeakFixedArray> shared_function_infos_handle_;
+ WeakFixedArray shared_function_infos_;
+
Handle<ObjectHashTable> shared_function_info_table_;
+
Handle<Script> script_;
+ Handle<Object> script_name_;
+
+ Handle<Object> return_value_;
uint32_t string_count_ = 0;
uint32_t map_count_ = 0;
@@ -295,7 +398,8 @@ class V8_EXPORT WebSnapshotDeserializer
uint32_t object_count_ = 0;
uint32_t current_object_count_ = 0;
- std::unique_ptr<ValueDeserializer> deserializer_;
+ ValueDeserializer deserializer_;
+ ReadOnlyRoots roots_;
bool deserialized_ = false;
};
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 40b4756bd1..141e111206 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -113,7 +113,7 @@ void AccountingAllocator::ReturnSegment(Segment* segment,
current_memory_usage_.fetch_sub(segment_size, std::memory_order_relaxed);
segment->ZapHeader();
if (COMPRESS_ZONES_BOOL && supports_compression) {
- CHECK(FreePages(bounded_page_allocator_.get(), segment, segment_size));
+ FreePages(bounded_page_allocator_.get(), segment, segment_size);
} else {
zone_backing_free_(segment);
}
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 42617aadb8..66039f5368 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -39,8 +39,7 @@ Zone::Zone(AccountingAllocator* allocator, const char* name,
Zone::~Zone() {
DeleteAll();
-
- DCHECK_EQ(segment_bytes_allocated_, 0);
+ DCHECK_EQ(segment_bytes_allocated_.load(), 0);
}
void* Zone::AsanNew(size_t size) {
@@ -70,9 +69,30 @@ void* Zone::AsanNew(size_t size) {
return reinterpret_cast<void*>(result);
}
-void Zone::ReleaseMemory() {
+void Zone::Reset() {
+ if (!segment_head_) return;
+ Segment* keep = segment_head_;
+ segment_head_ = segment_head_->next();
+ if (segment_head_ != nullptr) {
+ // Reset the position to the end of the new head, and uncommit its
+ // allocation size (which will be re-committed in DeleteAll).
+ position_ = segment_head_->end();
+ allocation_size_ -= segment_head_->end() - segment_head_->start();
+ }
+ keep->set_next(nullptr);
DeleteAll();
allocator_->TraceZoneCreation(this);
+
+ // Un-poison the kept segment content so we can zap and re-use it.
+ ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(keep->start()),
+ keep->capacity());
+ keep->ZapContents();
+
+ segment_head_ = keep;
+ position_ = RoundUp(keep->start(), kAlignmentInBytes);
+ limit_ = keep->end();
+ DCHECK_LT(allocation_size(), kAlignmentInBytes);
+ DCHECK_EQ(segment_bytes_allocated_, keep->total_size());
}
void Zone::DeleteAll() {
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index ef2f0b3dc8..801db73fe4 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -134,15 +134,10 @@ class V8_EXPORT_PRIVATE Zone final {
// Seals the zone to prevent any further allocation.
void Seal() { sealed_ = true; }
- // Allows the zone to be safely reused. Releases the memory and fires zone
- // destruction and creation events for the accounting allocator.
- void ReleaseMemory();
-
- // Returns true if more memory has been allocated in zones than
- // the limit allows.
- bool excess_allocation() const {
- return segment_bytes_allocated_ > kExcessLimit;
- }
+ // Allows the zone to be safely reused. Releases the memory except for the
+ // last page, and fires zone destruction and creation events for the
+ // accounting allocator.
+ void Reset();
size_t segment_bytes_allocated() const { return segment_bytes_allocated_; }
@@ -202,16 +197,13 @@ class V8_EXPORT_PRIVATE Zone final {
// Never allocate segments larger than this size in bytes.
static const size_t kMaximumSegmentSize = 32 * KB;
- // Report zone excess when allocation exceeds this limit.
- static const size_t kExcessLimit = 256 * MB;
-
// The number of bytes allocated in this zone so far.
- size_t allocation_size_ = 0;
+ std::atomic<size_t> allocation_size_ = {0};
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
// the zone.
- size_t segment_bytes_allocated_ = 0;
+ std::atomic<size_t> segment_bytes_allocated_ = {0};
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
@@ -234,10 +226,10 @@ class V8_EXPORT_PRIVATE Zone final {
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
TypeStats type_stats_;
- size_t allocation_size_for_tracing_ = 0;
+ std::atomic<size_t> allocation_size_for_tracing_ = {0};
// The number of bytes freed in this zone so far.
- size_t freed_size_for_tracing_ = 0;
+ stdd::atomic<size_t> freed_size_for_tracing_ = {0};
#endif
friend class ZoneScope;
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index a603f00258..e857ea5e8e 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -49,6 +49,7 @@
['variant in [jitless, nooptimization, stress]', {
# Slow tests.
'kraken/ai-astar': [PASS, SLOW],
+ 'kraken/audio-beat-detection': [SLOW],
'kraken/imaging-darkroom': [PASS, SLOW],
'kraken/imaging-desaturate': [PASS, SLOW],
'octane/code-load': [PASS, SLOW],
@@ -73,6 +74,7 @@
['variant == jitless', {
# Too slow for jitless mode.
+ 'kraken/audio-fft': [PASS, ['mode == debug', SKIP]],
'kraken/imaging-desaturate' : [PASS, ['mode == debug', SKIP]],
'kraken/imaging-gaussian-blur' : [PASS, ['mode == debug', SKIP]],
'octane/zlib': [SKIP],
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 58b4bc1d2e..d5d20fdfe9 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -43,10 +43,9 @@ v8_executable("cctest") {
]
}
- # TODO(machenbach): Translate from gyp.
- #["OS=="aix"", {
- # "ldflags": [ "-Wl,-bbigtoc" ],
- #}],
+ if (current_os == "aix") {
+ ldflags = [ "-Wl,-bbigtoc" ]
+ }
}
v8_header_set("cctest_headers") {
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 04f3167b68..99bca0a29b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -38,6 +38,7 @@
#include "src/base/strings.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
#include "src/flags/flags.h"
@@ -121,7 +122,7 @@ void CcTest::Run() {
DCHECK_EQ(active_isolates, i::Isolate::non_disposed_isolates());
#endif // DEBUG
if (initialize_) {
- if (v8::Locker::WasEverUsed()) {
+ if (i_isolate()->was_locker_ever_used()) {
v8::Locker locker(isolate_);
EmptyMessageQueues(isolate_);
} else {
@@ -278,14 +279,14 @@ i::Handle<i::JSFunction> Optimize(
}
CHECK(info.shared_info()->HasBytecodeArray());
- i::JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ i::JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
- i::Handle<i::Code> code =
+ i::Handle<i::CodeT> code = i::ToCodeT(
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
- .ToHandleChecked();
+ .ToHandleChecked(),
+ isolate);
info.native_context().AddOptimizedCode(*code);
function->set_code(*code, v8::kReleaseStore);
-
return function;
}
@@ -335,8 +336,8 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- CHECK(v8::V8::InitializeVirtualMemoryCage());
+#ifdef V8_SANDBOX
+ CHECK(v8::V8::InitializeSandbox());
#endif
cppgc::InitializeProcess(platform->GetPageAllocator());
using HelpOptions = v8::internal::FlagList::HelpOptions;
@@ -423,3 +424,37 @@ bool IsValidUnwrapObject(v8::Object* object) {
instance_type == i::Internals::kJSObjectType ||
instance_type == i::Internals::kJSSpecialApiObjectType);
}
+
+ManualGCScope::ManualGCScope(i::Isolate* isolate)
+ : flag_concurrent_marking_(i::FLAG_concurrent_marking),
+ flag_concurrent_sweeping_(i::FLAG_concurrent_sweeping),
+ flag_stress_concurrent_allocation_(i::FLAG_stress_concurrent_allocation),
+ flag_stress_incremental_marking_(i::FLAG_stress_incremental_marking),
+ flag_parallel_marking_(i::FLAG_parallel_marking),
+ flag_detect_ineffective_gcs_near_heap_limit_(
+ i::FLAG_detect_ineffective_gcs_near_heap_limit) {
+ // Some tests run threaded (back-to-back) and thus the GC may already be
+ // running by the time a ManualGCScope is created. Finalizing existing marking
+ // prevents any undefined/unexpected behavior.
+ if (isolate && isolate->heap()->incremental_marking()->IsMarking()) {
+ CcTest::CollectGarbage(i::OLD_SPACE, isolate);
+ }
+
+ i::FLAG_concurrent_marking = false;
+ i::FLAG_concurrent_sweeping = false;
+ i::FLAG_stress_incremental_marking = false;
+ i::FLAG_stress_concurrent_allocation = false;
+ // Parallel marking has a dependency on concurrent marking.
+ i::FLAG_parallel_marking = false;
+ i::FLAG_detect_ineffective_gcs_near_heap_limit = false;
+}
+
+ManualGCScope::~ManualGCScope() {
+ i::FLAG_concurrent_marking = flag_concurrent_marking_;
+ i::FLAG_concurrent_sweeping = flag_concurrent_sweeping_;
+ i::FLAG_stress_concurrent_allocation = flag_stress_concurrent_allocation_;
+ i::FLAG_stress_incremental_marking = flag_stress_incremental_marking_;
+ i::FLAG_parallel_marking = flag_parallel_marking_;
+ i::FLAG_detect_ineffective_gcs_near_heap_limit =
+ flag_detect_ineffective_gcs_near_heap_limit_;
+}
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 20cc74666c..735fc9cc9a 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -199,18 +199,21 @@ class CcTest {
static void TearDown();
private:
- friend int main(int argc, char** argv);
+ static CcTest* last_;
+ static v8::ArrayBuffer::Allocator* allocator_;
+ static v8::Isolate* isolate_;
+ static bool initialize_called_;
+ static v8::base::Atomic32 isolate_used_;
+
TestFunction* callback_;
const char* file_;
const char* name_;
bool enabled_;
bool initialize_;
CcTest* prev_;
- static CcTest* last_;
- static v8::ArrayBuffer::Allocator* allocator_;
- static v8::Isolate* isolate_;
- static bool initialize_called_;
- static v8::base::Atomic32 isolate_used_;
+
+ friend int main(int argc, char** argv);
+ friend class ManualGCScope;
};
// Switches between all the Api tests using the threading support.
@@ -528,6 +531,25 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(
return CompileRunWithOrigin(v8_str(source), origin_url);
}
+// Run a ScriptStreamingTask in a separate thread.
+class StreamerThread : public v8::base::Thread {
+ public:
+ static void StartThreadForTaskAndJoin(
+ v8::ScriptCompiler::ScriptStreamingTask* task) {
+ StreamerThread thread(task);
+ CHECK(thread.Start());
+ thread.Join();
+ }
+
+ explicit StreamerThread(v8::ScriptCompiler::ScriptStreamingTask* task)
+ : Thread(Thread::Options()), task_(task) {}
+
+ void Run() override { task_->Run(); }
+
+ private:
+ v8::ScriptCompiler::ScriptStreamingTask* task_;
+};
+
// Takes a JSFunction and runs it through the test version of the optimizing
// pipeline, allocating the temporary compilation artifacts in a given Zone.
// For possible {flags} values, look at OptimizedCompilationInfo::Flag. If
@@ -657,42 +679,25 @@ class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
const char* data_;
};
+// ManualGCScope allows for disabling GC heuristics. This is useful for tests
+// that want to check specific corner cases around GC.
+//
+// The scope will finalize any ongoing GC on the provided Isolate. If no Isolate
+// is manually provided, it is assumed that a CcTest setup (e.g.
+// CcTest::InitializeVM()) is used.
class V8_NODISCARD ManualGCScope {
public:
- ManualGCScope()
- : flag_concurrent_marking_(i::FLAG_concurrent_marking),
- flag_concurrent_sweeping_(i::FLAG_concurrent_sweeping),
- flag_stress_concurrent_allocation_(
- i::FLAG_stress_concurrent_allocation),
- flag_stress_incremental_marking_(i::FLAG_stress_incremental_marking),
- flag_parallel_marking_(i::FLAG_parallel_marking),
- flag_detect_ineffective_gcs_near_heap_limit_(
- i::FLAG_detect_ineffective_gcs_near_heap_limit) {
- i::FLAG_concurrent_marking = false;
- i::FLAG_concurrent_sweeping = false;
- i::FLAG_stress_incremental_marking = false;
- i::FLAG_stress_concurrent_allocation = false;
- // Parallel marking has a dependency on concurrent marking.
- i::FLAG_parallel_marking = false;
- i::FLAG_detect_ineffective_gcs_near_heap_limit = false;
- }
- ~ManualGCScope() {
- i::FLAG_concurrent_marking = flag_concurrent_marking_;
- i::FLAG_concurrent_sweeping = flag_concurrent_sweeping_;
- i::FLAG_stress_concurrent_allocation = flag_stress_concurrent_allocation_;
- i::FLAG_stress_incremental_marking = flag_stress_incremental_marking_;
- i::FLAG_parallel_marking = flag_parallel_marking_;
- i::FLAG_detect_ineffective_gcs_near_heap_limit =
- flag_detect_ineffective_gcs_near_heap_limit_;
- }
+ explicit ManualGCScope(
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(CcTest::isolate_));
+ ~ManualGCScope();
private:
- bool flag_concurrent_marking_;
- bool flag_concurrent_sweeping_;
- bool flag_stress_concurrent_allocation_;
- bool flag_stress_incremental_marking_;
- bool flag_parallel_marking_;
- bool flag_detect_ineffective_gcs_near_heap_limit_;
+ const bool flag_concurrent_marking_;
+ const bool flag_concurrent_sweeping_;
+ const bool flag_stress_concurrent_allocation_;
+ const bool flag_stress_incremental_marking_;
+ const bool flag_parallel_marking_;
+ const bool flag_detect_ineffective_gcs_near_heap_limit_;
};
// This is an abstract base class that can be overridden to implement a test
@@ -705,55 +710,56 @@ class TestPlatform : public v8::Platform {
// v8::Platform implementation.
v8::PageAllocator* GetPageAllocator() override {
- return old_platform_->GetPageAllocator();
+ return old_platform()->GetPageAllocator();
}
void OnCriticalMemoryPressure() override {
- old_platform_->OnCriticalMemoryPressure();
+ old_platform()->OnCriticalMemoryPressure();
}
bool OnCriticalMemoryPressure(size_t length) override {
- return old_platform_->OnCriticalMemoryPressure(length);
+ return old_platform()->OnCriticalMemoryPressure(length);
}
int NumberOfWorkerThreads() override {
- return old_platform_->NumberOfWorkerThreads();
+ return old_platform()->NumberOfWorkerThreads();
}
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override {
- return old_platform_->GetForegroundTaskRunner(isolate);
+ return old_platform()->GetForegroundTaskRunner(isolate);
}
void CallOnWorkerThread(std::unique_ptr<v8::Task> task) override {
- old_platform_->CallOnWorkerThread(std::move(task));
+ old_platform()->CallOnWorkerThread(std::move(task));
}
void CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
double delay_in_seconds) override {
- old_platform_->CallDelayedOnWorkerThread(std::move(task), delay_in_seconds);
+ old_platform()->CallDelayedOnWorkerThread(std::move(task),
+ delay_in_seconds);
}
std::unique_ptr<v8::JobHandle> PostJob(
v8::TaskPriority priority,
std::unique_ptr<v8::JobTask> job_task) override {
- return old_platform_->PostJob(priority, std::move(job_task));
+ return old_platform()->PostJob(priority, std::move(job_task));
}
double MonotonicallyIncreasingTime() override {
- return old_platform_->MonotonicallyIncreasingTime();
+ return old_platform()->MonotonicallyIncreasingTime();
}
double CurrentClockTimeMillis() override {
- return old_platform_->CurrentClockTimeMillis();
+ return old_platform()->CurrentClockTimeMillis();
}
bool IdleTasksEnabled(v8::Isolate* isolate) override {
- return old_platform_->IdleTasksEnabled(isolate);
+ return old_platform()->IdleTasksEnabled(isolate);
}
v8::TracingController* GetTracingController() override {
- return old_platform_->GetTracingController();
+ return old_platform()->GetTracingController();
}
protected:
@@ -763,7 +769,7 @@ class TestPlatform : public v8::Platform {
v8::Platform* old_platform() const { return old_platform_; }
private:
- v8::Platform* old_platform_;
+ std::atomic<v8::Platform*> old_platform_;
};
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 832e58ba55..b0ff48ced7 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -192,9 +192,21 @@
# Apple silicon. See:
# https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
'test-assembler-arm64/printf_no_preserve': [SKIP],
+
+ # https://crbug.com/v8/12588
+ 'test-run-wasm-relaxed-simd/RunWasm_F32x4Qfma_interpreter': [SKIP],
+ 'test-run-wasm-relaxed-simd/RunWasm_F32x4Qfms_interpreter': [SKIP],
+ 'test-run-wasm-relaxed-simd/RunWasm_F64x2Qfma_interpreter': [SKIP],
+ 'test-run-wasm-relaxed-simd/RunWasm_F64x2Qfms_interpreter': [SKIP],
}], # arch == arm64 and system == macos and not simulator_run
##############################################################################
+['arch == arm and not simulator_run', {
+ # Fails on ODROID: https://crbug.com/v8/11634
+ 'test-wasm-metrics/*': [SKIP],
+}], # arch == arm and not simulator_run
+
+##############################################################################
['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'test-cpu-profiler/MultipleIsolates': [SKIP],
@@ -282,12 +294,10 @@
##############################################################################
['byteorder == big', {
- # Skip Wasm atomic tests on big-endian machines.
- # There is no support to emulate atomic Wasm operations on big-endian
- # platforms, since this would require bit swapping as a part of atomic
- # operations.
- 'test-run-wasm-atomics/*': [SKIP],
- 'test-run-wasm-atomics64/*': [SKIP],
+ # BUG(v8:12505). Tests which share Wasm memory buffer with Js Typed arrays.
+ 'test-api/WasmI32AtomicWaitCallback': [SKIP],
+ 'test-api/WasmI64AtomicWaitCallback': [SKIP],
+ # Peephole optimization not supported on big-endian machines.
'test-regexp/Peephole*': [SKIP],
}], # 'byteorder == big'
@@ -418,11 +428,6 @@
'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
# SIMD not fully implemented yet.
- 'test-run-wasm-relaxed-simd/*': [SKIP],
- 'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
- 'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
- 'test-run-wasm-simd-liftoff/*': [SKIP],
- 'test-run-wasm-simd/*':[SKIP],
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
@@ -506,11 +511,6 @@
'test-wasm-breakpoints/*' : [SKIP],
- # SIMD / Liftoff not fully implemented yet
- 'test-run-wasm-simd-liftoff/*': [SKIP],
- 'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
- 'test-gc/RunWasmLiftoff_RefTrivialCastsStatic': [SKIP],
-
# TODO(11856): Port nondeterminism detection.
'test-liftoff-for-fuzzing/*': [SKIP],
@@ -519,6 +519,11 @@
['arch == ppc64', {
# https://crbug.com/v8/8766
'test-bytecode-generator/WideRegisters': [SKIP],
+
+ # SIMD / Liftoff not fully implemented yet
+ 'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
+ 'test-gc/RunWasmLiftoff_RefTrivialCastsStatic': [SKIP],
}],
##############################################################################
@@ -720,19 +725,6 @@
}], # variant == jitless
##############################################################################
-['variant == turboprop or variant == turboprop_as_toptier', {
- # Require inlining.
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
- 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
- # Turboprop doesn't use call feedback and hence doesn't inline even if
- # the inlining flag is explicitly set.
- 'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
- 'test-calls-with-arraylike-or-spread/*': [SKIP],
- 'test-js-to-wasm/*': [SKIP],
-}], # variant == turboprop or variant == turboprop_as_toptier
-
-##############################################################################
['no_i18n == True', {
'test-regexp/UnicodePropertyEscapeCodeSize': [SKIP],
}], # no_i18n == True
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 0b7174bc2b..ef39787bad 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -55,6 +55,10 @@ class CodeRunner : public CallHelper<T> {
public:
CodeRunner(Isolate* isolate, Handle<Code> code, MachineSignature* csig)
: CallHelper<T>(isolate, csig), code_(code) {}
+#ifdef V8_EXTERNAL_CODE_SPACE
+ CodeRunner(Isolate* isolate, Handle<CodeT> code, MachineSignature* csig)
+ : CallHelper<T>(isolate, csig), code_(FromCodeT(*code), isolate) {}
+#endif // V8_EXTERNAL_CODE_SPACE
~CodeRunner() override = default;
Address Generate() override { return code_->entry(); }
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 18919b2c30..baf497003d 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -76,6 +76,8 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
return code_.ToHandleChecked();
}
+ Handle<CodeT> GetCodeT() { return ToCodeT(GetCode(), main_isolate()); }
+
protected:
Address Generate() override {
if (code_.is_null()) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 5cbdcd1b3b..220b4b6d43 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -46,7 +46,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
flags_(0) {
CHECK(!code.is_null());
Compile(function);
- function->set_code(*code, kReleaseStore);
+ function->set_code(ToCodeT(*code), kReleaseStore);
}
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 69823ef55a..9a8696c828 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -88,8 +88,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ Parameter<Object>(1));
- params.push_back(__ HeapConstant(
- BuildTeardownFunction(isolate, call_descriptor, parameters)));
+ params.push_back(__ HeapConstant(ToCodeT(
+ BuildTeardownFunction(isolate, call_descriptor, parameters), isolate)));
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
@@ -441,12 +441,13 @@ class TestEnvironment : public HandleAndZoneScope {
DCHECK_LE(kGeneralRegisterCount,
GetRegConfig()->num_allocatable_general_registers() - 2);
- int32_t general_mask = GetRegConfig()->allocatable_general_codes_mask();
+ RegList general_mask =
+ RegList::FromBits(GetRegConfig()->allocatable_general_codes_mask());
// kReturnRegister0 is used to hold the "teardown" code object, do not
// generate moves using it.
+ general_mask.clear(kReturnRegister0);
std::unique_ptr<const RegisterConfiguration> registers(
- RegisterConfiguration::RestrictGeneralRegisters(
- general_mask & ~kReturnRegister0.bit()));
+ RegisterConfiguration::RestrictGeneralRegisters(general_mask));
for (int i = 0; i < kGeneralRegisterCount; i++) {
int code = registers->GetAllocatableGeneralCode(i);
@@ -460,7 +461,7 @@ class TestEnvironment : public HandleAndZoneScope {
((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
"kDoubleRegisterCount should be a multiple of two and three.");
for (int i = 0; i < kDoubleRegisterCount; i += 2) {
- if (kSimpleFPAliasing) {
+ if (kFPAliasing != AliasingKind::kCombine) {
// Allocate three registers at once if kSimd128 is supported, else
// allocate in pairs.
AddRegister(&test_signature, MachineRepresentation::kFloat32,
@@ -577,7 +578,7 @@ class TestEnvironment : public HandleAndZoneScope {
kTotalStackParameterCount, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
CallDescriptor::kNoFlags); // flags
}
@@ -696,7 +697,8 @@ class TestEnvironment : public HandleAndZoneScope {
// return value will be freed along with it. Copy the result into
// state_out.
FunctionTester ft(setup, 2);
- Handle<FixedArray> result = ft.CallChecked<FixedArray>(test, state_in);
+ Handle<FixedArray> result =
+ ft.CallChecked<FixedArray>(ToCodeT(test, main_isolate()), state_in);
CHECK_EQ(result->length(), state_in->length());
result->CopyTo(0, *state_out, 0, result->length());
}
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index faf24c1fee..1dfa2dbab7 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -92,7 +92,6 @@ class BackgroundCompilationThread final : public v8::base::Thread {
TEST(TestConcurrentSharedFunctionInfo) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
- FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true);
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
@@ -120,7 +119,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
Handle<SharedFunctionInfo> test_sfi(test->shared(), isolate);
DCHECK(test_sfi->HasBytecodeArray());
IsCompiledScope compiled_scope_test(*test_sfi, isolate);
- JSFunction::EnsureFeedbackVector(test, &compiled_scope_test);
+ JSFunction::EnsureFeedbackVector(isolate, test, &compiled_scope_test);
// Get function "f"
Local<Function> function_f = Local<Function>::Cast(
@@ -136,7 +135,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
Pipeline::GenerateCodeForTesting(&f_info, isolate).ToHandleChecked();
f->set_code(*f_code, kReleaseStore);
IsCompiledScope compiled_scope_f(*f_sfi, isolate);
- JSFunction::EnsureFeedbackVector(f, &compiled_scope_f);
+ JSFunction::EnsureFeedbackVector(isolate, f, &compiled_scope_f);
ExpectSharedFunctionInfoState(*test_sfi, SfiState::Compiled);
@@ -146,7 +145,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
// Prepare job.
{
CompilationHandleScope compilation(isolate, job->compilation_info());
- CanonicalHandleScope canonical(isolate, job->compilation_info());
+ CanonicalHandleScopeForTurbofan canonical(isolate, job->compilation_info());
job->compilation_info()->ReopenHandlesInNewHandleScope(isolate);
const CompilationJob::Status status = job->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index faa0367d71..664e41fa80 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -17,7 +17,7 @@ const auto GetRegConfig = RegisterConfiguration::Default;
// simplify ParallelMove equivalence testing.
void GetCanonicalOperands(const InstructionOperand& op,
std::vector<InstructionOperand>* fragments) {
- CHECK(!kSimpleFPAliasing);
+ CHECK_EQ(kFPAliasing, AliasingKind::kCombine);
CHECK(op.IsFPLocationOperand());
const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation();
@@ -51,7 +51,7 @@ class InterpreterState {
CHECK(!m->IsRedundant());
const InstructionOperand& src = m->source();
const InstructionOperand& dst = m->destination();
- if (!kSimpleFPAliasing && src.IsFPLocationOperand() &&
+ if (kFPAliasing == AliasingKind::kCombine && src.IsFPLocationOperand() &&
dst.IsFPLocationOperand()) {
// Canonicalize FP location-location moves by fragmenting them into
// an equivalent sequence of float32 moves, to simplify state
@@ -137,8 +137,15 @@ class InterpreterState {
// Preserve FP representation when FP register aliasing is complex.
// Otherwise, canonicalize to kFloat64.
if (IsFloatingPoint(loc_op.representation())) {
- rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64
- : loc_op.representation();
+ if (kFPAliasing == AliasingKind::kIndependent) {
+ rep = IsSimd128(loc_op.representation())
+ ? MachineRepresentation::kSimd128
+ : MachineRepresentation::kFloat64;
+ } else if (kFPAliasing == AliasingKind::kOverlap) {
+ rep = MachineRepresentation::kFloat64;
+ } else {
+ rep = loc_op.representation();
+ }
}
if (loc_op.IsAnyRegister()) {
index = loc_op.register_code();
@@ -234,7 +241,8 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// On architectures where FP register aliasing is non-simple, update the
// destinations set with the float equivalents of the operand and check
// that all destinations are unique and do not alias each other.
- if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) {
+ if (kFPAliasing == AliasingKind::kCombine &&
+ mo.destination().IsFPLocationOperand()) {
std::vector<InstructionOperand> dst_fragments;
GetCanonicalOperands(dst, &dst_fragments);
CHECK(!dst_fragments.empty());
@@ -383,7 +391,7 @@ void RunTest(ParallelMove* pm, Zone* zone) {
TEST(Aliasing) {
// On platforms with simple aliasing, these parallel moves are ill-formed.
- if (kSimpleFPAliasing) return;
+ if (kFPAliasing != AliasingKind::kCombine) return;
ParallelMoveCreator pmc;
Zone* zone = pmc.main_zone();
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 3f3f406b0f..82649b59ca 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -6,7 +6,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
@@ -92,8 +91,6 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Node* reduce(Node* node) {
- JSHeapCopyReducer heap_copy_reducer(&js_heap_broker);
- CHECK(!heap_copy_reducer.Reduce(node).Changed());
JSGraph jsgraph(main_isolate(), &graph, &common, &javascript, &simplified,
&machine);
GraphReducer graph_reducer(main_zone(), &graph, &tick_counter,
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index f4488bf09a..1c31166f58 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -29,7 +29,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
jsgraph_(main_isolate(), main_graph_, &main_common_, &javascript_,
&main_simplified_, &main_machine_),
broker_(main_isolate(), main_zone()),
- changer_(&jsgraph_, &broker_) {
+ changer_(&jsgraph_, &broker_, nullptr) {
Node* s = graph()->NewNode(common()->Start(num_parameters));
graph()->SetStart(s);
}
@@ -264,7 +264,9 @@ TEST(ToInt32_constant) {
RepresentationChangerTester r;
{
FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(i);
+ const double value = static_cast<double>(i);
+ Node* n = r.jsgraph()->Constant(value);
+ NodeProperties::SetType(n, Type::Constant(value, r.zone()));
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, Type::Signed32(), use,
@@ -277,7 +279,9 @@ TEST(ToInt32_constant) {
TEST(ToUint32_constant) {
RepresentationChangerTester r;
FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(static_cast<double>(i));
+ const double value = static_cast<double>(i);
+ Node* n = r.jsgraph()->Constant(value);
+ NodeProperties::SetType(n, Type::Constant(value, r.zone()));
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, Type::Unsigned32(), use,
@@ -289,7 +293,9 @@ TEST(ToUint32_constant) {
TEST(ToInt64_constant) {
RepresentationChangerTester r;
FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(i);
+ const double value = static_cast<double>(i);
+ Node* n = r.jsgraph()->Constant(value);
+ NodeProperties::SetType(n, Type::Constant(value, r.zone()));
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, TypeCache::Get()->kSafeInteger, use,
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 544e9e3774..968cb3b395 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -120,7 +120,7 @@ class BytecodeGraphTester {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate_));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate_, function, &is_compiled_scope);
CHECK(function->shared().HasBytecodeArray());
Zone zone(isolate_->allocator(), ZONE_NAME);
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index f99e83a1fb..f6b7a249cf 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -7458,10 +7458,10 @@ TEST(RunComputedCodeObject) {
RawMachineLabel merge;
r.Branch(r.Parameter(0), &tlabel, &flabel);
r.Bind(&tlabel);
- Node* fa = r.HeapConstant(a.GetCode());
+ Node* fa = r.HeapConstant(a.GetCodeT());
r.Goto(&merge);
r.Bind(&flabel);
- Node* fb = r.HeapConstant(b.GetCode());
+ Node* fb = r.HeapConstant(b.GetCodeT());
r.Goto(&merge);
r.Bind(&merge);
Node* phi = r.Phi(MachineRepresentation::kWord32, fa, fb);
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 6f46ed6ff2..5859051e36 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -182,8 +182,8 @@ class RegisterConfig {
locations.AddParam(params.Next(msig->GetParam(i)));
}
- const RegList kCalleeSaveRegisters = 0;
- const RegList kCalleeSaveFPRegisters = 0;
+ const RegList kCalleeSaveRegisters;
+ const DoubleRegList kCalleeSaveFPRegisters;
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
@@ -242,8 +242,8 @@ class Int32Signature : public MachineSignature {
}
};
-Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule = nullptr) {
+Handle<CodeT> CompileGraph(const char* name, CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
OptimizedCompilationInfo info(base::ArrayVector("testing"), graph->zone(),
CodeKind::FOR_TESTING);
@@ -257,11 +257,11 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
code->Disassemble(name, os, isolate);
}
#endif
- return code;
+ return ToCodeT(code, isolate);
}
-Handle<Code> WrapWithCFunction(Handle<Code> inner,
- CallDescriptor* call_descriptor) {
+Handle<CodeT> WrapWithCFunction(Handle<CodeT> inner,
+ CallDescriptor* call_descriptor) {
Zone zone(inner->GetIsolate()->allocator(), ZONE_NAME, kCompressGraphZone);
int param_count = static_cast<int>(call_descriptor->ParameterCount());
GraphAndBuilders caller(&zone);
@@ -297,7 +297,6 @@ Handle<Code> WrapWithCFunction(Handle<Code> inner,
return CompileGraph("wrapper", cdesc, caller.graph());
}
-
template <typename CType>
class ArgsBuffer {
public:
@@ -426,7 +425,7 @@ class Computer {
CHECK_LE(num_params, kMaxParamCount);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Handle<Code> inner = Handle<Code>::null();
+ Handle<CodeT> inner;
{
// Build the graph for the computation.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -441,7 +440,7 @@ class Computer {
{
// constant mode.
- Handle<Code> wrapper = Handle<Code>::null();
+ Handle<CodeT> wrapper;
{
// Wrap the above code with a callable function that passes constants.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -475,7 +474,7 @@ class Computer {
{
// buffer mode.
- Handle<Code> wrapper = Handle<Code>::null();
+ Handle<CodeT> wrapper;
{
// Wrap the above code with a callable function that loads from {input}.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -535,8 +534,8 @@ static void TestInt32Sub(CallDescriptor* desc) {
b.graph()->SetEnd(ret);
}
- Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
- Handle<Code> wrapper = WrapWithCFunction(inner_code, desc);
+ Handle<CodeT> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
+ Handle<CodeT> wrapper = WrapWithCFunction(inner_code, desc);
MachineSignature* msig = desc->GetMachineSignature(&zone);
CodeRunner<int32_t> runnable(isolate, wrapper,
CSignature::FromMachine(&zone, msig));
@@ -558,7 +557,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
int32_t output[kNumParams];
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Handle<Code> inner = Handle<Code>::null();
+ Handle<CodeT> inner;
{
// Writes all parameters into the output buffer.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -575,7 +574,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
}
CSignatureOf<int32_t> csig;
- Handle<Code> wrapper = Handle<Code>::null();
+ Handle<CodeT> wrapper;
{
// Loads parameters from the input buffer and calls the above code.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -941,7 +940,7 @@ TEST(Float64Select_stack_params_return_reg) {
template <typename CType, int which>
static void Build_Select_With_Call(CallDescriptor* desc,
RawMachineAssembler* raw) {
- Handle<Code> inner = Handle<Code>::null();
+ Handle<CodeT> inner;
int num_params = ParamCount(desc);
CHECK_LE(num_params, kMaxParamCount);
{
@@ -953,7 +952,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
r.Return(r.Parameter(which));
inner = CompileGraph("Select-indirection", desc, &graph, r.ExportForTest());
CHECK(!inner.is_null());
- CHECK(inner->IsCode());
+ CHECK(inner->IsCodeT());
}
{
@@ -1040,7 +1039,7 @@ void MixedParamTest(int start) {
MachineSignature* sig = builder.Build();
CallDescriptor* desc = config.Create(&zone, sig);
- Handle<Code> select;
+ Handle<CodeT> select;
{
// build the select.
Zone select_zone(&allocator, ZONE_NAME, kCompressGraphZone);
@@ -1052,7 +1051,7 @@ void MixedParamTest(int start) {
{
// call the select.
- Handle<Code> wrapper = Handle<Code>::null();
+ Handle<CodeT> wrapper;
int32_t expected_ret;
char bytes[kDoubleSize];
alignas(8) char output[kDoubleSize];
@@ -1159,7 +1158,7 @@ void TestStackSlot(MachineType slot_type, T expected) {
// Create inner function g. g has lots of parameters so that they are passed
// over the stack.
- Handle<Code> inner;
+ Handle<CodeT> inner;
Graph graph(&zone);
RawMachineAssembler g(isolate, &graph, desc);
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 06b2529ad6..432a529a12 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -6,7 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/macro-assembler.h"
-
+#include "src/objects/code-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -44,7 +44,9 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- params.push_back(__ HeapConstant(BuildCallee(isolate, callee_descriptor)));
+ Handle<CodeT> code =
+ ToCodeT(BuildCallee(isolate, callee_descriptor), isolate);
+ params.push_back(__ HeapConstant(code));
int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
for (int i = 0; i < param_slots; ++i) {
params.push_back(__ IntPtrConstant(i));
@@ -63,8 +65,9 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- params.push_back(__ HeapConstant(
- BuildCaller(isolate, caller_descriptor, callee_descriptor)));
+ Handle<CodeT> code = ToCodeT(
+ BuildCaller(isolate, caller_descriptor, callee_descriptor), isolate);
+ params.push_back(__ HeapConstant(code));
// Set up arguments for "Caller".
int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
for (int i = 0; i < param_slots; ++i) {
@@ -101,7 +104,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
param_slots, // stack parameter slots
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
+ kNoCalleeSavedFp, // callee-saved fp
CallDescriptor::kNoFlags); // flags
}
diff --git a/deps/v8/test/cctest/disasm-regex-helper.cc b/deps/v8/test/cctest/disasm-regex-helper.cc
index 1559f90ac2..7fb22c8c7c 100644
--- a/deps/v8/test/cctest/disasm-regex-helper.cc
+++ b/deps/v8/test/cctest/disasm-regex-helper.cc
@@ -20,13 +20,13 @@ std::string DisassembleFunction(const char* function) {
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
- Address begin = f->code().raw_instruction_start();
- Address end = f->code().raw_instruction_end();
Isolate* isolate = CcTest::i_isolate();
+ Handle<Code> code(FromCodeT(f->code()), isolate);
+ Address begin = code->raw_instruction_start();
+ Address end = code->raw_instruction_end();
std::ostringstream os;
Disassembler::Decode(isolate, os, reinterpret_cast<byte*>(begin),
- reinterpret_cast<byte*>(end),
- CodeReference(handle(f->code(), isolate)));
+ reinterpret_cast<byte*>(end), CodeReference(code));
return os.str();
}
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index c7549ad71a..b5fd905a34 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -31,7 +31,8 @@ void SealCurrentObjects(Heap* heap) {
CHECK(!FLAG_stress_concurrent_allocation);
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
heap->old_space()->FreeLinearAllocationArea();
for (Page* page : *heap->old_space()) {
page->MarkNeverAllocateForTesting();
@@ -140,7 +141,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
+ DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());
@@ -172,7 +173,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
SafepointScope scope(heap);
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
if (marking->IsStopped()) {
@@ -201,7 +203,8 @@ void SimulateFullSpace(v8::internal::PagedSpace* space) {
CodePageCollectionMemoryModificationScope modification_scope(space->heap());
i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
space->FreeLinearAllocationArea();
space->ResetFreeList();
@@ -218,7 +221,8 @@ void GcAndSweep(Heap* heap, AllocationSpace space) {
heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
if (heap->mark_compact_collector()->sweeping_in_progress()) {
SafepointScope scope(heap);
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
}
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index bbc268d0c5..5f168014c8 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -78,7 +78,7 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
// Map space.
- heap::SimulateFullSpace(heap->map_space());
+ heap::SimulateFullSpace(heap->space_for_maps());
obj = heap->AllocateRaw(Map::kSize, AllocationType::kMap).ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), Map::kSize,
ClearRecordedSlots::kNo);
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 543a4beb17..61325bb3b7 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -79,7 +79,8 @@ HEAP_TEST(CompactionFullAbortedPage) {
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// Check that all handles still point to the same page, i.e., compaction
// has been aborted on the page.
@@ -160,7 +161,8 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
bool migration_aborted = false;
for (Handle<FixedArray> object : compaction_page_handles) {
@@ -256,7 +258,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
CHECK_EQ(Page::FromHeapObject(*compaction_page_handles.front()),
page_to_fill);
@@ -334,7 +337,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
@@ -435,7 +439,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index 138ad50883..7975dba13a 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -248,7 +248,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
AllocationResult result = local_heap.AllocateRaw(
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (result.IsRetry()) {
+ if (result.IsFailure()) {
local_heap.TryPerformCollection();
} else {
Address address = result.ToAddress();
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 0113894982..9f6087ee98 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -34,7 +34,8 @@ TEST(ConcurrentMarking) {
if (!heap->incremental_marking()->IsStopped()) return;
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
MarkingWorklists marking_worklists;
@@ -56,7 +57,8 @@ TEST(ConcurrentMarkingReschedule) {
if (!heap->incremental_marking()->IsStopped()) return;
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
MarkingWorklists marking_worklists;
@@ -82,7 +84,8 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
if (!heap->incremental_marking()->IsStopped()) return;
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
MarkingWorklists marking_worklists;
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 0f6ce60a68..7acdc6b0aa 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -13,8 +13,10 @@
#include "include/v8-template.h"
#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
+#include "src/common/allow-deprecated.h"
#include "src/handles/global-handles.h"
#include "src/heap/embedder-tracing.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/safepoint.h"
@@ -25,6 +27,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
+START_ALLOW_USE_DEPRECATED()
+
namespace v8 {
namespace internal {
@@ -264,22 +268,26 @@ TEST(FinalizeTracingWhenMarking) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
- Isolate* i_isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::i_isolate()->heap();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
// Finalize a potentially running garbage collection.
- i_isolate->heap()->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kTesting);
- if (i_isolate->heap()->mark_compact_collector()->sweeping_in_progress()) {
- i_isolate->heap()->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+ if (heap->mark_compact_collector()->sweeping_in_progress()) {
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
- CHECK(i_isolate->heap()->incremental_marking()->IsStopped());
+ heap->tracer()->StopCycleIfNeeded();
+ CHECK(heap->incremental_marking()->IsStopped());
- i::IncrementalMarking* marking = i_isolate->heap()->incremental_marking();
+ i::IncrementalMarking* marking = heap->incremental_marking();
{
- SafepointScope scope(i_isolate->heap());
- marking->Start(i::GarbageCollectionReason::kTesting);
+ SafepointScope scope(heap);
+ heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
+ marking->Start(GarbageCollectionReason::kTesting);
}
// Sweeping is not runing so we should immediately start marking.
@@ -1327,3 +1335,5 @@ TEST(NotifyEmptyStack) {
} // namespace heap
} // namespace internal
} // namespace v8
+
+END_ALLOW_USE_DEPRECATED()
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index ca7ac13558..4d67965136 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -55,6 +55,7 @@
#include "src/heap/safepoint.h"
#include "src/ic/ic.h"
#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/elements.h"
#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
@@ -63,7 +64,6 @@
#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/transitions.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/snapshot.h"
@@ -337,7 +337,8 @@ TEST(HeapObjects) {
Handle<String> object_string = Handle<String>::cast(factory->Object_string());
Handle<JSGlobalObject> global(CcTest::i_isolate()->context().global_object(),
isolate);
- CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string));
+ CHECK(Just(true) ==
+ JSReceiver::HasOwnProperty(isolate, global, object_string));
// Check ToString for oddballs
ReadOnlyRoots roots(heap);
@@ -406,7 +407,7 @@ TEST(GarbageCollection) {
CcTest::CollectGarbage(NEW_SPACE);
// Function should be alive.
- CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, global, name));
// Check function is retained.
Handle<Object> func_value =
Object::GetProperty(isolate, global, name).ToHandleChecked();
@@ -424,7 +425,7 @@ TEST(GarbageCollection) {
// After gc, it should survive.
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, global, obj_name));
Handle<Object> obj =
Object::GetProperty(isolate, global, obj_name).ToHandleChecked();
CHECK(obj->IsJSObject());
@@ -800,60 +801,60 @@ TEST(ObjectProperties) {
Handle<Smi> two(Smi::FromInt(2), isolate);
// check for empty
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, first));
// add first
Object::SetProperty(isolate, obj, first, one).Check();
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, first));
// delete first
CHECK(Just(true) ==
JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, first));
// add first and then second
Object::SetProperty(isolate, obj, first, one).Check();
Object::SetProperty(isolate, obj, second, two).Check();
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, second));
// delete first and then second
CHECK(Just(true) ==
JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, second));
CHECK(Just(true) ==
JSReceiver::DeleteProperty(obj, second, LanguageMode::kSloppy));
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, second));
// add first and then second
Object::SetProperty(isolate, obj, first, one).Check();
Object::SetProperty(isolate, obj, second, two).Check();
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, second));
// delete second and then first
CHECK(Just(true) ==
JSReceiver::DeleteProperty(obj, second, LanguageMode::kSloppy));
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, first));
CHECK(Just(true) ==
JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
- CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(isolate, obj, second));
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
Object::SetProperty(isolate, obj, s1, one).Check();
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
Object::SetProperty(isolate, obj, s2_string, one).Check();
Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
- CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, obj, s2));
}
@@ -1221,7 +1222,7 @@ HEAP_TEST(Regress10560) {
// Allocate feedback vector.
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(i_isolate));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(i_isolate, function, &is_compiled_scope);
CHECK(function->has_feedback_vector());
CHECK(function->shared().is_compiled());
@@ -1270,65 +1271,6 @@ UNINITIALIZED_TEST(Regress10843) {
isolate->Dispose();
}
-// Tests that spill slots from optimized code don't have weak pointers.
-TEST(Regress10774) {
- if (FLAG_single_generation) return;
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_turboprop = true;
- i::FLAG_turbo_dynamic_map_checks = true;
-#ifdef VERIFY_HEAP
- i::FLAG_verify_heap = true;
-#endif
-
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- Isolate* i_isolate = CcTest::i_isolate();
- Factory* factory = i_isolate->factory();
- Heap* heap = i_isolate->heap();
-
- {
- v8::HandleScope scope(isolate);
- // We want to generate optimized code with dynamic map check operator that
- // migrates deprecated maps. To force this, we want the IC state to be
- // monomorphic and the map in the feedback should be a migration target.
- const char* source =
- "function f(o) {"
- " return o.b;"
- "}"
- "var o = {a:10, b:20};"
- "var o1 = {a:10, b:20};"
- "var o2 = {a:10, b:20};"
- "%PrepareFunctionForOptimization(f);"
- "f(o);"
- "o1.b = 10.23;" // Deprecate O's map.
- "f(o1);" // Install new map in IC
- "f(o);" // Mark o's map as migration target
- "%OptimizeFunctionOnNextCall(f);"
- "f(o);";
- CompileRun(source);
-
- Handle<String> foo_name = factory->InternalizeUtf8String("f");
- Handle<Object> func_value =
- Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
- .ToHandleChecked();
- CHECK(func_value->IsJSFunction());
- Handle<JSFunction> fun = Handle<JSFunction>::cast(func_value);
-
- Handle<String> obj_name = factory->InternalizeUtf8String("o2");
- Handle<Object> obj_value =
- Object::GetProperty(i_isolate, i_isolate->global_object(), obj_name)
- .ToHandleChecked();
-
- heap::SimulateFullSpace(heap->new_space());
-
- Handle<JSObject> global(i_isolate->context().global_object(), i_isolate);
- // O2 still has the deprecated map and the optimized code should migrate O2
- // successfully. This shouldn't crash.
- Execution::Call(i_isolate, fun, global, 1, &obj_value).ToHandleChecked();
- }
-}
-
#ifndef V8_LITE_MODE
TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
@@ -1680,7 +1622,8 @@ TEST(TestSizeOfRegExpCode) {
CcTest::CollectAllAvailableGarbage();
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1721,7 +1664,8 @@ HEAP_TEST(TestSizeOfObjects) {
// the heap size and return with sweeping finished completely.
CcTest::CollectAllAvailableGarbage();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
int initial_size = static_cast<int>(heap->SizeOfObjects());
@@ -1746,7 +1690,8 @@ HEAP_TEST(TestSizeOfObjects) {
CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
}
@@ -1889,7 +1834,7 @@ TEST(TestAlignedOverAllocation) {
heap::AbandonCurrentlyFreeMemory(heap->old_space());
// Allocate a dummy object to properly set up the linear allocation info.
AllocationResult dummy = heap->old_space()->AllocateRawUnaligned(kTaggedSize);
- CHECK(!dummy.IsRetry());
+ CHECK(!dummy.IsFailure());
heap->CreateFillerObjectAt(dummy.ToObjectChecked().address(), kTaggedSize,
ClearRecordedSlots::kNo);
@@ -2379,7 +2324,8 @@ HEAP_TEST(GCFlags) {
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
IncrementalMarking* marking = heap->incremental_marking();
@@ -2453,7 +2399,7 @@ TEST(IdleNotificationFinishMarking) {
// The next idle notification has to finish incremental marking.
const double kLongIdleTime = 1000.0;
CcTest::isolate()->IdleNotificationDeadline(
- (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ (v8::base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
kLongIdleTime);
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count + 1);
@@ -2938,8 +2884,7 @@ TEST(OptimizedAllocationArrayLiterals) {
}
static int CountMapTransitions(i::Isolate* isolate, Map map) {
- DisallowGarbageCollection no_gc;
- return TransitionsAccessor(isolate, map, &no_gc).NumberOfTransitions();
+ return TransitionsAccessor(isolate, map).NumberOfTransitions();
}
@@ -3558,16 +3503,12 @@ void DetailedErrorStackTraceTest(const char* src,
CHECK(try_catch.HasCaught());
Handle<Object> exception = v8::Utils::OpenHandle(*try_catch.Exception());
- Isolate* isolate = CcTest::i_isolate();
- Handle<Name> key = isolate->factory()->stack_trace_symbol();
-
- Handle<FixedArray> stack_trace(Handle<FixedArray>::cast(
- Object::GetProperty(isolate, exception, key).ToHandleChecked()));
- test(stack_trace);
+ test(CcTest::i_isolate()->GetSimpleStackTrace(
+ Handle<JSReceiver>::cast(exception)));
}
FixedArray ParametersOf(Handle<FixedArray> stack_trace, int frame_index) {
- return StackFrameInfo::cast(stack_trace->get(frame_index)).parameters();
+ return CallSiteInfo::cast(stack_trace->get(frame_index)).parameters();
}
// * Test interpreted function error
@@ -3995,8 +3936,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
CHECK_EQ(dependency.length(), DependentCode::kSlotsPerEntry);
MaybeObject code = dependency.Get(0 + DependentCode::kCodeSlotOffset);
CHECK(code->IsWeak());
- CHECK_EQ(bar_handle->code(),
- FromCodeT(CodeT::cast(code->GetHeapObjectAssumeWeak())));
+ CHECK_EQ(bar_handle->code(), CodeT::cast(code->GetHeapObjectAssumeWeak()));
Smi groups = dependency.Get(0 + DependentCode::kGroupsSlotOffset).ToSmi();
CHECK_EQ(static_cast<DependentCode::DependencyGroups>(groups.value()),
DependentCode::kAllocationSiteTransitionChangedGroup |
@@ -4149,7 +4089,8 @@ TEST(CellsInOptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
+ code = handle(FromCodeT(bar->code()), isolate);
+ code = scope.CloseAndEscape(code);
}
// Now make sure that a gc should get rid of the function
@@ -4193,7 +4134,8 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
+ code = handle(FromCodeT(bar->code()), isolate);
+ code = scope.CloseAndEscape(code);
}
// Now make sure that a gc should get rid of the function
@@ -4255,7 +4197,8 @@ TEST(NewSpaceObjectsInOptimizedCode) {
CcTest::heap()->Verify();
#endif
CHECK(!bar->code().marked_for_deoptimization());
- code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
+ code = handle(FromCodeT(bar->code()), isolate);
+ code = scope.CloseAndEscape(code);
}
// Now make sure that a gc should get rid of the function
@@ -4299,7 +4242,8 @@ TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
+ code = handle(FromCodeT(bar->code()), isolate);
+ code = scope.CloseAndEscape(code);
}
CHECK(code->marked_for_deoptimization());
@@ -4361,10 +4305,11 @@ TEST(NextCodeLinkIsWeak) {
OptimizeDummyFunction(CcTest::isolate(), "mortal");
Handle<JSFunction> immortal =
OptimizeDummyFunction(CcTest::isolate(), "immortal");
- CHECK_EQ(immortal->code().next_code_link(), ToCodeT(mortal->code()));
- code_chain_length_before = GetCodeChainLength(immortal->code());
+ CHECK_EQ(immortal->code().next_code_link(), mortal->code());
+ code_chain_length_before = GetCodeChainLength(FromCodeT(immortal->code()));
// Keep the immortal code and let the mortal code die.
- code = scope.CloseAndEscape(Handle<Code>(immortal->code(), isolate));
+ code = handle(FromCodeT(immortal->code()), isolate);
+ code = scope.CloseAndEscape(code);
CompileRun("mortal = null; immortal = null;");
}
CcTest::CollectAllAvailableGarbage();
@@ -4390,9 +4335,10 @@ TEST(NextCodeLinkInCodeDataContainerIsCleared) {
OptimizeDummyFunction(CcTest::isolate(), "mortal1");
Handle<JSFunction> mortal2 =
OptimizeDummyFunction(CcTest::isolate(), "mortal2");
- CHECK_EQ(mortal2->code().next_code_link(), ToCodeT(mortal1->code()));
- code_data_container = scope.CloseAndEscape(Handle<CodeDataContainer>(
- mortal2->code().code_data_container(kAcquireLoad), isolate));
+ CHECK_EQ(mortal2->code().next_code_link(), mortal1->code());
+ code_data_container =
+ handle(CodeDataContainerFromCodeT(mortal2->code()), isolate);
+ code_data_container = scope.CloseAndEscape(code_data_container);
CompileRun("mortal1 = null; mortal2 = null;");
}
CcTest::CollectAllAvailableGarbage();
@@ -5206,7 +5152,7 @@ TEST(PreprocessStackTrace) {
CHECK(try_catch.HasCaught());
Isolate* isolate = CcTest::i_isolate();
Handle<Object> exception = v8::Utils::OpenHandle(*try_catch.Exception());
- Handle<Name> key = isolate->factory()->stack_trace_symbol();
+ Handle<Name> key = isolate->factory()->error_stack_symbol();
Handle<Object> stack_trace =
Object::GetProperty(isolate, exception, key).ToHandleChecked();
Handle<Object> code =
@@ -5444,7 +5390,7 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
SKIP_WRITE_BARRIER);
ByteArray::cast(result).set_length(length);
ByteArray::cast(result).clear_padding();
- return result;
+ return AllocationResult::FromObject(result);
}
bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
@@ -5477,7 +5423,8 @@ HEAP_TEST(Regress587004) {
CcTest::CollectGarbage(OLD_SPACE);
heap::SimulateFullSpace(heap->old_space());
heap->RightTrimFixedArray(*array, N - 1);
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
ByteArray byte_array;
const int M = 256;
// Don't allow old space expansion. The test works without this flag too,
@@ -5649,7 +5596,8 @@ TEST(Regress598319) {
CcTest::CollectGarbage(OLD_SPACE);
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(heap->lo_space()->Contains(arr.get()));
@@ -5723,7 +5671,8 @@ Handle<FixedArray> ShrinkArrayAndCheckSize(Heap* heap, int length) {
for (int i = 0; i < 5; i++) {
CcTest::CollectAllGarbage();
}
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// Disable LAB, such that calculations with SizeOfObjects() and object size
// are correct.
heap->DisableInlineAllocation();
@@ -5738,7 +5687,8 @@ Handle<FixedArray> ShrinkArrayAndCheckSize(Heap* heap, int length) {
CHECK_EQ(size_after_allocation, size_after_shrinking);
// GC and sweeping updates the size to acccount for shrinking.
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
intptr_t size_after_gc = heap->SizeOfObjects();
CHECK_EQ(size_after_gc, size_before_allocation + array->Size());
return array;
@@ -5775,7 +5725,8 @@ TEST(Regress615489) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -5884,7 +5835,8 @@ TEST(LeftTrimFixedArrayInBlackArea) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -5925,7 +5877,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -5994,7 +5947,8 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -6067,7 +6021,6 @@ TEST(Regress618958) {
TEST(YoungGenerationLargeObjectAllocationScavenge) {
if (FLAG_minor_mc) return;
- if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -6097,7 +6050,6 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
if (FLAG_minor_mc) return;
- if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -6127,7 +6079,6 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
if (FLAG_minor_mc) return;
- if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -6447,11 +6398,16 @@ HEAP_TEST(Regress670675) {
CcTest::CollectAllGarbage();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
+ heap->tracer()->StopCycleIfNeeded();
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (marking->IsStopped()) {
SafepointScope safepoint_scope(heap);
+ heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
size_t array_length = 128 * KB;
@@ -6497,7 +6453,8 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
CcTest::CollectAllGarbage();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(object->map().IsMap());
}
@@ -7062,6 +7019,9 @@ TEST(Regress978156) {
i::IncrementalMarking* marking = heap->incremental_marking();
if (marking->IsStopped()) {
SafepointScope scope(heap);
+ heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 30da6c8585..702d66560e 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -16,11 +16,11 @@
#include <utility>
-#include "src/init/v8.h"
-
#include "src/handles/global-handles.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -129,6 +129,9 @@ UNINITIALIZED_TEST(IncrementalMarkingUsingTasks) {
marking->Stop();
{
SafepointScope scope(heap);
+ heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
CHECK(platform.PendingTask());
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index b16173599b..1ca4699c1c 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -68,7 +68,8 @@ TEST(UnusedLabImplicitClose) {
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 1);
{
- AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ AllocationResult lab_backing_store =
+ AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
@@ -89,7 +90,8 @@ TEST(SimpleAllocate) {
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 2);
{
- AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ AllocationResult lab_backing_store =
+ AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
@@ -115,7 +117,8 @@ TEST(AllocateUntilLabOOM) {
expected_sizes_raw + 5);
intptr_t sum = 0;
{
- AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ AllocationResult lab_backing_store =
+ AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
@@ -142,7 +145,8 @@ TEST(AllocateExactlyUntilLimit) {
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 5);
{
- AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ AllocationResult lab_backing_store =
+ AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
@@ -183,7 +187,8 @@ TEST(MergeSuccessful) {
expected_sizes2_raw + 10);
{
- AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+ AllocationResult lab_backing_store1 =
+ AllocationResult::FromObject(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
CHECK(lab1.IsValid());
@@ -196,7 +201,8 @@ TEST(MergeSuccessful) {
}
}
- AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+ AllocationResult lab_backing_store2 =
+ AllocationResult::FromObject(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
CHECK(lab2.IsValid());
@@ -225,17 +231,20 @@ TEST(MergeFailed) {
Address base3 = base2 + kLabSize;
{
- AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+ AllocationResult lab_backing_store1 =
+ AllocationResult::FromObject(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
CHECK(lab1.IsValid());
- AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+ AllocationResult lab_backing_store2 =
+ AllocationResult::FromObject(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
CHECK(lab2.IsValid());
- AllocationResult lab_backing_store3(HeapObject::FromAddress(base3));
+ AllocationResult lab_backing_store3 =
+ AllocationResult::FromObject(HeapObject::FromAddress(base3));
LocalAllocationBuffer lab3 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store3, kLabSize);
CHECK(lab3.IsValid());
@@ -261,7 +270,8 @@ TEST(AllocateAligned) {
expected_sizes_raw + 4);
{
- AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ AllocationResult lab_backing_store =
+ AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 4a15680c28..d6419c4b57 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -82,9 +82,9 @@ AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
if (!alloc.To(&obj)) return alloc;
obj.set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
SKIP_WRITE_BARRIER);
- return isolate->factory()->InitializeMap(Map::cast(obj), JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- TERMINAL_FAST_ELEMENTS_KIND, 0);
+ return AllocationResult::FromObject(isolate->factory()->InitializeMap(
+ Map::cast(obj), JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 0, heap));
}
// This is the same as Factory::NewFixedArray, except it doesn't retry
@@ -104,7 +104,7 @@ AllocationResult HeapTester::AllocateFixedArrayForTest(
array.set_length(length);
MemsetTagged(array.data_start(), ReadOnlyRoots(heap).undefined_value(),
length);
- return array;
+ return AllocationResult::FromObject(array);
}
HEAP_TEST(MarkCompactCollector) {
@@ -128,7 +128,7 @@ HEAP_TEST(MarkCompactCollector) {
do {
allocation =
AllocateFixedArrayForTest(heap, arraysize, AllocationType::kYoung);
- } while (!allocation.IsRetry());
+ } while (!allocation.IsFailure());
CcTest::CollectGarbage(NEW_SPACE);
AllocateFixedArrayForTest(heap, arraysize, AllocationType::kYoung)
.ToObjectChecked();
@@ -137,7 +137,7 @@ HEAP_TEST(MarkCompactCollector) {
// keep allocating maps until it fails
do {
allocation = AllocateMapForTest(isolate);
- } while (!allocation.IsRetry());
+ } while (!allocation.IsFailure());
CcTest::CollectGarbage(MAP_SPACE);
AllocateMapForTest(isolate).ToObjectChecked();
@@ -154,7 +154,7 @@ HEAP_TEST(MarkCompactCollector) {
{ HandleScope scope(isolate);
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
- CHECK(Just(true) == JSReceiver::HasOwnProperty(global, func_name));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, global, func_name));
Handle<Object> func_value =
Object::GetProperty(isolate, global, func_name).ToHandleChecked();
CHECK(func_value->IsJSFunction());
@@ -172,7 +172,7 @@ HEAP_TEST(MarkCompactCollector) {
{ HandleScope scope(isolate);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
- CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(isolate, global, obj_name));
Handle<Object> object =
Object::GetProperty(isolate, global, obj_name).ToHandleChecked();
CHECK(object->IsJSObject());
@@ -205,7 +205,8 @@ HEAP_TEST(DoNotEvacuatePinnedPages) {
page->SetFlag(MemoryChunk::PINNED);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// The pinned flag should prevent the page from moving.
for (Handle<FixedArray> object : handles) {
@@ -215,7 +216,8 @@ HEAP_TEST(DoNotEvacuatePinnedPages) {
page->ClearFlag(MemoryChunk::PINNED);
CcTest::CollectAllGarbage();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
// `compact_on_every_full_gc` ensures that this page is an evacuation
// candidate, so with the pin flag cleared compaction should now move it.
@@ -410,7 +412,8 @@ TEST(Regress5829) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 334c5137ac..9cc24525e2 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -114,7 +114,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
- Executability executable, Space* space) {
+ Executability executable, PageSize page_size,
+ Space* space) {
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
@@ -129,7 +130,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
- reserve_area_size, commit_area_size, executable, space);
+ reserve_area_size, commit_area_size, executable, page_size, space);
size_t reserved_size =
((executable == EXECUTABLE))
? allocatable_memory_area_offset +
@@ -144,7 +145,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
memory_chunk->address() + memory_chunk->size());
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
- memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
+ memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
}
static unsigned int PseudorandomAreaSize() {
@@ -179,11 +180,12 @@ TEST(MemoryChunk) {
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
- initial_commit_area_size, EXECUTABLE, heap->code_space());
+ initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
+ heap->code_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, NOT_EXECUTABLE,
- heap->old_space());
+ PageSize::kLarge, heap->old_space());
}
}
@@ -201,8 +203,8 @@ TEST(MemoryAllocator) {
CHECK(!faked_space.first_page());
CHECK(!faked_space.last_page());
Page* first_page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
- NOT_EXECUTABLE);
+ MemoryAllocator::kRegular, faked_space.AreaSize(),
+ static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
faked_space.memory_chunk_list().PushBack(first_page);
CHECK(first_page->next_page() == nullptr);
@@ -214,8 +216,8 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
- faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
- NOT_EXECUTABLE);
+ MemoryAllocator::kRegular, faked_space.AreaSize(),
+ static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
total_pages++;
faked_space.memory_chunk_list().PushBack(other);
int page_count = 0;
@@ -344,14 +346,14 @@ TEST(OldLargeObjectSpace) {
while (true) {
{
AllocationResult allocation = lo->AllocateRaw(lo_size);
- if (allocation.IsRetry()) break;
+ if (allocation.IsFailure()) break;
ho = HeapObject::cast(allocation.ToObjectChecked());
Handle<HeapObject> keep_alive(ho, isolate);
}
}
CHECK(!lo->IsEmpty());
- CHECK(lo->AllocateRaw(lo_size).IsRetry());
+ CHECK(lo->AllocateRaw(lo_size).IsFailure());
}
#ifndef DEBUG
@@ -394,6 +396,9 @@ TEST(SizeOfInitialHeap) {
Heap* heap = isolate->heap();
for (int i = FIRST_GROWABLE_PAGED_SPACE; i <= LAST_GROWABLE_PAGED_SPACE;
i++) {
+ // Map space might be disabled.
+ if (i == MAP_SPACE && !heap->paged_space(i)) continue;
+
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
@@ -411,7 +416,7 @@ TEST(SizeOfInitialHeap) {
static HeapObject AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
- CHECK(!allocation.IsRetry());
+ CHECK(!allocation.IsFailure());
HeapObject filler;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler.address(), size,
@@ -421,7 +426,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {
static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
- CHECK(!allocation.IsRetry());
+ CHECK(!allocation.IsFailure());
HeapObject filler;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler.address(), size,
@@ -431,7 +436,7 @@ static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
static HeapObject AllocateUnaligned(OldLargeObjectSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size);
- CHECK(!allocation.IsRetry());
+ CHECK(!allocation.IsFailure());
HeapObject filler;
CHECK(allocation.To(&filler));
return filler;
@@ -808,8 +813,8 @@ TEST(NoMemoryForNewPage) {
LinearAllocationArea allocation_info;
OldSpace faked_space(heap, &allocation_info);
Page* page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
- NOT_EXECUTABLE);
+ MemoryAllocator::kRegular, faked_space.AreaSize(),
+ static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
CHECK_NULL(page);
}
@@ -854,14 +859,12 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
- MemoryAllocator* allocator = heap->memory_allocator();
-
// Allocated objects size.
CHECK_EQ(faked_space->Size(), 16);
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
- allocator->GetCommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
// Amount of OS allocated memory.
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
@@ -888,10 +891,9 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
- MemoryAllocator* allocator = heap->memory_allocator();
// Allocate an object just under an OS page in size.
int object_size =
- static_cast<int>(allocator->GetCommitPageSize() - kApiTaggedSize);
+ static_cast<int>(MemoryAllocator::GetCommitPageSize() - kApiTaggedSize);
// TODO(v8:8875): Pointer compression does not enable aligned memory allocation
// yet.
@@ -923,7 +925,7 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
- allocator->GetCommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
@@ -949,8 +951,6 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
- MemoryAllocator* allocator = heap->memory_allocator();
-
// Allocate an object that's too big to have more than one on a page.
int object_size = RoundUp(
@@ -973,7 +973,7 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// Amount of OS allocated memory.
size_t committed_memory_per_page =
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
- allocator->GetCommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
CHECK_EQ(faked_space->CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space->CommittedPhysicalMemory(),
2 * committed_memory_per_page);
@@ -982,7 +982,7 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// page headers.
size_t capacity_per_page =
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
- allocator->GetCommitPageSize()) -
+ MemoryAllocator::GetCommitPageSize()) -
MemoryChunkLayout::ObjectStartOffsetInDataPage();
CHECK_EQ(faked_space->Capacity(), 2 * capacity_per_page);
}
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index e90fdcf8b7..3ead9c48be 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -51,19 +51,20 @@ TEST(WeakReferencesBasic) {
assm.nop(); // supported on all architectures
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- CHECK(code->IsCode());
+ Handle<CodeT> code = ToCodeT(
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(),
+ isolate);
+ CHECK(code->IsCodeT());
- lh->set_data1(HeapObjectReference::Weak(ToCodeT(*code)));
+ lh->set_data1(HeapObjectReference::Weak(*code));
HeapObject code_heap_object;
CHECK(lh->data1()->GetHeapObjectIfWeak(&code_heap_object));
- CHECK_EQ(ToCodeT(*code), code_heap_object);
+ CHECK_EQ(*code, code_heap_object);
CcTest::CollectAllGarbage();
CHECK(lh->data1()->GetHeapObjectIfWeak(&code_heap_object));
- CHECK_EQ(ToCodeT(*code), code_heap_object);
+ CHECK_EQ(*code, code_heap_object);
} // code will go out of scope.
CcTest::CollectAllGarbage();
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index c8dfb80e5b..2c5371dd7a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -178,7 +178,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
} else if (register_value.is_function_closure()) {
*stream << "(closure)";
} else if (register_value.is_parameter()) {
- int parameter_index = register_value.ToParameterIndex(parameter_count);
+ int parameter_index = register_value.ToParameterIndex();
if (parameter_index == 0) {
*stream << "(this)";
} else {
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 5c217356c5..e7001e483b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -150,15 +150,15 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star4),
- B(LdaNamedProperty), R(4), U8(2), U8(6),
+ B(GetNamedProperty), R(4), U8(2), U8(6),
B(Star3),
B(CallProperty0), R(3), R(4), U8(15),
B(Star5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(3), U8(17),
+ B(GetNamedProperty), R(5), U8(3), U8(17),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(5), U8(4), U8(8),
+ B(GetNamedProperty), R(5), U8(4), U8(8),
B(StaInArrayLiteral), R(2), R(1), U8(13),
B(Ldar), R(1),
B(Inc), U8(12),
@@ -189,7 +189,7 @@ bytecodes: [
B(Star0),
/* 64 S> */ B(CreateArrayFromIterable),
B(Star2),
- B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(GetNamedProperty), R(2), U8(1), U8(1),
B(Star1),
B(LdaSmi), I8(3),
B(StaInArrayLiteral), R(2), R(1), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 30baf4afad..5c0b80fc4a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -237,7 +237,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star9),
- B(LdaNamedProperty), R(9), U8(5), U8(5),
+ B(GetNamedProperty), R(9), U8(5), U8(5),
B(Star8),
B(LdaFalse),
B(Star10),
@@ -248,9 +248,9 @@ bytecodes: [
B(Star14),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(LdaNamedProperty), R(14), U8(6), U8(9),
+ B(GetNamedProperty), R(14), U8(6), U8(9),
B(JumpIfToBooleanTrue), U8(62),
- B(LdaNamedProperty), R(14), U8(7), U8(11),
+ B(GetNamedProperty), R(14), U8(7), U8(11),
B(Star14),
B(LdaFalse),
B(Star10),
@@ -287,7 +287,7 @@ bytecodes: [
B(Ldar), R(10),
B(JumpIfToBooleanTrue), U8(37),
B(Mov), R(context), R(15),
- B(LdaNamedProperty), R(9), U8(10), U8(13),
+ B(GetNamedProperty), R(9), U8(10), U8(13),
B(JumpIfUndefinedOrNull), U8(28),
B(Star), R(16),
B(CallProperty0), R(16), R(9), U8(15),
@@ -417,19 +417,19 @@ bytecodes: [
B(Star9),
/* 56 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
B(Star10),
- B(LdaNamedProperty), R(10), U8(8), U8(4),
+ B(GetNamedProperty), R(10), U8(8), U8(4),
B(JumpIfUndefinedOrNull), U8(14),
B(Star11),
B(CallProperty0), R(11), R(10), U8(6),
B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(10), U8(9), U8(8),
+ B(GetNamedProperty), R(10), U8(9), U8(8),
B(Star11),
B(CallProperty0), R(11), R(10), U8(10),
B(Star11),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
B(Star7),
- B(LdaNamedProperty), R(7), U8(10), U8(12),
+ B(GetNamedProperty), R(7), U8(10), U8(12),
B(Star9),
B(LdaUndefined),
B(Star8),
@@ -439,7 +439,7 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
B(CallProperty1), R(9), R(7), R(8), U8(14),
B(Jump), U8(130),
- B(LdaNamedProperty), R(7), U8(13), U8(16),
+ B(GetNamedProperty), R(7), U8(13), U8(16),
B(JumpIfUndefinedOrNull), U8(10),
B(Star10),
B(CallProperty1), R(10), R(7), R(8), U8(18),
@@ -461,12 +461,12 @@ bytecodes: [
B(Star1),
B(Mov), R(10), R(2),
B(Jump), U8(215),
- B(LdaNamedProperty), R(7), U8(14), U8(20),
+ B(GetNamedProperty), R(7), U8(14), U8(20),
B(JumpIfUndefinedOrNull), U8(10),
B(Star12),
B(CallProperty1), R(12), R(7), R(8), U8(22),
B(Jump), U8(61),
- B(LdaNamedProperty), R(7), U8(13), U8(24),
+ B(GetNamedProperty), R(7), U8(13), U8(24),
B(JumpIfUndefinedOrNull), U8(50),
B(Star12),
B(CallProperty0), R(12), R(7), U8(26),
@@ -506,9 +506,9 @@ bytecodes: [
B(Mov), R(12), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(15), U8(28),
+ B(GetNamedProperty), R(5), U8(15), U8(28),
B(JumpIfToBooleanTrue), U8(35),
- B(LdaNamedProperty), R(5), U8(16), U8(30),
+ B(GetNamedProperty), R(5), U8(16), U8(30),
B(Star15),
B(LdaFalse),
B(Star), R(16),
@@ -520,7 +520,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star6),
B(JumpLoop), U8(220), I8(0),
- B(LdaNamedProperty), R(5), U8(16), U8(32),
+ B(GetNamedProperty), R(5), U8(16), U8(32),
B(Star7),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
index a7a0a03492..3bc4300e0b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -11,9 +11,9 @@ top level: yes
snippet: "
await 42;
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 116
+bytecode array length: 112
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
@@ -48,10 +48,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star4),
- B(LdaTrue),
- B(Star5),
B(Mov), R(0), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(2),
/* 10 S> */ B(Return),
B(Star3),
B(CreateCatchContext), R(3), U8(4),
@@ -62,10 +60,8 @@ bytecodes: [
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star5),
- B(LdaTrue),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(2),
B(Return),
]
constant pool: [
@@ -76,16 +72,16 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [47, 92, 92],
+ [47, 90, 90],
]
---
snippet: "
await import(\"foo\");
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 125
+bytecode array length: 121
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
@@ -123,10 +119,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star4),
- B(LdaTrue),
- B(Star5),
B(Mov), R(0), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(2),
/* 21 S> */ B(Return),
B(Star3),
B(CreateCatchContext), R(3), U8(5),
@@ -137,10 +131,8 @@ bytecodes: [
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star5),
- B(LdaTrue),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(2),
B(Return),
]
constant pool: [
@@ -152,7 +144,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [47, 101, 101],
+ [47, 99, 99],
]
---
@@ -163,9 +155,9 @@ snippet: "
}
foo();
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 126
+bytecode array length: 122
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(3),
@@ -204,10 +196,8 @@ bytecodes: [
/* 47 S> */ B(CallUndefinedReceiver0), R(1), U8(0),
B(LdaUndefined),
B(Star5),
- B(LdaTrue),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(2),
/* 54 S> */ B(Return),
B(Star4),
B(CreateCatchContext), R(4), U8(5),
@@ -218,10 +208,8 @@ bytecodes: [
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(2),
B(Return),
]
constant pool: [
@@ -233,7 +221,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [54, 102, 102],
+ [54, 100, 100],
]
---
@@ -241,9 +229,9 @@ snippet: "
import * as foo from \"bar\";
await import(\"goo\");
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 135
+bytecode array length: 131
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(3),
@@ -286,10 +274,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star5),
- B(LdaTrue),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(2),
/* 49 S> */ B(Return),
B(Star4),
B(CreateCatchContext), R(4), U8(5),
@@ -300,10 +286,8 @@ bytecodes: [
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(2),
B(Return),
]
constant pool: [
@@ -315,6 +299,6 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [57, 111, 111],
+ [57, 109, 109],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 7cbcc91999..579f7c1965 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -15,7 +15,7 @@ bytecode array length: 21
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star1),
- /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
+ /* 39 E> */ B(GetNamedProperty), R(1), U8(1), U8(2),
B(Star0),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
B(Star2),
@@ -41,7 +41,7 @@ bytecode array length: 23
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star1),
- /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
+ /* 39 E> */ B(GetNamedProperty), R(1), U8(1), U8(2),
B(Star0),
B(LdaZero),
B(Star2),
@@ -65,43 +65,38 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 95
+bytecode array length: 88
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star1),
- /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
+ /* 39 E> */ B(GetNamedProperty), R(1), U8(1), U8(2),
B(Star0),
- B(CreateEmptyArrayLiteral), U8(4),
+ B(CreateArrayLiteral), U8(2), U8(4), U8(37),
B(Star3),
- B(LdaZero),
- B(Star2),
- B(LdaZero),
- B(StaInArrayLiteral), R(3), R(2), U8(5),
- B(Ldar), R(2),
- B(Inc), U8(7),
+ B(LdaSmi), I8(1),
B(Star2),
- /* 49 S> */ B(CreateArrayLiteral), U8(2), U8(8), U8(37),
+ /* 49 S> */ B(CreateArrayLiteral), U8(3), U8(5), U8(37),
B(Star6),
- /* 49 E> */ B(GetIterator), R(6), U8(9), U8(11),
+ /* 49 E> */ B(GetIterator), R(6), U8(6), U8(8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(3), U8(13),
+ B(GetNamedProperty), R(5), U8(4), U8(10),
B(Star4),
B(CallProperty0), R(4), R(5), U8(19),
B(Star6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(4), U8(21),
+ B(GetNamedProperty), R(6), U8(5), U8(21),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(6), U8(5), U8(15),
- B(StaInArrayLiteral), R(3), R(2), U8(5),
+ B(GetNamedProperty), R(6), U8(6), U8(12),
+ B(StaInArrayLiteral), R(3), R(2), U8(17),
B(Ldar), R(2),
- B(Inc), U8(7),
+ B(Inc), U8(16),
B(Star2),
B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(2), U8(5),
+ B(StaInArrayLiteral), R(3), R(2), U8(17),
B(Mov), R(3), R(2),
/* 39 E> */ B(CallJSRuntime), U8(%reflect_apply), R(0), U8(3),
B(LdaUndefined),
@@ -111,6 +106,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["Math"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["max"],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 82b6e16be9..7581b0f391 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -20,14 +20,18 @@ snippet: "
test();
})();
"
-frame size: 1
+frame size: 5
parameter count: 1
-bytecode array length: 16
+bytecode array length: 24
bytecodes: [
/* 104 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- /* 117 E> */ B(LdaNamedPropertyFromSuper), R(this), U8(0), U8(1),
+ B(Star3),
+ B(LdaConstant), U8(0),
+ B(Star4),
+ B(Mov), R(this), R(2),
+ /* 117 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(2), U8(3),
B(Star0),
- /* 117 E> */ B(CallAnyReceiver), R(0), R(this), U8(1), U8(3),
+ /* 117 E> */ B(CallAnyReceiver), R(0), R(this), U8(1), U8(1),
/* 126 E> */ B(AddSmi), I8(1), U8(0),
/* 130 S> */ B(Return),
]
@@ -54,7 +58,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 24
+bytecode array length: 32
bytecodes: [
/* 130 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star1),
@@ -65,7 +69,11 @@ bytecodes: [
B(Mov), R(this), R(0),
/* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper), R(0), U8(4),
/* 143 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- /* 156 E> */ B(LdaNamedPropertyFromSuper), R(this), U8(0), U8(0),
+ B(Star1),
+ B(LdaConstant), U8(0),
+ B(Star2),
+ B(Mov), R(this), R(0),
+ /* 156 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(0), U8(3),
/* 158 S> */ B(Return),
]
constant pool: [
@@ -106,7 +114,7 @@ bytecodes: [
/* 128 S> */ B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
B(LdaSmi), I8(2),
- /* 136 E> */ B(StaNamedProperty), R(this), U8(0), U8(2),
+ /* 136 E> */ B(SetNamedProperty), R(this), U8(0), U8(2),
B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
/* 141 S> */ B(Return),
@@ -147,7 +155,7 @@ bytecodes: [
/* 126 S> */ B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
B(LdaSmi), I8(2),
- /* 134 E> */ B(StaNamedProperty), R(this), U8(0), U8(2),
+ /* 134 E> */ B(SetNamedProperty), R(this), U8(0), U8(2),
B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
/* 139 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index 6a00a5445b..dda4470c80 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -55,9 +55,9 @@ bytecode array length: 18
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ /* 54 S> */ B(GetNamedProperty), R(0), U8(1), U8(1),
B(MulSmi), I8(2), U8(3),
- /* 61 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
+ /* 61 E> */ B(SetNamedProperty), R(0), U8(1), U8(4),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -80,9 +80,9 @@ bytecodes: [
B(Star0),
/* 52 S> */ B(LdaSmi), I8(1),
B(Star2),
- B(LdaKeyedProperty), R(0), U8(1),
+ B(GetKeyedProperty), R(0), U8(1),
B(BitwiseXorSmi), I8(2), U8(3),
- /* 57 E> */ B(StaKeyedProperty), R(0), R(2), U8(4),
+ /* 57 E> */ B(SetKeyedProperty), R(0), R(2), U8(4),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 91d5d07356..0394579cb2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -97,12 +97,12 @@ bytecode array length: 22
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ /* 54 S> */ B(GetNamedProperty), R(0), U8(1), U8(1),
B(ToNumeric), U8(3),
B(Star2),
B(Inc), U8(3),
B(Star3),
- /* 66 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
+ /* 66 E> */ B(SetNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -123,10 +123,10 @@ bytecode array length: 19
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ /* 54 S> */ B(GetNamedProperty), R(0), U8(1), U8(1),
B(Dec), U8(3),
B(Star2),
- /* 65 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
+ /* 65 E> */ B(SetNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -150,12 +150,12 @@ bytecodes: [
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
B(Star1),
/* 72 S> */ B(Ldar), R(0),
- /* 81 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ /* 81 E> */ B(GetKeyedProperty), R(1), U8(1),
B(ToNumeric), U8(3),
B(Star4),
B(Dec), U8(3),
B(Star5),
- /* 86 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
+ /* 86 E> */ B(SetKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
@@ -179,10 +179,10 @@ bytecodes: [
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
B(Star1),
/* 72 S> */ B(Ldar), R(0),
- /* 83 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ /* 83 E> */ B(GetKeyedProperty), R(1), U8(1),
B(Inc), U8(3),
B(Star4),
- /* 87 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
+ /* 87 E> */ B(SetKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
@@ -267,7 +267,7 @@ bytecodes: [
B(Star0),
B(LdaSmi), I8(2),
B(Star4),
- /* 79 E> */ B(StaKeyedProperty), R(1), R(3), U8(2),
+ /* 79 E> */ B(SetKeyedProperty), R(1), R(3), U8(2),
B(Ldar), R(4),
/* 83 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index 489ac63a92..af428803f6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -36,7 +36,7 @@ bytecodes: [
/* 10 E> */ B(CreateMappedArguments),
B(Star0),
/* 15 S> */ B(LdaZero),
- /* 31 E> */ B(LdaKeyedProperty), R(0), U8(0),
+ /* 31 E> */ B(GetKeyedProperty), R(0), U8(0),
/* 35 S> */ B(Return),
]
constant pool: [
@@ -78,7 +78,7 @@ bytecodes: [
B(CreateMappedArguments),
B(Star0),
/* 16 S> */ B(LdaZero),
- /* 32 E> */ B(LdaKeyedProperty), R(0), U8(0),
+ /* 32 E> */ B(GetKeyedProperty), R(0), U8(0),
/* 36 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index 022e676013..dc04ccef93 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -60,7 +60,7 @@ bytecodes: [
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(1), U8(0),
+ /* 44 E> */ B(GetKeyedProperty), R(1), U8(0),
/* 48 S> */ B(Return),
]
constant pool: [
@@ -84,10 +84,10 @@ bytecodes: [
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ /* 44 E> */ B(GetKeyedProperty), R(1), U8(1),
B(Star4),
B(LdaZero),
- /* 59 E> */ B(LdaKeyedProperty), R(3), U8(3),
+ /* 59 E> */ B(GetKeyedProperty), R(3), U8(3),
/* 48 E> */ B(Add), R(4), U8(0),
/* 63 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 6ad4a1fd3f..eced99489d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -21,7 +21,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star4),
- B(LdaNamedProperty), R(4), U8(1), U8(5),
+ B(GetNamedProperty), R(4), U8(1), U8(5),
B(Star3),
B(LdaFalse),
B(Star5),
@@ -34,9 +34,9 @@ bytecodes: [
B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(LdaNamedProperty), R(9), U8(2), U8(9),
+ B(GetNamedProperty), R(9), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(13),
- B(LdaNamedProperty), R(9), U8(3), U8(7),
+ B(GetNamedProperty), R(9), U8(3), U8(7),
B(Star9),
B(LdaFalse),
B(Star5),
@@ -57,7 +57,7 @@ bytecodes: [
B(Ldar), R(5),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
- B(LdaNamedProperty), R(4), U8(4), U8(13),
+ B(GetNamedProperty), R(4), U8(4), U8(13),
B(JumpIfUndefinedOrNull), U8(26),
B(Star11),
B(CallProperty0), R(11), R(4), U8(15),
@@ -100,7 +100,7 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 210
+bytecode array length: 203
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star2),
@@ -109,26 +109,23 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(1), U8(5),
+ B(GetNamedProperty), R(5), U8(1), U8(5),
B(Star4),
B(LdaFalse),
B(Star6),
B(Mov), R(context), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(31),
+ B(JumpIfToBooleanTrue), U8(24),
B(LdaTrue),
B(Star6),
B(CallProperty0), R(4), R(5), U8(11),
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(11),
- B(LdaNamedProperty), R(10), U8(3), U8(7),
- B(Star10),
+ B(GetNamedProperty), R(10), U8(2), U8(9),
+ B(JumpIfToBooleanTrue), U8(4),
B(LdaFalse),
B(Star6),
- B(Ldar), R(10),
/* 61 S> */ B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
@@ -137,9 +134,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(2), U8(9),
+ B(GetNamedProperty), R(10), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(13),
- B(LdaNamedProperty), R(10), U8(3), U8(7),
+ B(GetNamedProperty), R(10), U8(3), U8(7),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -159,9 +156,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(2), U8(21),
+ B(GetNamedProperty), R(10), U8(2), U8(21),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(10), U8(3), U8(7),
+ B(GetNamedProperty), R(10), U8(3), U8(7),
B(StaInArrayLiteral), R(11), R(12), U8(16),
B(Ldar), R(12),
B(Inc), U8(18),
@@ -181,7 +178,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
- B(LdaNamedProperty), R(5), U8(4), U8(23),
+ B(GetNamedProperty), R(5), U8(4), U8(23),
B(JumpIfUndefinedOrNull), U8(26),
B(Star12),
B(CallProperty0), R(12), R(5), U8(25),
@@ -213,8 +210,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [30, 148, 154],
- [167, 186, 188],
+ [30, 141, 147],
+ [160, 179, 181],
]
---
@@ -235,7 +232,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(1), U8(5),
+ B(GetNamedProperty), R(5), U8(1), U8(5),
B(Star4),
B(LdaFalse),
B(Star6),
@@ -249,16 +246,16 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(2), U8(9),
+ B(GetNamedProperty), R(10), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(13),
- B(LdaNamedProperty), R(10), U8(3), U8(7),
+ B(GetNamedProperty), R(10), U8(3), U8(7),
B(Star10),
B(LdaFalse),
B(Star6),
B(Ldar), R(10),
B(Jump), U8(3),
B(LdaUndefined),
- B(StaNamedProperty), R(11), U8(4), U8(13),
+ B(SetNamedProperty), R(11), U8(4), U8(13),
/* 63 S> */ B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
@@ -267,9 +264,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(2), U8(9),
+ B(GetNamedProperty), R(10), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(13),
- B(LdaNamedProperty), R(10), U8(3), U8(7),
+ B(GetNamedProperty), R(10), U8(3), U8(7),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -290,7 +287,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
- B(LdaNamedProperty), R(5), U8(5), U8(17),
+ B(GetNamedProperty), R(5), U8(5), U8(17),
B(JumpIfUndefinedOrNull), U8(26),
B(Star13),
B(CallProperty0), R(13), R(5), U8(19),
@@ -338,7 +335,7 @@ bytecode array length: 12
bytecodes: [
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star1),
- /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ /* 54 S> */ B(GetNamedProperty), R(1), U8(1), U8(1),
B(Star0),
B(LdaUndefined),
/* 63 S> */ B(Return),
@@ -363,8 +360,8 @@ bytecodes: [
B(Star0),
/* 48 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star1),
- /* 61 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(StaNamedProperty), R(0), U8(2), U8(3),
+ /* 61 S> */ B(GetNamedProperty), R(1), U8(1), U8(1),
+ B(SetNamedProperty), R(0), U8(2), U8(3),
B(LdaUndefined),
/* 72 S> */ B(Return),
]
@@ -383,18 +380,18 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star1),
/* 64 S> */ B(LdaConstant), U8(1),
B(Star3),
- B(LdaNamedProperty), R(1), U8(1), U8(1),
+ B(GetNamedProperty), R(1), U8(1), U8(1),
B(Mov), R(1), R(2),
B(JumpIfNotUndefined), U8(3),
B(LdaZero),
B(Star0),
- /* 71 S> */ B(CallRuntime), U16(Runtime::kCopyDataPropertiesWithExcludedProperties), R(2), U8(2),
+ /* 71 S> */ B(InvokeIntrinsic), U8(Runtime::k_CopyDataPropertiesWithExcludedPropertiesOnStack), R(2), U8(2),
B(StaGlobal), U8(2), U8(3),
B(LdaUndefined),
/* 80 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index a11a4aa405..4fc3bc8523 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,7 +16,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 272
+bytecode array length: 268
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -26,19 +26,19 @@ bytecodes: [
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
B(Star7),
- B(LdaNamedProperty), R(7), U8(3), U8(1),
+ B(GetNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefinedOrNull), U8(14),
B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(GetNamedProperty), R(7), U8(4), U8(5),
B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star6),
- B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(GetNamedProperty), R(6), U8(5), U8(9),
B(Star5),
B(LdaFalse),
B(Star7),
@@ -63,9 +63,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(6), U8(13),
+ B(GetNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(20),
- B(LdaNamedProperty), R(11), U8(7), U8(15),
+ B(GetNamedProperty), R(11), U8(7), U8(15),
B(Star11),
B(LdaFalse),
B(Star7),
@@ -86,7 +86,7 @@ bytecodes: [
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
- B(LdaNamedProperty), R(6), U8(8), U8(17),
+ B(GetNamedProperty), R(6), U8(8), U8(17),
B(JumpIfUndefinedOrNull), U8(63),
B(Star15),
B(CallProperty0), R(15), R(6), U8(19),
@@ -123,10 +123,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(2),
/* 57 S> */ B(Return),
B(Star5),
B(CreateCatchContext), R(5), U8(9),
@@ -137,10 +135,8 @@ bytecodes: [
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star7),
- B(LdaTrue),
- B(Star8),
B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(2),
B(Return),
]
constant pool: [
@@ -156,7 +152,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [18, 248, 248],
+ [18, 246, 246],
[66, 139, 145],
[158, 214, 216],
]
@@ -170,7 +166,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 292
+bytecode array length: 285
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -180,19 +176,19 @@ bytecodes: [
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
B(Star7),
- B(LdaNamedProperty), R(7), U8(3), U8(1),
+ B(GetNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefinedOrNull), U8(14),
B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(GetNamedProperty), R(7), U8(4), U8(5),
B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star6),
- B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(GetNamedProperty), R(6), U8(5), U8(9),
B(Star5),
B(LdaFalse),
B(Star7),
@@ -217,9 +213,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(6), U8(13),
+ B(GetNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(11), U8(7), U8(15),
+ B(GetNamedProperty), R(11), U8(7), U8(15),
B(Star11),
B(LdaFalse),
B(Star7),
@@ -242,7 +238,7 @@ bytecodes: [
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
- B(LdaNamedProperty), R(6), U8(8), U8(17),
+ B(GetNamedProperty), R(6), U8(8), U8(17),
B(JumpIfUndefinedOrNull), U8(63),
B(Star15),
B(CallProperty0), R(15), R(6), U8(19),
@@ -274,21 +270,17 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(8),
B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
- B(Jump), U8(19),
+ B(Jump), U8(16),
B(Ldar), R(9),
B(ReThrow),
- B(LdaTrue),
- B(Star), R(16),
B(Mov), R(0), R(14),
B(Mov), R(9), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(2),
B(Return),
B(LdaUndefined),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(2),
/* 68 S> */ B(Return),
B(Star5),
B(CreateCatchContext), R(5), U8(11),
@@ -299,10 +291,8 @@ bytecodes: [
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star7),
- B(LdaTrue),
- B(Star8),
B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(2),
B(Return),
]
constant pool: [
@@ -320,7 +310,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [18, 268, 268],
+ [18, 263, 263],
[66, 142, 148],
[161, 217, 219],
]
@@ -337,7 +327,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 288
+bytecode array length: 284
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -347,19 +337,19 @@ bytecodes: [
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
B(Star7),
- B(LdaNamedProperty), R(7), U8(3), U8(1),
+ B(GetNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefinedOrNull), U8(14),
B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(GetNamedProperty), R(7), U8(4), U8(5),
B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star6),
- B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(GetNamedProperty), R(6), U8(5), U8(9),
B(Star5),
B(LdaFalse),
B(Star7),
@@ -384,9 +374,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(6), U8(13),
+ B(GetNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(36),
- B(LdaNamedProperty), R(11), U8(7), U8(15),
+ B(GetNamedProperty), R(11), U8(7), U8(15),
B(Star11),
B(LdaFalse),
B(Star7),
@@ -414,7 +404,7 @@ bytecodes: [
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
- B(LdaNamedProperty), R(6), U8(8), U8(19),
+ B(GetNamedProperty), R(6), U8(8), U8(19),
B(JumpIfUndefinedOrNull), U8(63),
B(Star15),
B(CallProperty0), R(15), R(6), U8(21),
@@ -451,10 +441,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(2),
/* 114 S> */ B(Return),
B(Star5),
B(CreateCatchContext), R(5), U8(9),
@@ -465,10 +453,8 @@ bytecodes: [
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star7),
- B(LdaTrue),
- B(Star8),
B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(2),
B(Return),
]
constant pool: [
@@ -484,7 +470,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [18, 264, 264],
+ [18, 262, 262],
[66, 155, 161],
[174, 230, 232],
]
@@ -499,7 +485,7 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 204
+bytecode array length: 198
bytecodes: [
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
@@ -514,7 +500,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star4),
- B(LdaNamedProperty), R(4), U8(2), U8(6),
+ B(GetNamedProperty), R(4), U8(2), U8(6),
B(Star3),
B(LdaFalse),
B(Star5),
@@ -525,15 +511,15 @@ bytecodes: [
B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(LdaNamedProperty), R(9), U8(3), U8(10),
+ B(GetNamedProperty), R(9), U8(3), U8(10),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(9), U8(4), U8(12),
+ B(GetNamedProperty), R(9), U8(4), U8(12),
B(Star9),
B(LdaFalse),
B(Star5),
B(Ldar), R(9),
- /* 58 E> */ B(StaNamedProperty), R(1), U8(5), U8(14),
- /* 87 S> */ B(LdaNamedProperty), R(1), U8(5), U8(16),
+ /* 58 E> */ B(SetNamedProperty), R(1), U8(5), U8(14),
+ /* 87 S> */ B(GetNamedProperty), R(1), U8(5), U8(16),
B(Star7),
B(LdaSmi), I8(1),
B(Star6),
@@ -552,7 +538,7 @@ bytecodes: [
B(Ldar), R(5),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
- B(LdaNamedProperty), R(4), U8(6), U8(18),
+ B(GetNamedProperty), R(4), U8(6), U8(18),
B(JumpIfUndefinedOrNull), U8(26),
B(Star12),
B(CallProperty0), R(12), R(4), U8(20),
@@ -570,21 +556,17 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(6),
B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
- B(Jump), U8(18),
+ B(Jump), U8(16),
B(Ldar), R(7),
B(ReThrow),
- B(LdaFalse),
- B(Star13),
B(Mov), R(0), R(11),
B(Mov), R(7), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(2),
B(Return),
B(LdaUndefined),
B(Star4),
- B(LdaFalse),
- B(Star5),
B(Mov), R(0), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(2),
/* 96 S> */ B(Return),
B(Star3),
B(CreateCatchContext), R(3), U8(9),
@@ -595,10 +577,8 @@ bytecodes: [
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star5),
- B(LdaFalse),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(2),
B(Return),
]
constant pool: [
@@ -614,7 +594,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [14, 180, 180],
+ [14, 176, 176],
[46, 92, 98],
[111, 130, 132],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 2aae4ae80e..a71ae4c808 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -156,14 +156,14 @@ bytecodes: [
B(JumpIfUndefined), U8(37),
B(Star6),
B(Ldar), R(6),
- /* 68 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
- /* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
+ /* 68 E> */ B(SetNamedProperty), R(0), U8(2), U8(3),
+ /* 100 S> */ B(GetNamedProperty), R(0), U8(2), U8(5),
B(Star6),
B(LdaSmi), I8(10),
/* 106 E> */ B(TestEqual), R(6), U8(7),
B(JumpIfFalse), U8(4),
/* 113 S> */ B(Jump), U8(16),
- /* 130 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
+ /* 130 S> */ B(GetNamedProperty), R(0), U8(2), U8(5),
B(Star6),
B(LdaSmi), I8(20),
/* 136 E> */ B(TestEqual), R(6), U8(8),
@@ -209,9 +209,9 @@ bytecodes: [
B(LdaZero),
B(Star8),
B(Ldar), R(6),
- /* 65 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
+ /* 65 E> */ B(SetKeyedProperty), R(0), R(8), U8(3),
/* 83 S> */ B(LdaSmi), I8(3),
- /* 91 E> */ B(LdaKeyedProperty), R(0), U8(5),
+ /* 91 E> */ B(GetKeyedProperty), R(0), U8(5),
/* 95 S> */ B(Return),
B(ForInStep), R(5),
B(Star5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 0a954c76b8..92f6f085f6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -19,7 +19,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star3),
- B(LdaNamedProperty), R(3), U8(1), U8(5),
+ B(GetNamedProperty), R(3), U8(1), U8(5),
B(Star2),
B(LdaFalse),
B(Star4),
@@ -30,9 +30,9 @@ bytecodes: [
B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(2), U8(9),
+ B(GetNamedProperty), R(8), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(20),
- B(LdaNamedProperty), R(8), U8(3), U8(11),
+ B(GetNamedProperty), R(8), U8(3), U8(11),
B(Star8),
B(LdaFalse),
B(Star4),
@@ -53,7 +53,7 @@ bytecodes: [
B(Ldar), R(4),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
- B(LdaNamedProperty), R(3), U8(4), U8(13),
+ B(GetNamedProperty), R(3), U8(4), U8(13),
B(JumpIfUndefinedOrNull), U8(26),
B(Star10),
B(CallProperty0), R(10), R(3), U8(15),
@@ -104,7 +104,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star4),
- B(LdaNamedProperty), R(4), U8(1), U8(4),
+ B(GetNamedProperty), R(4), U8(1), U8(4),
B(Star3),
B(LdaFalse),
B(Star5),
@@ -115,9 +115,9 @@ bytecodes: [
B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(LdaNamedProperty), R(9), U8(2), U8(8),
+ B(GetNamedProperty), R(9), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(9), U8(3), U8(10),
+ B(GetNamedProperty), R(9), U8(3), U8(10),
B(Star9),
B(LdaFalse),
B(Star5),
@@ -140,7 +140,7 @@ bytecodes: [
B(Ldar), R(5),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
- B(LdaNamedProperty), R(4), U8(4), U8(12),
+ B(GetNamedProperty), R(4), U8(4), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star11),
B(CallProperty0), R(11), R(4), U8(14),
@@ -197,7 +197,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star3),
- B(LdaNamedProperty), R(3), U8(1), U8(5),
+ B(GetNamedProperty), R(3), U8(1), U8(5),
B(Star2),
B(LdaFalse),
B(Star4),
@@ -208,9 +208,9 @@ bytecodes: [
B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(2), U8(9),
+ B(GetNamedProperty), R(8), U8(2), U8(9),
B(JumpIfToBooleanTrue), U8(36),
- B(LdaNamedProperty), R(8), U8(3), U8(11),
+ B(GetNamedProperty), R(8), U8(3), U8(11),
B(Star8),
B(LdaFalse),
B(Star4),
@@ -238,7 +238,7 @@ bytecodes: [
B(Ldar), R(4),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
- B(LdaNamedProperty), R(3), U8(4), U8(15),
+ B(GetNamedProperty), R(3), U8(4), U8(15),
B(JumpIfUndefinedOrNull), U8(26),
B(Star10),
B(CallProperty0), R(10), R(3), U8(17),
@@ -291,7 +291,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star2),
- B(LdaNamedProperty), R(2), U8(2), U8(6),
+ B(GetNamedProperty), R(2), U8(2), U8(6),
B(Star1),
B(LdaFalse),
B(Star3),
@@ -302,15 +302,15 @@ bytecodes: [
B(Star7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(3), U8(10),
+ B(GetNamedProperty), R(7), U8(3), U8(10),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(7), U8(4), U8(12),
+ B(GetNamedProperty), R(7), U8(4), U8(12),
B(Star7),
B(LdaFalse),
B(Star3),
B(Ldar), R(7),
- /* 67 E> */ B(StaNamedProperty), R(0), U8(5), U8(14),
- /* 96 S> */ B(LdaNamedProperty), R(0), U8(5), U8(16),
+ /* 67 E> */ B(SetNamedProperty), R(0), U8(5), U8(14),
+ /* 96 S> */ B(GetNamedProperty), R(0), U8(5), U8(16),
B(Star5),
B(LdaSmi), I8(1),
B(Star4),
@@ -329,7 +329,7 @@ bytecodes: [
B(Ldar), R(3),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
- B(LdaNamedProperty), R(2), U8(6), U8(18),
+ B(GetNamedProperty), R(2), U8(6), U8(18),
B(JumpIfUndefinedOrNull), U8(26),
B(Star10),
B(CallProperty0), R(10), R(2), U8(20),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 6dbdbe157f..3af4eeb214 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -21,7 +21,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(0), U8(4),
+ B(GetNamedProperty), R(5), U8(0), U8(4),
B(Star4),
B(LdaFalse),
B(Star6),
@@ -32,9 +32,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(1), U8(8),
+ B(GetNamedProperty), R(10), U8(1), U8(8),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(10), U8(2), U8(10),
+ B(GetNamedProperty), R(10), U8(2), U8(10),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -56,7 +56,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
- B(LdaNamedProperty), R(5), U8(3), U8(12),
+ B(GetNamedProperty), R(5), U8(3), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star12),
B(CallProperty0), R(12), R(5), U8(14),
@@ -122,7 +122,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(2), U8(4),
+ B(GetNamedProperty), R(5), U8(2), U8(4),
B(Star4),
B(LdaFalse),
B(Star6),
@@ -133,9 +133,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(3), U8(8),
+ B(GetNamedProperty), R(10), U8(3), U8(8),
B(JumpIfToBooleanTrue), U8(69),
- B(LdaNamedProperty), R(10), U8(4), U8(10),
+ B(GetNamedProperty), R(10), U8(4), U8(10),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -178,7 +178,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
- B(LdaNamedProperty), R(5), U8(8), U8(16),
+ B(GetNamedProperty), R(5), U8(8), U8(16),
B(JumpIfUndefinedOrNull), U8(26),
B(Star13),
B(CallProperty0), R(13), R(5), U8(18),
@@ -234,7 +234,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star3),
- B(LdaNamedProperty), R(3), U8(0), U8(4),
+ B(GetNamedProperty), R(3), U8(0), U8(4),
B(Star2),
B(LdaFalse),
B(Star4),
@@ -245,9 +245,9 @@ bytecodes: [
B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(1), U8(8),
+ B(GetNamedProperty), R(8), U8(1), U8(8),
B(JumpIfToBooleanTrue), U8(39),
- B(LdaNamedProperty), R(8), U8(2), U8(10),
+ B(GetNamedProperty), R(8), U8(2), U8(10),
B(Star8),
B(LdaFalse),
B(Star4),
@@ -277,7 +277,7 @@ bytecodes: [
B(Ldar), R(4),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
- B(LdaNamedProperty), R(3), U8(5), U8(14),
+ B(GetNamedProperty), R(3), U8(5), U8(14),
B(JumpIfUndefinedOrNull), U8(26),
B(Star11),
B(CallProperty0), R(11), R(3), U8(16),
@@ -329,7 +329,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star7),
- B(LdaNamedProperty), R(7), U8(0), U8(4),
+ B(GetNamedProperty), R(7), U8(0), U8(4),
B(Star6),
B(LdaFalse),
B(Star8),
@@ -340,16 +340,16 @@ bytecodes: [
B(Star12),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(LdaNamedProperty), R(12), U8(1), U8(8),
+ B(GetNamedProperty), R(12), U8(1), U8(8),
B(JumpIfToBooleanTrue), U8(31),
- B(LdaNamedProperty), R(12), U8(2), U8(10),
+ B(GetNamedProperty), R(12), U8(2), U8(10),
B(Star12),
B(LdaFalse),
B(Star8),
B(Mov), R(12), R(0),
- /* 31 S> */ B(LdaNamedProperty), R(0), U8(3), U8(12),
+ /* 31 S> */ B(GetNamedProperty), R(0), U8(3), U8(12),
B(Star3),
- /* 34 S> */ B(LdaNamedProperty), R(0), U8(4), U8(14),
+ /* 34 S> */ B(GetNamedProperty), R(0), U8(4), U8(14),
B(Star4),
/* 56 S> */ B(Ldar), R(4),
/* 58 E> */ B(Add), R(3), U8(16),
@@ -368,7 +368,7 @@ bytecodes: [
B(Ldar), R(8),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(13),
- B(LdaNamedProperty), R(7), U8(5), U8(17),
+ B(GetNamedProperty), R(7), U8(5), U8(17),
B(JumpIfUndefinedOrNull), U8(26),
B(Star14),
B(CallProperty0), R(14), R(7), U8(19),
@@ -434,7 +434,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star6),
- B(LdaNamedProperty), R(6), U8(3), U8(4),
+ B(GetNamedProperty), R(6), U8(3), U8(4),
B(Star5),
B(LdaFalse),
B(Star7),
@@ -445,9 +445,9 @@ bytecodes: [
B(Star11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(4), U8(8),
+ B(GetNamedProperty), R(11), U8(4), U8(8),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(11), U8(5), U8(10),
+ B(GetNamedProperty), R(11), U8(5), U8(10),
B(Star11),
B(LdaFalse),
B(Star7),
@@ -469,7 +469,7 @@ bytecodes: [
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
- B(LdaNamedProperty), R(6), U8(6), U8(12),
+ B(GetNamedProperty), R(6), U8(6), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star13),
B(CallProperty0), R(13), R(6), U8(14),
@@ -536,7 +536,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(4), U8(4),
+ B(GetNamedProperty), R(5), U8(4), U8(4),
B(Star4),
B(LdaFalse),
B(Star6),
@@ -547,9 +547,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(5), U8(8),
+ B(GetNamedProperty), R(10), U8(5), U8(8),
B(JumpIfToBooleanTrue), U8(58),
- B(LdaNamedProperty), R(10), U8(6), U8(10),
+ B(GetNamedProperty), R(10), U8(6), U8(10),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -585,7 +585,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
- B(LdaNamedProperty), R(5), U8(9), U8(12),
+ B(GetNamedProperty), R(5), U8(9), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star12),
B(CallProperty0), R(12), R(5), U8(14),
@@ -639,7 +639,7 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 173
+bytecode array length: 169
bytecodes: [
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
@@ -650,7 +650,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star7),
- B(LdaNamedProperty), R(7), U8(0), U8(4),
+ B(GetNamedProperty), R(7), U8(0), U8(4),
B(Star6),
B(LdaFalse),
B(Star8),
@@ -661,9 +661,9 @@ bytecodes: [
B(Star12),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(LdaNamedProperty), R(12), U8(1), U8(8),
+ B(GetNamedProperty), R(12), U8(1), U8(8),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(12), U8(2), U8(10),
+ B(GetNamedProperty), R(12), U8(2), U8(10),
B(Star12),
B(LdaFalse),
B(Star8),
@@ -685,7 +685,7 @@ bytecodes: [
B(Ldar), R(8),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(13),
- B(LdaNamedProperty), R(7), U8(3), U8(12),
+ B(GetNamedProperty), R(7), U8(3), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star14),
B(CallProperty0), R(14), R(7), U8(14),
@@ -708,10 +708,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star7),
- B(LdaFalse),
- B(Star8),
B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(2),
/* 60 S> */ B(Return),
B(Star6),
B(CreateCatchContext), R(6), U8(4),
@@ -722,10 +720,8 @@ bytecodes: [
B(PushContext), R(6),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star8),
- B(LdaFalse),
- B(Star9),
B(Mov), R(0), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(2),
B(Return),
]
constant pool: [
@@ -736,7 +732,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [14, 149, 149],
+ [14, 147, 147],
[36, 77, 83],
[96, 115, 117],
]
@@ -750,7 +746,7 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 207
+bytecode array length: 203
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
@@ -762,7 +758,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star6),
- B(LdaNamedProperty), R(6), U8(1), U8(4),
+ B(GetNamedProperty), R(6), U8(1), U8(4),
B(Star5),
B(LdaFalse),
B(Star7),
@@ -773,9 +769,9 @@ bytecodes: [
B(Star11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(2), U8(8),
+ B(GetNamedProperty), R(11), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(53),
- B(LdaNamedProperty), R(11), U8(3), U8(10),
+ B(GetNamedProperty), R(11), U8(3), U8(10),
B(Star11),
B(LdaFalse),
B(Star7),
@@ -809,7 +805,7 @@ bytecodes: [
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
- B(LdaNamedProperty), R(6), U8(4), U8(12),
+ B(GetNamedProperty), R(6), U8(4), U8(12),
B(JumpIfUndefinedOrNull), U8(26),
B(Star13),
B(CallProperty0), R(13), R(6), U8(14),
@@ -832,10 +828,8 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star6),
- B(LdaTrue),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(2),
/* 54 S> */ B(Return),
B(Star5),
B(CreateCatchContext), R(5), U8(5),
@@ -846,10 +840,8 @@ bytecodes: [
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star7),
- B(LdaTrue),
- B(Star8),
B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(2),
B(Return),
]
constant pool: [
@@ -861,7 +853,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [18, 183, 183],
+ [18, 181, 181],
[40, 111, 117],
[130, 149, 151],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index e5f1c46c66..99780b0c1e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -120,7 +120,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(5), U8(5),
+ B(GetNamedProperty), R(5), U8(5), U8(5),
B(Star4),
B(LdaFalse),
B(Star6),
@@ -131,9 +131,9 @@ bytecodes: [
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(6), U8(9),
+ B(GetNamedProperty), R(10), U8(6), U8(9),
B(JumpIfToBooleanTrue), U8(58),
- B(LdaNamedProperty), R(10), U8(7), U8(11),
+ B(GetNamedProperty), R(10), U8(7), U8(11),
B(Star10),
B(LdaFalse),
B(Star6),
@@ -169,7 +169,7 @@ bytecodes: [
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
- B(LdaNamedProperty), R(5), U8(10), U8(13),
+ B(GetNamedProperty), R(5), U8(10), U8(13),
B(JumpIfUndefinedOrNull), U8(26),
B(Star12),
B(CallProperty0), R(12), R(5), U8(15),
@@ -247,7 +247,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star3),
- B(LdaNamedProperty), R(3), U8(5), U8(8),
+ B(GetNamedProperty), R(3), U8(5), U8(8),
B(Star5),
B(LdaUndefined),
B(Star4),
@@ -257,19 +257,19 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(2), I8(1),
B(CallProperty1), R(5), R(3), R(4), U8(10),
B(Jump), U8(59),
- B(LdaNamedProperty), R(3), U8(8), U8(12),
+ B(GetNamedProperty), R(3), U8(8), U8(12),
B(JumpIfUndefinedOrNull), U8(10),
B(Star6),
B(CallProperty1), R(6), R(3), R(4), U8(14),
B(Jump), U8(45),
B(Ldar), R(4),
B(Return),
- B(LdaNamedProperty), R(3), U8(9), U8(16),
+ B(GetNamedProperty), R(3), U8(9), U8(16),
B(JumpIfUndefinedOrNull), U8(10),
B(Star6),
B(CallProperty1), R(6), R(3), R(4), U8(18),
B(Jump), U8(28),
- B(LdaNamedProperty), R(3), U8(8), U8(20),
+ B(GetNamedProperty), R(3), U8(8), U8(20),
B(JumpIfUndefinedOrNull), U8(17),
B(Star6),
B(CallProperty0), R(6), R(3), U8(22),
@@ -281,7 +281,7 @@ bytecodes: [
B(Star1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
- B(LdaNamedProperty), R(1), U8(10), U8(24),
+ B(GetNamedProperty), R(1), U8(10), U8(24),
B(JumpIfToBooleanTrue), U8(22),
B(Ldar), R(1),
/* 43 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
@@ -290,7 +290,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star2),
B(JumpLoop), U8(101), I8(0),
- B(LdaNamedProperty), R(1), U8(11), U8(26),
+ B(GetNamedProperty), R(1), U8(11), U8(26),
B(Star3),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFE.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFE.golden
index c212cf36fb..915158b47a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFE.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFE.golden
@@ -29,14 +29,14 @@ bytecodes: [
/* 45 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star1),
B(LdaSmi), I8(2),
- /* 49 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 49 E> */ B(SetNamedProperty), R(1), U8(1), U8(4),
/* 62 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star1),
/* 68 E> */ B(LdaGlobal), U8(0), U8(2),
B(Star2),
- /* 70 E> */ B(LdaNamedProperty), R(2), U8(1), U8(6),
- /* 66 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
- /* 98 S> */ B(LdaNamedProperty), R(0), U8(3), U8(10),
+ /* 70 E> */ B(GetNamedProperty), R(2), U8(1), U8(6),
+ /* 66 E> */ B(SetNamedProperty), R(1), U8(2), U8(8),
+ /* 98 S> */ B(GetNamedProperty), R(0), U8(3), U8(10),
/* 105 S> */ B(Return),
]
constant pool: [
@@ -75,7 +75,7 @@ bytecodes: [
/* 31 E> */ B(StaGlobal), U8(1), U8(1),
/* 93 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star1),
- /* 99 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
+ /* 99 E> */ B(GetNamedProperty), R(1), U8(2), U8(5),
B(Star1),
B(LdaSmi), I8(3),
/* 101 E> */ B(TestLessThan), R(1), U8(7),
@@ -83,15 +83,15 @@ bytecodes: [
/* 118 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star1),
B(LdaSmi), I8(3),
- /* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ /* 122 E> */ B(SetNamedProperty), R(1), U8(2), U8(8),
B(Jump), U8(18),
/* 154 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star1),
/* 160 E> */ B(LdaGlobal), U8(1), U8(3),
B(Star2),
- /* 162 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
- /* 158 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
- /* 200 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
+ /* 162 E> */ B(GetNamedProperty), R(2), U8(3), U8(10),
+ /* 158 E> */ B(SetNamedProperty), R(1), U8(2), U8(8),
+ /* 200 S> */ B(GetNamedProperty), R(0), U8(4), U8(12),
/* 207 S> */ B(Return),
]
constant pool: [
@@ -130,22 +130,22 @@ bytecode array length: 121
bytecodes: [
/* 237 E> */ B(CreateMappedArguments),
B(Star0),
- /* 255 S> */ B(LdaNamedProperty), R(this), U8(0), U8(0),
+ /* 255 S> */ B(GetNamedProperty), R(this), U8(0), U8(0),
B(Star1),
/* 255 E> */ B(CallProperty0), R(1), R(this), U8(2),
- /* 274 S> */ B(LdaNamedProperty), R(this), U8(1), U8(4),
+ /* 274 S> */ B(GetNamedProperty), R(this), U8(1), U8(4),
B(Star1),
B(LdaSmi), I8(1),
B(Star3),
/* 274 E> */ B(CallProperty1), R(1), R(this), R(3), U8(6),
- /* 294 S> */ B(LdaNamedProperty), R(this), U8(2), U8(8),
+ /* 294 S> */ B(GetNamedProperty), R(this), U8(2), U8(8),
B(Star1),
B(LdaSmi), I8(1),
B(Star3),
B(LdaSmi), I8(2),
B(Star4),
/* 294 E> */ B(CallProperty2), R(1), R(this), R(3), R(4), U8(10),
- /* 317 S> */ B(LdaNamedProperty), R(this), U8(3), U8(12),
+ /* 317 S> */ B(GetNamedProperty), R(this), U8(3), U8(12),
B(Star1),
B(LdaSmi), I8(1),
B(Star3),
@@ -155,7 +155,7 @@ bytecodes: [
B(Star5),
B(Mov), R(this), R(2),
/* 317 E> */ B(CallProperty), R(1), R(2), U8(4), U8(14),
- /* 343 S> */ B(LdaNamedProperty), R(this), U8(4), U8(16),
+ /* 343 S> */ B(GetNamedProperty), R(this), U8(4), U8(16),
B(Star1),
B(LdaSmi), I8(1),
B(Star3),
@@ -167,7 +167,7 @@ bytecodes: [
B(Star6),
B(Mov), R(this), R(2),
/* 343 E> */ B(CallProperty), R(1), R(2), U8(5), U8(18),
- /* 372 S> */ B(LdaNamedProperty), R(this), U8(5), U8(20),
+ /* 372 S> */ B(GetNamedProperty), R(this), U8(5), U8(20),
B(Star1),
B(LdaSmi), I8(1),
B(Star3),
@@ -181,7 +181,7 @@ bytecodes: [
B(Star7),
B(Mov), R(this), R(2),
/* 372 E> */ B(CallProperty), R(1), R(2), U8(6), U8(22),
- /* 416 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
+ /* 416 S> */ B(GetNamedProperty), R(0), U8(6), U8(24),
/* 423 S> */ B(Return),
]
constant pool: [
@@ -270,7 +270,7 @@ bytecodes: [
B(LdaSmi), I8(5),
B(Star6),
/* 294 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(5), U8(22),
- /* 338 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
+ /* 338 S> */ B(GetNamedProperty), R(0), U8(6), U8(24),
/* 345 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index f69f618b6f..cd9d77d421 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -206,134 +206,134 @@ bytecode array length: 521
bytecodes: [
/* 33 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 67 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 80 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 93 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 106 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 119 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 132 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 145 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 158 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 171 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 184 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 197 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 210 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 223 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 236 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 249 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 262 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 275 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 288 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 301 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 314 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 327 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 340 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 353 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 366 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 379 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 392 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 405 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 418 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 431 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 444 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 457 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 470 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 483 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 496 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 509 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 522 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 535 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 548 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 561 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 574 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 587 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 600 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 613 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 626 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 639 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 652 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 665 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 678 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 691 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 704 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 717 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 730 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 743 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 756 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 769 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 782 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 795 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 808 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 821 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 834 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 847 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 860 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 873 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 886 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 899 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 912 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 925 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 938 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 951 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 964 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 977 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 990 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 1003 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1016 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1029 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1042 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1055 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1068 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1081 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1094 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1107 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1120 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1133 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1146 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1159 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1172 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1185 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1198 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1211 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1224 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1237 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1250 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1263 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1276 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1289 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1302 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1315 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1328 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1341 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1354 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1367 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1380 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1393 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1406 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1419 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1432 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1445 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1458 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1471 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1484 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1497 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1510 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1523 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1536 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1549 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1562 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1575 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1588 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1601 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1614 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1627 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1640 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1653 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1666 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1679 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1692 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
+ /* 41 S> */ B(GetNamedProperty), R(0), U8(0), U8(0),
+ /* 54 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 67 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 80 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 93 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 106 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 119 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 132 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 145 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 158 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 171 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 184 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 197 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 210 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 223 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 236 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 249 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 262 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 275 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 288 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 301 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 314 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 327 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 340 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 353 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 366 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 379 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 392 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 405 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 418 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 431 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 444 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 457 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 470 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 483 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 496 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 509 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 522 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 535 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 548 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 561 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 574 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 587 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 600 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 613 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 626 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 639 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 652 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 665 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 678 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 691 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 704 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 717 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 730 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 743 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 756 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 769 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 782 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 795 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 808 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 821 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 834 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 847 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 860 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 873 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 886 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 899 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 912 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 925 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 938 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 951 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 964 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 977 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 990 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 1003 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1016 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1029 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1042 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1055 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1068 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1081 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1094 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1107 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1120 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1133 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1146 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1159 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1172 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1185 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1198 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1211 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1224 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1237 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1250 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1263 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1276 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1289 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1302 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1315 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1328 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1341 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1354 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1367 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1380 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1393 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1406 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1419 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1432 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1445 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1458 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1471 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1484 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1497 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1510 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1523 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1536 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1549 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1562 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1575 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1588 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1601 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1614 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1627 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1640 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1653 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1666 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1679 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1692 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
/* 1703 S> */ B(Wide), B(LdaGlobal), U16(128), U16(256),
/* 1712 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 670b9c4e7b..c587fa5f6a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -473,9 +473,9 @@ bytecodes: [
/* 0 E> */ B(Throw),
B(Ldar), R(3),
B(Return),
- /* 31 S> */ B(LdaNamedProperty), R(1), U8(3), U8(0),
+ /* 31 S> */ B(GetNamedProperty), R(1), U8(3), U8(0),
B(Star3),
- /* 42 E> */ B(LdaNamedProperty), R(1), U8(4), U8(2),
+ /* 42 E> */ B(GetNamedProperty), R(1), U8(4), U8(2),
B(Star6),
/* 31 E> */ B(CallProperty2), R(3), R(1), R(1), R(6), U8(4),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 1a40a1833b..b879703a4f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -90,7 +90,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 117
+bytecode array length: 110
bytecodes: [
B(CreateBlockContext), U8(0),
B(PushContext), R(1),
@@ -105,38 +105,33 @@ bytecodes: [
B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
- /* 89 S> */ B(CreateEmptyArrayLiteral), U8(0),
+ /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
B(Star3),
- B(LdaZero),
- B(Star2),
- B(LdaZero),
- B(StaInArrayLiteral), R(3), R(2), U8(1),
- B(Ldar), R(2),
- B(Inc), U8(3),
+ B(LdaSmi), I8(1),
B(Star2),
- /* 101 S> */ B(CreateArrayLiteral), U8(3), U8(4), U8(37),
+ /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(1), U8(37),
B(Star6),
- /* 101 E> */ B(GetIterator), R(6), U8(5), U8(7),
+ /* 101 E> */ B(GetIterator), R(6), U8(2), U8(4),
B(Mov), R(4), R(1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(4), U8(9),
+ B(GetNamedProperty), R(5), U8(5), U8(6),
B(Star4),
B(CallProperty0), R(4), R(5), U8(15),
B(Star6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(5), U8(17),
+ B(GetNamedProperty), R(6), U8(6), U8(17),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(6), U8(6), U8(11),
- B(StaInArrayLiteral), R(3), R(2), U8(1),
+ B(GetNamedProperty), R(6), U8(7), U8(8),
+ B(StaInArrayLiteral), R(3), R(2), U8(13),
B(Ldar), R(2),
- B(Inc), U8(3),
+ B(Inc), U8(12),
B(Star2),
B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(2), U8(1),
+ B(StaInArrayLiteral), R(3), R(2), U8(13),
B(Mov), R(3), R(2),
/* 89 E> */ B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
B(LdaUndefined),
@@ -147,6 +142,7 @@ constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index fb3447b39e..606a6ad481 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -51,7 +51,7 @@ bytecodes: [
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star1),
B(Ldar), R(0),
- /* 75 E> */ B(StaNamedOwnProperty), R(1), U8(1), U8(1),
+ /* 75 E> */ B(DefineNamedOwnProperty), R(1), U8(1), U8(1),
B(Ldar), R(1),
/* 79 S> */ B(Return),
]
@@ -76,7 +76,7 @@ bytecodes: [
B(Star1),
B(Ldar), R(0),
/* 69 E> */ B(AddSmi), I8(1), U8(1),
- B(StaNamedOwnProperty), R(1), U8(1), U8(2),
+ B(DefineNamedOwnProperty), R(1), U8(1), U8(2),
B(Ldar), R(1),
/* 75 S> */ B(Return),
]
@@ -98,7 +98,7 @@ bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star0),
/* 49 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(StaNamedOwnProperty), R(0), U8(2), U8(1),
+ B(DefineNamedOwnProperty), R(0), U8(2), U8(1),
B(Ldar), R(0),
/* 66 S> */ B(Return),
]
@@ -121,7 +121,7 @@ bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star0),
/* 43 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(StaNamedOwnProperty), R(0), U8(2), U8(1),
+ B(DefineNamedOwnProperty), R(0), U8(2), U8(1),
B(Ldar), R(0),
/* 67 S> */ B(Return),
]
@@ -242,7 +242,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star2),
B(Ldar), R(0),
- /* 57 E> */ B(StaKeyedPropertyAsDefine), R(1), R(2), U8(1),
+ /* 57 E> */ B(DefineKeyedOwnProperty), R(1), R(2), U8(1),
B(Ldar), R(1),
/* 61 S> */ B(Return),
]
@@ -284,7 +284,7 @@ bytecodes: [
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- /* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
+ /* 64 E> */ B(DefineKeyedOwnPropertyInLiteral), R(1), R(2), U8(0), U8(1),
B(Ldar), R(1),
/* 68 S> */ B(Return),
]
@@ -308,11 +308,11 @@ bytecodes: [
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
B(Star1),
B(Ldar), R(0),
- /* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(1),
+ /* 64 E> */ B(DefineNamedOwnProperty), R(1), U8(2), U8(1),
B(Ldar), R(0),
/* 68 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- /* 72 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(3),
+ /* 72 E> */ B(DefineKeyedOwnPropertyInLiteral), R(1), R(2), U8(0), U8(3),
B(Ldar), R(1),
/* 76 S> */ B(Return),
]
@@ -339,7 +339,7 @@ bytecodes: [
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- /* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
+ /* 64 E> */ B(DefineKeyedOwnPropertyInLiteral), R(1), R(2), U8(0), U8(1),
/* 78 E> */ B(CreateEmptyObjectLiteral),
B(Star3),
B(Mov), R(1), R(2),
@@ -369,7 +369,7 @@ bytecodes: [
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaConstant), U8(2),
- /* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
+ /* 64 E> */ B(DefineKeyedOwnPropertyInLiteral), R(1), R(2), U8(0), U8(1),
B(LdaConstant), U8(3),
B(Star3),
/* 71 E> */ B(CreateClosure), U8(4), U8(0), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 6fbb9f0c89..00cf22fb7b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -28,11 +28,11 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 67 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 67 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 76 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
- /* 81 E> */ B(LdaKeyedProperty), R(this), U8(2),
+ /* 81 E> */ B(GetKeyedProperty), R(this), U8(2),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(2), U8(1),
B(Star3),
B(CallProperty0), R(3), R(this), U8(4),
@@ -46,14 +46,14 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star3),
B(LdaImmutableCurrentContextSlot), U8(3),
- /* 96 E> */ B(LdaKeyedProperty), R(this), U8(9),
+ /* 96 E> */ B(GetKeyedProperty), R(this), U8(9),
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(2), U8(1),
B(Star4),
B(CallProperty1), R(4), R(this), R(3), U8(11),
/* 108 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
- /* 120 E> */ B(LdaKeyedProperty), R(this), U8(13),
+ /* 120 E> */ B(GetKeyedProperty), R(this), U8(13),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(2), U8(1),
B(Star3),
B(CallProperty0), R(3), R(this), U8(15),
@@ -80,10 +80,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 58 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(286),
+ /* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(296),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -112,10 +112,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 51 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(285),
+ /* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(295),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -144,10 +144,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 58 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(286),
+ /* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(296),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -176,10 +176,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 51 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(285),
+ /* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(295),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
index e02111d2a9..647027c27f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
@@ -23,7 +23,7 @@ frame size: 4
parameter count: 1
bytecode array length: 28
bytecodes: [
- /* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
+ /* 35 E> */ B(GetNamedProperty), R(closure), U8(0), U8(0),
B(JumpIfUndefined), U8(10),
B(Star1),
B(CallProperty0), R(1), R(this), U8(2),
@@ -31,8 +31,8 @@ bytecodes: [
/* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star3),
B(LdaImmutableCurrentContextSlot), U8(3),
- /* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
- /* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
+ /* 59 E> */ B(GetKeyedProperty), R(this), U8(4),
+ /* 52 E> */ B(SetKeyedProperty), R(this), R(3), U8(6),
B(LdaUndefined),
/* 65 S> */ B(Return),
]
@@ -62,7 +62,7 @@ frame size: 4
parameter count: 1
bytecode array length: 28
bytecodes: [
- /* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
+ /* 35 E> */ B(GetNamedProperty), R(closure), U8(0), U8(0),
B(JumpIfUndefined), U8(10),
B(Star1),
B(CallProperty0), R(1), R(this), U8(2),
@@ -70,8 +70,8 @@ bytecodes: [
/* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star3),
B(LdaImmutableCurrentContextSlot), U8(3),
- /* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
- /* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
+ /* 59 E> */ B(GetKeyedProperty), R(this), U8(4),
+ /* 52 E> */ B(SetKeyedProperty), R(this), R(3), U8(6),
B(LdaUndefined),
/* 65 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index aa39cb5ab9..ae031860d7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -45,7 +45,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(5), U8(0),
+ B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(3), R(0),
B(CreateBlockContext), U8(6),
@@ -67,7 +67,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(9), U8(3), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(5), U8(2),
+ B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 136 S> */ B(Ldar), R(0),
@@ -158,7 +158,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(6), U8(2), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(0),
+ B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(4), R(0),
B(CreateBlockContext), U8(8),
@@ -198,7 +198,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(16), U8(7), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(2),
+ B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(4), R(1),
B(CreateBlockContext), U8(17),
@@ -219,7 +219,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(20), U8(9), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(4),
+ B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(4), R(2),
/* 430 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index 953c6c98b3..a26dd54cf9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -23,9 +23,9 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 61 E> */ B(LdaKeyedProperty), R(this), U8(2),
+ /* 61 E> */ B(GetKeyedProperty), R(this), U8(2),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star1),
/* 63 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(4),
@@ -53,10 +53,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 54 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(284),
+ /* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(294),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -86,10 +86,10 @@ bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(StaKeyedPropertyAsDefine), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 54 E> */ B(LdaKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(284),
+ /* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(294),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -123,13 +123,13 @@ bytecodes: [
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
B(Star1),
B(Ldar), R(0),
- B(StaKeyedPropertyAsDefine), R(this), R(1), U8(0),
+ B(DefineKeyedOwnProperty), R(this), R(1), U8(0),
/* 49 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star3),
/* 61 E> */ B(CallUndefinedReceiver0), R(3), U8(2),
B(Star3),
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
- /* 63 E> */ B(LdaKeyedProperty), R(3), U8(4),
+ /* 63 E> */ B(GetKeyedProperty), R(3), U8(4),
B(LdaImmutableContextSlot), R(0), U8(2), U8(0),
B(Star2),
/* 66 E> */ B(CallAnyReceiver), R(2), R(3), U8(1), U8(6),
@@ -143,3 +143,148 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+ var test;
+ class F extends class {} {
+ #method() { }
+ constructor() {
+ (test = () => super())();
+ this.#method();
+ }
+ };
+ new F;
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 58
+bytecodes: [
+ /* 89 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(GetSuperConstructor), R(1),
+ B(ThrowIfNotSuperConstructor), R(1),
+ B(Star0),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 89 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
+ B(Star2),
+ B(LdaCurrentContextSlot), U8(2),
+ B(ThrowSuperAlreadyCalledIfNotHole),
+ B(Ldar), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaImmutableContextSlot), R(context), U8(3), U8(1),
+ B(Star4),
+ B(LdaSmi), I8(1),
+ B(Star6),
+ B(Mov), R(2), R(3),
+ B(Mov), R(context), R(5),
+ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(3), U8(4),
+ B(GetNamedProperty), R(0), U8(0), U8(2),
+ B(JumpIfUndefined), U8(10),
+ B(Star8),
+ B(CallProperty0), R(8), R(2), U8(4),
+ B(Mov), R(2), R(7),
+ B(Ldar), R(2),
+ /* 96 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var test;
+ class G extends class {} {
+ #method() { }
+ constructor() {
+ test = () => super();
+ test();
+ this.#method();
+ }
+ };
+ new G();
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 58
+bytecodes: [
+ /* 88 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(GetSuperConstructor), R(1),
+ B(ThrowIfNotSuperConstructor), R(1),
+ B(Star0),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 88 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
+ B(Star2),
+ B(LdaCurrentContextSlot), U8(2),
+ B(ThrowSuperAlreadyCalledIfNotHole),
+ B(Ldar), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaImmutableContextSlot), R(context), U8(3), U8(1),
+ B(Star4),
+ B(LdaSmi), I8(1),
+ B(Star6),
+ B(Mov), R(2), R(3),
+ B(Mov), R(context), R(5),
+ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(3), U8(4),
+ B(GetNamedProperty), R(0), U8(0), U8(2),
+ B(JumpIfUndefined), U8(10),
+ B(Star8),
+ B(CallProperty0), R(8), R(2), U8(4),
+ B(Mov), R(2), R(7),
+ B(Ldar), R(2),
+ /* 95 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var test;
+ class H extends class {} {
+ #method() { }
+ constructor(str) {
+ eval(str);
+ this.#method();
+ }
+ };
+ new test('test = () => super(); test()');
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 58
+bytecodes: [
+ /* 88 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(GetSuperConstructor), R(1),
+ B(ThrowIfNotSuperConstructor), R(1),
+ B(Star0),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 88 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
+ B(Star2),
+ B(LdaCurrentContextSlot), U8(2),
+ B(ThrowSuperAlreadyCalledIfNotHole),
+ B(Ldar), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaImmutableContextSlot), R(context), U8(3), U8(1),
+ B(Star4),
+ B(LdaSmi), I8(1),
+ B(Star6),
+ B(Mov), R(2), R(3),
+ B(Mov), R(context), R(5),
+ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(3), U8(4),
+ B(GetNamedProperty), R(0), U8(0), U8(2),
+ B(JumpIfUndefined), U8(10),
+ B(Star8),
+ B(CallProperty0), R(8), R(2), U8(4),
+ B(Mov), R(2), R(7),
+ B(Ldar), R(2),
+ /* 95 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index f2ef6f1f92..e0d58d38e7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -15,7 +15,7 @@ frame size: 1
parameter count: 2
bytecode array length: 10
bytecodes: [
- /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 25 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
B(Star0),
/* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(2),
/* 32 S> */ B(Return),
@@ -35,7 +35,7 @@ frame size: 1
parameter count: 4
bytecode array length: 12
bytecodes: [
- /* 31 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 31 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
B(Star0),
/* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(2),
/* 42 S> */ B(Return),
@@ -55,7 +55,7 @@ frame size: 3
parameter count: 3
bytecode array length: 18
bytecodes: [
- /* 28 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 28 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
B(Star0),
B(Ldar), R(arg1),
/* 35 E> */ B(Add), R(arg1), U8(2),
@@ -211,136 +211,136 @@ bytecode array length: 540
bytecodes: [
/* 26 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
- /* 47 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 60 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 73 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 86 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 99 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 112 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 125 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 138 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 151 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 164 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 177 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 190 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 203 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 216 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 229 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 242 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 255 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 268 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 281 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 294 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 307 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 320 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 333 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 346 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 359 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 372 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 385 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 398 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 411 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 424 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 437 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 450 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 463 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 476 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 489 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 502 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 515 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 528 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 541 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 554 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 567 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 580 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 593 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 606 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 619 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 632 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 645 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 658 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 671 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 684 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 697 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 710 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 723 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 736 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 749 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 762 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 775 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 788 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 801 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 814 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 827 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 840 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 853 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 866 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 879 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 892 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 905 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 918 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 931 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 944 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 957 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 970 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 983 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 996 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1009 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1022 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1035 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1048 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1061 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1074 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1087 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1100 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1113 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1126 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1139 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1152 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1165 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1178 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1191 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1204 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1217 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1230 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1243 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1256 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1269 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1282 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1295 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1308 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1321 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1334 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1347 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1360 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1373 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1386 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1399 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1412 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1425 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1438 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1451 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1464 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1477 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1490 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1503 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1516 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1529 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1542 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1555 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1568 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1581 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1594 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1607 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1620 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1633 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1646 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1659 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1672 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1685 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
- /* 1698 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(128), U16(256),
- /* 1715 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(128), U16(256),
+ /* 34 S> */ B(GetNamedProperty), R(0), U8(0), U8(0),
+ /* 47 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 60 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 73 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 86 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 99 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 112 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 125 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 138 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 151 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 164 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 177 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 190 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 203 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 216 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 229 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 242 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 255 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 268 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 281 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 294 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 307 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 320 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 333 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 346 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 359 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 372 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 385 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 398 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 411 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 424 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 437 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 450 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 463 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 476 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 489 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 502 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 515 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 528 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 541 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 554 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 567 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 580 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 593 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 606 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 619 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 632 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 645 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 658 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 671 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 684 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 697 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 710 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 723 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 736 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 749 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 762 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 775 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 788 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 801 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 814 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 827 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 840 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 853 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 866 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 879 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 892 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 905 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 918 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 931 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 944 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 957 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 970 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 983 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 996 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1009 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1022 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1035 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1048 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1061 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1074 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1087 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1100 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1113 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1126 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1139 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1152 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1165 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1178 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1191 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1204 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1217 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1230 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1243 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1256 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1269 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1282 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1295 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1308 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1321 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1334 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1347 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1360 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1373 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1386 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1399 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1412 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1425 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1438 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1451 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1464 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1477 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1490 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1503 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1516 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1529 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1542 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1555 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1568 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1581 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1594 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1607 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1620 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1633 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1646 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1659 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1672 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1685 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
+ /* 1698 S> */ B(Wide), B(GetNamedProperty), R16(arg0), U16(128), U16(256),
+ /* 1715 S> */ B(Wide), B(GetNamedProperty), R16(arg0), U16(128), U16(256),
B(Star1),
/* 1715 E> */ B(Wide), B(CallProperty0), R16(1), R16(arg0), U16(258),
/* 1722 S> */ B(Return),
@@ -488,19 +488,19 @@ frame size: 5
parameter count: 2
bytecode array length: 42
bytecodes: [
- /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 25 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
B(Star2),
B(LdaSmi), I8(1),
B(Star4),
/* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(2),
B(Star2),
- /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(4),
+ /* 32 E> */ B(GetNamedProperty), R(2), U8(0), U8(4),
B(Star1),
B(LdaSmi), I8(2),
B(Star3),
/* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(6),
B(Star1),
- /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(8),
+ /* 40 E> */ B(GetNamedProperty), R(1), U8(0), U8(8),
B(Star0),
B(LdaSmi), I8(3),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStore.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStore.golden
index 179bf0dc63..c53128363e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStore.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStore.golden
@@ -27,24 +27,24 @@ bytecodes: [
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
B(Star1),
- /* 71 E> */ B(LdaNamedProperty), R(1), U8(2), U8(6),
+ /* 71 E> */ B(GetNamedProperty), R(1), U8(2), U8(6),
B(Star1),
/* 80 E> */ B(LdaGlobal), U8(1), U8(4),
B(Star2),
- /* 81 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ /* 81 E> */ B(GetNamedProperty), R(2), U8(3), U8(8),
/* 78 E> */ B(Add), R(1), U8(3),
/* 68 E> */ B(StaGlobal), U8(4), U8(10),
/* 95 S> */ B(LdaGlobal), U8(1), U8(4),
B(Star1),
B(LdaSmi), I8(7),
- /* 103 E> */ B(StaNamedProperty), R(1), U8(3), U8(12),
+ /* 103 E> */ B(SetNamedProperty), R(1), U8(3), U8(12),
/* 114 S> */ B(LdaGlobal), U8(1), U8(4),
B(Star1),
/* 124 E> */ B(LdaGlobal), U8(1), U8(4),
B(Star2),
- /* 125 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ /* 125 E> */ B(GetNamedProperty), R(2), U8(3), U8(8),
B(Star2),
- /* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(14),
+ /* 122 E> */ B(SetNamedProperty), R(1), U8(2), U8(14),
B(Mov), R(2), R(0),
B(Ldar), R(0),
/* 139 S> */ B(Return),
@@ -81,7 +81,7 @@ bytecodes: [
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 65 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star1),
- /* 70 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
+ /* 70 E> */ B(GetNamedProperty), R(1), U8(2), U8(5),
B(Star1),
B(LdaSmi), I8(3),
/* 77 E> */ B(TestLessThan), R(1), U8(7),
@@ -90,7 +90,7 @@ bytecodes: [
B(Star1),
B(LdaSmi), I8(3),
B(Star2),
- /* 100 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ /* 100 E> */ B(SetNamedProperty), R(1), U8(2), U8(8),
B(Mov), R(2), R(0),
B(Ldar), R(2),
B(Jump), U8(18),
@@ -98,7 +98,7 @@ bytecodes: [
B(Star1),
B(LdaSmi), I8(3),
B(Star2),
- /* 136 E> */ B(StaNamedProperty), R(1), U8(3), U8(10),
+ /* 136 E> */ B(SetNamedProperty), R(1), U8(3), U8(10),
B(Mov), R(2), R(0),
B(Ldar), R(2),
B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index 242b24b70c..10faccc0e2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -15,7 +15,7 @@ frame size: 0
parameter count: 2
bytecode array length: 5
bytecodes: [
- /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 25 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
/* 30 S> */ B(Return),
]
constant pool: [
@@ -33,7 +33,7 @@ frame size: 0
parameter count: 2
bytecode array length: 5
bytecodes: [
- /* 24 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 24 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -52,7 +52,7 @@ parameter count: 2
bytecode array length: 6
bytecodes: [
/* 16 S> */ B(LdaSmi), I8(100),
- /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
+ /* 24 E> */ B(GetKeyedProperty), R(arg0), U8(0),
/* 30 S> */ B(Return),
]
constant pool: [
@@ -70,7 +70,7 @@ parameter count: 3
bytecode array length: 6
bytecodes: [
/* 19 S> */ B(Ldar), R(arg1),
- /* 27 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
+ /* 27 E> */ B(GetKeyedProperty), R(arg0), U8(0),
/* 31 S> */ B(Return),
]
constant pool: [
@@ -87,10 +87,10 @@ frame size: 1
parameter count: 2
bytecode array length: 11
bytecodes: [
- /* 26 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 26 S> */ B(GetNamedProperty), R(arg0), U8(0), U8(0),
B(Star0),
/* 32 S> */ B(LdaSmi), I8(-124),
- /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 40 E> */ B(GetKeyedProperty), R(arg0), U8(2),
/* 47 S> */ B(Return),
]
constant pool: [
@@ -241,135 +241,135 @@ bytecode array length: 523
bytecodes: [
/* 26 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
- /* 45 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 56 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 67 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 78 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 89 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 100 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 111 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 122 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 133 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 144 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 156 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 168 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 180 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 192 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 204 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 216 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 228 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 240 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 252 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 264 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 276 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 288 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 300 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 312 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 324 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 336 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 348 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 360 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 372 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 384 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 396 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 408 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 420 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 432 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 444 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 456 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 468 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 480 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 492 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 504 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 516 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 528 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 540 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 552 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 564 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 576 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 588 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 600 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 612 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 624 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 636 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 648 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 660 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 672 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 684 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 696 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 708 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 720 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 732 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 744 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 756 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 768 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 780 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 792 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 804 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 816 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 828 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 840 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 852 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 864 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 876 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 888 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 900 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 912 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 924 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 936 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 948 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 960 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 972 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 984 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 996 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1008 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1020 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1032 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1044 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1056 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1068 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1080 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1092 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1104 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1116 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1128 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1140 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1152 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1164 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1176 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1188 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1200 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1212 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1224 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1237 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1250 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1263 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1276 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1289 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1302 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1315 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1328 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1341 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1354 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1367 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1380 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1393 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1406 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1419 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1432 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1445 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1458 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1471 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1484 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1497 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1510 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1523 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1536 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1549 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1562 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1575 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
- /* 1595 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(128), U16(256),
+ /* 34 S> */ B(GetNamedProperty), R(0), U8(0), U8(0),
+ /* 45 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 56 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 67 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 78 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 89 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 100 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 111 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 122 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 133 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 144 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 156 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 168 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 180 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 192 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 204 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 216 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 228 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 240 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 252 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 264 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 276 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 288 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 300 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 312 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 324 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 336 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 348 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 360 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 372 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 384 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 396 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 408 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 420 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 432 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 444 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 456 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 468 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 480 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 492 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 504 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 516 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 528 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 540 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 552 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 564 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 576 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 588 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 600 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 612 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 624 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 636 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 648 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 660 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 672 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 684 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 696 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 708 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 720 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 732 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 744 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 756 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 768 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 780 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 792 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 804 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 816 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 828 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 840 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 852 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 864 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 876 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 888 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 900 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 912 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 924 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 936 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 948 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 960 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 972 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 984 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 996 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1008 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1020 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1032 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1044 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1056 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1068 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1080 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1092 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1104 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1116 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1128 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1140 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1152 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1164 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1176 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1188 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1200 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1212 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1224 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1237 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1250 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1263 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1276 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1289 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1302 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1315 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1328 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1341 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1354 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1367 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1380 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1393 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1406 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1419 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1432 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1445 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1458 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1471 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1484 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1497 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1510 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1523 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1536 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1549 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1562 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1575 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
+ /* 1595 S> */ B(Wide), B(GetNamedProperty), R16(arg0), U16(128), U16(256),
/* 1600 S> */ B(Return),
]
constant pool: [
@@ -647,391 +647,391 @@ parameter count: 3
bytecode array length: 777
bytecodes: [
/* 30 S> */ B(Ldar), R(arg1),
- /* 35 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
+ /* 35 E> */ B(GetKeyedProperty), R(arg0), U8(0),
B(Star0),
/* 42 S> */ B(Ldar), R(arg1),
- /* 47 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 47 E> */ B(GetKeyedProperty), R(arg0), U8(2),
B(Star0),
/* 54 S> */ B(Ldar), R(arg1),
- /* 59 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
+ /* 59 E> */ B(GetKeyedProperty), R(arg0), U8(4),
B(Star0),
/* 66 S> */ B(Ldar), R(arg1),
- /* 71 E> */ B(LdaKeyedProperty), R(arg0), U8(6),
+ /* 71 E> */ B(GetKeyedProperty), R(arg0), U8(6),
B(Star0),
/* 78 S> */ B(Ldar), R(arg1),
- /* 83 E> */ B(LdaKeyedProperty), R(arg0), U8(8),
+ /* 83 E> */ B(GetKeyedProperty), R(arg0), U8(8),
B(Star0),
/* 90 S> */ B(Ldar), R(arg1),
- /* 95 E> */ B(LdaKeyedProperty), R(arg0), U8(10),
+ /* 95 E> */ B(GetKeyedProperty), R(arg0), U8(10),
B(Star0),
/* 102 S> */ B(Ldar), R(arg1),
- /* 107 E> */ B(LdaKeyedProperty), R(arg0), U8(12),
+ /* 107 E> */ B(GetKeyedProperty), R(arg0), U8(12),
B(Star0),
/* 114 S> */ B(Ldar), R(arg1),
- /* 119 E> */ B(LdaKeyedProperty), R(arg0), U8(14),
+ /* 119 E> */ B(GetKeyedProperty), R(arg0), U8(14),
B(Star0),
/* 126 S> */ B(Ldar), R(arg1),
- /* 131 E> */ B(LdaKeyedProperty), R(arg0), U8(16),
+ /* 131 E> */ B(GetKeyedProperty), R(arg0), U8(16),
B(Star0),
/* 138 S> */ B(Ldar), R(arg1),
- /* 143 E> */ B(LdaKeyedProperty), R(arg0), U8(18),
+ /* 143 E> */ B(GetKeyedProperty), R(arg0), U8(18),
B(Star0),
/* 150 S> */ B(Ldar), R(arg1),
- /* 155 E> */ B(LdaKeyedProperty), R(arg0), U8(20),
+ /* 155 E> */ B(GetKeyedProperty), R(arg0), U8(20),
B(Star0),
/* 162 S> */ B(Ldar), R(arg1),
- /* 167 E> */ B(LdaKeyedProperty), R(arg0), U8(22),
+ /* 167 E> */ B(GetKeyedProperty), R(arg0), U8(22),
B(Star0),
/* 174 S> */ B(Ldar), R(arg1),
- /* 179 E> */ B(LdaKeyedProperty), R(arg0), U8(24),
+ /* 179 E> */ B(GetKeyedProperty), R(arg0), U8(24),
B(Star0),
/* 186 S> */ B(Ldar), R(arg1),
- /* 191 E> */ B(LdaKeyedProperty), R(arg0), U8(26),
+ /* 191 E> */ B(GetKeyedProperty), R(arg0), U8(26),
B(Star0),
/* 198 S> */ B(Ldar), R(arg1),
- /* 203 E> */ B(LdaKeyedProperty), R(arg0), U8(28),
+ /* 203 E> */ B(GetKeyedProperty), R(arg0), U8(28),
B(Star0),
/* 210 S> */ B(Ldar), R(arg1),
- /* 215 E> */ B(LdaKeyedProperty), R(arg0), U8(30),
+ /* 215 E> */ B(GetKeyedProperty), R(arg0), U8(30),
B(Star0),
/* 222 S> */ B(Ldar), R(arg1),
- /* 227 E> */ B(LdaKeyedProperty), R(arg0), U8(32),
+ /* 227 E> */ B(GetKeyedProperty), R(arg0), U8(32),
B(Star0),
/* 234 S> */ B(Ldar), R(arg1),
- /* 239 E> */ B(LdaKeyedProperty), R(arg0), U8(34),
+ /* 239 E> */ B(GetKeyedProperty), R(arg0), U8(34),
B(Star0),
/* 246 S> */ B(Ldar), R(arg1),
- /* 251 E> */ B(LdaKeyedProperty), R(arg0), U8(36),
+ /* 251 E> */ B(GetKeyedProperty), R(arg0), U8(36),
B(Star0),
/* 258 S> */ B(Ldar), R(arg1),
- /* 263 E> */ B(LdaKeyedProperty), R(arg0), U8(38),
+ /* 263 E> */ B(GetKeyedProperty), R(arg0), U8(38),
B(Star0),
/* 270 S> */ B(Ldar), R(arg1),
- /* 275 E> */ B(LdaKeyedProperty), R(arg0), U8(40),
+ /* 275 E> */ B(GetKeyedProperty), R(arg0), U8(40),
B(Star0),
/* 282 S> */ B(Ldar), R(arg1),
- /* 287 E> */ B(LdaKeyedProperty), R(arg0), U8(42),
+ /* 287 E> */ B(GetKeyedProperty), R(arg0), U8(42),
B(Star0),
/* 294 S> */ B(Ldar), R(arg1),
- /* 299 E> */ B(LdaKeyedProperty), R(arg0), U8(44),
+ /* 299 E> */ B(GetKeyedProperty), R(arg0), U8(44),
B(Star0),
/* 306 S> */ B(Ldar), R(arg1),
- /* 311 E> */ B(LdaKeyedProperty), R(arg0), U8(46),
+ /* 311 E> */ B(GetKeyedProperty), R(arg0), U8(46),
B(Star0),
/* 318 S> */ B(Ldar), R(arg1),
- /* 323 E> */ B(LdaKeyedProperty), R(arg0), U8(48),
+ /* 323 E> */ B(GetKeyedProperty), R(arg0), U8(48),
B(Star0),
/* 330 S> */ B(Ldar), R(arg1),
- /* 335 E> */ B(LdaKeyedProperty), R(arg0), U8(50),
+ /* 335 E> */ B(GetKeyedProperty), R(arg0), U8(50),
B(Star0),
/* 342 S> */ B(Ldar), R(arg1),
- /* 347 E> */ B(LdaKeyedProperty), R(arg0), U8(52),
+ /* 347 E> */ B(GetKeyedProperty), R(arg0), U8(52),
B(Star0),
/* 354 S> */ B(Ldar), R(arg1),
- /* 359 E> */ B(LdaKeyedProperty), R(arg0), U8(54),
+ /* 359 E> */ B(GetKeyedProperty), R(arg0), U8(54),
B(Star0),
/* 366 S> */ B(Ldar), R(arg1),
- /* 371 E> */ B(LdaKeyedProperty), R(arg0), U8(56),
+ /* 371 E> */ B(GetKeyedProperty), R(arg0), U8(56),
B(Star0),
/* 378 S> */ B(Ldar), R(arg1),
- /* 383 E> */ B(LdaKeyedProperty), R(arg0), U8(58),
+ /* 383 E> */ B(GetKeyedProperty), R(arg0), U8(58),
B(Star0),
/* 390 S> */ B(Ldar), R(arg1),
- /* 395 E> */ B(LdaKeyedProperty), R(arg0), U8(60),
+ /* 395 E> */ B(GetKeyedProperty), R(arg0), U8(60),
B(Star0),
/* 402 S> */ B(Ldar), R(arg1),
- /* 407 E> */ B(LdaKeyedProperty), R(arg0), U8(62),
+ /* 407 E> */ B(GetKeyedProperty), R(arg0), U8(62),
B(Star0),
/* 414 S> */ B(Ldar), R(arg1),
- /* 419 E> */ B(LdaKeyedProperty), R(arg0), U8(64),
+ /* 419 E> */ B(GetKeyedProperty), R(arg0), U8(64),
B(Star0),
/* 426 S> */ B(Ldar), R(arg1),
- /* 431 E> */ B(LdaKeyedProperty), R(arg0), U8(66),
+ /* 431 E> */ B(GetKeyedProperty), R(arg0), U8(66),
B(Star0),
/* 438 S> */ B(Ldar), R(arg1),
- /* 443 E> */ B(LdaKeyedProperty), R(arg0), U8(68),
+ /* 443 E> */ B(GetKeyedProperty), R(arg0), U8(68),
B(Star0),
/* 450 S> */ B(Ldar), R(arg1),
- /* 455 E> */ B(LdaKeyedProperty), R(arg0), U8(70),
+ /* 455 E> */ B(GetKeyedProperty), R(arg0), U8(70),
B(Star0),
/* 462 S> */ B(Ldar), R(arg1),
- /* 467 E> */ B(LdaKeyedProperty), R(arg0), U8(72),
+ /* 467 E> */ B(GetKeyedProperty), R(arg0), U8(72),
B(Star0),
/* 474 S> */ B(Ldar), R(arg1),
- /* 479 E> */ B(LdaKeyedProperty), R(arg0), U8(74),
+ /* 479 E> */ B(GetKeyedProperty), R(arg0), U8(74),
B(Star0),
/* 486 S> */ B(Ldar), R(arg1),
- /* 491 E> */ B(LdaKeyedProperty), R(arg0), U8(76),
+ /* 491 E> */ B(GetKeyedProperty), R(arg0), U8(76),
B(Star0),
/* 498 S> */ B(Ldar), R(arg1),
- /* 503 E> */ B(LdaKeyedProperty), R(arg0), U8(78),
+ /* 503 E> */ B(GetKeyedProperty), R(arg0), U8(78),
B(Star0),
/* 510 S> */ B(Ldar), R(arg1),
- /* 515 E> */ B(LdaKeyedProperty), R(arg0), U8(80),
+ /* 515 E> */ B(GetKeyedProperty), R(arg0), U8(80),
B(Star0),
/* 522 S> */ B(Ldar), R(arg1),
- /* 527 E> */ B(LdaKeyedProperty), R(arg0), U8(82),
+ /* 527 E> */ B(GetKeyedProperty), R(arg0), U8(82),
B(Star0),
/* 534 S> */ B(Ldar), R(arg1),
- /* 539 E> */ B(LdaKeyedProperty), R(arg0), U8(84),
+ /* 539 E> */ B(GetKeyedProperty), R(arg0), U8(84),
B(Star0),
/* 546 S> */ B(Ldar), R(arg1),
- /* 551 E> */ B(LdaKeyedProperty), R(arg0), U8(86),
+ /* 551 E> */ B(GetKeyedProperty), R(arg0), U8(86),
B(Star0),
/* 558 S> */ B(Ldar), R(arg1),
- /* 563 E> */ B(LdaKeyedProperty), R(arg0), U8(88),
+ /* 563 E> */ B(GetKeyedProperty), R(arg0), U8(88),
B(Star0),
/* 570 S> */ B(Ldar), R(arg1),
- /* 575 E> */ B(LdaKeyedProperty), R(arg0), U8(90),
+ /* 575 E> */ B(GetKeyedProperty), R(arg0), U8(90),
B(Star0),
/* 582 S> */ B(Ldar), R(arg1),
- /* 587 E> */ B(LdaKeyedProperty), R(arg0), U8(92),
+ /* 587 E> */ B(GetKeyedProperty), R(arg0), U8(92),
B(Star0),
/* 594 S> */ B(Ldar), R(arg1),
- /* 599 E> */ B(LdaKeyedProperty), R(arg0), U8(94),
+ /* 599 E> */ B(GetKeyedProperty), R(arg0), U8(94),
B(Star0),
/* 606 S> */ B(Ldar), R(arg1),
- /* 611 E> */ B(LdaKeyedProperty), R(arg0), U8(96),
+ /* 611 E> */ B(GetKeyedProperty), R(arg0), U8(96),
B(Star0),
/* 618 S> */ B(Ldar), R(arg1),
- /* 623 E> */ B(LdaKeyedProperty), R(arg0), U8(98),
+ /* 623 E> */ B(GetKeyedProperty), R(arg0), U8(98),
B(Star0),
/* 630 S> */ B(Ldar), R(arg1),
- /* 635 E> */ B(LdaKeyedProperty), R(arg0), U8(100),
+ /* 635 E> */ B(GetKeyedProperty), R(arg0), U8(100),
B(Star0),
/* 642 S> */ B(Ldar), R(arg1),
- /* 647 E> */ B(LdaKeyedProperty), R(arg0), U8(102),
+ /* 647 E> */ B(GetKeyedProperty), R(arg0), U8(102),
B(Star0),
/* 654 S> */ B(Ldar), R(arg1),
- /* 659 E> */ B(LdaKeyedProperty), R(arg0), U8(104),
+ /* 659 E> */ B(GetKeyedProperty), R(arg0), U8(104),
B(Star0),
/* 666 S> */ B(Ldar), R(arg1),
- /* 671 E> */ B(LdaKeyedProperty), R(arg0), U8(106),
+ /* 671 E> */ B(GetKeyedProperty), R(arg0), U8(106),
B(Star0),
/* 678 S> */ B(Ldar), R(arg1),
- /* 683 E> */ B(LdaKeyedProperty), R(arg0), U8(108),
+ /* 683 E> */ B(GetKeyedProperty), R(arg0), U8(108),
B(Star0),
/* 690 S> */ B(Ldar), R(arg1),
- /* 695 E> */ B(LdaKeyedProperty), R(arg0), U8(110),
+ /* 695 E> */ B(GetKeyedProperty), R(arg0), U8(110),
B(Star0),
/* 702 S> */ B(Ldar), R(arg1),
- /* 707 E> */ B(LdaKeyedProperty), R(arg0), U8(112),
+ /* 707 E> */ B(GetKeyedProperty), R(arg0), U8(112),
B(Star0),
/* 714 S> */ B(Ldar), R(arg1),
- /* 719 E> */ B(LdaKeyedProperty), R(arg0), U8(114),
+ /* 719 E> */ B(GetKeyedProperty), R(arg0), U8(114),
B(Star0),
/* 726 S> */ B(Ldar), R(arg1),
- /* 731 E> */ B(LdaKeyedProperty), R(arg0), U8(116),
+ /* 731 E> */ B(GetKeyedProperty), R(arg0), U8(116),
B(Star0),
/* 738 S> */ B(Ldar), R(arg1),
- /* 743 E> */ B(LdaKeyedProperty), R(arg0), U8(118),
+ /* 743 E> */ B(GetKeyedProperty), R(arg0), U8(118),
B(Star0),
/* 750 S> */ B(Ldar), R(arg1),
- /* 755 E> */ B(LdaKeyedProperty), R(arg0), U8(120),
+ /* 755 E> */ B(GetKeyedProperty), R(arg0), U8(120),
B(Star0),
/* 762 S> */ B(Ldar), R(arg1),
- /* 767 E> */ B(LdaKeyedProperty), R(arg0), U8(122),
+ /* 767 E> */ B(GetKeyedProperty), R(arg0), U8(122),
B(Star0),
/* 774 S> */ B(Ldar), R(arg1),
- /* 779 E> */ B(LdaKeyedProperty), R(arg0), U8(124),
+ /* 779 E> */ B(GetKeyedProperty), R(arg0), U8(124),
B(Star0),
/* 786 S> */ B(Ldar), R(arg1),
- /* 791 E> */ B(LdaKeyedProperty), R(arg0), U8(126),
+ /* 791 E> */ B(GetKeyedProperty), R(arg0), U8(126),
B(Star0),
/* 798 S> */ B(Ldar), R(arg1),
- /* 803 E> */ B(LdaKeyedProperty), R(arg0), U8(128),
+ /* 803 E> */ B(GetKeyedProperty), R(arg0), U8(128),
B(Star0),
/* 810 S> */ B(Ldar), R(arg1),
- /* 815 E> */ B(LdaKeyedProperty), R(arg0), U8(130),
+ /* 815 E> */ B(GetKeyedProperty), R(arg0), U8(130),
B(Star0),
/* 822 S> */ B(Ldar), R(arg1),
- /* 827 E> */ B(LdaKeyedProperty), R(arg0), U8(132),
+ /* 827 E> */ B(GetKeyedProperty), R(arg0), U8(132),
B(Star0),
/* 834 S> */ B(Ldar), R(arg1),
- /* 839 E> */ B(LdaKeyedProperty), R(arg0), U8(134),
+ /* 839 E> */ B(GetKeyedProperty), R(arg0), U8(134),
B(Star0),
/* 846 S> */ B(Ldar), R(arg1),
- /* 851 E> */ B(LdaKeyedProperty), R(arg0), U8(136),
+ /* 851 E> */ B(GetKeyedProperty), R(arg0), U8(136),
B(Star0),
/* 858 S> */ B(Ldar), R(arg1),
- /* 863 E> */ B(LdaKeyedProperty), R(arg0), U8(138),
+ /* 863 E> */ B(GetKeyedProperty), R(arg0), U8(138),
B(Star0),
/* 870 S> */ B(Ldar), R(arg1),
- /* 875 E> */ B(LdaKeyedProperty), R(arg0), U8(140),
+ /* 875 E> */ B(GetKeyedProperty), R(arg0), U8(140),
B(Star0),
/* 882 S> */ B(Ldar), R(arg1),
- /* 887 E> */ B(LdaKeyedProperty), R(arg0), U8(142),
+ /* 887 E> */ B(GetKeyedProperty), R(arg0), U8(142),
B(Star0),
/* 894 S> */ B(Ldar), R(arg1),
- /* 899 E> */ B(LdaKeyedProperty), R(arg0), U8(144),
+ /* 899 E> */ B(GetKeyedProperty), R(arg0), U8(144),
B(Star0),
/* 906 S> */ B(Ldar), R(arg1),
- /* 911 E> */ B(LdaKeyedProperty), R(arg0), U8(146),
+ /* 911 E> */ B(GetKeyedProperty), R(arg0), U8(146),
B(Star0),
/* 918 S> */ B(Ldar), R(arg1),
- /* 923 E> */ B(LdaKeyedProperty), R(arg0), U8(148),
+ /* 923 E> */ B(GetKeyedProperty), R(arg0), U8(148),
B(Star0),
/* 930 S> */ B(Ldar), R(arg1),
- /* 935 E> */ B(LdaKeyedProperty), R(arg0), U8(150),
+ /* 935 E> */ B(GetKeyedProperty), R(arg0), U8(150),
B(Star0),
/* 942 S> */ B(Ldar), R(arg1),
- /* 947 E> */ B(LdaKeyedProperty), R(arg0), U8(152),
+ /* 947 E> */ B(GetKeyedProperty), R(arg0), U8(152),
B(Star0),
/* 954 S> */ B(Ldar), R(arg1),
- /* 959 E> */ B(LdaKeyedProperty), R(arg0), U8(154),
+ /* 959 E> */ B(GetKeyedProperty), R(arg0), U8(154),
B(Star0),
/* 966 S> */ B(Ldar), R(arg1),
- /* 971 E> */ B(LdaKeyedProperty), R(arg0), U8(156),
+ /* 971 E> */ B(GetKeyedProperty), R(arg0), U8(156),
B(Star0),
/* 978 S> */ B(Ldar), R(arg1),
- /* 983 E> */ B(LdaKeyedProperty), R(arg0), U8(158),
+ /* 983 E> */ B(GetKeyedProperty), R(arg0), U8(158),
B(Star0),
/* 990 S> */ B(Ldar), R(arg1),
- /* 995 E> */ B(LdaKeyedProperty), R(arg0), U8(160),
+ /* 995 E> */ B(GetKeyedProperty), R(arg0), U8(160),
B(Star0),
/* 1002 S> */ B(Ldar), R(arg1),
- /* 1007 E> */ B(LdaKeyedProperty), R(arg0), U8(162),
+ /* 1007 E> */ B(GetKeyedProperty), R(arg0), U8(162),
B(Star0),
/* 1014 S> */ B(Ldar), R(arg1),
- /* 1019 E> */ B(LdaKeyedProperty), R(arg0), U8(164),
+ /* 1019 E> */ B(GetKeyedProperty), R(arg0), U8(164),
B(Star0),
/* 1026 S> */ B(Ldar), R(arg1),
- /* 1031 E> */ B(LdaKeyedProperty), R(arg0), U8(166),
+ /* 1031 E> */ B(GetKeyedProperty), R(arg0), U8(166),
B(Star0),
/* 1038 S> */ B(Ldar), R(arg1),
- /* 1043 E> */ B(LdaKeyedProperty), R(arg0), U8(168),
+ /* 1043 E> */ B(GetKeyedProperty), R(arg0), U8(168),
B(Star0),
/* 1050 S> */ B(Ldar), R(arg1),
- /* 1055 E> */ B(LdaKeyedProperty), R(arg0), U8(170),
+ /* 1055 E> */ B(GetKeyedProperty), R(arg0), U8(170),
B(Star0),
/* 1062 S> */ B(Ldar), R(arg1),
- /* 1067 E> */ B(LdaKeyedProperty), R(arg0), U8(172),
+ /* 1067 E> */ B(GetKeyedProperty), R(arg0), U8(172),
B(Star0),
/* 1074 S> */ B(Ldar), R(arg1),
- /* 1079 E> */ B(LdaKeyedProperty), R(arg0), U8(174),
+ /* 1079 E> */ B(GetKeyedProperty), R(arg0), U8(174),
B(Star0),
/* 1086 S> */ B(Ldar), R(arg1),
- /* 1091 E> */ B(LdaKeyedProperty), R(arg0), U8(176),
+ /* 1091 E> */ B(GetKeyedProperty), R(arg0), U8(176),
B(Star0),
/* 1098 S> */ B(Ldar), R(arg1),
- /* 1103 E> */ B(LdaKeyedProperty), R(arg0), U8(178),
+ /* 1103 E> */ B(GetKeyedProperty), R(arg0), U8(178),
B(Star0),
/* 1110 S> */ B(Ldar), R(arg1),
- /* 1115 E> */ B(LdaKeyedProperty), R(arg0), U8(180),
+ /* 1115 E> */ B(GetKeyedProperty), R(arg0), U8(180),
B(Star0),
/* 1122 S> */ B(Ldar), R(arg1),
- /* 1127 E> */ B(LdaKeyedProperty), R(arg0), U8(182),
+ /* 1127 E> */ B(GetKeyedProperty), R(arg0), U8(182),
B(Star0),
/* 1134 S> */ B(Ldar), R(arg1),
- /* 1139 E> */ B(LdaKeyedProperty), R(arg0), U8(184),
+ /* 1139 E> */ B(GetKeyedProperty), R(arg0), U8(184),
B(Star0),
/* 1146 S> */ B(Ldar), R(arg1),
- /* 1151 E> */ B(LdaKeyedProperty), R(arg0), U8(186),
+ /* 1151 E> */ B(GetKeyedProperty), R(arg0), U8(186),
B(Star0),
/* 1158 S> */ B(Ldar), R(arg1),
- /* 1163 E> */ B(LdaKeyedProperty), R(arg0), U8(188),
+ /* 1163 E> */ B(GetKeyedProperty), R(arg0), U8(188),
B(Star0),
/* 1170 S> */ B(Ldar), R(arg1),
- /* 1175 E> */ B(LdaKeyedProperty), R(arg0), U8(190),
+ /* 1175 E> */ B(GetKeyedProperty), R(arg0), U8(190),
B(Star0),
/* 1182 S> */ B(Ldar), R(arg1),
- /* 1187 E> */ B(LdaKeyedProperty), R(arg0), U8(192),
+ /* 1187 E> */ B(GetKeyedProperty), R(arg0), U8(192),
B(Star0),
/* 1194 S> */ B(Ldar), R(arg1),
- /* 1199 E> */ B(LdaKeyedProperty), R(arg0), U8(194),
+ /* 1199 E> */ B(GetKeyedProperty), R(arg0), U8(194),
B(Star0),
/* 1206 S> */ B(Ldar), R(arg1),
- /* 1211 E> */ B(LdaKeyedProperty), R(arg0), U8(196),
+ /* 1211 E> */ B(GetKeyedProperty), R(arg0), U8(196),
B(Star0),
/* 1218 S> */ B(Ldar), R(arg1),
- /* 1223 E> */ B(LdaKeyedProperty), R(arg0), U8(198),
+ /* 1223 E> */ B(GetKeyedProperty), R(arg0), U8(198),
B(Star0),
/* 1230 S> */ B(Ldar), R(arg1),
- /* 1235 E> */ B(LdaKeyedProperty), R(arg0), U8(200),
+ /* 1235 E> */ B(GetKeyedProperty), R(arg0), U8(200),
B(Star0),
/* 1242 S> */ B(Ldar), R(arg1),
- /* 1247 E> */ B(LdaKeyedProperty), R(arg0), U8(202),
+ /* 1247 E> */ B(GetKeyedProperty), R(arg0), U8(202),
B(Star0),
/* 1254 S> */ B(Ldar), R(arg1),
- /* 1259 E> */ B(LdaKeyedProperty), R(arg0), U8(204),
+ /* 1259 E> */ B(GetKeyedProperty), R(arg0), U8(204),
B(Star0),
/* 1266 S> */ B(Ldar), R(arg1),
- /* 1271 E> */ B(LdaKeyedProperty), R(arg0), U8(206),
+ /* 1271 E> */ B(GetKeyedProperty), R(arg0), U8(206),
B(Star0),
/* 1278 S> */ B(Ldar), R(arg1),
- /* 1283 E> */ B(LdaKeyedProperty), R(arg0), U8(208),
+ /* 1283 E> */ B(GetKeyedProperty), R(arg0), U8(208),
B(Star0),
/* 1290 S> */ B(Ldar), R(arg1),
- /* 1295 E> */ B(LdaKeyedProperty), R(arg0), U8(210),
+ /* 1295 E> */ B(GetKeyedProperty), R(arg0), U8(210),
B(Star0),
/* 1302 S> */ B(Ldar), R(arg1),
- /* 1307 E> */ B(LdaKeyedProperty), R(arg0), U8(212),
+ /* 1307 E> */ B(GetKeyedProperty), R(arg0), U8(212),
B(Star0),
/* 1314 S> */ B(Ldar), R(arg1),
- /* 1319 E> */ B(LdaKeyedProperty), R(arg0), U8(214),
+ /* 1319 E> */ B(GetKeyedProperty), R(arg0), U8(214),
B(Star0),
/* 1326 S> */ B(Ldar), R(arg1),
- /* 1331 E> */ B(LdaKeyedProperty), R(arg0), U8(216),
+ /* 1331 E> */ B(GetKeyedProperty), R(arg0), U8(216),
B(Star0),
/* 1338 S> */ B(Ldar), R(arg1),
- /* 1343 E> */ B(LdaKeyedProperty), R(arg0), U8(218),
+ /* 1343 E> */ B(GetKeyedProperty), R(arg0), U8(218),
B(Star0),
/* 1350 S> */ B(Ldar), R(arg1),
- /* 1355 E> */ B(LdaKeyedProperty), R(arg0), U8(220),
+ /* 1355 E> */ B(GetKeyedProperty), R(arg0), U8(220),
B(Star0),
/* 1362 S> */ B(Ldar), R(arg1),
- /* 1367 E> */ B(LdaKeyedProperty), R(arg0), U8(222),
+ /* 1367 E> */ B(GetKeyedProperty), R(arg0), U8(222),
B(Star0),
/* 1374 S> */ B(Ldar), R(arg1),
- /* 1379 E> */ B(LdaKeyedProperty), R(arg0), U8(224),
+ /* 1379 E> */ B(GetKeyedProperty), R(arg0), U8(224),
B(Star0),
/* 1386 S> */ B(Ldar), R(arg1),
- /* 1391 E> */ B(LdaKeyedProperty), R(arg0), U8(226),
+ /* 1391 E> */ B(GetKeyedProperty), R(arg0), U8(226),
B(Star0),
/* 1398 S> */ B(Ldar), R(arg1),
- /* 1403 E> */ B(LdaKeyedProperty), R(arg0), U8(228),
+ /* 1403 E> */ B(GetKeyedProperty), R(arg0), U8(228),
B(Star0),
/* 1410 S> */ B(Ldar), R(arg1),
- /* 1415 E> */ B(LdaKeyedProperty), R(arg0), U8(230),
+ /* 1415 E> */ B(GetKeyedProperty), R(arg0), U8(230),
B(Star0),
/* 1422 S> */ B(Ldar), R(arg1),
- /* 1427 E> */ B(LdaKeyedProperty), R(arg0), U8(232),
+ /* 1427 E> */ B(GetKeyedProperty), R(arg0), U8(232),
B(Star0),
/* 1434 S> */ B(Ldar), R(arg1),
- /* 1439 E> */ B(LdaKeyedProperty), R(arg0), U8(234),
+ /* 1439 E> */ B(GetKeyedProperty), R(arg0), U8(234),
B(Star0),
/* 1446 S> */ B(Ldar), R(arg1),
- /* 1451 E> */ B(LdaKeyedProperty), R(arg0), U8(236),
+ /* 1451 E> */ B(GetKeyedProperty), R(arg0), U8(236),
B(Star0),
/* 1458 S> */ B(Ldar), R(arg1),
- /* 1463 E> */ B(LdaKeyedProperty), R(arg0), U8(238),
+ /* 1463 E> */ B(GetKeyedProperty), R(arg0), U8(238),
B(Star0),
/* 1470 S> */ B(Ldar), R(arg1),
- /* 1475 E> */ B(LdaKeyedProperty), R(arg0), U8(240),
+ /* 1475 E> */ B(GetKeyedProperty), R(arg0), U8(240),
B(Star0),
/* 1482 S> */ B(Ldar), R(arg1),
- /* 1487 E> */ B(LdaKeyedProperty), R(arg0), U8(242),
+ /* 1487 E> */ B(GetKeyedProperty), R(arg0), U8(242),
B(Star0),
/* 1494 S> */ B(Ldar), R(arg1),
- /* 1499 E> */ B(LdaKeyedProperty), R(arg0), U8(244),
+ /* 1499 E> */ B(GetKeyedProperty), R(arg0), U8(244),
B(Star0),
/* 1506 S> */ B(Ldar), R(arg1),
- /* 1511 E> */ B(LdaKeyedProperty), R(arg0), U8(246),
+ /* 1511 E> */ B(GetKeyedProperty), R(arg0), U8(246),
B(Star0),
/* 1518 S> */ B(Ldar), R(arg1),
- /* 1523 E> */ B(LdaKeyedProperty), R(arg0), U8(248),
+ /* 1523 E> */ B(GetKeyedProperty), R(arg0), U8(248),
B(Star0),
/* 1530 S> */ B(Ldar), R(arg1),
- /* 1535 E> */ B(LdaKeyedProperty), R(arg0), U8(250),
+ /* 1535 E> */ B(GetKeyedProperty), R(arg0), U8(250),
B(Star0),
/* 1542 S> */ B(Ldar), R(arg1),
- /* 1547 E> */ B(LdaKeyedProperty), R(arg0), U8(252),
+ /* 1547 E> */ B(GetKeyedProperty), R(arg0), U8(252),
B(Star0),
/* 1554 S> */ B(Ldar), R(arg1),
- /* 1559 E> */ B(LdaKeyedProperty), R(arg0), U8(254),
+ /* 1559 E> */ B(GetKeyedProperty), R(arg0), U8(254),
B(Star0),
/* 1566 S> */ B(Ldar), R(arg1),
- /* 1574 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(256),
+ /* 1574 E> */ B(Wide), B(GetKeyedProperty), R16(arg0), U16(256),
/* 1578 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 318139541a..fbc284219b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -16,7 +16,7 @@ parameter count: 2
bytecode array length: 8
bytecodes: [
/* 16 S> */ B(LdaConstant), U8(0),
- /* 23 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
+ /* 23 E> */ B(SetNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -37,7 +37,7 @@ parameter count: 2
bytecode array length: 8
bytecodes: [
/* 16 S> */ B(LdaConstant), U8(0),
- /* 25 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
+ /* 25 E> */ B(SetNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -60,7 +60,7 @@ bytecodes: [
/* 16 S> */ B(LdaSmi), I8(100),
B(Star1),
B(LdaConstant), U8(0),
- /* 23 E> */ B(StaKeyedProperty), R(arg0), R(1), U8(0),
+ /* 23 E> */ B(SetKeyedProperty), R(arg0), R(1), U8(0),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -80,7 +80,7 @@ parameter count: 3
bytecode array length: 8
bytecodes: [
/* 19 S> */ B(LdaConstant), U8(0),
- /* 24 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
+ /* 24 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
@@ -100,8 +100,8 @@ parameter count: 2
bytecode array length: 11
bytecodes: [
/* 16 S> */ B(LdaSmi), I8(-124),
- /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
- /* 23 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 26 E> */ B(GetKeyedProperty), R(arg0), U8(0),
+ /* 23 E> */ B(SetNamedProperty), R(arg0), U8(0), U8(2),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -121,7 +121,7 @@ parameter count: 2
bytecode array length: 8
bytecodes: [
/* 30 S> */ B(LdaConstant), U8(0),
- /* 37 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
+ /* 37 E> */ B(SetNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 46 S> */ B(Return),
]
@@ -142,7 +142,7 @@ parameter count: 3
bytecode array length: 8
bytecodes: [
/* 33 S> */ B(LdaConstant), U8(0),
- /* 38 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
+ /* 38 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
@@ -294,139 +294,139 @@ parameter count: 2
bytecode array length: 532
bytecodes: [
/* 18 S> */ B(LdaSmi), I8(1),
- /* 25 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 25 E> */ B(SetNamedProperty), R(arg0), U8(0), U8(0),
/* 40 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 48 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 61 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 74 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 87 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 100 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 113 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 126 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 139 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 152 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 165 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 178 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 191 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 204 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 217 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 230 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 243 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 256 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 269 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 282 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 295 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 308 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 321 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 334 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 347 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 360 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 373 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 386 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 399 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 412 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 425 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 438 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 451 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 464 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 477 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 490 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 503 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 516 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 529 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 542 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 555 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 568 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 581 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 594 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 607 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 620 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 633 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 646 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 659 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 672 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 685 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 698 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 711 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 724 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 737 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 750 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 763 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 776 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 789 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 802 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 815 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 828 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 841 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 854 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 867 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 880 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 893 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 906 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 919 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 932 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 945 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 958 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 971 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 984 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 997 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1010 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1023 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1036 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1049 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1062 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1075 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1088 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1101 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1114 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1127 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1140 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1153 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1166 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1179 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1192 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1205 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1218 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1231 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1244 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1257 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1270 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1283 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1296 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1309 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1322 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1335 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1348 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1361 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1374 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1387 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1400 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1413 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1426 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1439 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1452 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1465 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1478 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1491 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1504 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1517 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1530 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1543 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1556 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1569 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1582 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1595 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1608 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1621 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1634 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1647 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1660 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1673 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1686 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
- /* 1699 S> */ B(Wide), B(LdaNamedProperty), R16(0), U16(128), U16(256),
+ /* 48 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 61 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 74 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 87 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 100 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 113 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 126 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 139 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 152 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 165 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 178 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 191 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 204 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 217 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 230 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 243 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 256 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 269 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 282 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 295 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 308 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 321 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 334 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 347 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 360 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 373 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 386 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 399 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 412 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 425 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 438 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 451 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 464 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 477 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 490 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 503 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 516 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 529 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 542 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 555 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 568 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 581 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 594 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 607 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 620 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 633 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 646 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 659 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 672 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 685 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 698 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 711 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 724 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 737 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 750 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 763 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 776 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 789 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 802 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 815 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 828 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 841 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 854 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 867 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 880 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 893 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 906 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 919 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 932 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 945 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 958 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 971 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 984 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 997 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1010 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1023 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1036 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1049 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1062 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1075 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1088 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1101 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1114 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1127 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1140 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1153 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1166 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1179 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1192 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1205 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1218 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1231 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1244 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1257 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1270 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1283 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1296 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1309 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1322 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1335 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1348 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1361 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1374 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1387 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1400 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1413 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1426 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1439 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1452 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1465 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1478 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1491 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1504 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1517 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1530 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1543 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1556 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1569 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1582 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1595 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1608 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1621 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1634 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1647 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1660 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1673 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1686 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
+ /* 1699 S> */ B(Wide), B(GetNamedProperty), R16(0), U16(128), U16(256),
/* 1710 S> */ B(LdaSmi), I8(2),
- /* 1717 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 1717 E> */ B(SetNamedProperty), R(arg0), U8(0), U8(0),
B(LdaUndefined),
/* 1722 S> */ B(Return),
]
@@ -707,139 +707,139 @@ parameter count: 2
bytecode array length: 532
bytecodes: [
/* 33 S> */ B(LdaSmi), I8(1),
- /* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 40 E> */ B(SetNamedProperty), R(arg0), U8(0), U8(0),
/* 55 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 63 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 76 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 89 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 102 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 115 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 128 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 141 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 154 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 167 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 180 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 193 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 206 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 219 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 232 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 245 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 258 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 271 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 284 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 297 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 310 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 323 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 336 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 349 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 362 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 375 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 388 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 401 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 414 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 427 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 440 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 453 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 466 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 479 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 492 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 505 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 518 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 531 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 544 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 557 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 570 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 583 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 596 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 609 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 622 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 635 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 648 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 661 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 674 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 687 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 700 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 713 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 726 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 739 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 752 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 765 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 778 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 791 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 804 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 817 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 830 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 843 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 856 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 869 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 882 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 895 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 908 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 921 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 934 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 947 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 960 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 973 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 986 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 999 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 1012 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1025 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1038 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1051 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1064 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1077 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1090 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1103 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1116 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1129 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1142 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1155 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1168 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1181 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1194 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1207 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1220 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1233 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1246 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1259 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1272 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1285 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1298 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1311 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1324 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1337 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1350 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1363 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1376 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1389 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1402 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1415 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1428 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1441 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1454 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1467 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1480 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1493 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1506 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1519 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1532 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1545 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1558 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1571 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1584 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1597 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1610 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1623 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1636 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1649 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1662 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1675 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1688 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1701 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
- /* 1714 S> */ B(Wide), B(LdaNamedProperty), R16(0), U16(128), U16(256),
+ /* 63 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 76 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 89 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 102 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 115 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 128 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 141 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 154 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 167 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 180 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 193 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 206 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 219 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 232 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 245 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 258 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 271 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 284 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 297 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 310 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 323 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 336 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 349 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 362 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 375 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 388 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 401 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 414 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 427 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 440 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 453 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 466 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 479 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 492 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 505 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 518 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 531 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 544 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 557 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 570 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 583 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 596 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 609 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 622 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 635 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 648 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 661 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 674 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 687 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 700 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 713 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 726 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 739 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 752 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 765 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 778 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 791 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 804 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 817 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 830 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 843 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 856 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 869 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 882 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 895 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 908 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 921 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 934 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 947 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 960 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 973 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 986 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 999 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 1012 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1025 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1038 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1051 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1064 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1077 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1090 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1103 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1116 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1129 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1142 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1155 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1168 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1181 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1194 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1207 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1220 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1233 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1246 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1259 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1272 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1285 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1298 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1311 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1324 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1337 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1350 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1363 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1376 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1389 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1402 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1415 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1428 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1441 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1454 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1467 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1480 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1493 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1506 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1519 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1532 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1545 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1558 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1571 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1584 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1597 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1610 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1623 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1636 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1649 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1662 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1675 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1688 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1701 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
+ /* 1714 S> */ B(Wide), B(GetNamedProperty), R16(0), U16(128), U16(256),
/* 1725 S> */ B(LdaSmi), I8(2),
- /* 1732 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 1732 E> */ B(SetNamedProperty), R(arg0), U8(0), U8(0),
B(LdaUndefined),
/* 1737 S> */ B(Return),
]
@@ -1117,263 +1117,263 @@ parameter count: 3
bytecode array length: 780
bytecodes: [
/* 21 S> */ B(LdaSmi), I8(1),
- /* 26 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
+ /* 26 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(0),
/* 33 S> */ B(LdaSmi), I8(1),
- /* 38 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(2),
+ /* 38 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(2),
/* 45 S> */ B(LdaSmi), I8(1),
- /* 50 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(4),
+ /* 50 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(4),
/* 57 S> */ B(LdaSmi), I8(1),
- /* 62 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(6),
+ /* 62 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(6),
/* 69 S> */ B(LdaSmi), I8(1),
- /* 74 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(8),
+ /* 74 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(8),
/* 81 S> */ B(LdaSmi), I8(1),
- /* 86 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(10),
+ /* 86 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(10),
/* 93 S> */ B(LdaSmi), I8(1),
- /* 98 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(12),
+ /* 98 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(12),
/* 105 S> */ B(LdaSmi), I8(1),
- /* 110 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(14),
+ /* 110 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(14),
/* 117 S> */ B(LdaSmi), I8(1),
- /* 122 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(16),
+ /* 122 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(16),
/* 129 S> */ B(LdaSmi), I8(1),
- /* 134 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(18),
+ /* 134 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(18),
/* 141 S> */ B(LdaSmi), I8(1),
- /* 146 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(20),
+ /* 146 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(20),
/* 153 S> */ B(LdaSmi), I8(1),
- /* 158 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(22),
+ /* 158 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(22),
/* 165 S> */ B(LdaSmi), I8(1),
- /* 170 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(24),
+ /* 170 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(24),
/* 177 S> */ B(LdaSmi), I8(1),
- /* 182 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(26),
+ /* 182 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(26),
/* 189 S> */ B(LdaSmi), I8(1),
- /* 194 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(28),
+ /* 194 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(28),
/* 201 S> */ B(LdaSmi), I8(1),
- /* 206 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(30),
+ /* 206 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(30),
/* 213 S> */ B(LdaSmi), I8(1),
- /* 218 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(32),
+ /* 218 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(32),
/* 225 S> */ B(LdaSmi), I8(1),
- /* 230 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(34),
+ /* 230 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(34),
/* 237 S> */ B(LdaSmi), I8(1),
- /* 242 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(36),
+ /* 242 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(36),
/* 249 S> */ B(LdaSmi), I8(1),
- /* 254 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(38),
+ /* 254 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(38),
/* 261 S> */ B(LdaSmi), I8(1),
- /* 266 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(40),
+ /* 266 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(40),
/* 273 S> */ B(LdaSmi), I8(1),
- /* 278 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(42),
+ /* 278 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(42),
/* 285 S> */ B(LdaSmi), I8(1),
- /* 290 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(44),
+ /* 290 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(44),
/* 297 S> */ B(LdaSmi), I8(1),
- /* 302 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(46),
+ /* 302 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(46),
/* 309 S> */ B(LdaSmi), I8(1),
- /* 314 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(48),
+ /* 314 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(48),
/* 321 S> */ B(LdaSmi), I8(1),
- /* 326 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(50),
+ /* 326 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(50),
/* 333 S> */ B(LdaSmi), I8(1),
- /* 338 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(52),
+ /* 338 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(52),
/* 345 S> */ B(LdaSmi), I8(1),
- /* 350 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(54),
+ /* 350 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(54),
/* 357 S> */ B(LdaSmi), I8(1),
- /* 362 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(56),
+ /* 362 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(56),
/* 369 S> */ B(LdaSmi), I8(1),
- /* 374 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(58),
+ /* 374 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(58),
/* 381 S> */ B(LdaSmi), I8(1),
- /* 386 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(60),
+ /* 386 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(60),
/* 393 S> */ B(LdaSmi), I8(1),
- /* 398 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(62),
+ /* 398 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(62),
/* 405 S> */ B(LdaSmi), I8(1),
- /* 410 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(64),
+ /* 410 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(64),
/* 417 S> */ B(LdaSmi), I8(1),
- /* 422 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(66),
+ /* 422 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(66),
/* 429 S> */ B(LdaSmi), I8(1),
- /* 434 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(68),
+ /* 434 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(68),
/* 441 S> */ B(LdaSmi), I8(1),
- /* 446 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(70),
+ /* 446 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(70),
/* 453 S> */ B(LdaSmi), I8(1),
- /* 458 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(72),
+ /* 458 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(72),
/* 465 S> */ B(LdaSmi), I8(1),
- /* 470 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(74),
+ /* 470 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(74),
/* 477 S> */ B(LdaSmi), I8(1),
- /* 482 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(76),
+ /* 482 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(76),
/* 489 S> */ B(LdaSmi), I8(1),
- /* 494 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(78),
+ /* 494 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(78),
/* 501 S> */ B(LdaSmi), I8(1),
- /* 506 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(80),
+ /* 506 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(80),
/* 513 S> */ B(LdaSmi), I8(1),
- /* 518 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(82),
+ /* 518 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(82),
/* 525 S> */ B(LdaSmi), I8(1),
- /* 530 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(84),
+ /* 530 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(84),
/* 537 S> */ B(LdaSmi), I8(1),
- /* 542 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(86),
+ /* 542 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(86),
/* 549 S> */ B(LdaSmi), I8(1),
- /* 554 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(88),
+ /* 554 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(88),
/* 561 S> */ B(LdaSmi), I8(1),
- /* 566 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(90),
+ /* 566 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(90),
/* 573 S> */ B(LdaSmi), I8(1),
- /* 578 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(92),
+ /* 578 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(92),
/* 585 S> */ B(LdaSmi), I8(1),
- /* 590 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(94),
+ /* 590 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(94),
/* 597 S> */ B(LdaSmi), I8(1),
- /* 602 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(96),
+ /* 602 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(96),
/* 609 S> */ B(LdaSmi), I8(1),
- /* 614 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(98),
+ /* 614 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(98),
/* 621 S> */ B(LdaSmi), I8(1),
- /* 626 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(100),
+ /* 626 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(100),
/* 633 S> */ B(LdaSmi), I8(1),
- /* 638 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(102),
+ /* 638 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(102),
/* 645 S> */ B(LdaSmi), I8(1),
- /* 650 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(104),
+ /* 650 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(104),
/* 657 S> */ B(LdaSmi), I8(1),
- /* 662 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(106),
+ /* 662 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(106),
/* 669 S> */ B(LdaSmi), I8(1),
- /* 674 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(108),
+ /* 674 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(108),
/* 681 S> */ B(LdaSmi), I8(1),
- /* 686 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(110),
+ /* 686 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(110),
/* 693 S> */ B(LdaSmi), I8(1),
- /* 698 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(112),
+ /* 698 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(112),
/* 705 S> */ B(LdaSmi), I8(1),
- /* 710 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(114),
+ /* 710 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(114),
/* 717 S> */ B(LdaSmi), I8(1),
- /* 722 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(116),
+ /* 722 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(116),
/* 729 S> */ B(LdaSmi), I8(1),
- /* 734 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(118),
+ /* 734 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(118),
/* 741 S> */ B(LdaSmi), I8(1),
- /* 746 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(120),
+ /* 746 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(120),
/* 753 S> */ B(LdaSmi), I8(1),
- /* 758 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(122),
+ /* 758 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(122),
/* 765 S> */ B(LdaSmi), I8(1),
- /* 770 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(124),
+ /* 770 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(124),
/* 777 S> */ B(LdaSmi), I8(1),
- /* 782 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(126),
+ /* 782 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(126),
/* 789 S> */ B(LdaSmi), I8(1),
- /* 794 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(128),
+ /* 794 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(128),
/* 801 S> */ B(LdaSmi), I8(1),
- /* 806 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(130),
+ /* 806 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(130),
/* 813 S> */ B(LdaSmi), I8(1),
- /* 818 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(132),
+ /* 818 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(132),
/* 825 S> */ B(LdaSmi), I8(1),
- /* 830 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(134),
+ /* 830 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(134),
/* 837 S> */ B(LdaSmi), I8(1),
- /* 842 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(136),
+ /* 842 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(136),
/* 849 S> */ B(LdaSmi), I8(1),
- /* 854 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(138),
+ /* 854 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(138),
/* 861 S> */ B(LdaSmi), I8(1),
- /* 866 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(140),
+ /* 866 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(140),
/* 873 S> */ B(LdaSmi), I8(1),
- /* 878 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(142),
+ /* 878 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(142),
/* 885 S> */ B(LdaSmi), I8(1),
- /* 890 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(144),
+ /* 890 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(144),
/* 897 S> */ B(LdaSmi), I8(1),
- /* 902 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(146),
+ /* 902 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(146),
/* 909 S> */ B(LdaSmi), I8(1),
- /* 914 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(148),
+ /* 914 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(148),
/* 921 S> */ B(LdaSmi), I8(1),
- /* 926 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(150),
+ /* 926 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(150),
/* 933 S> */ B(LdaSmi), I8(1),
- /* 938 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(152),
+ /* 938 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(152),
/* 945 S> */ B(LdaSmi), I8(1),
- /* 950 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(154),
+ /* 950 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(154),
/* 957 S> */ B(LdaSmi), I8(1),
- /* 962 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(156),
+ /* 962 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(156),
/* 969 S> */ B(LdaSmi), I8(1),
- /* 974 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(158),
+ /* 974 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(158),
/* 981 S> */ B(LdaSmi), I8(1),
- /* 986 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(160),
+ /* 986 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(160),
/* 993 S> */ B(LdaSmi), I8(1),
- /* 998 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(162),
+ /* 998 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(162),
/* 1005 S> */ B(LdaSmi), I8(1),
- /* 1010 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(164),
+ /* 1010 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(164),
/* 1017 S> */ B(LdaSmi), I8(1),
- /* 1022 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(166),
+ /* 1022 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(166),
/* 1029 S> */ B(LdaSmi), I8(1),
- /* 1034 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(168),
+ /* 1034 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(168),
/* 1041 S> */ B(LdaSmi), I8(1),
- /* 1046 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(170),
+ /* 1046 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(170),
/* 1053 S> */ B(LdaSmi), I8(1),
- /* 1058 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(172),
+ /* 1058 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(172),
/* 1065 S> */ B(LdaSmi), I8(1),
- /* 1070 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(174),
+ /* 1070 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(174),
/* 1077 S> */ B(LdaSmi), I8(1),
- /* 1082 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(176),
+ /* 1082 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(176),
/* 1089 S> */ B(LdaSmi), I8(1),
- /* 1094 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(178),
+ /* 1094 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(178),
/* 1101 S> */ B(LdaSmi), I8(1),
- /* 1106 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(180),
+ /* 1106 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(180),
/* 1113 S> */ B(LdaSmi), I8(1),
- /* 1118 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(182),
+ /* 1118 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(182),
/* 1125 S> */ B(LdaSmi), I8(1),
- /* 1130 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(184),
+ /* 1130 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(184),
/* 1137 S> */ B(LdaSmi), I8(1),
- /* 1142 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(186),
+ /* 1142 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(186),
/* 1149 S> */ B(LdaSmi), I8(1),
- /* 1154 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(188),
+ /* 1154 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(188),
/* 1161 S> */ B(LdaSmi), I8(1),
- /* 1166 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(190),
+ /* 1166 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(190),
/* 1173 S> */ B(LdaSmi), I8(1),
- /* 1178 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(192),
+ /* 1178 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(192),
/* 1185 S> */ B(LdaSmi), I8(1),
- /* 1190 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(194),
+ /* 1190 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(194),
/* 1197 S> */ B(LdaSmi), I8(1),
- /* 1202 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(196),
+ /* 1202 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(196),
/* 1209 S> */ B(LdaSmi), I8(1),
- /* 1214 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(198),
+ /* 1214 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(198),
/* 1221 S> */ B(LdaSmi), I8(1),
- /* 1226 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(200),
+ /* 1226 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(200),
/* 1233 S> */ B(LdaSmi), I8(1),
- /* 1238 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(202),
+ /* 1238 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(202),
/* 1245 S> */ B(LdaSmi), I8(1),
- /* 1250 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(204),
+ /* 1250 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(204),
/* 1257 S> */ B(LdaSmi), I8(1),
- /* 1262 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(206),
+ /* 1262 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(206),
/* 1269 S> */ B(LdaSmi), I8(1),
- /* 1274 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(208),
+ /* 1274 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(208),
/* 1281 S> */ B(LdaSmi), I8(1),
- /* 1286 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(210),
+ /* 1286 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(210),
/* 1293 S> */ B(LdaSmi), I8(1),
- /* 1298 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(212),
+ /* 1298 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(212),
/* 1305 S> */ B(LdaSmi), I8(1),
- /* 1310 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(214),
+ /* 1310 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(214),
/* 1317 S> */ B(LdaSmi), I8(1),
- /* 1322 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(216),
+ /* 1322 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(216),
/* 1329 S> */ B(LdaSmi), I8(1),
- /* 1334 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(218),
+ /* 1334 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(218),
/* 1341 S> */ B(LdaSmi), I8(1),
- /* 1346 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(220),
+ /* 1346 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(220),
/* 1353 S> */ B(LdaSmi), I8(1),
- /* 1358 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(222),
+ /* 1358 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(222),
/* 1365 S> */ B(LdaSmi), I8(1),
- /* 1370 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(224),
+ /* 1370 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(224),
/* 1377 S> */ B(LdaSmi), I8(1),
- /* 1382 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(226),
+ /* 1382 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(226),
/* 1389 S> */ B(LdaSmi), I8(1),
- /* 1394 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(228),
+ /* 1394 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(228),
/* 1401 S> */ B(LdaSmi), I8(1),
- /* 1406 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(230),
+ /* 1406 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(230),
/* 1413 S> */ B(LdaSmi), I8(1),
- /* 1418 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(232),
+ /* 1418 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(232),
/* 1425 S> */ B(LdaSmi), I8(1),
- /* 1430 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(234),
+ /* 1430 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(234),
/* 1437 S> */ B(LdaSmi), I8(1),
- /* 1442 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(236),
+ /* 1442 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(236),
/* 1449 S> */ B(LdaSmi), I8(1),
- /* 1454 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(238),
+ /* 1454 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(238),
/* 1461 S> */ B(LdaSmi), I8(1),
- /* 1466 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(240),
+ /* 1466 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(240),
/* 1473 S> */ B(LdaSmi), I8(1),
- /* 1478 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(242),
+ /* 1478 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(242),
/* 1485 S> */ B(LdaSmi), I8(1),
- /* 1490 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(244),
+ /* 1490 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(244),
/* 1497 S> */ B(LdaSmi), I8(1),
- /* 1502 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(246),
+ /* 1502 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(246),
/* 1509 S> */ B(LdaSmi), I8(1),
- /* 1514 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(248),
+ /* 1514 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(248),
/* 1521 S> */ B(LdaSmi), I8(1),
- /* 1526 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(250),
+ /* 1526 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(250),
/* 1533 S> */ B(LdaSmi), I8(1),
- /* 1538 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(252),
+ /* 1538 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(252),
/* 1545 S> */ B(LdaSmi), I8(1),
- /* 1550 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(254),
+ /* 1550 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(254),
/* 1557 S> */ B(LdaSmi), I8(2),
- /* 1562 E> */ B(Wide), B(StaKeyedProperty), R16(arg0), R16(arg1), U16(256),
+ /* 1562 E> */ B(Wide), B(SetKeyedProperty), R16(arg0), R16(arg1), U16(256),
B(LdaUndefined),
/* 1567 S> */ B(Return),
]
@@ -1523,263 +1523,263 @@ parameter count: 3
bytecode array length: 780
bytecodes: [
/* 37 S> */ B(LdaSmi), I8(1),
- /* 42 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
+ /* 42 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(0),
/* 49 S> */ B(LdaSmi), I8(1),
- /* 54 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(2),
+ /* 54 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(2),
/* 61 S> */ B(LdaSmi), I8(1),
- /* 66 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(4),
+ /* 66 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(4),
/* 73 S> */ B(LdaSmi), I8(1),
- /* 78 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(6),
+ /* 78 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(6),
/* 85 S> */ B(LdaSmi), I8(1),
- /* 90 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(8),
+ /* 90 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(8),
/* 97 S> */ B(LdaSmi), I8(1),
- /* 102 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(10),
+ /* 102 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(10),
/* 109 S> */ B(LdaSmi), I8(1),
- /* 114 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(12),
+ /* 114 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(12),
/* 121 S> */ B(LdaSmi), I8(1),
- /* 126 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(14),
+ /* 126 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(14),
/* 133 S> */ B(LdaSmi), I8(1),
- /* 138 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(16),
+ /* 138 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(16),
/* 145 S> */ B(LdaSmi), I8(1),
- /* 150 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(18),
+ /* 150 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(18),
/* 157 S> */ B(LdaSmi), I8(1),
- /* 162 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(20),
+ /* 162 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(20),
/* 169 S> */ B(LdaSmi), I8(1),
- /* 174 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(22),
+ /* 174 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(22),
/* 181 S> */ B(LdaSmi), I8(1),
- /* 186 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(24),
+ /* 186 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(24),
/* 193 S> */ B(LdaSmi), I8(1),
- /* 198 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(26),
+ /* 198 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(26),
/* 205 S> */ B(LdaSmi), I8(1),
- /* 210 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(28),
+ /* 210 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(28),
/* 217 S> */ B(LdaSmi), I8(1),
- /* 222 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(30),
+ /* 222 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(30),
/* 229 S> */ B(LdaSmi), I8(1),
- /* 234 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(32),
+ /* 234 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(32),
/* 241 S> */ B(LdaSmi), I8(1),
- /* 246 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(34),
+ /* 246 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(34),
/* 253 S> */ B(LdaSmi), I8(1),
- /* 258 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(36),
+ /* 258 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(36),
/* 265 S> */ B(LdaSmi), I8(1),
- /* 270 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(38),
+ /* 270 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(38),
/* 277 S> */ B(LdaSmi), I8(1),
- /* 282 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(40),
+ /* 282 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(40),
/* 289 S> */ B(LdaSmi), I8(1),
- /* 294 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(42),
+ /* 294 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(42),
/* 301 S> */ B(LdaSmi), I8(1),
- /* 306 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(44),
+ /* 306 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(44),
/* 313 S> */ B(LdaSmi), I8(1),
- /* 318 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(46),
+ /* 318 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(46),
/* 325 S> */ B(LdaSmi), I8(1),
- /* 330 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(48),
+ /* 330 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(48),
/* 337 S> */ B(LdaSmi), I8(1),
- /* 342 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(50),
+ /* 342 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(50),
/* 349 S> */ B(LdaSmi), I8(1),
- /* 354 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(52),
+ /* 354 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(52),
/* 361 S> */ B(LdaSmi), I8(1),
- /* 366 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(54),
+ /* 366 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(54),
/* 373 S> */ B(LdaSmi), I8(1),
- /* 378 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(56),
+ /* 378 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(56),
/* 385 S> */ B(LdaSmi), I8(1),
- /* 390 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(58),
+ /* 390 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(58),
/* 397 S> */ B(LdaSmi), I8(1),
- /* 402 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(60),
+ /* 402 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(60),
/* 409 S> */ B(LdaSmi), I8(1),
- /* 414 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(62),
+ /* 414 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(62),
/* 421 S> */ B(LdaSmi), I8(1),
- /* 426 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(64),
+ /* 426 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(64),
/* 433 S> */ B(LdaSmi), I8(1),
- /* 438 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(66),
+ /* 438 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(66),
/* 445 S> */ B(LdaSmi), I8(1),
- /* 450 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(68),
+ /* 450 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(68),
/* 457 S> */ B(LdaSmi), I8(1),
- /* 462 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(70),
+ /* 462 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(70),
/* 469 S> */ B(LdaSmi), I8(1),
- /* 474 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(72),
+ /* 474 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(72),
/* 481 S> */ B(LdaSmi), I8(1),
- /* 486 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(74),
+ /* 486 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(74),
/* 493 S> */ B(LdaSmi), I8(1),
- /* 498 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(76),
+ /* 498 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(76),
/* 505 S> */ B(LdaSmi), I8(1),
- /* 510 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(78),
+ /* 510 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(78),
/* 517 S> */ B(LdaSmi), I8(1),
- /* 522 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(80),
+ /* 522 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(80),
/* 529 S> */ B(LdaSmi), I8(1),
- /* 534 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(82),
+ /* 534 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(82),
/* 541 S> */ B(LdaSmi), I8(1),
- /* 546 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(84),
+ /* 546 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(84),
/* 553 S> */ B(LdaSmi), I8(1),
- /* 558 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(86),
+ /* 558 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(86),
/* 565 S> */ B(LdaSmi), I8(1),
- /* 570 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(88),
+ /* 570 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(88),
/* 577 S> */ B(LdaSmi), I8(1),
- /* 582 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(90),
+ /* 582 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(90),
/* 589 S> */ B(LdaSmi), I8(1),
- /* 594 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(92),
+ /* 594 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(92),
/* 601 S> */ B(LdaSmi), I8(1),
- /* 606 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(94),
+ /* 606 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(94),
/* 613 S> */ B(LdaSmi), I8(1),
- /* 618 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(96),
+ /* 618 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(96),
/* 625 S> */ B(LdaSmi), I8(1),
- /* 630 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(98),
+ /* 630 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(98),
/* 637 S> */ B(LdaSmi), I8(1),
- /* 642 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(100),
+ /* 642 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(100),
/* 649 S> */ B(LdaSmi), I8(1),
- /* 654 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(102),
+ /* 654 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(102),
/* 661 S> */ B(LdaSmi), I8(1),
- /* 666 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(104),
+ /* 666 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(104),
/* 673 S> */ B(LdaSmi), I8(1),
- /* 678 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(106),
+ /* 678 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(106),
/* 685 S> */ B(LdaSmi), I8(1),
- /* 690 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(108),
+ /* 690 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(108),
/* 697 S> */ B(LdaSmi), I8(1),
- /* 702 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(110),
+ /* 702 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(110),
/* 709 S> */ B(LdaSmi), I8(1),
- /* 714 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(112),
+ /* 714 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(112),
/* 721 S> */ B(LdaSmi), I8(1),
- /* 726 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(114),
+ /* 726 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(114),
/* 733 S> */ B(LdaSmi), I8(1),
- /* 738 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(116),
+ /* 738 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(116),
/* 745 S> */ B(LdaSmi), I8(1),
- /* 750 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(118),
+ /* 750 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(118),
/* 757 S> */ B(LdaSmi), I8(1),
- /* 762 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(120),
+ /* 762 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(120),
/* 769 S> */ B(LdaSmi), I8(1),
- /* 774 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(122),
+ /* 774 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(122),
/* 781 S> */ B(LdaSmi), I8(1),
- /* 786 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(124),
+ /* 786 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(124),
/* 793 S> */ B(LdaSmi), I8(1),
- /* 798 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(126),
+ /* 798 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(126),
/* 805 S> */ B(LdaSmi), I8(1),
- /* 810 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(128),
+ /* 810 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(128),
/* 817 S> */ B(LdaSmi), I8(1),
- /* 822 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(130),
+ /* 822 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(130),
/* 829 S> */ B(LdaSmi), I8(1),
- /* 834 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(132),
+ /* 834 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(132),
/* 841 S> */ B(LdaSmi), I8(1),
- /* 846 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(134),
+ /* 846 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(134),
/* 853 S> */ B(LdaSmi), I8(1),
- /* 858 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(136),
+ /* 858 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(136),
/* 865 S> */ B(LdaSmi), I8(1),
- /* 870 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(138),
+ /* 870 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(138),
/* 877 S> */ B(LdaSmi), I8(1),
- /* 882 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(140),
+ /* 882 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(140),
/* 889 S> */ B(LdaSmi), I8(1),
- /* 894 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(142),
+ /* 894 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(142),
/* 901 S> */ B(LdaSmi), I8(1),
- /* 906 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(144),
+ /* 906 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(144),
/* 913 S> */ B(LdaSmi), I8(1),
- /* 918 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(146),
+ /* 918 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(146),
/* 925 S> */ B(LdaSmi), I8(1),
- /* 930 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(148),
+ /* 930 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(148),
/* 937 S> */ B(LdaSmi), I8(1),
- /* 942 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(150),
+ /* 942 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(150),
/* 949 S> */ B(LdaSmi), I8(1),
- /* 954 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(152),
+ /* 954 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(152),
/* 961 S> */ B(LdaSmi), I8(1),
- /* 966 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(154),
+ /* 966 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(154),
/* 973 S> */ B(LdaSmi), I8(1),
- /* 978 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(156),
+ /* 978 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(156),
/* 985 S> */ B(LdaSmi), I8(1),
- /* 990 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(158),
+ /* 990 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(158),
/* 997 S> */ B(LdaSmi), I8(1),
- /* 1002 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(160),
+ /* 1002 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(160),
/* 1009 S> */ B(LdaSmi), I8(1),
- /* 1014 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(162),
+ /* 1014 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(162),
/* 1021 S> */ B(LdaSmi), I8(1),
- /* 1026 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(164),
+ /* 1026 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(164),
/* 1033 S> */ B(LdaSmi), I8(1),
- /* 1038 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(166),
+ /* 1038 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(166),
/* 1045 S> */ B(LdaSmi), I8(1),
- /* 1050 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(168),
+ /* 1050 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(168),
/* 1057 S> */ B(LdaSmi), I8(1),
- /* 1062 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(170),
+ /* 1062 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(170),
/* 1069 S> */ B(LdaSmi), I8(1),
- /* 1074 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(172),
+ /* 1074 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(172),
/* 1081 S> */ B(LdaSmi), I8(1),
- /* 1086 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(174),
+ /* 1086 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(174),
/* 1093 S> */ B(LdaSmi), I8(1),
- /* 1098 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(176),
+ /* 1098 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(176),
/* 1105 S> */ B(LdaSmi), I8(1),
- /* 1110 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(178),
+ /* 1110 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(178),
/* 1117 S> */ B(LdaSmi), I8(1),
- /* 1122 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(180),
+ /* 1122 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(180),
/* 1129 S> */ B(LdaSmi), I8(1),
- /* 1134 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(182),
+ /* 1134 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(182),
/* 1141 S> */ B(LdaSmi), I8(1),
- /* 1146 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(184),
+ /* 1146 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(184),
/* 1153 S> */ B(LdaSmi), I8(1),
- /* 1158 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(186),
+ /* 1158 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(186),
/* 1165 S> */ B(LdaSmi), I8(1),
- /* 1170 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(188),
+ /* 1170 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(188),
/* 1177 S> */ B(LdaSmi), I8(1),
- /* 1182 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(190),
+ /* 1182 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(190),
/* 1189 S> */ B(LdaSmi), I8(1),
- /* 1194 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(192),
+ /* 1194 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(192),
/* 1201 S> */ B(LdaSmi), I8(1),
- /* 1206 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(194),
+ /* 1206 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(194),
/* 1213 S> */ B(LdaSmi), I8(1),
- /* 1218 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(196),
+ /* 1218 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(196),
/* 1225 S> */ B(LdaSmi), I8(1),
- /* 1230 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(198),
+ /* 1230 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(198),
/* 1237 S> */ B(LdaSmi), I8(1),
- /* 1242 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(200),
+ /* 1242 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(200),
/* 1249 S> */ B(LdaSmi), I8(1),
- /* 1254 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(202),
+ /* 1254 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(202),
/* 1261 S> */ B(LdaSmi), I8(1),
- /* 1266 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(204),
+ /* 1266 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(204),
/* 1273 S> */ B(LdaSmi), I8(1),
- /* 1278 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(206),
+ /* 1278 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(206),
/* 1285 S> */ B(LdaSmi), I8(1),
- /* 1290 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(208),
+ /* 1290 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(208),
/* 1297 S> */ B(LdaSmi), I8(1),
- /* 1302 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(210),
+ /* 1302 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(210),
/* 1309 S> */ B(LdaSmi), I8(1),
- /* 1314 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(212),
+ /* 1314 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(212),
/* 1321 S> */ B(LdaSmi), I8(1),
- /* 1326 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(214),
+ /* 1326 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(214),
/* 1333 S> */ B(LdaSmi), I8(1),
- /* 1338 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(216),
+ /* 1338 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(216),
/* 1345 S> */ B(LdaSmi), I8(1),
- /* 1350 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(218),
+ /* 1350 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(218),
/* 1357 S> */ B(LdaSmi), I8(1),
- /* 1362 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(220),
+ /* 1362 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(220),
/* 1369 S> */ B(LdaSmi), I8(1),
- /* 1374 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(222),
+ /* 1374 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(222),
/* 1381 S> */ B(LdaSmi), I8(1),
- /* 1386 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(224),
+ /* 1386 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(224),
/* 1393 S> */ B(LdaSmi), I8(1),
- /* 1398 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(226),
+ /* 1398 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(226),
/* 1405 S> */ B(LdaSmi), I8(1),
- /* 1410 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(228),
+ /* 1410 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(228),
/* 1417 S> */ B(LdaSmi), I8(1),
- /* 1422 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(230),
+ /* 1422 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(230),
/* 1429 S> */ B(LdaSmi), I8(1),
- /* 1434 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(232),
+ /* 1434 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(232),
/* 1441 S> */ B(LdaSmi), I8(1),
- /* 1446 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(234),
+ /* 1446 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(234),
/* 1453 S> */ B(LdaSmi), I8(1),
- /* 1458 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(236),
+ /* 1458 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(236),
/* 1465 S> */ B(LdaSmi), I8(1),
- /* 1470 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(238),
+ /* 1470 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(238),
/* 1477 S> */ B(LdaSmi), I8(1),
- /* 1482 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(240),
+ /* 1482 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(240),
/* 1489 S> */ B(LdaSmi), I8(1),
- /* 1494 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(242),
+ /* 1494 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(242),
/* 1501 S> */ B(LdaSmi), I8(1),
- /* 1506 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(244),
+ /* 1506 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(244),
/* 1513 S> */ B(LdaSmi), I8(1),
- /* 1518 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(246),
+ /* 1518 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(246),
/* 1525 S> */ B(LdaSmi), I8(1),
- /* 1530 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(248),
+ /* 1530 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(248),
/* 1537 S> */ B(LdaSmi), I8(1),
- /* 1542 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(250),
+ /* 1542 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(250),
/* 1549 S> */ B(LdaSmi), I8(1),
- /* 1554 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(252),
+ /* 1554 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(252),
/* 1561 S> */ B(LdaSmi), I8(1),
- /* 1566 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(254),
+ /* 1566 E> */ B(SetKeyedProperty), R(arg0), R(arg1), U8(254),
/* 1573 S> */ B(LdaSmi), I8(2),
- /* 1578 E> */ B(Wide), B(StaKeyedProperty), R16(arg0), R16(arg1), U16(256),
+ /* 1578 E> */ B(Wide), B(SetKeyedProperty), R16(arg0), R16(arg1), U16(256),
B(LdaUndefined),
/* 1583 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index d91b3444fc..06aed04fa9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -43,7 +43,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(5), U8(0),
+ B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(3), R(0),
B(CreateBlockContext), U8(6),
@@ -64,7 +64,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(9), U8(3), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(5), U8(2),
+ B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 120 S> */ B(Ldar), R(0),
@@ -149,7 +149,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(6), U8(2), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(0),
+ B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(4), R(0),
B(CreateBlockContext), U8(8),
@@ -180,7 +180,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(14), U8(6), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(2),
+ B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(4), R(1),
B(CreateBlockContext), U8(15),
@@ -200,7 +200,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(18), U8(8), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(7), U8(4),
+ B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(4), R(2),
/* 329 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index b8cdbfb37e..81c7a026bc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -49,7 +49,7 @@ bytecode array length: 19
bytecodes: [
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
B(Star1),
- /* 48 E> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ /* 48 E> */ B(GetNamedProperty), R(1), U8(1), U8(1),
B(Star0),
B(LdaConstant), U8(2),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index c62a6489e7..aa6b65d4bc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -216,9 +216,9 @@ bytecode array length: 37
bytecodes: [
/* 37 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star3),
- /* 28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
+ /* 28 S> */ B(GetNamedProperty), R(3), U8(1), U8(1),
B(Star0),
- /* 31 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
+ /* 31 S> */ B(GetNamedProperty), R(3), U8(2), U8(3),
B(Star1),
/* 55 S> */ B(LdaZero),
/* 55 E> */ B(TestGreaterThan), R(1), U8(5),
@@ -355,9 +355,9 @@ snippet: "
}
f();
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 70
+bytecode array length: 66
bytecodes: [
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
@@ -376,10 +376,8 @@ bytecodes: [
/* 23 E> */ B(JumpLoop), U8(15), I8(0),
B(LdaUndefined),
B(Star5),
- B(LdaFalse),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(2),
/* 67 S> */ B(Return),
B(Star4),
B(CreateCatchContext), R(4), U8(0),
@@ -390,17 +388,15 @@ bytecodes: [
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star6),
- B(LdaFalse),
- B(Star7),
B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(2),
B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [14, 46, 46],
+ [14, 44, 44],
]
---
@@ -410,9 +406,9 @@ snippet: "
}
f();
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 104
+bytecode array length: 100
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
@@ -444,10 +440,8 @@ bytecodes: [
/* 23 E> */ B(JumpLoop), U8(45), I8(0),
B(LdaUndefined),
B(Star4),
- B(LdaTrue),
- B(Star5),
B(Mov), R(0), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(2),
/* 61 S> */ B(Return),
B(Star3),
B(CreateCatchContext), R(3), U8(1),
@@ -458,10 +452,8 @@ bytecodes: [
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
B(Star5),
- B(LdaTrue),
- B(Star6),
B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(2),
B(Return),
]
constant pool: [
@@ -469,6 +461,6 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [18, 80, 80],
+ [18, 78, 78],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index e9177ee2f6..e098ba2a76 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -57,7 +57,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(6), U8(1), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(7), U8(1),
+ B(SetNamedProperty), R(3), U8(7), U8(1),
B(CreateClosure), U8(8), U8(2), U8(2),
B(Star7),
B(CallProperty0), R(7), R(3), U8(3),
@@ -91,7 +91,7 @@ bytecodes: [
B(Star4),
B(CreateClosure), U8(12), U8(4), U8(2),
B(Star5),
- B(StaNamedProperty), R(3), U8(7), U8(5),
+ B(SetNamedProperty), R(3), U8(7), U8(5),
B(CreateClosure), U8(13), U8(5), U8(2),
B(Star7),
B(CallProperty0), R(7), R(3), U8(7),
@@ -199,7 +199,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(8), U8(2), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(9), U8(1),
+ B(SetNamedProperty), R(4), U8(9), U8(1),
B(CreateClosure), U8(10), U8(3), U8(2),
B(Star8),
B(CallProperty0), R(8), R(4), U8(3),
@@ -243,7 +243,7 @@ bytecodes: [
B(Star5),
B(CreateClosure), U8(17), U8(7), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(9), U8(5),
+ B(SetNamedProperty), R(4), U8(9), U8(5),
B(CreateClosure), U8(18), U8(8), U8(2),
B(Star8),
B(CallProperty0), R(8), R(4), U8(7),
@@ -279,7 +279,7 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(22), U8(10), U8(2),
B(Star6),
- B(StaNamedProperty), R(4), U8(9), U8(9),
+ B(SetNamedProperty), R(4), U8(9), U8(9),
B(CreateClosure), U8(23), U8(11), U8(2),
B(Star8),
B(CallProperty0), R(8), R(4), U8(11),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 9e94ebfc76..846fe89559 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -24,7 +24,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -51,7 +51,7 @@ snippet: "
var test = B.test;
test();
"
-frame size: 5
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -59,17 +59,17 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(284),
- B(Star3),
+ B(Wide), B(LdaSmi), I16(294),
+ B(Star1),
B(LdaConstant), U8(1),
- B(Star4),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Star2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
constant pool: [
@@ -89,7 +89,7 @@ snippet: "
var test = C.test;
test();
"
-frame size: 5
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -97,17 +97,17 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(284),
- B(Star3),
+ B(Wide), B(LdaSmi), I16(294),
+ B(Star1),
B(LdaConstant), U8(1),
- B(Star4),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Star2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
constant pool: [
@@ -133,7 +133,7 @@ snippet: "
var test = D.test;
test();
"
-frame size: 6
+frame size: 5
parameter count: 1
bytecode array length: 127
bytecodes: [
@@ -143,20 +143,20 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 95 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
- B(Star4),
- B(CallProperty0), R(4), R(0), U8(0),
+ B(Star2),
+ B(CallProperty0), R(2), R(0), U8(0),
B(Inc), U8(2),
- B(Star4),
+ B(Star2),
/* 97 E> */ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(1), U8(1),
- B(Star5),
- B(CallProperty1), R(5), R(0), R(4), U8(3),
+ B(Star3),
+ B(CallProperty1), R(3), R(0), R(2), U8(3),
/* 105 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star1),
B(LdaSmi), I8(1),
@@ -165,30 +165,30 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
/* 110 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(1), U8(1),
- B(Star5),
- B(CallProperty1), R(5), R(0), R(2), U8(5),
+ B(Star3),
+ B(CallProperty1), R(3), R(0), R(2), U8(5),
/* 122 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star1),
B(LdaCurrentContextSlot), U8(3),
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 134 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
- B(Star4),
- B(CallProperty0), R(4), R(0), U8(7),
+ B(Star2),
+ B(CallProperty0), R(2), R(0), U8(7),
/* 137 S> */ B(Return),
]
constant pool: [
@@ -206,7 +206,7 @@ snippet: "
var test = E.test;
test();
"
-frame size: 5
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -214,17 +214,17 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(286),
- B(Star3),
+ B(Wide), B(LdaSmi), I16(296),
+ B(Star1),
B(LdaConstant), U8(1),
- B(Star4),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Star2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
constant pool: [
@@ -243,7 +243,7 @@ snippet: "
var test = F.test;
test();
"
-frame size: 5
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -251,17 +251,17 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 58 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(285),
- B(Star3),
+ B(Wide), B(LdaSmi), I16(295),
+ B(Star1),
B(LdaConstant), U8(1),
- B(Star4),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Star2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
constant pool: [
@@ -280,7 +280,7 @@ snippet: "
var test = G.test;
test();
"
-frame size: 5
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -288,17 +288,17 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(288),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(286),
- B(Star3),
+ B(Wide), B(LdaSmi), I16(296),
+ B(Star1),
B(LdaConstant), U8(1),
- B(Star4),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Star2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
constant pool: [
@@ -322,8 +322,8 @@ parameter count: 1
bytecode array length: 19
bytecodes: [
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 51 E> */ B(LdaKeyedProperty), R(this), U8(0),
- B(Wide), B(LdaSmi), I16(285),
+ /* 51 E> */ B(GetKeyedProperty), R(this), U8(0),
+ B(Wide), B(LdaSmi), I16(295),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 322e08134a..3f2442811f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -232,134 +232,134 @@ bytecode array length: 524
bytecodes: [
/* 33 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 67 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 80 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 93 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 106 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 119 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 132 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 145 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 158 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 171 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 184 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 197 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 210 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 223 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 236 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 249 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 262 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 275 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 288 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 301 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 314 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 327 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 340 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 353 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 366 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 379 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 392 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 405 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 418 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 431 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 444 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 457 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 470 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 483 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 496 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 509 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 522 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 535 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 548 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 561 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 574 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 587 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 600 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 613 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 626 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 639 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 652 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 665 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 678 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 691 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 704 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 717 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 730 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 743 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 756 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 769 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 782 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 795 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 808 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 821 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 834 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 847 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 860 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 873 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 886 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 899 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 912 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 925 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 938 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 951 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 964 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 977 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 990 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 1003 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1016 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1029 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1042 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1055 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1068 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1081 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1094 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1107 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1120 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1133 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1146 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1159 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1172 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1185 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1198 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1211 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1224 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1237 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1250 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1263 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1276 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1289 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1302 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1315 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1328 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1341 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1354 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1367 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1380 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1393 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1406 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1419 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1432 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1445 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1458 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1471 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1484 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1497 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1510 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1523 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1536 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1549 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1562 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1575 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1588 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1601 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1614 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1627 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1640 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1653 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1666 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1679 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1692 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
+ /* 41 S> */ B(GetNamedProperty), R(0), U8(0), U8(0),
+ /* 54 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 67 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 80 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 93 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 106 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 119 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 132 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 145 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 158 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 171 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 184 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 197 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 210 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 223 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 236 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 249 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 262 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 275 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 288 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 301 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 314 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 327 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 340 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 353 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 366 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 379 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 392 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 405 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 418 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 431 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 444 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 457 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 470 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 483 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 496 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 509 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 522 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 535 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 548 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 561 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 574 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 587 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 600 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 613 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 626 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 639 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 652 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 665 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 678 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 691 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 704 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 717 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 730 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 743 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 756 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 769 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 782 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 795 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 808 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 821 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 834 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 847 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 860 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 873 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 886 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 899 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 912 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 925 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 938 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 951 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 964 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 977 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 990 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 1003 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1016 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1029 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1042 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1055 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1068 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1081 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1094 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1107 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1120 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1133 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1146 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1159 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1172 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1185 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1198 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1211 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1224 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1237 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1250 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1263 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1276 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1289 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1302 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1315 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1328 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1341 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1354 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1367 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1380 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1393 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1406 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1419 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1432 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1445 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1458 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1471 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1484 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1497 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1510 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1523 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1536 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1549 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1562 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1575 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1588 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1601 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1614 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1627 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1640 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1653 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1666 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1679 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1692 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
/* 1703 S> */ B(LdaSmi), I8(2),
/* 1705 E> */ B(Wide), B(StaGlobal), U16(128), U16(256),
B(LdaUndefined),
@@ -643,134 +643,134 @@ bytecode array length: 524
bytecodes: [
/* 49 S> */ B(CreateEmptyObjectLiteral),
B(Star0),
- /* 57 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
- /* 70 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- /* 83 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
- /* 96 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
- /* 109 S> */ B(LdaNamedProperty), R(0), U8(4), U8(8),
- /* 122 S> */ B(LdaNamedProperty), R(0), U8(5), U8(10),
- /* 135 S> */ B(LdaNamedProperty), R(0), U8(6), U8(12),
- /* 148 S> */ B(LdaNamedProperty), R(0), U8(7), U8(14),
- /* 161 S> */ B(LdaNamedProperty), R(0), U8(8), U8(16),
- /* 174 S> */ B(LdaNamedProperty), R(0), U8(9), U8(18),
- /* 187 S> */ B(LdaNamedProperty), R(0), U8(10), U8(20),
- /* 200 S> */ B(LdaNamedProperty), R(0), U8(11), U8(22),
- /* 213 S> */ B(LdaNamedProperty), R(0), U8(12), U8(24),
- /* 226 S> */ B(LdaNamedProperty), R(0), U8(13), U8(26),
- /* 239 S> */ B(LdaNamedProperty), R(0), U8(14), U8(28),
- /* 252 S> */ B(LdaNamedProperty), R(0), U8(15), U8(30),
- /* 265 S> */ B(LdaNamedProperty), R(0), U8(16), U8(32),
- /* 278 S> */ B(LdaNamedProperty), R(0), U8(17), U8(34),
- /* 291 S> */ B(LdaNamedProperty), R(0), U8(18), U8(36),
- /* 304 S> */ B(LdaNamedProperty), R(0), U8(19), U8(38),
- /* 317 S> */ B(LdaNamedProperty), R(0), U8(20), U8(40),
- /* 330 S> */ B(LdaNamedProperty), R(0), U8(21), U8(42),
- /* 343 S> */ B(LdaNamedProperty), R(0), U8(22), U8(44),
- /* 356 S> */ B(LdaNamedProperty), R(0), U8(23), U8(46),
- /* 369 S> */ B(LdaNamedProperty), R(0), U8(24), U8(48),
- /* 382 S> */ B(LdaNamedProperty), R(0), U8(25), U8(50),
- /* 395 S> */ B(LdaNamedProperty), R(0), U8(26), U8(52),
- /* 408 S> */ B(LdaNamedProperty), R(0), U8(27), U8(54),
- /* 421 S> */ B(LdaNamedProperty), R(0), U8(28), U8(56),
- /* 434 S> */ B(LdaNamedProperty), R(0), U8(29), U8(58),
- /* 447 S> */ B(LdaNamedProperty), R(0), U8(30), U8(60),
- /* 460 S> */ B(LdaNamedProperty), R(0), U8(31), U8(62),
- /* 473 S> */ B(LdaNamedProperty), R(0), U8(32), U8(64),
- /* 486 S> */ B(LdaNamedProperty), R(0), U8(33), U8(66),
- /* 499 S> */ B(LdaNamedProperty), R(0), U8(34), U8(68),
- /* 512 S> */ B(LdaNamedProperty), R(0), U8(35), U8(70),
- /* 525 S> */ B(LdaNamedProperty), R(0), U8(36), U8(72),
- /* 538 S> */ B(LdaNamedProperty), R(0), U8(37), U8(74),
- /* 551 S> */ B(LdaNamedProperty), R(0), U8(38), U8(76),
- /* 564 S> */ B(LdaNamedProperty), R(0), U8(39), U8(78),
- /* 577 S> */ B(LdaNamedProperty), R(0), U8(40), U8(80),
- /* 590 S> */ B(LdaNamedProperty), R(0), U8(41), U8(82),
- /* 603 S> */ B(LdaNamedProperty), R(0), U8(42), U8(84),
- /* 616 S> */ B(LdaNamedProperty), R(0), U8(43), U8(86),
- /* 629 S> */ B(LdaNamedProperty), R(0), U8(44), U8(88),
- /* 642 S> */ B(LdaNamedProperty), R(0), U8(45), U8(90),
- /* 655 S> */ B(LdaNamedProperty), R(0), U8(46), U8(92),
- /* 668 S> */ B(LdaNamedProperty), R(0), U8(47), U8(94),
- /* 681 S> */ B(LdaNamedProperty), R(0), U8(48), U8(96),
- /* 694 S> */ B(LdaNamedProperty), R(0), U8(49), U8(98),
- /* 707 S> */ B(LdaNamedProperty), R(0), U8(50), U8(100),
- /* 720 S> */ B(LdaNamedProperty), R(0), U8(51), U8(102),
- /* 733 S> */ B(LdaNamedProperty), R(0), U8(52), U8(104),
- /* 746 S> */ B(LdaNamedProperty), R(0), U8(53), U8(106),
- /* 759 S> */ B(LdaNamedProperty), R(0), U8(54), U8(108),
- /* 772 S> */ B(LdaNamedProperty), R(0), U8(55), U8(110),
- /* 785 S> */ B(LdaNamedProperty), R(0), U8(56), U8(112),
- /* 798 S> */ B(LdaNamedProperty), R(0), U8(57), U8(114),
- /* 811 S> */ B(LdaNamedProperty), R(0), U8(58), U8(116),
- /* 824 S> */ B(LdaNamedProperty), R(0), U8(59), U8(118),
- /* 837 S> */ B(LdaNamedProperty), R(0), U8(60), U8(120),
- /* 850 S> */ B(LdaNamedProperty), R(0), U8(61), U8(122),
- /* 863 S> */ B(LdaNamedProperty), R(0), U8(62), U8(124),
- /* 876 S> */ B(LdaNamedProperty), R(0), U8(63), U8(126),
- /* 889 S> */ B(LdaNamedProperty), R(0), U8(64), U8(128),
- /* 902 S> */ B(LdaNamedProperty), R(0), U8(65), U8(130),
- /* 915 S> */ B(LdaNamedProperty), R(0), U8(66), U8(132),
- /* 928 S> */ B(LdaNamedProperty), R(0), U8(67), U8(134),
- /* 941 S> */ B(LdaNamedProperty), R(0), U8(68), U8(136),
- /* 954 S> */ B(LdaNamedProperty), R(0), U8(69), U8(138),
- /* 967 S> */ B(LdaNamedProperty), R(0), U8(70), U8(140),
- /* 980 S> */ B(LdaNamedProperty), R(0), U8(71), U8(142),
- /* 993 S> */ B(LdaNamedProperty), R(0), U8(72), U8(144),
- /* 1006 S> */ B(LdaNamedProperty), R(0), U8(73), U8(146),
- /* 1019 S> */ B(LdaNamedProperty), R(0), U8(74), U8(148),
- /* 1032 S> */ B(LdaNamedProperty), R(0), U8(75), U8(150),
- /* 1045 S> */ B(LdaNamedProperty), R(0), U8(76), U8(152),
- /* 1058 S> */ B(LdaNamedProperty), R(0), U8(77), U8(154),
- /* 1071 S> */ B(LdaNamedProperty), R(0), U8(78), U8(156),
- /* 1084 S> */ B(LdaNamedProperty), R(0), U8(79), U8(158),
- /* 1097 S> */ B(LdaNamedProperty), R(0), U8(80), U8(160),
- /* 1110 S> */ B(LdaNamedProperty), R(0), U8(81), U8(162),
- /* 1123 S> */ B(LdaNamedProperty), R(0), U8(82), U8(164),
- /* 1136 S> */ B(LdaNamedProperty), R(0), U8(83), U8(166),
- /* 1149 S> */ B(LdaNamedProperty), R(0), U8(84), U8(168),
- /* 1162 S> */ B(LdaNamedProperty), R(0), U8(85), U8(170),
- /* 1175 S> */ B(LdaNamedProperty), R(0), U8(86), U8(172),
- /* 1188 S> */ B(LdaNamedProperty), R(0), U8(87), U8(174),
- /* 1201 S> */ B(LdaNamedProperty), R(0), U8(88), U8(176),
- /* 1214 S> */ B(LdaNamedProperty), R(0), U8(89), U8(178),
- /* 1227 S> */ B(LdaNamedProperty), R(0), U8(90), U8(180),
- /* 1240 S> */ B(LdaNamedProperty), R(0), U8(91), U8(182),
- /* 1253 S> */ B(LdaNamedProperty), R(0), U8(92), U8(184),
- /* 1266 S> */ B(LdaNamedProperty), R(0), U8(93), U8(186),
- /* 1279 S> */ B(LdaNamedProperty), R(0), U8(94), U8(188),
- /* 1292 S> */ B(LdaNamedProperty), R(0), U8(95), U8(190),
- /* 1305 S> */ B(LdaNamedProperty), R(0), U8(96), U8(192),
- /* 1318 S> */ B(LdaNamedProperty), R(0), U8(97), U8(194),
- /* 1331 S> */ B(LdaNamedProperty), R(0), U8(98), U8(196),
- /* 1344 S> */ B(LdaNamedProperty), R(0), U8(99), U8(198),
- /* 1357 S> */ B(LdaNamedProperty), R(0), U8(100), U8(200),
- /* 1370 S> */ B(LdaNamedProperty), R(0), U8(101), U8(202),
- /* 1383 S> */ B(LdaNamedProperty), R(0), U8(102), U8(204),
- /* 1396 S> */ B(LdaNamedProperty), R(0), U8(103), U8(206),
- /* 1409 S> */ B(LdaNamedProperty), R(0), U8(104), U8(208),
- /* 1422 S> */ B(LdaNamedProperty), R(0), U8(105), U8(210),
- /* 1435 S> */ B(LdaNamedProperty), R(0), U8(106), U8(212),
- /* 1448 S> */ B(LdaNamedProperty), R(0), U8(107), U8(214),
- /* 1461 S> */ B(LdaNamedProperty), R(0), U8(108), U8(216),
- /* 1474 S> */ B(LdaNamedProperty), R(0), U8(109), U8(218),
- /* 1487 S> */ B(LdaNamedProperty), R(0), U8(110), U8(220),
- /* 1500 S> */ B(LdaNamedProperty), R(0), U8(111), U8(222),
- /* 1513 S> */ B(LdaNamedProperty), R(0), U8(112), U8(224),
- /* 1526 S> */ B(LdaNamedProperty), R(0), U8(113), U8(226),
- /* 1539 S> */ B(LdaNamedProperty), R(0), U8(114), U8(228),
- /* 1552 S> */ B(LdaNamedProperty), R(0), U8(115), U8(230),
- /* 1565 S> */ B(LdaNamedProperty), R(0), U8(116), U8(232),
- /* 1578 S> */ B(LdaNamedProperty), R(0), U8(117), U8(234),
- /* 1591 S> */ B(LdaNamedProperty), R(0), U8(118), U8(236),
- /* 1604 S> */ B(LdaNamedProperty), R(0), U8(119), U8(238),
- /* 1617 S> */ B(LdaNamedProperty), R(0), U8(120), U8(240),
- /* 1630 S> */ B(LdaNamedProperty), R(0), U8(121), U8(242),
- /* 1643 S> */ B(LdaNamedProperty), R(0), U8(122), U8(244),
- /* 1656 S> */ B(LdaNamedProperty), R(0), U8(123), U8(246),
- /* 1669 S> */ B(LdaNamedProperty), R(0), U8(124), U8(248),
- /* 1682 S> */ B(LdaNamedProperty), R(0), U8(125), U8(250),
- /* 1695 S> */ B(LdaNamedProperty), R(0), U8(126), U8(252),
- /* 1708 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
+ /* 57 S> */ B(GetNamedProperty), R(0), U8(0), U8(0),
+ /* 70 S> */ B(GetNamedProperty), R(0), U8(1), U8(2),
+ /* 83 S> */ B(GetNamedProperty), R(0), U8(2), U8(4),
+ /* 96 S> */ B(GetNamedProperty), R(0), U8(3), U8(6),
+ /* 109 S> */ B(GetNamedProperty), R(0), U8(4), U8(8),
+ /* 122 S> */ B(GetNamedProperty), R(0), U8(5), U8(10),
+ /* 135 S> */ B(GetNamedProperty), R(0), U8(6), U8(12),
+ /* 148 S> */ B(GetNamedProperty), R(0), U8(7), U8(14),
+ /* 161 S> */ B(GetNamedProperty), R(0), U8(8), U8(16),
+ /* 174 S> */ B(GetNamedProperty), R(0), U8(9), U8(18),
+ /* 187 S> */ B(GetNamedProperty), R(0), U8(10), U8(20),
+ /* 200 S> */ B(GetNamedProperty), R(0), U8(11), U8(22),
+ /* 213 S> */ B(GetNamedProperty), R(0), U8(12), U8(24),
+ /* 226 S> */ B(GetNamedProperty), R(0), U8(13), U8(26),
+ /* 239 S> */ B(GetNamedProperty), R(0), U8(14), U8(28),
+ /* 252 S> */ B(GetNamedProperty), R(0), U8(15), U8(30),
+ /* 265 S> */ B(GetNamedProperty), R(0), U8(16), U8(32),
+ /* 278 S> */ B(GetNamedProperty), R(0), U8(17), U8(34),
+ /* 291 S> */ B(GetNamedProperty), R(0), U8(18), U8(36),
+ /* 304 S> */ B(GetNamedProperty), R(0), U8(19), U8(38),
+ /* 317 S> */ B(GetNamedProperty), R(0), U8(20), U8(40),
+ /* 330 S> */ B(GetNamedProperty), R(0), U8(21), U8(42),
+ /* 343 S> */ B(GetNamedProperty), R(0), U8(22), U8(44),
+ /* 356 S> */ B(GetNamedProperty), R(0), U8(23), U8(46),
+ /* 369 S> */ B(GetNamedProperty), R(0), U8(24), U8(48),
+ /* 382 S> */ B(GetNamedProperty), R(0), U8(25), U8(50),
+ /* 395 S> */ B(GetNamedProperty), R(0), U8(26), U8(52),
+ /* 408 S> */ B(GetNamedProperty), R(0), U8(27), U8(54),
+ /* 421 S> */ B(GetNamedProperty), R(0), U8(28), U8(56),
+ /* 434 S> */ B(GetNamedProperty), R(0), U8(29), U8(58),
+ /* 447 S> */ B(GetNamedProperty), R(0), U8(30), U8(60),
+ /* 460 S> */ B(GetNamedProperty), R(0), U8(31), U8(62),
+ /* 473 S> */ B(GetNamedProperty), R(0), U8(32), U8(64),
+ /* 486 S> */ B(GetNamedProperty), R(0), U8(33), U8(66),
+ /* 499 S> */ B(GetNamedProperty), R(0), U8(34), U8(68),
+ /* 512 S> */ B(GetNamedProperty), R(0), U8(35), U8(70),
+ /* 525 S> */ B(GetNamedProperty), R(0), U8(36), U8(72),
+ /* 538 S> */ B(GetNamedProperty), R(0), U8(37), U8(74),
+ /* 551 S> */ B(GetNamedProperty), R(0), U8(38), U8(76),
+ /* 564 S> */ B(GetNamedProperty), R(0), U8(39), U8(78),
+ /* 577 S> */ B(GetNamedProperty), R(0), U8(40), U8(80),
+ /* 590 S> */ B(GetNamedProperty), R(0), U8(41), U8(82),
+ /* 603 S> */ B(GetNamedProperty), R(0), U8(42), U8(84),
+ /* 616 S> */ B(GetNamedProperty), R(0), U8(43), U8(86),
+ /* 629 S> */ B(GetNamedProperty), R(0), U8(44), U8(88),
+ /* 642 S> */ B(GetNamedProperty), R(0), U8(45), U8(90),
+ /* 655 S> */ B(GetNamedProperty), R(0), U8(46), U8(92),
+ /* 668 S> */ B(GetNamedProperty), R(0), U8(47), U8(94),
+ /* 681 S> */ B(GetNamedProperty), R(0), U8(48), U8(96),
+ /* 694 S> */ B(GetNamedProperty), R(0), U8(49), U8(98),
+ /* 707 S> */ B(GetNamedProperty), R(0), U8(50), U8(100),
+ /* 720 S> */ B(GetNamedProperty), R(0), U8(51), U8(102),
+ /* 733 S> */ B(GetNamedProperty), R(0), U8(52), U8(104),
+ /* 746 S> */ B(GetNamedProperty), R(0), U8(53), U8(106),
+ /* 759 S> */ B(GetNamedProperty), R(0), U8(54), U8(108),
+ /* 772 S> */ B(GetNamedProperty), R(0), U8(55), U8(110),
+ /* 785 S> */ B(GetNamedProperty), R(0), U8(56), U8(112),
+ /* 798 S> */ B(GetNamedProperty), R(0), U8(57), U8(114),
+ /* 811 S> */ B(GetNamedProperty), R(0), U8(58), U8(116),
+ /* 824 S> */ B(GetNamedProperty), R(0), U8(59), U8(118),
+ /* 837 S> */ B(GetNamedProperty), R(0), U8(60), U8(120),
+ /* 850 S> */ B(GetNamedProperty), R(0), U8(61), U8(122),
+ /* 863 S> */ B(GetNamedProperty), R(0), U8(62), U8(124),
+ /* 876 S> */ B(GetNamedProperty), R(0), U8(63), U8(126),
+ /* 889 S> */ B(GetNamedProperty), R(0), U8(64), U8(128),
+ /* 902 S> */ B(GetNamedProperty), R(0), U8(65), U8(130),
+ /* 915 S> */ B(GetNamedProperty), R(0), U8(66), U8(132),
+ /* 928 S> */ B(GetNamedProperty), R(0), U8(67), U8(134),
+ /* 941 S> */ B(GetNamedProperty), R(0), U8(68), U8(136),
+ /* 954 S> */ B(GetNamedProperty), R(0), U8(69), U8(138),
+ /* 967 S> */ B(GetNamedProperty), R(0), U8(70), U8(140),
+ /* 980 S> */ B(GetNamedProperty), R(0), U8(71), U8(142),
+ /* 993 S> */ B(GetNamedProperty), R(0), U8(72), U8(144),
+ /* 1006 S> */ B(GetNamedProperty), R(0), U8(73), U8(146),
+ /* 1019 S> */ B(GetNamedProperty), R(0), U8(74), U8(148),
+ /* 1032 S> */ B(GetNamedProperty), R(0), U8(75), U8(150),
+ /* 1045 S> */ B(GetNamedProperty), R(0), U8(76), U8(152),
+ /* 1058 S> */ B(GetNamedProperty), R(0), U8(77), U8(154),
+ /* 1071 S> */ B(GetNamedProperty), R(0), U8(78), U8(156),
+ /* 1084 S> */ B(GetNamedProperty), R(0), U8(79), U8(158),
+ /* 1097 S> */ B(GetNamedProperty), R(0), U8(80), U8(160),
+ /* 1110 S> */ B(GetNamedProperty), R(0), U8(81), U8(162),
+ /* 1123 S> */ B(GetNamedProperty), R(0), U8(82), U8(164),
+ /* 1136 S> */ B(GetNamedProperty), R(0), U8(83), U8(166),
+ /* 1149 S> */ B(GetNamedProperty), R(0), U8(84), U8(168),
+ /* 1162 S> */ B(GetNamedProperty), R(0), U8(85), U8(170),
+ /* 1175 S> */ B(GetNamedProperty), R(0), U8(86), U8(172),
+ /* 1188 S> */ B(GetNamedProperty), R(0), U8(87), U8(174),
+ /* 1201 S> */ B(GetNamedProperty), R(0), U8(88), U8(176),
+ /* 1214 S> */ B(GetNamedProperty), R(0), U8(89), U8(178),
+ /* 1227 S> */ B(GetNamedProperty), R(0), U8(90), U8(180),
+ /* 1240 S> */ B(GetNamedProperty), R(0), U8(91), U8(182),
+ /* 1253 S> */ B(GetNamedProperty), R(0), U8(92), U8(184),
+ /* 1266 S> */ B(GetNamedProperty), R(0), U8(93), U8(186),
+ /* 1279 S> */ B(GetNamedProperty), R(0), U8(94), U8(188),
+ /* 1292 S> */ B(GetNamedProperty), R(0), U8(95), U8(190),
+ /* 1305 S> */ B(GetNamedProperty), R(0), U8(96), U8(192),
+ /* 1318 S> */ B(GetNamedProperty), R(0), U8(97), U8(194),
+ /* 1331 S> */ B(GetNamedProperty), R(0), U8(98), U8(196),
+ /* 1344 S> */ B(GetNamedProperty), R(0), U8(99), U8(198),
+ /* 1357 S> */ B(GetNamedProperty), R(0), U8(100), U8(200),
+ /* 1370 S> */ B(GetNamedProperty), R(0), U8(101), U8(202),
+ /* 1383 S> */ B(GetNamedProperty), R(0), U8(102), U8(204),
+ /* 1396 S> */ B(GetNamedProperty), R(0), U8(103), U8(206),
+ /* 1409 S> */ B(GetNamedProperty), R(0), U8(104), U8(208),
+ /* 1422 S> */ B(GetNamedProperty), R(0), U8(105), U8(210),
+ /* 1435 S> */ B(GetNamedProperty), R(0), U8(106), U8(212),
+ /* 1448 S> */ B(GetNamedProperty), R(0), U8(107), U8(214),
+ /* 1461 S> */ B(GetNamedProperty), R(0), U8(108), U8(216),
+ /* 1474 S> */ B(GetNamedProperty), R(0), U8(109), U8(218),
+ /* 1487 S> */ B(GetNamedProperty), R(0), U8(110), U8(220),
+ /* 1500 S> */ B(GetNamedProperty), R(0), U8(111), U8(222),
+ /* 1513 S> */ B(GetNamedProperty), R(0), U8(112), U8(224),
+ /* 1526 S> */ B(GetNamedProperty), R(0), U8(113), U8(226),
+ /* 1539 S> */ B(GetNamedProperty), R(0), U8(114), U8(228),
+ /* 1552 S> */ B(GetNamedProperty), R(0), U8(115), U8(230),
+ /* 1565 S> */ B(GetNamedProperty), R(0), U8(116), U8(232),
+ /* 1578 S> */ B(GetNamedProperty), R(0), U8(117), U8(234),
+ /* 1591 S> */ B(GetNamedProperty), R(0), U8(118), U8(236),
+ /* 1604 S> */ B(GetNamedProperty), R(0), U8(119), U8(238),
+ /* 1617 S> */ B(GetNamedProperty), R(0), U8(120), U8(240),
+ /* 1630 S> */ B(GetNamedProperty), R(0), U8(121), U8(242),
+ /* 1643 S> */ B(GetNamedProperty), R(0), U8(122), U8(244),
+ /* 1656 S> */ B(GetNamedProperty), R(0), U8(123), U8(246),
+ /* 1669 S> */ B(GetNamedProperty), R(0), U8(124), U8(248),
+ /* 1682 S> */ B(GetNamedProperty), R(0), U8(125), U8(250),
+ /* 1695 S> */ B(GetNamedProperty), R(0), U8(126), U8(252),
+ /* 1708 S> */ B(GetNamedProperty), R(0), U8(127), U8(254),
/* 1719 S> */ B(LdaSmi), I8(2),
/* 1721 E> */ B(Wide), B(StaGlobal), U16(128), U16(256),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index da9b0975e1..9d1a65522e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -93,7 +93,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 111
+bytecode array length: 103
bytecodes: [
/* 128 E> */ B(CreateRestParameter),
B(Star3),
@@ -101,36 +101,31 @@ bytecodes: [
B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
- B(CreateEmptyArrayLiteral), U8(0),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star7),
- B(LdaZero),
- B(Star6),
B(LdaSmi), I8(1),
- B(StaInArrayLiteral), R(7), R(6), U8(1),
- B(Ldar), R(6),
- B(Inc), U8(3),
/* 152 S> */ B(Star6),
- /* 152 E> */ B(GetIterator), R(3), U8(4), U8(6),
+ /* 152 E> */ B(GetIterator), R(3), U8(1), U8(3),
B(Mov), R(1), R(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star9),
- B(LdaNamedProperty), R(9), U8(0), U8(8),
+ B(GetNamedProperty), R(9), U8(1), U8(5),
B(Star8),
B(CallProperty0), R(8), R(9), U8(14),
B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(LdaNamedProperty), R(10), U8(1), U8(16),
+ B(GetNamedProperty), R(10), U8(2), U8(16),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(10), U8(2), U8(10),
- B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(GetNamedProperty), R(10), U8(3), U8(7),
+ B(StaInArrayLiteral), R(7), R(6), U8(12),
B(Ldar), R(6),
- B(Inc), U8(3),
+ B(Inc), U8(11),
B(Star6),
B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(1),
- B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(StaInArrayLiteral), R(7), R(6), U8(12),
B(ThrowIfNotSuperConstructor), R(5),
B(Mov), R(5), R(6),
B(Mov), R(0), R(8),
@@ -144,6 +139,7 @@ bytecodes: [
/* 162 S> */ B(Return),
]
constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index e007b667a2..9b8d6006bc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -21,7 +21,7 @@ bytecodes: [
/* 8 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
B(Star1),
/* 16 E> */ B(CreateClosure), U8(2), U8(0), U8(0),
- B(StaNamedOwnProperty), R(1), U8(3), U8(1),
+ B(DefineNamedOwnProperty), R(1), U8(3), U8(1),
B(Ldar), R(1),
/* 8 E> */ B(StaGlobal), U8(4), U8(3),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index f20d7937cd..54a753f1ff 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -164,7 +164,7 @@ class InterpreterTester {
// overwriting existing metadata.
function->shared().set_raw_outer_scope_info_or_feedback_metadata(
*feedback_metadata_.ToHandleChecked());
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate_, function, &is_compiled_scope);
}
return function;
}
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index ae1d66eaeb..09e2926059 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2604,7 +2604,38 @@ TEST(PrivateMethodAccess) {
"}\n"
"\n"
"var test = D;\n"
- "new test;\n"};
+ "new test;\n",
+
+ "var test;\n"
+ "class F extends class {} {\n"
+ " #method() { }\n"
+ " constructor() {\n"
+ " (test = () => super())();\n"
+ " this.#method();\n"
+ " }\n"
+ "};\n"
+ "new F;\n",
+
+ "var test;\n"
+ "class G extends class {} {\n"
+ " #method() { }\n"
+ " constructor() {\n"
+ " test = () => super();\n"
+ " test();\n"
+ " this.#method();\n"
+ " }\n"
+ "};\n"
+ "new G();\n",
+
+ "var test;\n"
+ "class H extends class {} {\n"
+ " #method() { }\n"
+ " constructor(str) {\n"
+ " eval(str);\n"
+ " this.#method();\n"
+ " }\n"
+ "};\n"
+ "new test('test = () => super(); test()');\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrivateMethodAccess.golden")));
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 5132fb4779..2099990504 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -1238,7 +1238,7 @@ TEST(InterpreterLoadKeyedProperty) {
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(789));
}
-TEST(InterpreterStoreNamedProperty) {
+TEST(InterpreterSetNamedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -1256,8 +1256,8 @@ TEST(InterpreterStoreNamedProperty) {
BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
builder.LoadLiteral(Smi::FromInt(999))
- .StoreNamedProperty(builder.Receiver(), name, GetIndex(slot),
- LanguageMode::kStrict)
+ .SetNamedProperty(builder.Receiver(), name, GetIndex(slot),
+ LanguageMode::kStrict)
.Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1301,7 +1301,7 @@ TEST(InterpreterStoreNamedProperty) {
CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
}
-TEST(InterpreterStoreKeyedProperty) {
+TEST(InterpreterSetKeyedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -1321,8 +1321,8 @@ TEST(InterpreterStoreKeyedProperty) {
builder.LoadLiteral(name)
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(999))
- .StoreKeyedProperty(builder.Receiver(), Register(0), GetIndex(slot),
- i::LanguageMode::kSloppy)
+ .SetKeyedProperty(builder.Receiver(), Register(0), GetIndex(slot),
+ i::LanguageMode::kSloppy)
.Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -5046,7 +5046,7 @@ TEST(InterpreterWithNativeStack) {
CHECK(f->shared().HasBytecodeArray());
i::CodeT code = f->shared().GetCode();
i::Handle<i::CodeT> interpreter_entry_trampoline =
- BUILTIN_CODET(isolate, InterpreterEntryTrampoline);
+ BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
CHECK(code.IsCodeT());
CHECK(code.is_interpreter_trampoline_builtin());
@@ -5060,24 +5060,24 @@ TEST(InterpreterGetBytecodeHandler) {
Interpreter* interpreter = isolate->interpreter();
// Test that single-width bytecode handlers deserializer correctly.
- Code wide_handler =
+ CodeT wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kSingle);
CHECK_EQ(wide_handler.builtin_id(), Builtin::kWideHandler);
- Code add_handler =
+ CodeT add_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kSingle);
CHECK_EQ(add_handler.builtin_id(), Builtin::kAddHandler);
// Test that double-width bytecode handlers deserializer correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
- Code wide_wide_handler =
+ CodeT wide_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kDouble);
CHECK_EQ(wide_wide_handler.builtin_id(), Builtin::kIllegalHandler);
- Code add_wide_handler =
+ CodeT add_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kDouble);
CHECK_EQ(add_wide_handler.builtin_id(), Builtin::kAddWideHandler);
@@ -5280,16 +5280,16 @@ TEST(InterpreterCollectSourcePositions_GenerateStackTrace) {
TEST(InterpreterLookupNameOfBytecodeHandler) {
Interpreter* interpreter = CcTest::i_isolate()->interpreter();
- Code ldaLookupSlot = interpreter->GetBytecodeHandler(Bytecode::kLdaLookupSlot,
- OperandScale::kSingle);
+ Code ldaLookupSlot = FromCodeT(interpreter->GetBytecodeHandler(
+ Bytecode::kLdaLookupSlot, OperandScale::kSingle));
CheckStringEqual("LdaLookupSlotHandler",
interpreter->LookupNameOfBytecodeHandler(ldaLookupSlot));
- Code wideLdaLookupSlot = interpreter->GetBytecodeHandler(
- Bytecode::kLdaLookupSlot, OperandScale::kDouble);
+ Code wideLdaLookupSlot = FromCodeT(interpreter->GetBytecodeHandler(
+ Bytecode::kLdaLookupSlot, OperandScale::kDouble));
CheckStringEqual("LdaLookupSlotWideHandler",
interpreter->LookupNameOfBytecodeHandler(wideLdaLookupSlot));
- Code extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
- Bytecode::kLdaLookupSlot, OperandScale::kQuadruple);
+ Code extraWideLdaLookupSlot = FromCodeT(interpreter->GetBytecodeHandler(
+ Bytecode::kLdaLookupSlot, OperandScale::kQuadruple));
CheckStringEqual(
"LdaLookupSlotExtraWideHandler",
interpreter->LookupNameOfBytecodeHandler(extraWideLdaLookupSlot));
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index d02b6e2023..719bb519b0 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -778,3 +778,42 @@ TEST(ObjectTemplateSetLazyPropertyHasNoSideEffect) {
->Int32Value(env.local())
.FromJust());
}
+
+namespace {
+void FunctionNativeGetter(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetIsolate()->ThrowError(v8_str("side effect in getter"));
+}
+} // namespace
+
+TEST(BindFunctionTemplateSetNativeDataProperty) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Check that getter is called on Function.prototype.bind.
+ {
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->SetNativeDataProperty(v8_str("name"), FunctionNativeGetter);
+ v8::Local<v8::Function> func =
+ templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(CompileRun("func.bind()").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+
+ // Check that getter is called on Function.prototype.bind.
+ {
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->SetNativeDataProperty(v8_str("length"), FunctionNativeGetter);
+ v8::Local<v8::Function> func =
+ templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(CompileRun("func.bind()").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index fe5524a0ee..97d017d5f7 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -60,6 +60,16 @@ void EmptyInterceptorDeleter(
void EmptyInterceptorEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {}
+void EmptyInterceptorDefinerWithSideEffect(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Local<v8::Value> result = CompileRun("interceptor_definer_side_effect()");
+ if (!result->IsNull()) {
+ info.GetReturnValue().Set(result);
+ }
+}
+
void SimpleAccessorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
Local<Object> self = info.This().As<Object>();
@@ -869,13 +879,17 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
namespace {
void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertySetterCallback setter,
v8::GenericNamedPropertyQueryCallback query,
- const char* source, int expected) {
+ v8::GenericNamedPropertyDefinerCallback definer,
+ v8::PropertyHandlerFlags flags, const char* source,
+ int expected) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- getter, nullptr, query, nullptr, nullptr, v8_str("data")));
+ getter, setter, query, nullptr /* deleter */, nullptr /* enumerator */,
+ definer, nullptr /* descriptor */, v8_str("data"), flags));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("o"),
@@ -885,9 +899,17 @@ void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
}
+void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertyQueryCallback query,
+ const char* source, int expected) {
+ CheckInterceptorIC(getter, nullptr, query, nullptr,
+ v8::PropertyHandlerFlags::kNone, source, expected);
+}
+
void CheckInterceptorLoadIC(v8::GenericNamedPropertyGetterCallback getter,
const char* source, int expected) {
- CheckInterceptorIC(getter, nullptr, source, expected);
+ CheckInterceptorIC(getter, nullptr, nullptr, nullptr,
+ v8::PropertyHandlerFlags::kNone, source, expected);
}
void InterceptorLoadICGetter(Local<Name> name,
@@ -1581,6 +1603,38 @@ THREADED_TEST(InterceptorStoreICWithSideEffectfulCallbacks) {
19);
}
+THREADED_TEST(InterceptorDefineICWithSideEffectfulCallbacks) {
+ CheckInterceptorIC(EmptyInterceptorGetter, EmptyInterceptorSetter,
+ EmptyInterceptorQuery,
+ EmptyInterceptorDefinerWithSideEffect,
+ v8::PropertyHandlerFlags::kNonMasking,
+ "let inside_side_effect = false;"
+ "let interceptor_definer_side_effect = function() {"
+ " if (!inside_side_effect) {"
+ " inside_side_effect = true;"
+ " o.y = 153;"
+ " inside_side_effect = false;"
+ " }"
+ " return null;"
+ "};"
+ "class Base {"
+ " constructor(arg) {"
+ " return arg;"
+ " }"
+ "}"
+ "class ClassWithField extends Base {"
+ " y = (() => {"
+ " return 42;"
+ " })();"
+ " constructor(arg) {"
+ " super(arg);"
+ " }"
+ "}"
+ "new ClassWithField(o);"
+ "o.y",
+ 42);
+}
+
static void InterceptorStoreICSetter(
Local<Name> key, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -2286,12 +2340,12 @@ void LogDefinerCallsAndStopCallback(
info.GetReturnValue().Set(name);
}
-struct StoreOwnICInterceptorConfig {
+struct DefineNamedOwnICInterceptorConfig {
std::string code;
std::vector<std::string> intercepted_defines;
};
-std::vector<StoreOwnICInterceptorConfig> configs{
+std::vector<DefineNamedOwnICInterceptorConfig> configs{
{
R"(
class ClassWithNormalField extends Base {
@@ -2387,8 +2441,8 @@ std::vector<StoreOwnICInterceptorConfig> configs{
};
} // namespace
-void CheckPropertyDefinerCallbackInStoreOwnIC(Local<Context> context,
- bool stop) {
+void CheckPropertyDefinerCallbackInDefineNamedOwnIC(Local<Context> context,
+ bool stop) {
v8_compile(R"(
class Base {
constructor(arg) {
@@ -2437,17 +2491,17 @@ void CheckPropertyDefinerCallbackInStoreOwnIC(Local<Context> context,
}
}
-THREADED_TEST(PropertyDefinerCallbackInStoreOwnIC) {
+THREADED_TEST(PropertyDefinerCallbackInDefineNamedOwnIC) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CheckPropertyDefinerCallbackInStoreOwnIC(env.local(), true);
+ CheckPropertyDefinerCallbackInDefineNamedOwnIC(env.local(), true);
}
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CheckPropertyDefinerCallbackInStoreOwnIC(env.local(), false);
+ CheckPropertyDefinerCallbackInDefineNamedOwnIC(env.local(), false);
}
{
@@ -2455,7 +2509,7 @@ THREADED_TEST(PropertyDefinerCallbackInStoreOwnIC) {
i::FlagList::EnforceFlagImplications();
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CheckPropertyDefinerCallbackInStoreOwnIC(env.local(), true);
+ CheckPropertyDefinerCallbackInDefineNamedOwnIC(env.local(), true);
}
{
@@ -2463,7 +2517,7 @@ THREADED_TEST(PropertyDefinerCallbackInStoreOwnIC) {
i::FlagList::EnforceFlagImplications();
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CheckPropertyDefinerCallbackInStoreOwnIC(env.local(), false);
+ CheckPropertyDefinerCallbackInDefineNamedOwnIC(env.local(), false);
}
}
@@ -5833,10 +5887,10 @@ void DatabaseGetter(Local<Name> name,
const v8::PropertyCallbackInfo<Value>& info) {
ApiTestFuzzer::Fuzz();
auto context = info.GetIsolate()->GetCurrentContext();
- Local<v8::Object> db = info.Holder()
- ->GetRealNamedProperty(context, v8_str("db"))
- .ToLocalChecked()
- .As<v8::Object>();
+ v8::MaybeLocal<Value> maybe_db =
+ info.Holder()->GetRealNamedProperty(context, v8_str("db"));
+ if (maybe_db.IsEmpty()) return;
+ Local<v8::Object> db = maybe_db.ToLocalChecked().As<v8::Object>();
if (!db->Has(context, name).FromJust()) return;
info.GetReturnValue().Set(db->Get(context, name).ToLocalChecked());
}
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index 9045186e12..130ce0b257 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -887,3 +887,58 @@ UNINITIALIZED_TEST(CaptureStackTraceForStackOverflow) {
isolate->Exit();
isolate->Dispose();
}
+
+void AnalyzeScriptNameInStack(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::String> name =
+ v8::StackTrace::CurrentScriptNameOrSourceURL(args.GetIsolate());
+ CHECK(!name.IsEmpty());
+ CHECK(name->StringEquals(v8_str("test.js")));
+}
+
+TEST(CurrentScriptNameOrSourceURL_Name) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(
+ isolate, "AnalyzeScriptNameInStack",
+ v8::FunctionTemplate::New(CcTest::isolate(), AnalyzeScriptNameInStack));
+ LocalContext context(nullptr, templ);
+
+ const char* source = R"(
+ function foo() {
+ AnalyzeScriptNameInStack();
+ }
+ foo();
+ )";
+
+ CHECK(CompileRunWithOrigin(source, "test.js")->IsUndefined());
+}
+
+void AnalyzeScriptURLInStack(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::String> name =
+ v8::StackTrace::CurrentScriptNameOrSourceURL(args.GetIsolate());
+ CHECK(!name.IsEmpty());
+ CHECK(name->StringEquals(v8_str("foo.js")));
+}
+
+TEST(CurrentScriptNameOrSourceURL_SourceURL) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(
+ isolate, "AnalyzeScriptURLInStack",
+ v8::FunctionTemplate::New(CcTest::isolate(), AnalyzeScriptURLInStack));
+ LocalContext context(nullptr, templ);
+
+ const char* source = R"(
+ function foo() {
+ AnalyzeScriptURLInStack();
+ }
+ foo();
+ //# sourceURL=foo.js
+ )";
+
+ CHECK(CompileRunWithOrigin(source, "")->IsUndefined());
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index f7d45de81a..dd4b46fa48 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -53,6 +53,7 @@
#include "src/base/platform/platform.h"
#include "src/base/strings.h"
#include "src/codegen/compilation-cache.h"
+#include "src/compiler/globals.h"
#include "src/debug/debug.h"
#include "src/execution/arguments.h"
#include "src/execution/execution.h"
@@ -60,9 +61,9 @@
#include "src/execution/protectors-inl.h"
#include "src/execution/vm-state.h"
#include "src/handles/global-handles.h"
+#include "src/heap/evacuation-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/local-allocator.h"
#include "src/logging/metrics.h"
#include "src/objects/feedback-vector-inl.h"
#include "src/objects/feedback-vector.h"
@@ -169,22 +170,6 @@ static void Returns42(const v8::FunctionCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(42);
}
-// Tests that call v8::V8::Dispose() cannot be threaded.
-UNINITIALIZED_TEST(InitializeAndDisposeOnce) {
- CHECK(v8::V8::Initialize());
- CHECK(v8::V8::Dispose());
-}
-
-
-// Tests that call v8::V8::Dispose() cannot be threaded.
-UNINITIALIZED_TEST(InitializeAndDisposeMultiple) {
- for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
- for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
- for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
- for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
- for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
-}
-
THREADED_TEST(Handles) {
v8::HandleScope scope(CcTest::isolate());
Local<Context> local_env;
@@ -709,8 +694,9 @@ TEST(MakingExternalStringConditions) {
CHECK(local_string->CanMakeExternal());
// Tiny strings are not in-place externalizable when pointer compression is
- // enabled.
- CHECK_EQ(i::kTaggedSize == i::kSystemPointerSize,
+ // enabled, but they are if sandboxed external pointers are enabled.
+ CHECK_EQ(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ||
+ i::kTaggedSize == i::kSystemPointerSize,
tiny_local_string->CanMakeExternal());
}
@@ -739,8 +725,9 @@ TEST(MakingExternalOneByteStringConditions) {
CHECK(local_string->CanMakeExternal());
// Tiny strings are not in-place externalizable when pointer compression is
- // enabled.
- CHECK_EQ(i::kTaggedSize == i::kSystemPointerSize,
+ // enabled, but they are if sandboxed external pointers are enabled.
+ CHECK_EQ(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ||
+ i::kTaggedSize == i::kSystemPointerSize,
tiny_local_string->CanMakeExternal());
}
@@ -3066,7 +3053,7 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
CheckAlignedPointerInInternalField(obj, stack_allocated);
// The aligned pointer must have the top bits be zero on 64-bit machines (at
- // least if the heap sandbox is enabled).
+ // least if the sandboxed external pointers are enabled).
void* huge = reinterpret_cast<void*>(0x0000fffffffffffe);
CheckAlignedPointerInInternalField(obj, huge);
@@ -3144,7 +3131,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
CHECK_EQ(3, (*env)->GetNumberOfEmbedderDataFields());
// The aligned pointer must have the top bits be zero on 64-bit machines (at
- // least if the heap sandbox is enabled).
+ // least if the sandboxed external pointers are enabled).
void* huge = reinterpret_cast<void*>(0x0000fffffffffffe);
CheckAlignedPointerInEmbedderData(&env, 3, huge);
CHECK_EQ(4, (*env)->GetNumberOfEmbedderDataFields());
@@ -7841,6 +7828,12 @@ void CheckInternalFields(
}
void InternalFieldCallback(bool global_gc) {
+ // Manual GC scope as --stress-incremental-marking starts marking early and
+ // setting internal pointer fields mark the object for a heap layout change,
+ // which prevents it from being reclaimed and the callbacks from being
+ // executed.
+ ManualGCScope manual_gc_scope;
+
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -16719,11 +16712,13 @@ TEST(TestIdleNotification) {
i::GarbageCollectionReason::kTesting);
}
finished = env->GetIsolate()->IdleNotificationDeadline(
- (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ (v8::base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
IdlePauseInSeconds);
if (CcTest::heap()->mark_compact_collector()->sweeping_in_progress()) {
- CcTest::heap()->mark_compact_collector()->EnsureSweepingCompleted();
+ CcTest::heap()->mark_compact_collector()->EnsureSweepingCompleted(
+ v8::internal::MarkCompactCollector::SweepingForcedFinalizationMode::
+ kV8Only);
}
}
intptr_t final_size = CcTest::heap()->SizeOfObjects();
@@ -23801,9 +23796,9 @@ void RunStreamingTest(const char** chunks, v8::ScriptType type,
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreaming(isolate, &source, type);
- // TestSourceStream::GetMoreData won't block, so it's OK to just run the
- // task here in the main thread.
- task->Run();
+ // TestSourceStream::GetMoreData won't block, so it's OK to just join the
+ // background task.
+ StreamerThread::StartThreadForTaskAndJoin(task);
delete task;
// Possible errors are only produced while compiling.
@@ -24126,7 +24121,9 @@ TEST(StreamingWithDebuggingEnabledLate) {
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreaming(isolate, &source);
- task->Run();
+ // TestSourceStream::GetMoreData won't block, so it's OK to just join the
+ // background task.
+ StreamerThread::StartThreadForTaskAndJoin(task);
delete task;
CHECK(!try_catch.HasCaught());
@@ -24236,7 +24233,10 @@ TEST(StreamingWithHarmonyScopes) {
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreaming(isolate, &source);
- task->Run();
+
+ // TestSourceStream::GetMoreData won't block, so it's OK to just join the
+ // background task.
+ StreamerThread::StartThreadForTaskAndJoin(task);
delete task;
// Parsing should succeed (the script will be parsed and compiled in a context
@@ -24258,6 +24258,57 @@ TEST(StreamingWithHarmonyScopes) {
delete[] full_source;
}
+// Regression test for crbug.com/v8/12668. Verifies that after a streamed script
+// is inserted into the isolate script cache, a non-streamed script with
+// identical origin can reuse that data.
+TEST(StreamingWithIsolateScriptCache) {
+ const char* chunks[] = {"'use strict'; (function test() { return 13; })",
+ nullptr};
+ const char* full_source = chunks[0];
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin(isolate, v8_str("http://foo.com"), 0, 0, false, -1,
+ v8::Local<v8::Value>(), false, false, false);
+ i::Handle<i::JSFunction> first_function;
+ i::Handle<i::JSFunction> second_function;
+
+ // Run the script using streaming.
+ {
+ LocalContext env;
+
+ v8::ScriptCompiler::StreamedSource source(
+ std::make_unique<TestSourceStream>(chunks),
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE);
+ v8::ScriptCompiler::ScriptStreamingTask* task =
+ v8::ScriptCompiler::StartStreaming(isolate, &source,
+ v8::ScriptType::kClassic);
+ StreamerThread::StartThreadForTaskAndJoin(task);
+ delete task;
+ v8::Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &source, v8_str(full_source),
+ origin)
+ .ToLocalChecked();
+ v8::Local<Value> result(script->Run(env.local()).ToLocalChecked());
+ first_function =
+ i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*result));
+ }
+
+ // Run the same script in another Context without streaming.
+ {
+ LocalContext env;
+ v8::ScriptCompiler::Source script_source(v8_str(full_source), origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &script_source)
+ .ToLocalChecked();
+ v8::Local<Value> result(script->Run(env.local()).ToLocalChecked());
+ second_function =
+ i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*result));
+ }
+
+ // The functions created by both copies of the script should refer to the same
+ // SharedFunctionInfo instance due to the isolate script cache.
+ CHECK_EQ(first_function->shared(), second_function->shared());
+}
TEST(CodeCache) {
v8::Isolate::CreateParams create_params;
@@ -26603,6 +26654,90 @@ TEST(ImportMetaThrowHandled) {
CHECK(!try_catch.HasCaught());
}
+v8::MaybeLocal<v8::Context> HostCreateShadowRealmContextCallbackStatic(
+ v8::Local<v8::Context> initiator_context) {
+ CHECK(!initiator_context.IsEmpty());
+ return v8::Context::New(initiator_context->GetIsolate());
+}
+
+TEST(CreateShadowRealmContextHostNotSupported) {
+ i::FLAG_harmony_shadow_realm = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ Local<String> url = v8_str("www.google.com");
+ Local<String> source_text = v8_str("new ShadowRealm()");
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, false);
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source).ToLocalChecked();
+
+ v8::TryCatch try_catch(isolate);
+ v8::MaybeLocal<v8::Value> result = script->Run(context.local());
+ CHECK(try_catch.HasCaught());
+ CHECK(result.IsEmpty());
+ CHECK(v8_str("Error: Not supported")
+ ->Equals(isolate->GetCurrentContext(),
+ try_catch.Exception()
+ ->ToString(isolate->GetCurrentContext())
+ .ToLocalChecked())
+ .FromJust());
+}
+
+TEST(CreateShadowRealmContext) {
+ i::FLAG_harmony_shadow_realm = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallbackStatic);
+
+ Local<String> url = v8_str("www.google.com");
+ Local<String> source_text = v8_str("new ShadowRealm()");
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, false);
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source).ToLocalChecked();
+
+ Local<Value> result = script->Run(context.local()).ToLocalChecked();
+ CHECK(result->IsObject());
+ i::Handle<i::Object> object = v8::Utils::OpenHandle(*result);
+ CHECK(object->IsJSShadowRealm());
+}
+
+v8::MaybeLocal<v8::Context> HostCreateShadowRealmContextCallbackThrow(
+ v8::Local<v8::Context> initiator_context) {
+ CcTest::isolate()->ThrowException(v8_num(42));
+ return v8::MaybeLocal<v8::Context>();
+}
+
+TEST(CreateShadowRealmContextThrow) {
+ i::FLAG_harmony_shadow_realm = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetHostCreateShadowRealmContextCallback(
+ HostCreateShadowRealmContextCallbackThrow);
+
+ Local<String> url = v8_str("www.google.com");
+ Local<String> source_text = v8_str("new ShadowRealm()");
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, false);
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source).ToLocalChecked();
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(script->Run(context.local()).IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(try_catch.Exception()->StrictEquals(v8_num(42)));
+}
+
TEST(GetModuleNamespace) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -27990,7 +28125,7 @@ struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>, void> {
v8::FastApiCallbackOptions& options) {
v8::Object* receiver_obj = *receiver;
if (!IsValidUnwrapObject(receiver_obj)) {
- options.fallback = 1;
+ options.fallback = true;
return;
}
ApiNumberChecker<T>* receiver_ptr =
@@ -28003,7 +28138,7 @@ struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>, void> {
// against after loading it from a stack slot, as defined in
// EffectControlLinearizer::LowerFastApiCall.
CHECK_EQ(options.fallback, 0);
- options.fallback = 1;
+ options.fallback = true;
}
}
@@ -28160,30 +28295,28 @@ bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
return try_catch.HasCaught();
}
-template <typename T>
-void CheckEqual(T actual, T expected) {
- CHECK_EQ(actual, expected);
-}
-
-template <>
-void CheckEqual<float>(float actual, float expected) {
- if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
- } else {
- // This differentiates between -0 and +0.
- CHECK_EQ(std::signbit(actual), std::signbit(expected));
- CHECK_EQ(actual, expected);
+template <typename I, std::enable_if_t<std::is_integral<I>::value, bool> = true>
+void CheckEqual(I actual, I expected, std::ostringstream& error_msg) {
+ if (actual != expected) {
+ error_msg << "Value mismatch (expected: " << expected
+ << ", actual: " << actual << ")";
}
}
-template <>
-void CheckEqual<double>(double actual, double expected) {
+template <typename F,
+ std::enable_if_t<std::is_floating_point<F>::value, bool> = true>
+void CheckEqual(F actual, F expected, std::ostringstream& error_msg) {
if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
+ if (!std::isnan(actual)) {
+ error_msg << "Value mismatch (expected: " << expected
+ << ", actual: " << actual << ")";
+ }
} else {
// This differentiates between -0 and +0.
- CHECK_EQ(std::signbit(actual), std::signbit(expected));
- CHECK_EQ(actual, expected);
+ if (std::signbit(actual) != std::signbit(expected) || actual != expected) {
+ error_msg << "Value mismatch (expected: " << expected
+ << ", actual: " << actual << ")";
+ }
}
}
@@ -28214,20 +28347,45 @@ void CallAndCheck(
}
CHECK_EQ(expected_behavior == Behavior::kException, has_caught);
- CHECK_EQ(expected_path == ApiCheckerResult::kSlowCalled,
- !checker.DidCallFast());
- CHECK_EQ(expected_path == ApiCheckerResult::kFastCalled,
- !checker.DidCallSlow());
+
+ std::ostringstream error_msg;
+ if (expected_path == ApiCheckerResult::kSlowCalled) {
+ if (checker.DidCallFast()) {
+ error_msg << "Fast path was called when only the default was expected. ";
+ }
+ }
+ if (expected_path == ApiCheckerResult::kFastCalled) {
+ if (checker.DidCallSlow()) {
+ error_msg << "Default path was called when no fallback was expected. ";
+ }
+ }
+ if (error_msg.str().length() > 0) {
+ error_msg << "Expected value was: " << expected_value;
+ CHECK_WITH_MSG(false, error_msg.str().c_str());
+ }
if (expected_path & ApiCheckerResult::kSlowCalled) {
- CHECK(checker.DidCallSlow());
+ if (!checker.DidCallSlow()) {
+ error_msg << "Default path was expected, but wasn't called. ";
+ }
if (expected_behavior != Behavior::kException) {
- CheckEqual(checker.slow_value_.ToChecked(), expected_value);
+ CheckEqual(checker.slow_value_.ToChecked(), expected_value, error_msg);
+ }
+ if (error_msg.str().length() > 0) {
+ error_msg << " from default path. ";
}
}
if (expected_path & ApiCheckerResult::kFastCalled) {
- CHECK(checker.DidCallFast());
- CheckEqual(checker.fast_value_, expected_value);
+ if (!checker.DidCallFast()) {
+ error_msg << "Fast path was expected, but wasn't called. ";
+ }
+ CheckEqual(checker.fast_value_, expected_value, error_msg);
+ if (error_msg.str().length() > 0) {
+ error_msg << " from fast path";
+ }
+ }
+ if (error_msg.str().length() > 0) {
+ CHECK_WITH_MSG(false, error_msg.str().c_str());
}
}
@@ -28995,13 +29153,13 @@ TEST(FastApiCalls) {
// Fallback to slow call and don't throw an exception.
CallAndCheck<int32_t>(
- 42, Behavior::kNoException,
- ApiCheckerResult::kFastCalled | ApiCheckerResult::kSlowCalled, v8_num(42),
+ 43, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled | ApiCheckerResult::kSlowCalled, v8_num(43),
Behavior::kNoException, FallbackPolicy::kRequestFallback);
// Doesn't fallback to slow call, so don't throw an exception.
CallAndCheck<int32_t>(
- 42, Behavior::kNoException, ApiCheckerResult::kFastCalled, v8_num(42),
+ 44, Behavior::kNoException, ApiCheckerResult::kFastCalled, v8_num(44),
Behavior::kNoException, FallbackPolicy::kDontRequestFallback);
// Wrong number of arguments
diff --git a/deps/v8/test/cctest/test-array-list.cc b/deps/v8/test/cctest/test-array-list.cc
index 40ddffb83b..88eefe5ba8 100644
--- a/deps/v8/test/cctest/test-array-list.cc
+++ b/deps/v8/test/cctest/test-array-list.cc
@@ -15,8 +15,7 @@ TEST(ArrayList) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Handle<ArrayList> array(
- ArrayList::cast(ReadOnlyRoots(isolate).empty_fixed_array()), isolate);
+ Handle<ArrayList> array = ReadOnlyRoots(isolate).empty_array_list_handle();
CHECK_EQ(0, array->Length());
array = ArrayList::Add(isolate, array, handle(Smi::FromInt(100), isolate));
CHECK_EQ(1, array->Length());
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 4265f2b367..43c5e276e2 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -169,7 +169,7 @@ TEST(3) {
Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ mov(r4, Operand(r0));
__ ldr(r0, MemOperand(r4, offsetof(T, i)));
@@ -183,7 +183,7 @@ TEST(3) {
__ add(r0, r2, Operand(r0));
__ mov(r2, Operand(r2, ASR, 3));
__ strh(r2, MemOperand(r4, offsetof(T, s)));
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -240,7 +240,7 @@ TEST(4) {
CpuFeatureScope scope(&assm, VFPv3);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ mov(r4, Operand(r0));
@@ -313,7 +313,7 @@ TEST(4) {
__ vmov(s0, Float32(-16.0f));
__ vstr(s0, r4, offsetof(T, p));
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -636,7 +636,7 @@ TEST(8) {
Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
@@ -655,7 +655,7 @@ TEST(8) {
__ vstm(ia_w, r4, s6, s7);
__ vstm(ia_w, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -741,7 +741,7 @@ TEST(9) {
Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
@@ -764,7 +764,7 @@ TEST(9) {
__ add(r4, r4, Operand(2 * 4));
__ vstm(ia, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -850,7 +850,7 @@ TEST(10) {
Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, h)) + 8));
@@ -869,7 +869,7 @@ TEST(10) {
__ vstm(db_w, r4, s0, s5);
__ vstm(db_w, r4, s6, s7);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -1030,7 +1030,7 @@ TEST(13) {
if (CpuFeatures::IsSupported(VFPv3)) {
CpuFeatureScope scope(&assm, VFPv3);
- __ stm(db_w, sp, r4.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, lr});
// Load a, b, c into d16, d17, d18.
__ mov(r4, Operand(r0));
@@ -1088,7 +1088,7 @@ TEST(13) {
__ vmov(NeonS32, r4, d22, 1);
__ str(r4, MemOperand(r0, offsetof(T, high)));
- __ ldm(ia_w, sp, r4.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -1329,7 +1329,7 @@ TEST(15) {
if (CpuFeatures::IsSupported(NEON)) {
CpuFeatureScope scope(&assm, NEON);
- __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, r5, lr});
// Move 32 bytes with neon.
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, src0))));
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
@@ -2164,7 +2164,7 @@ TEST(15) {
__ vstr(d2, r0, offsetof(T, vtbx));
// Restore and return.
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, r5, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -2446,7 +2446,7 @@ TEST(16) {
// the doubles and floats.
Assembler assm(AssemblerOptions{});
- __ stm(db_w, sp, r4.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, lr});
__ mov(r4, Operand(r0));
__ ldr(r0, MemOperand(r4, offsetof(T, src0)));
@@ -2468,7 +2468,7 @@ TEST(16) {
__ uxtab(r2, r0, r1, 8);
__ str(r2, MemOperand(r4, offsetof(T, dst4)));
- __ ldm(ia_w, sp, r4.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -2937,7 +2937,7 @@ TEST(code_relative_offset) {
Label start, target_away, target_faraway;
- __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, r5, lr});
// r3 is used as the address zero, the test will crash when we load it.
__ mov(r3, Operand::Zero());
@@ -2982,7 +2982,7 @@ TEST(code_relative_offset) {
// r0 = r0 + 5 + 5 + 11
__ add(r0, r0, Operand(11));
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, r5, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -3090,7 +3090,7 @@ TEST(ARMv8_float32_vrintX) {
CpuFeatureScope scope(&assm, ARMv8);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ mov(r4, Operand(r0));
@@ -3119,7 +3119,7 @@ TEST(ARMv8_float32_vrintX) {
__ vrintz(s5, s6);
__ vstr(s5, r4, offsetof(T, zr));
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -3191,7 +3191,7 @@ TEST(ARMv8_vrintX) {
CpuFeatureScope scope(&assm, ARMv8);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ mov(r4, Operand(r0));
@@ -3220,7 +3220,7 @@ TEST(ARMv8_vrintX) {
__ vrintz(d5, d6);
__ vstr(d5, r4, offsetof(T, zr));
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -3930,7 +3930,7 @@ TEST(vswp) {
};
T t;
- __ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, r5, r6, r7, lr});
uint64_t one = bit_cast<uint64_t>(1.0);
__ mov(r5, Operand(one >> 32));
@@ -3966,7 +3966,7 @@ TEST(vswp) {
__ add(r6, r0, Operand(static_cast<int32_t>(offsetof(T, vswp_q5))));
__ vst1(Neon8, NeonListOperand(q5), NeonMemOperand(r6));
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, r5, r6, r7, pc});
__ bx(lr);
CodeDesc desc;
@@ -4060,18 +4060,18 @@ TEST(use_scratch_register_scope) {
Assembler assm(AssemblerOptions{});
// The assembler should have ip as a scratch by default.
- CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
+ CHECK_EQ(*assm.GetScratchRegisterList(), RegList{ip});
{
UseScratchRegisterScope temps(&assm);
- CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
+ CHECK_EQ(*assm.GetScratchRegisterList(), RegList{ip});
Register scratch = temps.Acquire();
CHECK_EQ(scratch.code(), ip.code());
- CHECK_EQ(*assm.GetScratchRegisterList(), 0);
+ CHECK_EQ(*assm.GetScratchRegisterList(), RegList{});
}
- CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
+ CHECK_EQ(*assm.GetScratchRegisterList(), RegList{ip});
}
TEST(use_scratch_vfp_register_scope) {
@@ -4342,8 +4342,7 @@ TEST(move_pair) {
HandleScope scope(isolate);
auto f = AssembleCode<F_piiii>(isolate, [](MacroAssembler& assm) {
- RegList used_callee_saved =
- r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit();
+ RegList used_callee_saved = {r4, r5, r6, r7, r8};
__ stm(db_w, sp, used_callee_saved);
// Save output register bank pointer to r8.
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 0c41602514..fea98df487 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -9571,7 +9571,7 @@ TEST(fcmp) {
// test. A UseScratchRegisterScope will make sure that they are restored to
// the default values once we're finished.
UseScratchRegisterScope temps(&masm);
- masm.FPTmpList()->set_list(0);
+ masm.FPTmpList()->set_bits(0);
__ Fmov(s8, 0.0);
__ Fmov(s9, 0.5);
@@ -9590,9 +9590,9 @@ TEST(fcmp) {
__ Mrs(x4, NZCV);
__ Fcmp(s8, 0.0);
__ Mrs(x5, NZCV);
- masm.FPTmpList()->set_list(d0.bit());
+ masm.FPTmpList()->set_bits(DoubleRegList{d0}.bits());
__ Fcmp(s8, 255.0);
- masm.FPTmpList()->set_list(0);
+ masm.FPTmpList()->set_bits(0);
__ Mrs(x6, NZCV);
__ Fmov(d19, 0.0);
@@ -9612,9 +9612,9 @@ TEST(fcmp) {
__ Mrs(x14, NZCV);
__ Fcmp(d19, 0.0);
__ Mrs(x15, NZCV);
- masm.FPTmpList()->set_list(d0.bit());
+ masm.FPTmpList()->set_bits(DoubleRegList{d0}.bits());
__ Fcmp(d19, 12.3456);
- masm.FPTmpList()->set_list(0);
+ masm.FPTmpList()->set_bits(0);
__ Mrs(x16, NZCV);
}
@@ -12026,27 +12026,27 @@ TEST(register_bit) {
// teardown.
// Simple tests.
- CHECK_EQ(x0.bit(), 1ULL << 0);
- CHECK_EQ(x1.bit(), 1ULL << 1);
- CHECK_EQ(x10.bit(), 1ULL << 10);
+ CHECK_EQ(RegList{x0}.bits(), 1ULL << 0);
+ CHECK_EQ(RegList{x1}.bits(), 1ULL << 1);
+ CHECK_EQ(RegList{x10}.bits(), 1ULL << 10);
// AAPCS64 definitions.
- CHECK_EQ(fp.bit(), 1ULL << kFramePointerRegCode);
- CHECK_EQ(lr.bit(), 1ULL << kLinkRegCode);
+ CHECK_EQ(RegList{fp}.bits(), 1ULL << kFramePointerRegCode);
+ CHECK_EQ(RegList{lr}.bits(), 1ULL << kLinkRegCode);
// Fixed (hardware) definitions.
- CHECK_EQ(xzr.bit(), 1ULL << kZeroRegCode);
+ CHECK_EQ(RegList{xzr}.bits(), 1ULL << kZeroRegCode);
// Internal ABI definitions.
- CHECK_EQ(sp.bit(), 1ULL << kSPRegInternalCode);
- CHECK_NE(sp.bit(), xzr.bit());
+ CHECK_EQ(RegList{sp}.bits(), 1ULL << kSPRegInternalCode);
+ CHECK_NE(RegList{sp}.bits(), RegList{xzr}.bits());
- // xn.bit() == wn.bit() at all times, for the same n.
- CHECK_EQ(x0.bit(), w0.bit());
- CHECK_EQ(x1.bit(), w1.bit());
- CHECK_EQ(x10.bit(), w10.bit());
- CHECK_EQ(xzr.bit(), wzr.bit());
- CHECK_EQ(sp.bit(), wsp.bit());
+ // RegList{xn}.bits() == RegList{wn}.bits() at all times, for the same n.
+ CHECK_EQ(RegList{x0}.bits(), RegList{w0}.bits());
+ CHECK_EQ(RegList{x1}.bits(), RegList{w1}.bits());
+ CHECK_EQ(RegList{x10}.bits(), RegList{w10}.bits());
+ CHECK_EQ(RegList{xzr}.bits(), RegList{wzr}.bits());
+ CHECK_EQ(RegList{sp}.bits(), RegList{wsp}.bits());
}
TEST(peek_poke_simple) {
@@ -12054,9 +12054,8 @@ TEST(peek_poke_simple) {
SETUP();
START();
- static const RegList x0_to_x3 = x0.bit() | x1.bit() | x2.bit() | x3.bit();
- static const RegList x10_to_x13 =
- x10.bit() | x11.bit() | x12.bit() | x13.bit();
+ static const RegList x0_to_x3 = {x0, x1, x2, x3};
+ static const RegList x10_to_x13 = {x10, x11, x12, x13};
// The literal base is chosen to have two useful properties:
// * When multiplied by small values (such as a register index), this value
@@ -12141,35 +12140,35 @@ TEST(peek_poke_unaligned) {
// x0-x6 should be unchanged.
// w10-w12 should contain the lower words of x0-x2.
__ Poke(x0, 1);
- Clobber(&masm, x0.bit());
+ Clobber(&masm, {x0});
__ Peek(x0, 1);
__ Poke(x1, 2);
- Clobber(&masm, x1.bit());
+ Clobber(&masm, {x1});
__ Peek(x1, 2);
__ Poke(x2, 3);
- Clobber(&masm, x2.bit());
+ Clobber(&masm, {x2});
__ Peek(x2, 3);
__ Poke(x3, 4);
- Clobber(&masm, x3.bit());
+ Clobber(&masm, {x3});
__ Peek(x3, 4);
__ Poke(x4, 5);
- Clobber(&masm, x4.bit());
+ Clobber(&masm, {x4});
__ Peek(x4, 5);
__ Poke(x5, 6);
- Clobber(&masm, x5.bit());
+ Clobber(&masm, {x5});
__ Peek(x5, 6);
__ Poke(x6, 7);
- Clobber(&masm, x6.bit());
+ Clobber(&masm, {x6});
__ Peek(x6, 7);
__ Poke(w0, 1);
- Clobber(&masm, w10.bit());
+ Clobber(&masm, {w10});
__ Peek(w10, 1);
__ Poke(w1, 2);
- Clobber(&masm, w11.bit());
+ Clobber(&masm, {w11});
__ Peek(w11, 2);
__ Poke(w2, 3);
- Clobber(&masm, w12.bit());
+ Clobber(&masm, {w12});
__ Peek(w12, 3);
__ Drop(4);
@@ -12332,9 +12331,11 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
// For simplicity, exclude LR as well, as we would need to sign it when
// pushing it. This also ensures that the list has an even number of elements,
// which is needed for alignment.
- RegList allowed = ~(masm.TmpList()->list() | x18.bit() | lr.bit());
+ static RegList const allowed =
+ RegList::FromBits(static_cast<uint32_t>(~masm.TmpList()->bits())) -
+ RegList{x18, lr};
if (reg_count == kPushPopMaxRegCount) {
- reg_count = CountSetBits(allowed, kNumberOfRegisters);
+ reg_count = CountSetBits(allowed.bits(), kNumberOfRegisters);
}
DCHECK_EQ(reg_count % 2, 0);
// Work out which registers to use, based on reg_size.
@@ -12480,15 +12481,15 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
// We can use any floating-point register. None of them are reserved for
// debug code, for example.
- static RegList const allowed = ~0;
+ static DoubleRegList const allowed = DoubleRegList::FromBits(~0);
if (reg_count == kPushPopFPMaxRegCount) {
- reg_count = CountSetBits(allowed, kNumberOfVRegisters);
+ reg_count = CountSetBits(allowed.bits(), kNumberOfVRegisters);
}
// Work out which registers to use, based on reg_size.
auto v = CreateRegisterArray<VRegister, kNumberOfRegisters>();
auto d = CreateRegisterArray<VRegister, kNumberOfRegisters>();
- RegList list = PopulateVRegisterArray(nullptr, d.data(), v.data(), reg_size,
- reg_count, allowed);
+ DoubleRegList list = PopulateVRegisterArray(nullptr, d.data(), v.data(),
+ reg_size, reg_count, allowed);
// The literal base is chosen to have two useful properties:
// * When multiplied (using an integer) by small values (such as a register
@@ -12530,7 +12531,7 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PushSizeRegList(list, reg_size, CPURegister::kVRegister);
+ __ PushSizeRegList(list, reg_size);
break;
}
@@ -12554,7 +12555,7 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PopSizeRegList(list, reg_size, CPURegister::kVRegister);
+ __ PopSizeRegList(list, reg_size);
break;
}
}
@@ -12627,24 +12628,25 @@ static void PushPopMixedMethodsHelper(int reg_size) {
// Registers in the TmpList can be used by the macro assembler for debug code
// (for example in 'Pop'), so we can't use them here.
- static RegList const allowed = ~(masm.TmpList()->list());
+ static RegList const allowed =
+ RegList::FromBits(static_cast<uint32_t>(~masm.TmpList()->bits()));
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, 10>();
auto x = CreateRegisterArray<Register, 10>();
PopulateRegisterArray(nullptr, x.data(), r.data(), reg_size, 10, allowed);
// Calculate some handy register lists.
- RegList r0_to_r3 = 0;
+ RegList r0_to_r3;
for (int i = 0; i <= 3; i++) {
- r0_to_r3 |= x[i].bit();
+ r0_to_r3.set(x[i]);
}
- RegList r4_to_r5 = 0;
+ RegList r4_to_r5;
for (int i = 4; i <= 5; i++) {
- r4_to_r5 |= x[i].bit();
+ r4_to_r5.set(x[i]);
}
- RegList r6_to_r9 = 0;
+ RegList r6_to_r9;
for (int i = 6; i <= 9; i++) {
- r6_to_r9 |= x[i].bit();
+ r6_to_r9.set(x[i]);
}
// The literal base is chosen to have two useful properties:
@@ -12706,17 +12708,17 @@ TEST(push_pop) {
__ Mov(x1, 0x1111111111111111UL);
__ Mov(x0, 0x0000000000000000UL);
__ Claim(2);
- __ PushXRegList(x0.bit() | x1.bit() | x2.bit() | x3.bit());
+ __ PushXRegList({x0, x1, x2, x3});
__ Push(x3, x2);
- __ PopXRegList(x0.bit() | x1.bit() | x2.bit() | x3.bit());
+ __ PopXRegList({x0, x1, x2, x3});
__ Push(x2, x1, x3, x0);
__ Pop(x4, x5);
__ Pop(x6, x7, x8, x9);
__ Claim(2);
- __ PushWRegList(w0.bit() | w1.bit() | w2.bit() | w3.bit());
+ __ PushWRegList({w0, w1, w2, w3});
__ Push(w3, w1, w2, w0);
- __ PopWRegList(w10.bit() | w11.bit() | w12.bit() | w13.bit());
+ __ PopWRegList({w10, w11, w12, w13});
__ Pop(w14, w15, w16, w17);
__ Claim(2);
@@ -12726,18 +12728,18 @@ TEST(push_pop) {
__ Pop(x22, x23);
__ Claim(2);
- __ PushXRegList(x1.bit() | x22.bit());
- __ PopXRegList(x24.bit() | x26.bit());
+ __ PushXRegList({x1, x22});
+ __ PopXRegList({x24, x26});
__ Claim(2);
- __ PushWRegList(w1.bit() | w2.bit() | w4.bit() | w22.bit());
- __ PopWRegList(w25.bit() | w27.bit() | w28.bit() | w29.bit());
+ __ PushWRegList({w1, w2, w4, w22});
+ __ PopWRegList({w25, w27, w28, w29});
__ Claim(2);
- __ PushXRegList(0);
- __ PopXRegList(0);
+ __ PushXRegList({});
+ __ PopXRegList({});
// Don't push/pop x18 (platform register) or lr
- RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | lr.bit());
+ RegList all_regs = RegList::FromBits(0xFFFFFFFF) - RegList{x18, lr};
__ PushXRegList<TurboAssembler::kDontStoreLR>(all_regs);
__ PopXRegList<TurboAssembler::kDontLoadLR>(all_regs);
__ Drop(12);
@@ -13891,10 +13893,10 @@ TEST(cpureglist_utils_empty) {
// Test an empty list.
// Empty lists can have type and size properties. Check that we can create
// them, and that they are empty.
- CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
- CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
- CPURegList fpreg32(CPURegister::kVRegister, kSRegSizeInBits, 0);
- CPURegList fpreg64(CPURegister::kVRegister, kDRegSizeInBits, 0);
+ CPURegList reg32(kWRegSizeInBits, RegList{});
+ CPURegList reg64(kXRegSizeInBits, RegList{});
+ CPURegList fpreg32(kSRegSizeInBits, DoubleRegList{});
+ CPURegList fpreg64(kDRegSizeInBits, DoubleRegList{});
CHECK(reg32.IsEmpty());
CHECK(reg64.IsEmpty());
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index c5a0950019..f27344ff49 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -38,13 +38,14 @@
#include "src/init/v8.h"
#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
-using F0 = int (*)();
-using F1 = int (*)(int x);
-using F2 = int (*)(int x, int y);
+using F0 = int();
+using F1 = int(int x);
+using F2 = int(int x, int y);
#define __ assm.
@@ -69,8 +70,8 @@ TEST(AssemblerIa320) {
StdoutStream os;
code->Print(os);
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = f(3, 4);
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ auto res = f.Call(3, 4);
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
}
@@ -107,8 +108,8 @@ TEST(AssemblerIa321) {
StdoutStream os;
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = f(100);
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int res = f.Call(100);
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
}
@@ -149,13 +150,13 @@ TEST(AssemblerIa322) {
StdoutStream os;
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = f(10);
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int res = f.Call(10);
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
}
-using F3 = int (*)(float x);
+using F3 = int(float x);
TEST(AssemblerIa323) {
CcTest::InitializeVM();
@@ -178,13 +179,13 @@ TEST(AssemblerIa323) {
StdoutStream os;
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- int res = f(static_cast<float>(-3.1415));
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ int res = f.Call(-3.1415f);
::printf("f() = %d\n", res);
CHECK_EQ(-3, res);
}
-using F4 = int (*)(double x);
+using F4 = int(double x);
TEST(AssemblerIa324) {
CcTest::InitializeVM();
@@ -207,8 +208,8 @@ TEST(AssemblerIa324) {
StdoutStream os;
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
- int res = f(2.718281828);
+ auto f = GeneratedCode<F4>::FromCode(*code);
+ int res = f.Call(2.718281828);
::printf("f() = %d\n", res);
CHECK_EQ(2, res);
}
@@ -231,12 +232,12 @@ TEST(AssemblerIa325) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- F0 f = FUNCTION_CAST<F0>(code->entry());
- int res = f();
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ int res = f.Call();
CHECK_EQ(42, res);
}
-using F5 = double (*)(double x, double y);
+using F5 = double(double x, double y);
TEST(AssemblerIa326) {
CcTest::InitializeVM();
@@ -268,13 +269,13 @@ TEST(AssemblerIa326) {
StdoutStream os;
code->Print(os);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
- double res = f(2.2, 1.1);
+ auto f = GeneratedCode<F5>::FromCode(*code);
+ double res = f.Call(2.2, 1.1);
::printf("f() = %f\n", res);
CHECK(2.29 < res && res < 2.31);
}
-using F6 = double (*)(int x);
+using F6 = double(int x);
TEST(AssemblerIa328) {
CcTest::InitializeVM();
@@ -300,8 +301,8 @@ TEST(AssemblerIa328) {
StdoutStream os;
code->Print(os);
#endif
- F6 f = FUNCTION_CAST<F6>(code->entry());
- double res = f(12);
+ auto f = GeneratedCode<F6>::FromCode(*code);
+ double res = f.Call(12);
::printf("f() = %f\n", res);
CHECK(11.99 < res && res < 12.001);
@@ -381,8 +382,8 @@ TEST(AssemblerMultiByteNop) {
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
- F0 f = FUNCTION_CAST<F0>(code->entry());
- int res = f();
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ int res = f.Call();
CHECK_EQ(42, res);
}
@@ -431,8 +432,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- F0 f = FUNCTION_CAST<F0>(code->entry());
- int res = f();
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ int res = f.Call();
args.GetReturnValue().Set(v8::Integer::New(CcTest::isolate(), res));
}
@@ -500,14 +501,15 @@ TEST(AssemblerIa32Extractps) {
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
uint64_t value1 = 0x1234'5678'8765'4321;
- CHECK_EQ(0x12345678, f(base::uint64_to_double(value1)));
+ CHECK_EQ(0x12345678, f.Call(base::uint64_to_double(value1)));
uint64_t value2 = 0x8765'4321'1234'5678;
- CHECK_EQ(static_cast<int>(0x87654321), f(base::uint64_to_double(value2)));
+ CHECK_EQ(static_cast<int>(0x87654321),
+ f.Call(base::uint64_to_double(value2)));
}
-using F8 = int (*)(float x, float y);
+using F8 = int(float x, float y);
TEST(AssemblerIa32SSE) {
CcTest::InitializeVM();
@@ -539,8 +541,8 @@ TEST(AssemblerIa32SSE) {
code->Print(os);
#endif
- F8 f = FUNCTION_CAST<F8>(code->entry());
- CHECK_EQ(2, f(1.0, 2.0));
+ auto f = GeneratedCode<F8>::FromCode(*code);
+ CHECK_EQ(2, f.Call(1.0, 2.0));
}
TEST(AssemblerIa32SSE3) {
@@ -572,11 +574,11 @@ TEST(AssemblerIa32SSE3) {
code->Print(os);
#endif
- F8 f = FUNCTION_CAST<F8>(code->entry());
- CHECK_EQ(4, f(1.0, 2.0));
+ auto f = GeneratedCode<F8>::FromCode(*code);
+ CHECK_EQ(4, f.Call(1.0, 2.0));
}
-using F9 = int (*)(double x, double y, double z);
+using F9 = int(double x, double y, double z);
TEST(AssemblerX64FMA_sd) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -800,11 +802,12 @@ TEST(AssemblerX64FMA_sd) {
code->Print(os);
#endif
- F9 f = FUNCTION_CAST<F9>(code->entry());
- CHECK_EQ(0, f(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
+ auto f = GeneratedCode<F9>::FromCode(*code);
+ CHECK_EQ(
+ 0, f.Call(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
-using F10 = int (*)(float x, float y, float z);
+using F10 = int(float x, float y, float z);
TEST(AssemblerX64FMA_ss) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -1028,8 +1031,8 @@ TEST(AssemblerX64FMA_ss) {
code->Print(os);
#endif
- F10 f = FUNCTION_CAST<F10>(code->entry());
- CHECK_EQ(0, f(9.26621069e-05f, -2.4607749f, -1.09587872f));
+ auto f = GeneratedCode<F10>::FromCode(*code);
+ CHECK_EQ(0, f.Call(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
@@ -1136,8 +1139,8 @@ TEST(AssemblerIa32BMI1) {
code->Print(os);
#endif
- F0 f = FUNCTION_CAST<F0>(code->entry());
- CHECK_EQ(0, f());
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ CHECK_EQ(0, f.Call());
}
@@ -1184,8 +1187,8 @@ TEST(AssemblerIa32LZCNT) {
code->Print(os);
#endif
- F0 f = FUNCTION_CAST<F0>(code->entry());
- CHECK_EQ(0, f());
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ CHECK_EQ(0, f.Call());
}
@@ -1232,8 +1235,8 @@ TEST(AssemblerIa32POPCNT) {
code->Print(os);
#endif
- F0 f = FUNCTION_CAST<F0>(code->entry());
- CHECK_EQ(0, f());
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ CHECK_EQ(0, f.Call());
}
@@ -1378,8 +1381,8 @@ TEST(AssemblerIa32BMI2) {
code->Print(os);
#endif
- F0 f = FUNCTION_CAST<F0>(code->entry());
- CHECK_EQ(0, f());
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ CHECK_EQ(0, f.Call());
}
@@ -1421,9 +1424,9 @@ TEST(AssemblerIa32JumpTables1) {
StdoutStream os;
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int res = f(i);
+ int res = f.Call(i);
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -1469,9 +1472,9 @@ TEST(AssemblerIa32JumpTables2) {
StdoutStream os;
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int res = f(i);
+ int res = f.Call(i);
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -1514,8 +1517,8 @@ TEST(Regress621926) {
code->Print(os);
#endif
- F0 f = FUNCTION_CAST<F0>(code->entry());
- CHECK_EQ(1, f());
+ auto f = GeneratedCode<F0>::FromCode(*code);
+ CHECK_EQ(1, f.Call());
}
TEST(DeoptExitSizeIsFixed) {
diff --git a/deps/v8/test/cctest/test-assembler-loong64.cc b/deps/v8/test/cctest/test-assembler-loong64.cc
index d9ad4d9015..0bc18b5fd2 100644
--- a/deps/v8/test/cctest/test-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-assembler-loong64.cc
@@ -1546,10 +1546,10 @@ TEST(LA11) {
CHECK_EQ(static_cast<int64_t>(0x81a15c3000), t.result_bstrins_d_si2);
CHECK_EQ(static_cast<int64_t>(0x1e), t.result_bstrpick_d_si1);
CHECK_EQ(static_cast<int64_t>(0xfb80), t.result_bstrpick_d_si2);
- CHECK_EQ(static_cast<int64_t>(0), t.result_maskeqz_si1);
- CHECK_EQ(static_cast<int64_t>(0xFB8017FF781A15C3), t.result_maskeqz_si2);
- CHECK_EQ(static_cast<int64_t>(0x10C021098B710CDE), t.result_masknez_si1);
- CHECK_EQ(static_cast<int64_t>(0), t.result_masknez_si2);
+ CHECK_EQ(static_cast<int64_t>(0x10C021098B710CDE), t.result_maskeqz_si1);
+ CHECK_EQ(static_cast<int64_t>(0), t.result_maskeqz_si2);
+ CHECK_EQ(static_cast<int64_t>(0), t.result_masknez_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFB8017FF781A15C3), t.result_masknez_si2);
}
uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) {
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index d7bdd7e2cd..0ee531a8bc 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -263,7 +263,7 @@ TEST(4) {
CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ mov(r4, Operand(r0));
@@ -321,7 +321,7 @@ TEST(4) {
__ vneg(d0, d1);
__ vstr(d0, r4, offsetof(T, n));
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -659,7 +659,7 @@ TEST(8) {
CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ addi(r4, r0, Operand(offsetof(D, a)));
@@ -678,7 +678,7 @@ TEST(8) {
__ vstm(ia_w, r4, s6, s7);
__ vstm(ia_w, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -770,7 +770,7 @@ TEST(9) {
CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ addi(r4, r0, Operand(offsetof(D, a)));
@@ -793,7 +793,7 @@ TEST(9) {
__ addi(r4, r4, Operand(2 * 4));
__ vstm(ia, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -885,7 +885,7 @@ TEST(10) {
CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, fp, lr});
__ sub(fp, ip, Operand(4));
__ addi(r4, r0, Operand(offsetof(D, h) + 8));
@@ -904,7 +904,7 @@ TEST(10) {
__ vstm(db_w, r4, s0, s5);
__ vstm(db_w, r4, s6, s7);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, fp, pc});
CodeDesc desc;
assm.GetCode(isolate, &desc);
diff --git a/deps/v8/test/cctest/test-assembler-riscv64.cc b/deps/v8/test/cctest/test-assembler-riscv64.cc
index a9940c09d8..0e18c468ed 100644
--- a/deps/v8/test/cctest/test-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-assembler-riscv64.cc
@@ -1977,38 +1977,29 @@ TEST(li_estimate) {
}
}
-#define UTEST_LOAD_STORE_RVV(ldname, stname, SEW, arg...) \
- TEST(RISCV_UTEST_##stname##ldname##SEW) { \
- CcTest::InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- int8_t src[16] = {arg}; \
- int8_t dst[16]; \
- auto fn = [](MacroAssembler& assm) { \
- __ VU.set(t0, SEW, Vlmul::m1); \
- __ vl(v2, a0, 0, VSew::E8); \
- __ vs(v2, a1, 0, VSew::E8); \
- }; \
- GenAndRunTest<int32_t, int64_t>((int64_t)src, (int64_t)dst, fn); \
- CHECK(!memcmp(src, dst, sizeof(src))); \
+#define UTEST_LOAD_STORE_RVV(ldname, stname, SEW, arry) \
+ TEST(RISCV_UTEST_##stname##ldname##SEW) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ int8_t src[16]; \
+ for (size_t i = 0; i < sizeof(src); i++) src[i] = arry[i % arry.size()]; \
+ int8_t dst[16]; \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, SEW, Vlmul::m1); \
+ __ vl(v2, a0, 0, SEW); \
+ __ vs(v2, a1, 0, SEW); \
+ }; \
+ GenAndRunTest<int32_t, int64_t>((int64_t)src, (int64_t)dst, fn); \
+ CHECK(!memcmp(src, dst, sizeof(src))); \
}
-UTEST_LOAD_STORE_RVV(vl, vs, E8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16)
-// UTEST_LOAD_STORE_RVV(vl, vs, E8, 127, 127, 127, 127, 127, 127, 127)
-
-TEST(RVV_VSETIVLI) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- auto fn = [](MacroAssembler& assm) {
- __ VU.set(t0, VSew::E8, Vlmul::m1);
- __ vsetivli(t0, 16, VSew::E64, Vlmul::m1);
- };
- GenAndRunTest(fn);
-}
+UTEST_LOAD_STORE_RVV(vl, vs, E8, compiler::ValueHelper::GetVector<int8_t>())
TEST(RVV_VFMV) {
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return;
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2036,18 +2027,22 @@ inline int32_t ToImm5(int32_t v) {
// Tests for vector integer arithmetic instructions between vector and vector
#define UTEST_RVV_VI_VV_FORM_WITH_RES(instr_name, width, array, expect_res) \
TEST(RISCV_UTEST_##instr_name##_##width) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
- auto fn = [](MacroAssembler& assm) { \
+ int##width##_t result[kRvvVLEN / width] = {0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E##width, Vlmul::m1); \
__ vmv_vx(v0, a0); \
__ vmv_vx(v1, a1); \
__ instr_name(v0, v0, v1); \
- __ vmv_xs(a0, v0); \
+ __ li(t1, int64_t(result)); \
+ __ vs(v0, t1, 0, VSew::E##width); \
}; \
for (int##width##_t rs1_val : array) { \
for (int##width##_t rs2_val : array) { \
- auto res = GenAndRunTest<int32_t, int32_t>(rs1_val, rs2_val, fn); \
- CHECK_EQ(static_cast<int##width##_t>(expect_res), res); \
+ GenAndRunTest<int32_t, int32_t>(rs1_val, rs2_val, fn); \
+ for (int i = 0; i < kRvvVLEN / width; i++) \
+ CHECK_EQ(static_cast<int##width##_t>(expect_res), result[i]); \
} \
} \
}
@@ -2055,17 +2050,21 @@ inline int32_t ToImm5(int32_t v) {
// Tests for vector integer arithmetic instructions between vector and scalar
#define UTEST_RVV_VI_VX_FORM_WITH_RES(instr_name, width, array, expect_res) \
TEST(RISCV_UTEST_##instr_name##_##width) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
- auto fn = [](MacroAssembler& assm) { \
+ int##width##_t result[kRvvVLEN / width] = {0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E##width, Vlmul::m1); \
__ vmv_vx(v0, a0); \
__ instr_name(v0, v0, a1); \
- __ vmv_xs(a0, v0); \
+ __ li(t1, int64_t(result)); \
+ __ vs(v0, t1, 0, VSew::E##width); \
}; \
for (int##width##_t rs1_val : array) { \
for (int##width##_t rs2_val : array) { \
- auto res = GenAndRunTest<int32_t, int32_t>(rs1_val, rs2_val, fn); \
- CHECK_EQ(static_cast<int##width##_t>(expect_res), res); \
+ GenAndRunTest<int32_t, int32_t>(rs1_val, rs2_val, fn); \
+ for (int i = 0; i < kRvvVLEN / width; i++) \
+ CHECK_EQ(static_cast<int##width##_t>(expect_res), result[i]); \
} \
} \
}
@@ -2074,17 +2073,21 @@ inline int32_t ToImm5(int32_t v) {
// immediate
#define UTEST_RVV_VI_VI_FORM_WITH_RES(instr_name, width, array, expect_res) \
TEST(RISCV_UTEST_##instr_name##_##width) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
+ int##width##_t result[kRvvVLEN / width] = {0}; \
for (int##width##_t rs1_val : array) { \
for (int##width##_t rs2_val : array) { \
- auto fn = [rs2_val](MacroAssembler& assm) { \
+ auto fn = [rs2_val, &result](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E##width, Vlmul::m1); \
__ vmv_vx(v0, a0); \
__ instr_name(v0, v0, ToImm5(rs2_val)); \
- __ vmv_xs(a0, v0); \
+ __ li(t1, int64_t(result)); \
+ __ vs(v0, t1, 0, VSew::E##width); \
}; \
- auto res = GenAndRunTest<int32_t, int32_t>(rs1_val, fn); \
- CHECK_EQ(static_cast<int##width##_t>(expect_res), res); \
+ GenAndRunTest<int32_t, int32_t>(rs1_val, fn); \
+ for (int i = 0; i < kRvvVLEN / width; i++) \
+ CHECK_EQ(static_cast<int##width##_t>(expect_res), result[i]); \
} \
} \
}
@@ -2181,28 +2184,59 @@ UTEST_RVV_VI_VX_FORM_WITH_FN(vminu_vx, 32, ARRAY_INT32, std::min<uint32_t>)
// Tests for vector single-width floating-point arithmetic instructions between
// vector and vector
-#define UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, array, expect_res) \
- TEST(RISCV_UTEST_##instr_name) { \
- CcTest::InitializeVM(); \
- auto fn = [](MacroAssembler& assm) { \
- __ VU.set(t0, VSew::E32, Vlmul::m1); \
- __ vfmv_vf(v0, fa0); \
- __ vfmv_vf(v1, fa1); \
- __ instr_name(v0, v0, v1); \
- __ vfmv_fs(fa0, v0); \
- }; \
- for (float rs1_fval : array) { \
- for (float rs2_fval : array) { \
- auto res = GenAndRunTest<float, float>(rs1_fval, rs2_fval, fn); \
- CHECK_FLOAT_EQ(UseCanonicalNan<float>(expect_res), res); \
- } \
- } \
+#define UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, expect_res) \
+ TEST(RISCV_UTEST_FLOAT_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ float result[4] = {0.0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ __ vfmv_vf(v0, fa0); \
+ __ vfmv_vf(v1, fa1); \
+ __ instr_name(v0, v0, v1); \
+ __ vfmv_fs(fa0, v0); \
+ __ li(a3, Operand(int64_t(result))); \
+ __ vs(v0, a3, 0, E32); \
+ }; \
+ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \
+ for (float rs2_fval : compiler::ValueHelper::GetVector<float>()) { \
+ GenAndRunTest<float, float>(rs1_fval, rs2_fval, fn); \
+ for (int i = 0; i < 4; i++) { \
+ CHECK_FLOAT_EQ(UseCanonicalNan<float>(expect_res), result[i]); \
+ result[i] = 0.0; \
+ } \
+ } \
+ } \
+ } \
+ TEST(RISCV_UTEST_DOUBLE_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ double result[2] = {0.0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E64, Vlmul::m1); \
+ __ vfmv_vf(v0, fa0); \
+ __ vfmv_vf(v1, fa1); \
+ __ instr_name(v0, v0, v1); \
+ __ vfmv_fs(fa0, v0); \
+ __ li(a3, Operand(int64_t(result))); \
+ __ vs(v0, a3, 0, E64); \
+ }; \
+ for (double rs1_fval : compiler::ValueHelper::GetVector<double>()) { \
+ for (double rs2_fval : compiler::ValueHelper::GetVector<double>()) { \
+ GenAndRunTest<double, double>(rs1_fval, rs2_fval, fn); \
+ for (int i = 0; i < 2; i++) { \
+ CHECK_DOUBLE_EQ(UseCanonicalNan<double>(expect_res), result[i]); \
+ result[i] = 0.0; \
+ } \
+ } \
+ } \
}
// Tests for vector single-width floating-point arithmetic instructions between
// vector and scalar
#define UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, array, expect_res) \
TEST(RISCV_UTEST_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
auto fn = [](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E32, Vlmul::m1); \
@@ -2218,35 +2252,226 @@ UTEST_RVV_VI_VX_FORM_WITH_FN(vminu_vx, 32, ARRAY_INT32, std::min<uint32_t>)
} \
}
-#define UTEST_RVV_VF_VV_FORM_WITH_OP(instr_name, array, tested_op) \
- UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, array, \
- ((rs1_fval)tested_op(rs2_fval)))
+#define UTEST_RVV_VF_VV_FORM_WITH_OP(instr_name, tested_op) \
+ UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
-#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, array, tested_op) \
- UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, array, \
- ((rs1_fval)tested_op(rs2_fval)))
+#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, tested_op) \
+ UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
-#define ARRAY_FLOAT compiler::ValueHelper::GetVector<float>()
-
-UTEST_RVV_VF_VV_FORM_WITH_OP(vfadd_vv, ARRAY_FLOAT, +)
+UTEST_RVV_VF_VV_FORM_WITH_OP(vfadd_vv, +)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfadd_vf, ARRAY_FLOAT, +)
-UTEST_RVV_VF_VV_FORM_WITH_OP(vfsub_vv, ARRAY_FLOAT, -)
+UTEST_RVV_VF_VV_FORM_WITH_OP(vfsub_vv, -)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfsub_vf, ARRAY_FLOAT, -)
-UTEST_RVV_VF_VV_FORM_WITH_OP(vfmul_vv, ARRAY_FLOAT, *)
+UTEST_RVV_VF_VV_FORM_WITH_OP(vfmul_vv, *)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfmul_vf, ARRAY_FLOAT, *)
-UTEST_RVV_VF_VV_FORM_WITH_OP(vfdiv_vv, ARRAY_FLOAT, /)
+UTEST_RVV_VF_VV_FORM_WITH_OP(vfdiv_vv, /)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfdiv_vf, ARRAY_FLOAT, /)
#undef ARRAY_FLOAT
#undef UTEST_RVV_VF_VV_FORM_WITH_OP
#undef UTEST_RVV_VF_VF_FORM_WITH_OP
-#undef UTEST_RVV_VF_VV_FORM
-#undef UTEST_RVV_VF_VF_FORM
+
+// Tests for vector widening floating-point arithmetic instructions between
+// vector and vector
+#define UTEST_RVV_VFW_VV_FORM_WITH_RES(instr_name, tested_op, is_first_double, \
+ check_fn) \
+ TEST(RISCV_UTEST_FLOAT_WIDENING_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ constexpr size_t n = kRvvVLEN / 32; \
+ double result[n] = {0.0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
+ if (is_first_double) { \
+ __ fcvt_d_s(fa0, fa0); \
+ __ VU.set(t0, VSew::E64, Vlmul::m2); \
+ __ vfmv_vf(v2, fa0); \
+ } \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ if (!is_first_double) { \
+ __ vfmv_vf(v2, fa0); \
+ } \
+ __ vfmv_vf(v4, fa1); \
+ __ instr_name(v0, v2, v4); \
+ __ li(t1, Operand(int64_t(result))); \
+ __ vs(v0, t1, 0, VSew::E64); \
+ }; \
+ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \
+ for (float rs2_fval : compiler::ValueHelper::GetVector<float>()) { \
+ GenAndRunTest<double, float>(rs1_fval, rs2_fval, fn); \
+ for (size_t i = 0; i < n; i++) { \
+ CHECK_DOUBLE_EQ( \
+ check_fn(rs1_fval, rs2_fval) \
+ ? std::numeric_limits<double>::quiet_NaN() \
+ : UseCanonicalNan<double>(static_cast<double>( \
+ rs1_fval) tested_op static_cast<double>(rs2_fval)), \
+ result[i]); \
+ result[i] = 0.0; \
+ } \
+ } \
+ } \
+ }
+
+// Tests for vector widening floating-point arithmetic instructions between
+// vector and scalar
+#define UTEST_RVV_VFW_VF_FORM_WITH_RES(instr_name, tested_op, is_first_double, \
+ check_fn) \
+ TEST(RISCV_UTEST_FLOAT_WIDENING_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ constexpr size_t n = kRvvVLEN / 32; \
+ double result[n] = {0.0}; \
+ auto fn = [&result](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ if (is_first_double) { \
+ __ fcvt_d_s(fa0, fa0); \
+ __ VU.set(t0, VSew::E64, Vlmul::m2); \
+ __ vfmv_vf(v2, fa0); \
+ } \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ if (!is_first_double) { \
+ __ vfmv_vf(v2, fa0); \
+ } \
+ __ instr_name(v0, v2, fa1); \
+ __ li(t1, Operand(int64_t(result))); \
+ __ li(t2, Operand(int64_t(&result[n / 2]))); \
+ __ vs(v0, t1, 0, VSew::E64); \
+ __ vs(v1, t2, 0, VSew::E64); \
+ }; \
+ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \
+ for (float rs2_fval : compiler::ValueHelper::GetVector<float>()) { \
+ GenAndRunTest<double, float>(rs1_fval, rs2_fval, fn); \
+ for (size_t i = 0; i < n; i++) { \
+ CHECK_DOUBLE_EQ( \
+ check_fn(rs1_fval, rs2_fval) \
+ ? std::numeric_limits<double>::quiet_NaN() \
+ : UseCanonicalNan<double>(static_cast<double>( \
+ rs1_fval) tested_op static_cast<double>(rs2_fval)), \
+ result[i]); \
+ result[i] = 0.0; \
+ } \
+ } \
+ } \
+ }
+
+#define UTEST_RVV_VFW_VV_FORM_WITH_OP(instr_name, tested_op, is_first_double, \
+ check_fn) \
+ UTEST_RVV_VFW_VV_FORM_WITH_RES(instr_name, tested_op, is_first_double, \
+ check_fn)
+#define UTEST_RVV_VFW_VF_FORM_WITH_OP(instr_name, tested_op, is_first_double, \
+ check_fn) \
+ UTEST_RVV_VFW_VF_FORM_WITH_RES(instr_name, tested_op, is_first_double, \
+ check_fn)
+
+template <typename T>
+static inline bool is_invalid_fmul(T src1, T src2) {
+ return (isinf(src1) && src2 == static_cast<T>(0.0)) ||
+ (src1 == static_cast<T>(0.0) && isinf(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fadd(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) != std::signbit(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fsub(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) == std::signbit(src2));
+}
+
+UTEST_RVV_VFW_VV_FORM_WITH_OP(vfwadd_vv, +, false, is_invalid_fadd)
+UTEST_RVV_VFW_VF_FORM_WITH_OP(vfwadd_vf, +, false, is_invalid_fadd)
+UTEST_RVV_VFW_VV_FORM_WITH_OP(vfwsub_vv, -, false, is_invalid_fsub)
+UTEST_RVV_VFW_VF_FORM_WITH_OP(vfwsub_vf, -, false, is_invalid_fsub)
+UTEST_RVV_VFW_VV_FORM_WITH_OP(vfwadd_wv, +, true, is_invalid_fadd)
+UTEST_RVV_VFW_VF_FORM_WITH_OP(vfwadd_wf, +, true, is_invalid_fadd)
+UTEST_RVV_VFW_VV_FORM_WITH_OP(vfwsub_wv, -, true, is_invalid_fsub)
+UTEST_RVV_VFW_VF_FORM_WITH_OP(vfwsub_wf, -, true, is_invalid_fsub)
+UTEST_RVV_VFW_VV_FORM_WITH_OP(vfwmul_vv, *, false, is_invalid_fmul)
+UTEST_RVV_VFW_VF_FORM_WITH_OP(vfwmul_vf, *, false, is_invalid_fmul)
+
+#undef UTEST_RVV_VF_VV_FORM_WITH_OP
+#undef UTEST_RVV_VF_VF_FORM_WITH_OP
+
+// Tests for vector widening floating-point fused multiply-add Instructions
+// between vectors
+#define UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES(instr_name, array, expect_res) \
+ TEST(RISCV_UTEST_FLOAT_WIDENING_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ __ vfmv_vf(v0, fa0); \
+ __ vfmv_vf(v2, fa1); \
+ __ vfmv_vf(v4, fa2); \
+ __ instr_name(v0, v2, v4); \
+ __ VU.set(t0, VSew::E64, Vlmul::m1); \
+ __ vfmv_fs(fa0, v0); \
+ }; \
+ for (float rs1_fval : array) { \
+ for (float rs2_fval : array) { \
+ for (float rs3_fval : array) { \
+ double res = \
+ GenAndRunTest<double, float>(rs1_fval, rs2_fval, rs3_fval, fn); \
+ CHECK_DOUBLE_EQ((expect_res), res); \
+ } \
+ } \
+ } \
+ }
+
+// Tests for vector single-width floating-point fused multiply-add Instructions
+// between vectors and scalar
+#define UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES(instr_name, array, expect_res) \
+ TEST(RISCV_UTEST_FLOAT_WIDENING_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ __ vfmv_vf(v0, fa0); \
+ __ vfmv_vf(v2, fa2); \
+ __ instr_name(v0, fa1, v2); \
+ __ VU.set(t0, VSew::E64, Vlmul::m1); \
+ __ vfmv_fs(fa0, v0); \
+ }; \
+ for (float rs1_fval : array) { \
+ for (float rs2_fval : array) { \
+ for (float rs3_fval : array) { \
+ double res = \
+ GenAndRunTest<double, float>(rs1_fval, rs2_fval, rs3_fval, fn); \
+ CHECK_DOUBLE_EQ((expect_res), res); \
+ } \
+ } \
+ } \
+ }
+
+#define ARRAY_FLOAT compiler::ValueHelper::GetVector<float>()
+UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES(vfwmacc_vv, ARRAY_FLOAT,
+ std::fma(rs2_fval, rs3_fval, rs1_fval))
+UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES(vfwmacc_vf, ARRAY_FLOAT,
+ std::fma(rs2_fval, rs3_fval, rs1_fval))
+UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES(vfwnmacc_vv, ARRAY_FLOAT,
+ std::fma(rs2_fval, -rs3_fval, -rs1_fval))
+UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES(vfwnmacc_vf, ARRAY_FLOAT,
+ std::fma(rs2_fval, -rs3_fval, -rs1_fval))
+UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES(vfwmsac_vv, ARRAY_FLOAT,
+ std::fma(rs2_fval, rs3_fval, -rs1_fval))
+UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES(vfwmsac_vf, ARRAY_FLOAT,
+ std::fma(rs2_fval, rs3_fval, -rs1_fval))
+UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES(vfwnmsac_vv, ARRAY_FLOAT,
+ std::fma(rs2_fval, -rs3_fval, rs1_fval))
+UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES(vfwnmsac_vf, ARRAY_FLOAT,
+ std::fma(rs2_fval, -rs3_fval, rs1_fval))
+
+#undef ARRAY_FLOAT
+#undef UTEST_RVV_VFW_FMA_VV_FORM_WITH_RES
+#undef UTEST_RVV_VFW_FMA_VF_FORM_WITH_RES
// Tests for vector single-width floating-point fused multiply-add Instructions
// between vectors
#define UTEST_RVV_FMA_VV_FORM_WITH_RES(instr_name, array, expect_res) \
TEST(RISCV_UTEST_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
auto fn = [](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E32, Vlmul::m1); \
@@ -2271,6 +2496,7 @@ UTEST_RVV_VF_VV_FORM_WITH_OP(vfdiv_vv, ARRAY_FLOAT, /)
// between vectors and scalar
#define UTEST_RVV_FMA_VF_FORM_WITH_RES(instr_name, array, expect_res) \
TEST(RISCV_UTEST_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
CcTest::InitializeVM(); \
auto fn = [](MacroAssembler& assm) { \
__ VU.set(t0, VSew::E32, Vlmul::m1); \
@@ -2326,9 +2552,42 @@ UTEST_RVV_FMA_VF_FORM_WITH_RES(vfnmsac_vf, ARRAY_FLOAT,
std::fma(rs2_fval, -rs3_fval, rs1_fval))
#undef ARRAY_FLOAT
-#undef UTEST_RVV_FMA_VV_FORM
-#undef UTEST_RVV_FMA_VF_FORM
+#undef UTEST_RVV_FMA_VV_FORM_WITH_RES
+#undef UTEST_RVV_FMA_VF_FORM_WITH_RES
+// Tests for vector Widening Floating-Point Reduction Instructions
+#define UTEST_RVV_VFW_REDSUM_VV_FORM_WITH_RES(instr_name) \
+ TEST(RISCV_UTEST_FLOAT_WIDENING_##instr_name) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E32, Vlmul::m1); \
+ __ vfmv_vf(v2, fa0); \
+ __ vfmv_vf(v4, fa0); \
+ __ instr_name(v0, v2, v4); \
+ __ VU.set(t0, VSew::E64, Vlmul::m1); \
+ __ vfmv_fs(fa0, v0); \
+ }; \
+ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \
+ std::vector<double> temp_arr(kRvvVLEN / 32, \
+ static_cast<double>(rs1_fval)); \
+ double expect_res = rs1_fval; \
+ for (double val : temp_arr) { \
+ expect_res += val; \
+ if (std::isnan(expect_res)) { \
+ expect_res = std::numeric_limits<double>::quiet_NaN(); \
+ break; \
+ } \
+ } \
+ double res = GenAndRunTest<double, float>(rs1_fval, fn); \
+ CHECK_DOUBLE_EQ(UseCanonicalNan<double>(expect_res), res); \
+ } \
+ }
+
+UTEST_RVV_VFW_REDSUM_VV_FORM_WITH_RES(vfwredusum_vv)
+UTEST_RVV_VFW_REDSUM_VV_FORM_WITH_RES(vfwredosum_vv)
+
+#undef UTEST_RVV_VFW_REDSUM_VV_FORM_WITH_RES
// calculate the value of r used in rounding
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) {
// uint8_t d = extract64(v, shift, 1);
@@ -2362,6 +2621,7 @@ static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) {
#define UTEST_RVV_VNCLIP_E32M2_E16M1(instr_name, sign) \
TEST(RISCV_UTEST_##instr_name##_E32M2_E16M1) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
constexpr RoundingMode vxrm = RNE; \
CcTest::InitializeVM(); \
Isolate* isolate = CcTest::i_isolate(); \
@@ -2370,9 +2630,9 @@ static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) {
for (uint8_t shift = 0; shift < 32; shift++) { \
auto fn = [shift](MacroAssembler& assm) { \
__ VU.set(vxrm); \
- __ vsetvli(t0, zero_reg, VSew::E32, Vlmul::m2); \
+ __ VU.set(t0, VSew::E32, Vlmul::m2); \
__ vl(v2, a0, 0, VSew::E32); \
- __ vsetvli(t0, zero_reg, VSew::E16, Vlmul::m1); \
+ __ VU.set(t0, VSew::E16, Vlmul::m1); \
__ instr_name(v4, v2, shift); \
__ vs(v4, a1, 0, VSew::E16); \
}; \
@@ -2401,8 +2661,8 @@ UTEST_RVV_VNCLIP_E32M2_E16M1(vnclip_vi, )
#define UTEST_RVV_VI_VIE_FORM_WITH_RES(instr_name, type, width, frac_width, \
array, expect_res) \
TEST(RISCV_UTEST_##instr_name##_##width##_##frac_width) { \
- constexpr uint32_t vlen = 128; \
- constexpr uint32_t n = vlen / width; \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ constexpr uint32_t n = kRvvVLEN / width; \
CcTest::InitializeVM(); \
for (int##frac_width##_t x : array) { \
int##frac_width##_t src[n] = {0}; \
@@ -2456,8 +2716,8 @@ UTEST_RVV_VI_VIE_FORM_WITH_RES(vsext_vf2, int16_t, 16, 8, ARRAY(int8_t),
#define UTEST_RVV_VP_VS_VI_FORM_WITH_RES(instr_name, type, width, array, \
expect_res) \
TEST(RISCV_UTEST_##instr_name##_##type) { \
- constexpr uint32_t vlen = 128; \
- constexpr uint32_t n = vlen / width; \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ constexpr uint32_t n = kRvvVLEN / width; \
CcTest::InitializeVM(); \
for (type x : array) { \
for (uint32_t offset = 0; offset < n; offset++) { \
@@ -2494,9 +2754,77 @@ UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslidedown_vi, uint16_t, 16, ARRAY(uint16_t),
UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslidedown_vi, uint8_t, 8, ARRAY(uint8_t),
(i + offset) < n ? src[i + offset] : 0)
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, int64_t, 64, ARRAY(int64_t),
+ i < offset ? dst[i] : src[i - offset])
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, int32_t, 32, ARRAY(int32_t),
+ i < offset ? dst[i] : src[i - offset])
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, int16_t, 16, ARRAY(int16_t),
+ i < offset ? dst[i] : src[i - offset])
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, int8_t, 8, ARRAY(int8_t),
+ i < offset ? dst[i] : src[i - offset])
+
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, uint32_t, 32, ARRAY(uint32_t),
+ i < offset ? dst[i] : src[i - offset])
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, uint16_t, 16, ARRAY(uint16_t),
+ i < offset ? dst[i] : src[i - offset])
+UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslideup_vi, uint8_t, 8, ARRAY(uint8_t),
+ i < offset ? dst[i] : src[i - offset])
+
#undef UTEST_RVV_VP_VS_VI_FORM_WITH_RES
#undef ARRAY
+#define UTEST_VFIRST_M_WITH_WIDTH(width) \
+ TEST(RISCV_UTEST_vfirst_m_##width) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ constexpr uint32_t vlen = 128; \
+ constexpr uint32_t n = vlen / width; \
+ CcTest::InitializeVM(); \
+ for (uint32_t i = 0; i <= n; i++) { \
+ uint64_t src[2] = {0}; \
+ src[0] = 1 << i; \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E##width, Vlmul::m1); \
+ __ vl(v2, a0, 0, VSew::E##width); \
+ __ vfirst_m(a0, v2); \
+ }; \
+ auto res = GenAndRunTest<int64_t, int64_t>((int64_t)src, fn); \
+ CHECK_EQ(i < n ? i : (int64_t)-1, res); \
+ } \
+ }
+
+UTEST_VFIRST_M_WITH_WIDTH(64)
+UTEST_VFIRST_M_WITH_WIDTH(32)
+UTEST_VFIRST_M_WITH_WIDTH(16)
+UTEST_VFIRST_M_WITH_WIDTH(8)
+
+#undef UTEST_VFIRST_M_WITH_WIDTH
+
+#define UTEST_VCPOP_M_WITH_WIDTH(width) \
+ TEST(RISCV_UTEST_vcpop_m_##width) { \
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return; \
+ uint32_t vlen = 128; \
+ uint32_t n = vlen / width; \
+ CcTest::InitializeVM(); \
+ for (uint16_t x : compiler::ValueHelper::GetVector<uint16_t>()) { \
+ uint64_t src[2] = {0}; \
+ src[0] = x >> (16 - n); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, VSew::E##width, Vlmul::m1); \
+ __ vl(v2, a0, 0, VSew::E##width); \
+ __ vcpop_m(a0, v2); \
+ }; \
+ auto res = GenAndRunTest<int64_t, int64_t>((int64_t)src, fn); \
+ CHECK_EQ(std::__popcount(src[0]), res); \
+ } \
+ }
+
+UTEST_VCPOP_M_WITH_WIDTH(64)
+UTEST_VCPOP_M_WITH_WIDTH(32)
+UTEST_VCPOP_M_WITH_WIDTH(16)
+UTEST_VCPOP_M_WITH_WIDTH(8)
+
+#undef UTEST_VCPOP_M_WITH_WIDTH
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index c76e9f4efc..11ef08fb89 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -2681,6 +2681,7 @@ TEST(AssemblerX64FloatingPoint256bit) {
__ vhaddps(ymm0, ymm1, Operand(rbx, rcx, times_4, 10000));
__ vblendvps(ymm0, ymm3, ymm5, ymm9);
__ vblendvpd(ymm7, ymm4, ymm3, ymm1);
+ __ vshufps(ymm3, ymm1, ymm2, 0x75);
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2712,7 +2713,9 @@ TEST(AssemblerX64FloatingPoint256bit) {
// vblendvps ymm0, ymm3, ymm5, ymm9
0xC4, 0xE3, 0x65, 0x4A, 0xC5, 0x90,
// vblendvpd ymm7, ymm4, ymm3, ymm1
- 0xC4, 0xE3, 0x5D, 0x4B, 0xFB, 0x10};
+ 0xC4, 0xE3, 0x5D, 0x4B, 0xFB, 0x10,
+ // vshufps ymm3, ymm1, ymm2, 0x75
+ 0xC5, 0xF4, 0xC6, 0xDA, 0x75};
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index 478bc47e7b..8d959e6dfc 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -59,16 +59,21 @@ TEST(BitVector) {
v.Add(30);
v.Add(31);
v.Add(33);
- BitVector::Iterator iter(&v);
- CHECK_EQ(27, iter.Current());
- iter.Advance();
- CHECK_EQ(30, iter.Current());
- iter.Advance();
- CHECK_EQ(31, iter.Current());
- iter.Advance();
- CHECK_EQ(33, iter.Current());
- iter.Advance();
- CHECK(iter.Done());
+ BitVector::Iterator iter = v.begin();
+ BitVector::Iterator end = v.end();
+ CHECK_NE(iter, end);
+ CHECK_EQ(27, *iter);
+ ++iter;
+ CHECK_NE(iter, end);
+ CHECK_EQ(30, *iter);
+ ++iter;
+ CHECK_NE(iter, end);
+ CHECK_EQ(31, *iter);
+ ++iter;
+ CHECK_NE(iter, end);
+ CHECK_EQ(33, *iter);
+ ++iter;
+ CHECK(!(iter != end));
}
{
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 30fbe5d0ca..705e506432 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -1099,7 +1099,7 @@ TEST(TransitionLookup) {
if ((i & 2) == 0) {
for (int j = 0; j < ATTRS_COUNT; j++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(j);
+ auto attributes = PropertyAttributesFromInt(j);
if (attributes == base_attributes) continue;
// Don't add private symbols with enumerable attributes.
if (is_private && ((attributes & DONT_ENUM) == 0)) continue;
@@ -1122,7 +1122,7 @@ TEST(TransitionLookup) {
// Ensure we didn't overflow transition array and therefore all the
// combinations of cases are covered.
- CHECK(TransitionsAccessor(isolate, root_map).CanHaveMoreTransitions());
+ CHECK(TransitionsAccessor::CanHaveMoreTransitions(isolate, root_map));
// Now try querying keys.
bool positive_lookup_tested = false;
@@ -1340,7 +1340,7 @@ TEST(TryHasOwnProperty) {
for (Handle<JSObject> object : objects) {
for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
Handle<Name> name = names[name_index];
- CHECK(JSReceiver::HasProperty(object, name).FromJust());
+ CHECK(JSReceiver::HasProperty(isolate, object, name).FromJust());
ft.CheckTrue(object, name, expect_found);
}
}
@@ -1360,7 +1360,7 @@ TEST(TryHasOwnProperty) {
for (size_t key_index = 0; key_index < arraysize(non_existing_names);
key_index++) {
Handle<Name> name = non_existing_names[key_index];
- CHECK(!JSReceiver::HasProperty(object, name).FromJust());
+ CHECK(!JSReceiver::HasProperty(isolate, object, name).FromJust());
ft.CheckTrue(object, name, expect_not_found);
}
}
@@ -1666,12 +1666,12 @@ TEST(TryLookupElement) {
Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
Handle<Object> expect_bailout(Smi::FromInt(kBailout), isolate);
-#define CHECK_FOUND(object, index) \
- CHECK(JSReceiver::HasElement(object, index).FromJust()); \
+#define CHECK_FOUND(object, index) \
+ CHECK(JSReceiver::HasElement(isolate, object, index).FromJust()); \
ft.CheckTrue(object, smi##index, expect_found);
-#define CHECK_NOT_FOUND(object, index) \
- CHECK(!JSReceiver::HasElement(object, index).FromJust()); \
+#define CHECK_NOT_FOUND(object, index) \
+ CHECK(!JSReceiver::HasElement(isolate, object, index).FromJust()); \
ft.CheckTrue(object, smi##index, expect_not_found);
#define CHECK_ABSENT(object, index) \
@@ -2129,9 +2129,6 @@ TEST(PopAndReturnConstant) {
CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
int pop_count = kNumParams;
- if (!kJSArgcIncludesReceiver) {
- pop_count += 1; // Include receiver.
- }
m.PopAndReturn(m.IntPtrConstant(pop_count), m.SmiConstant(1234));
}
@@ -2166,9 +2163,6 @@ TEST(PopAndReturnVariable) {
CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
int pop_count = kNumParams;
- if (!kJSArgcIncludesReceiver) {
- pop_count += 1; // Include receiver.
- }
m.PopAndReturn(m.IntPtrConstant(pop_count), m.SmiConstant(1234));
}
@@ -2960,9 +2954,9 @@ TEST(AllocateFunctionWithMapAndContext) {
CHECK(!fun->has_prototype_slot());
CHECK_EQ(*isolate->factory()->promise_capability_default_resolve_shared_fun(),
fun->shared());
- CHECK_EQ(FromCodeT(isolate->factory()
- ->promise_capability_default_resolve_shared_fun()
- ->GetCode()),
+ CHECK_EQ(isolate->factory()
+ ->promise_capability_default_resolve_shared_fun()
+ ->GetCode(),
fun->code());
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 5b0a5c78ba..f3901fc484 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -976,8 +976,9 @@ TEST(ProfilerEnabledDuringBackgroundCompile) {
std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task(
v8::ScriptCompiler::StartStreaming(isolate, &streamed_source));
- // Run the background compilation task on the main thread.
- task->Run();
+ // Run the background compilation task. DummySourceStream::GetMoreData won't
+ // block, so it's OK to just join the background task.
+ StreamerThread::StartThreadForTaskAndJoin(task.get());
// Enable the CPU profiler.
auto* cpu_profiler = v8::CpuProfiler::New(isolate, v8::kStandardNaming);
diff --git a/deps/v8/test/cctest/test-concurrent-prototype.cc b/deps/v8/test/cctest/test-concurrent-prototype.cc
index 44f6e02c75..d9e5ffac45 100644
--- a/deps/v8/test/cctest/test-concurrent-prototype.cc
+++ b/deps/v8/test/cctest/test-concurrent-prototype.cc
@@ -188,8 +188,9 @@ TEST(ProtoWalkBackground_PrototypeChainWrite) {
sema_started.Wait();
for (int i = 0; i < 20; ++i) {
- CHECK(JSReceiver::SetPrototype(
- js_object, i % 2 == 0 ? new_proto : old_proto, false, kDontThrow)
+ CHECK(JSReceiver::SetPrototype(isolate, js_object,
+ i % 2 == 0 ? new_proto : old_proto, false,
+ kDontThrow)
.FromJust());
}
diff --git a/deps/v8/test/cctest/test-concurrent-script-context-table.cc b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
index d185d0538b..475b327c26 100644
--- a/deps/v8/test/cctest/test-concurrent-script-context-table.cc
+++ b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
@@ -98,6 +98,7 @@ TEST(ScriptContextTable_Extend) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
+ const bool kIgnoreDuplicateNames = true;
Factory* factory = isolate->factory();
Handle<NativeContext> native_context = factory->NewNativeContext();
@@ -116,8 +117,8 @@ TEST(ScriptContextTable_Extend) {
Handle<Context> script_context =
factory->NewScriptContext(native_context, scope_info);
- script_context_table =
- ScriptContextTable::Extend(script_context_table, script_context);
+ script_context_table = ScriptContextTable::Extend(
+ isolate, script_context_table, script_context, kIgnoreDuplicateNames);
}
std::unique_ptr<PersistentHandles> ph = isolate->NewPersistentHandles();
@@ -137,8 +138,8 @@ TEST(ScriptContextTable_Extend) {
for (int i = 0; i < 100; ++i) {
Handle<Context> context =
factory->NewScriptContext(native_context, scope_info);
- script_context_table =
- ScriptContextTable::Extend(script_context_table, context);
+ script_context_table = ScriptContextTable::Extend(
+ isolate, script_context_table, context, kIgnoreDuplicateNames);
}
thread->Join();
@@ -164,7 +165,7 @@ TEST(ScriptContextTable_AccessScriptContextTable) {
Handle<Context> context =
factory->NewScriptContext(native_context, scope_info);
script_context_table =
- ScriptContextTable::Extend(script_context_table, context);
+ ScriptContextTable::Extend(isolate, script_context_table, context);
int initialized_entries = 1;
g_initialized_entries.store(initialized_entries, std::memory_order_release);
@@ -183,11 +184,12 @@ TEST(ScriptContextTable_AccessScriptContextTable) {
sema_started.Wait();
+ const bool kIgnoreDuplicateNames = true;
for (; initialized_entries < 1000; ++initialized_entries) {
Handle<Context> new_context =
factory->NewScriptContext(native_context, scope_info);
- script_context_table =
- ScriptContextTable::Extend(script_context_table, new_context);
+ script_context_table = ScriptContextTable::Extend(
+ isolate, script_context_table, new_context, kIgnoreDuplicateNames);
native_context->synchronized_set_script_context_table(
*script_context_table);
// Update with relaxed semantics to not introduce ordering constraints.
diff --git a/deps/v8/test/cctest/test-concurrent-transition-array.cc b/deps/v8/test/cctest/test-concurrent-transition-array.cc
index b4a8eaa8a4..0741f34196 100644
--- a/deps/v8/test/cctest/test-concurrent-transition-array.cc
+++ b/deps/v8/test/cctest/test-concurrent-transition-array.cc
@@ -40,7 +40,7 @@ class ConcurrentSearchThread : public v8::base::Thread {
background_thread_started_->Signal();
- CHECK_EQ(TransitionsAccessor(CcTest::i_isolate(), map_, true)
+ CHECK_EQ(TransitionsAccessor(CcTest::i_isolate(), *map_, true)
.SearchTransition(*name_, PropertyKind::kData, NONE),
result_map_ ? **result_map_ : Map());
}
@@ -74,11 +74,11 @@ class ConcurrentSearchOnOutdatedAccessorThread final
LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
UnparkedScope scope(&local_heap);
- TransitionsAccessor accessor(CcTest::i_isolate(), map_, true);
background_thread_started_->Signal();
main_thread_finished_->Wait();
- CHECK_EQ(accessor.SearchTransition(*name_, PropertyKind::kData, NONE),
+ CHECK_EQ(TransitionsAccessor(CcTest::i_isolate(), *map_, true)
+ .SearchTransition(*name_, PropertyKind::kData, NONE),
result_map_ ? **result_map_ : Map());
}
@@ -102,7 +102,7 @@ TEST(FullFieldTransitions_OnlySearch) {
attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- TransitionsAccessor(isolate, map0).Insert(name, map1, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name, map1, PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
@@ -124,8 +124,9 @@ TEST(FullFieldTransitions_OnlySearch) {
background_thread_started.Wait();
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name, kind, attributes));
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name,
+ kind, attributes)
+ .ToHandleChecked());
thread->Join();
}
@@ -154,7 +155,7 @@ TEST(FullFieldTransitions) {
attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- TransitionsAccessor(isolate, map0).Insert(name1, map1, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1, PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
@@ -176,11 +177,13 @@ TEST(FullFieldTransitions) {
background_thread_started.Wait();
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name1, kind, attributes));
- TransitionsAccessor(isolate, map0).Insert(name2, map2, PROPERTY_TRANSITION);
- CHECK_EQ(*map2, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name2, kind, attributes));
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name1,
+ kind, attributes)
+ .ToHandleChecked());
+ TransitionsAccessor::Insert(isolate, map0, name2, map2, PROPERTY_TRANSITION);
+ CHECK_EQ(*map2, *TransitionsAccessor::SearchTransition(isolate, map0, *name2,
+ kind, attributes)
+ .ToHandleChecked());
thread->Join();
}
@@ -210,8 +213,8 @@ TEST(WeakRefToFullFieldTransitions) {
attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- TransitionsAccessor(isolate, map0)
- .Insert(name1, map1, SIMPLE_PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1,
+ SIMPLE_PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsWeakRefEncoding());
@@ -233,16 +236,18 @@ TEST(WeakRefToFullFieldTransitions) {
background_thread_started.Wait();
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name1, kind, attributes));
- TransitionsAccessor(isolate, map0)
- .Insert(name2, map2, SIMPLE_PROPERTY_TRANSITION);
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name1,
+ kind, attributes)
+ .ToHandleChecked());
+ TransitionsAccessor::Insert(isolate, map0, name2, map2,
+ SIMPLE_PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
}
- CHECK_EQ(*map2, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name2, kind, attributes));
+ CHECK_EQ(*map2, *TransitionsAccessor::SearchTransition(isolate, map0, *name2,
+ kind, attributes)
+ .ToHandleChecked());
thread->Join();
}
@@ -278,8 +283,8 @@ TEST(FullFieldTransitions_withSlack) {
attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- TransitionsAccessor(isolate, map0).Insert(name1, map1, PROPERTY_TRANSITION);
- TransitionsAccessor(isolate, map0).Insert(name2, map2, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name2, map2, PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
@@ -301,19 +306,22 @@ TEST(FullFieldTransitions_withSlack) {
background_thread_started.Wait();
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name1, kind, attributes));
- CHECK_EQ(*map2, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name2, kind, attributes));
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name1,
+ kind, attributes)
+ .ToHandleChecked());
+ CHECK_EQ(*map2, *TransitionsAccessor::SearchTransition(isolate, map0, *name2,
+ kind, attributes)
+ .ToHandleChecked());
{
// Check that we have enough slack for the 3rd insertion into the
// TransitionArray.
TestTransitionsAccessor transitions(isolate, map0);
CHECK_GE(transitions.Capacity(), 3);
}
- TransitionsAccessor(isolate, map0).Insert(name3, map3, PROPERTY_TRANSITION);
- CHECK_EQ(*map3, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name3, kind, attributes));
+ TransitionsAccessor::Insert(isolate, map0, name3, map3, PROPERTY_TRANSITION);
+ CHECK_EQ(*map3, *TransitionsAccessor::SearchTransition(isolate, map0, *name3,
+ kind, attributes)
+ .ToHandleChecked());
thread->Join();
}
@@ -359,9 +367,10 @@ TEST(UninitializedToFullFieldTransitions) {
background_thread_started.Wait();
- TransitionsAccessor(isolate, map0).Insert(name1, map1, PROPERTY_TRANSITION);
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name1, kind, attributes));
+ TransitionsAccessor::Insert(isolate, map0, name1, map1, PROPERTY_TRANSITION);
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name1,
+ kind, attributes)
+ .ToHandleChecked());
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
@@ -395,7 +404,7 @@ TEST(FullFieldTransitions_BackgroundSearchOldPointer) {
attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- TransitionsAccessor(isolate, map0).Insert(name1, map1, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1, PROPERTY_TRANSITION);
{
TestTransitionsAccessor transitions(isolate, map0);
CHECK(transitions.IsFullTransitionArrayEncoding());
@@ -420,17 +429,19 @@ TEST(FullFieldTransitions_BackgroundSearchOldPointer) {
background_thread_started.Wait();
- CHECK_EQ(*map1, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name1, kind, attributes));
+ CHECK_EQ(*map1, *TransitionsAccessor::SearchTransition(isolate, map0, *name1,
+ kind, attributes)
+ .ToHandleChecked());
{
// Check that we do not have enough slack for the 2nd insertion into the
// TransitionArray.
TestTransitionsAccessor transitions(isolate, map0);
CHECK_EQ(transitions.Capacity(), 1);
}
- TransitionsAccessor(isolate, map0).Insert(name2, map2, PROPERTY_TRANSITION);
- CHECK_EQ(*map2, TransitionsAccessor(isolate, map0)
- .SearchTransition(*name2, kind, attributes));
+ TransitionsAccessor::Insert(isolate, map0, name2, map2, PROPERTY_TRANSITION);
+ CHECK_EQ(*map2, *TransitionsAccessor::SearchTransition(isolate, map0, *name2,
+ kind, attributes)
+ .ToHandleChecked());
main_thread_finished.Signal();
thread->Join();
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 5d7e45198b..8fdc86c2c1 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -120,7 +120,7 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
sample.stack[1] = reinterpret_cast<void*>(frame3);
sample.frames_count = 2;
}
- sample.timestamp = base::TimeTicks::HighResolutionNow();
+ sample.timestamp = base::TimeTicks::Now();
proc->AddSample(sample);
}
@@ -424,7 +424,7 @@ TEST(Issue1398) {
for (unsigned i = 0; i < sample.frames_count; ++i) {
sample.stack[i] = reinterpret_cast<void*>(code->InstructionStart());
}
- sample.timestamp = base::TimeTicks::HighResolutionNow();
+ sample.timestamp = base::TimeTicks::Now();
processor->AddSample(sample);
processor->StopSynchronously();
@@ -3920,6 +3920,15 @@ TEST(ContextIsolation) {
diff_context_profile->GetTopDownRoot();
// Ensure that no children were recorded (including callbacks, builtins).
CHECK(!FindChild(diff_root, "start"));
+
+ CHECK_GT(diff_context_profile->GetSamplesCount(), 0);
+ for (int i = 0; i < diff_context_profile->GetSamplesCount(); i++) {
+ CHECK(diff_context_profile->GetSampleState(i) == StateTag::IDLE ||
+ // GC State do not have a context
+ diff_context_profile->GetSampleState(i) == StateTag::GC ||
+ // first frame and native code reports as external
+ diff_context_profile->GetSampleState(i) == StateTag::EXTERNAL);
+ }
}
}
@@ -4205,7 +4214,7 @@ int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source,
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*CompileRun(source)));
if (function->ActiveTierIsIgnition()) return -1;
- i::Handle<i::Code> code(function->code(), isolate);
+ i::Handle<i::Code> code(i::FromCodeT(function->code()), isolate);
i::SourcePositionTableIterator iterator(
ByteArray::cast(code->source_position_table()));
@@ -4332,7 +4341,7 @@ struct FastApiReceiver {
// TODO(mslekova): The fallback is not used by the test. Replace this
// with a CHECK.
if (!IsValidUnwrapObject(*receiver)) {
- options.fallback = 1;
+ options.fallback = true;
return;
}
FastApiReceiver* receiver_ptr =
@@ -4383,7 +4392,6 @@ TEST(FastApiCPUProfiler) {
#if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
// None of the following configurations include JSCallReducer.
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_SCOPE(opt);
FLAG_SCOPE(turbo_fast_api_calls);
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 8afb615646..28600996f5 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -4360,6 +4360,7 @@ TEST(DebugCoverage) {
v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(0);
v8::Local<v8::debug::Script> script = script_data.GetScript();
CHECK(script->Source()
+ ->JavaScriptCode()
.ToLocalChecked()
->Equals(env.local(), source)
.FromMaybe(false));
@@ -4413,6 +4414,7 @@ TEST(DebugCoverageWithCoverageOutOfScope) {
GetScriptDataAndDeleteCoverage(isolate);
v8::Local<v8::debug::Script> script = script_data.GetScript();
CHECK(script->Source()
+ ->JavaScriptCode()
.ToLocalChecked()
->Equals(env.local(), source)
.FromMaybe(false));
@@ -4497,7 +4499,7 @@ TEST(BuiltinsExceptionPrediction) {
bool fail = false;
for (i::Builtin builtin = i::Builtins::kFirst; builtin <= i::Builtins::kLast;
++builtin) {
- i::Code code = builtins->code(builtin);
+ i::Code code = FromCodeT(builtins->code(builtin));
if (code.kind() != i::CodeKind::BUILTIN) continue;
auto prediction = code.GetBuiltinCatchPrediction();
USE(prediction);
@@ -5725,3 +5727,69 @@ TEST(AwaitCleansUpGlobalPromiseStack) {
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
}
+
+TEST(CreateMessageFromOldException) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(true);
+
+ v8::Local<v8::Value> error;
+ {
+ v8::TryCatch try_catch(context->GetIsolate());
+ CompileRun(R"javascript(
+ function f1() {
+ throw new Error('error in f1');
+ };
+ f1();
+ )javascript");
+ CHECK(try_catch.HasCaught());
+
+ error = try_catch.Exception();
+ }
+ CHECK(error->IsObject());
+
+ v8::Local<v8::Message> message =
+ v8::debug::CreateMessageFromException(context->GetIsolate(), error);
+ CHECK(!message.IsEmpty());
+ CHECK_EQ(3, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(16, message->GetStartColumn(context.local()).FromJust());
+
+ v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
+ CHECK(!stackTrace.IsEmpty());
+ CHECK_EQ(2, stackTrace->GetFrameCount());
+
+ stackTrace = v8::Exception::GetStackTrace(error);
+ CHECK(!stackTrace.IsEmpty());
+ CHECK_EQ(2, stackTrace->GetFrameCount());
+}
+
+TEST(CreateMessageDoesNotInspectStack) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ // Do not enable Isolate::SetCaptureStackTraceForUncaughtExceptions.
+
+ v8::Local<v8::Value> error;
+ {
+ v8::TryCatch try_catch(context->GetIsolate());
+ CompileRun(R"javascript(
+ function f1() {
+ throw new Error('error in f1');
+ };
+ f1();
+ )javascript");
+ CHECK(try_catch.HasCaught());
+
+ error = try_catch.Exception();
+ }
+ // The caught error should not have a stack trace attached.
+ CHECK(error->IsObject());
+ CHECK(v8::Exception::GetStackTrace(error).IsEmpty());
+
+ // The corresponding message should also not have a stack trace.
+ v8::Local<v8::Message> message =
+ v8::debug::CreateMessageFromException(context->GetIsolate(), error);
+ CHECK(!message.IsEmpty());
+ CHECK(message->GetStackTrace().IsEmpty());
+}
diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc
index afc5e4feaa..80d7e7a942 100644
--- a/deps/v8/test/cctest/test-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-descriptor-array.cc
@@ -25,15 +25,10 @@ using TVariable = compiler::TypedCodeAssemblerVariable<T>;
Handle<Name> NewNameWithHash(Isolate* isolate, const char* str, uint32_t hash,
bool is_integer) {
- uint32_t hash_field = hash << Name::kHashShift;
+ uint32_t hash_field = Name::CreateHashFieldValue(
+ hash, is_integer ? Name::HashFieldType::kIntegerIndex
+ : Name::HashFieldType::kHash);
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
- static_assert(Name::kHashNotComputedMask == 1, "This test needs updating");
- static_assert(Name::kIsNotIntegerIndexMask == 2, "This test needs updating");
-
- if (!is_integer) {
- hash_field |= Name::kIsNotIntegerIndexMask;
- }
Handle<Name> name = isolate->factory()->NewOneByteInternalizedString(
base::OneByteVector(str), hash_field);
name->set_raw_hash_field(hash_field);
@@ -223,13 +218,15 @@ TEST(DescriptorArrayHashCollisionMassive) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::HashFieldTypeBits::kSize == 2,
+ "This test might require updating if more HashFieldType values "
+ "are introduced");
std::vector<Handle<Name>> names;
// Use the same hash value for all names.
- uint32_t hash =
- static_cast<uint32_t>(isolate->GenerateIdentityHash(Name::kHashBitMask));
+ uint32_t hash = static_cast<uint32_t>(
+ isolate->GenerateIdentityHash(Name::HashBits::kMax));
for (int i = 0; i < kMaxNumberOfDescriptors / 2; ++i) {
// Add pairs of names having the same base hash value but having different
@@ -269,7 +266,9 @@ TEST(DescriptorArrayHashCollision) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::HashFieldTypeBits::kSize == 2,
+ "This test might require updating if more HashFieldType values "
+ "are introduced");
std::vector<Handle<Name>> names;
uint32_t hash = 0;
@@ -278,7 +277,7 @@ TEST(DescriptorArrayHashCollision) {
if (i % 2 == 0) {
// Change hash value for every pair of names.
hash = static_cast<uint32_t>(
- isolate->GenerateIdentityHash(Name::kHashBitMask));
+ isolate->GenerateIdentityHash(Name::HashBits::kMax));
}
// Add pairs of names having the same base hash value but having different
@@ -318,13 +317,15 @@ TEST(TransitionArrayHashCollisionMassive) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::HashFieldTypeBits::kSize == 2,
+ "This test might require updating if more HashFieldType values "
+ "are introduced");
std::vector<Handle<Name>> names;
// Use the same hash value for all names.
- uint32_t hash =
- static_cast<uint32_t>(isolate->GenerateIdentityHash(Name::kHashBitMask));
+ uint32_t hash = static_cast<uint32_t>(
+ isolate->GenerateIdentityHash(Name::HashBits::kMax));
for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) {
// Add pairs of names having the same base hash value but having different
@@ -369,19 +370,21 @@ TEST(TransitionArrayHashCollision) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::HashFieldTypeBits::kSize == 2,
+ "This test might require updating if more HashFieldType values "
+ "are introduced");
std::vector<Handle<Name>> names;
// Use the same hash value for all names.
- uint32_t hash =
- static_cast<uint32_t>(isolate->GenerateIdentityHash(Name::kHashBitMask));
+ uint32_t hash = static_cast<uint32_t>(
+ isolate->GenerateIdentityHash(Name::HashBits::kMax));
for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) {
if (i % 2 == 0) {
// Change hash value for every pair of names.
hash = static_cast<uint32_t>(
- isolate->GenerateIdentityHash(Name::kHashBitMask));
+ isolate->GenerateIdentityHash(Name::HashBits::kMax));
}
// Add pairs of names having the same base hash value but having different
// values of is_integer bit.
diff --git a/deps/v8/test/cctest/test-disasm-loong64.cc b/deps/v8/test/cctest/test-disasm-loong64.cc
index 5620eb9c69..16521a8c3b 100644
--- a/deps/v8/test/cctest/test-disasm-loong64.cc
+++ b/deps/v8/test/cctest/test-disasm-loong64.cc
@@ -621,11 +621,11 @@ TEST(TypeOp17) {
COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6");
COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4");
- COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3");
- COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5");
+ COMPARE(maskeqz(a6, a7, t0), "0013316a maskeqz a6, a7, t0");
+ COMPARE(maskeqz(t1, t2, t3), "00133dcd maskeqz t1, t2, t3");
- COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0");
- COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3");
+ COMPARE(masknez(a5, a5, a3), "00139d29 masknez a5, a5, a3");
+ COMPARE(masknez(a3, a4, a5), "0013a507 masknez a3, a4, a5");
COMPARE(or_(s3, sp, zero_reg),
"0015007a or s3, sp, zero_reg");
diff --git a/deps/v8/test/cctest/test-disasm-riscv64.cc b/deps/v8/test/cctest/test-disasm-riscv64.cc
index 6177de7884..22108d2ca3 100644
--- a/deps/v8/test/cctest/test-disasm-riscv64.cc
+++ b/deps/v8/test/cctest/test-disasm-riscv64.cc
@@ -527,10 +527,11 @@ TEST(Previleged) {
VERIFY_RUN();
}
*/
-#ifdef CAN_USE_RVV_INSTRUCTIONS
+
TEST(RVV) {
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return;
SET_UP();
- COMPARE(vsetvlmax(kScratchReg, E64, m1),
+ COMPARE(VU.set(kScratchReg, E64, m1),
"018079d7 vsetvli s3, zero_reg, E64, m1");
COMPARE(vl(v2, a0, 0, VSew::E8), "02050107 vle8.v v2, (a0)");
COMPARE(vl(v2, a0, 0, VSew::E8), "02050107 vle8.v v2, (a0)");
@@ -622,8 +623,51 @@ TEST(RVV) {
COMPARE(vzext_vf2(v17, v14), "4ae328d7 vzext.vf2 v17, v14");
COMPARE(vsext_vf2(v17, v14), "4ae3a8d7 vsext.vf2 v17, v14");
+ // Vector Mask Instructions
+ COMPARE(vfirst_m(a5, v17), "4318a7d7 vfirst.m a5, v17");
+ COMPARE(vcpop_m(a5, v17), "431827d7 vcpop.m a5, v17");
+
+ COMPARE(vfsqrt_v(v17, v28), "4fc018d7 vfsqrt.v v17, v28")
+ COMPARE(vfrsqrt7_v(v17, v28), "4fc218d7 vfrsqrt7.v v17, v28")
+ COMPARE(vfrec7_v(v17, v28), "4fc298d7 vfrec7.v v17, v28")
+ COMPARE(vfclass_v(v17, v28), "4fc818d7 vfclass.v v17, v28")
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ COMPARE(vfwadd_vv(v17, v14, v28), "c2ee18d7 vfwadd.vv v17, v14, v28");
+ COMPARE(vfwsub_vv(v17, v14, v28), "caee18d7 vfwsub.vv v17, v14, v28");
+ COMPARE(vfwadd_wv(v17, v14, v28), "d2ee18d7 vfwadd.wv v17, v14, v28");
+ COMPARE(vfwsub_wv(v17, v14, v28), "daee18d7 vfwsub.wv v17, v14, v28");
+ COMPARE(vfwadd_vf(v17, v28, fa5), "c3c7d8d7 vfwadd.vf v17, v28, fa5");
+ COMPARE(vfwsub_vf(v17, v28, fa5), "cbc7d8d7 vfwsub.vf v17, v28, fa5");
+ COMPARE(vfwadd_wf(v17, v28, fa5), "d3c7d8d7 vfwadd.wf v17, v28, fa5");
+ COMPARE(vfwsub_wf(v17, v28, fa5), "dbc7d8d7 vfwsub.wf v17, v28, fa5");
+
+ // Vector Widening Floating-Point Reduction Instructions
+ COMPARE(vfwredusum_vv(v17, v14, v28),
+ "c6ee18d7 vfwredusum.vs v17, v14, v28");
+ COMPARE(vfwredosum_vv(v17, v14, v28),
+ "ceee18d7 vfwredosum.vs v17, v14, v28");
+
+ // Vector Widening Floating-Point Multiply
+ COMPARE(vfwmul_vv(v17, v14, v28), "e2ee18d7 vfwmul.vv v17, v14, v28");
+ COMPARE(vfwmul_vf(v17, v28, fa5), "e3c7d8d7 vfwmul.vf v17, v28, fa5");
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ COMPARE(vfwmacc_vv(v17, v14, v28), "f3c718d7 vfwmacc.vv v17, v14, v28");
+ COMPARE(vfwnmacc_vv(v17, v14, v28),
+ "f7c718d7 vfwnmacc.vv v17, v14, v28");
+ COMPARE(vfwmsac_vv(v17, v14, v28), "fbc718d7 vfwmsac.vv v17, v14, v28");
+ COMPARE(vfwnmsac_vv(v17, v14, v28),
+ "ffc718d7 vfwnmsac.vv v17, v14, v28");
+ COMPARE(vfwmacc_vf(v17, fa5, v28), "f3c7d8d7 vfwmacc.vf v17, fa5, v28");
+ COMPARE(vfwnmacc_vf(v17, fa5, v28),
+ "f7c7d8d7 vfwnmacc.vf v17, fa5, v28");
+ COMPARE(vfwmsac_vf(v17, fa5, v28), "fbc7d8d7 vfwmsac.vf v17, fa5, v28");
+ COMPARE(vfwnmsac_vf(v17, fa5, v28),
+ "ffc7d8d7 vfwnmsac.vf v17, fa5, v28");
+
VERIFY_RUN();
}
-#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 5df2c8630b..2b2aa963ee 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -63,7 +63,7 @@ TEST(DisasmX64) {
__ bind(&L2);
__ call(rcx);
__ nop();
- Handle<Code> ic = BUILTIN_CODE(isolate, ArrayFrom);
+ Handle<CodeT> ic = BUILTIN_CODE(isolate, ArrayFrom);
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -1415,6 +1415,9 @@ UNINITIALIZED_TEST(DisasmX64YMMRegister) {
COMPARE("c5ff12a48b10270000 vmovddup ymm4,[rbx+rcx*4+0x2710]",
vmovddup(ymm4, Operand(rbx, rcx, times_4, 10000)));
COMPARE("c5fe16ca vmovshdup ymm1,ymm2", vmovshdup(ymm1, ymm2));
+
+ COMPARE("c5f4c6da73 vshufps ymm3,ymm1,ymm2,0x73",
+ vshufps(ymm3, ymm1, ymm2, 115));
}
if (!CpuFeatures::IsSupported(AVX2)) return;
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 717901a47d..e65884fd04 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -432,7 +432,6 @@ TEST(VectorCallSpeculationModeAndFeedbackContent) {
if (!i::FLAG_opt) return;
if (i::FLAG_always_opt) return;
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
@@ -667,7 +666,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(2, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
- CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreNamedStrict);
+ CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kSetNamedStrict);
}
{
@@ -689,7 +688,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
CHECK_EQ(5, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kCall);
CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
- CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kStoreNamedSloppy);
+ CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kSetNamedSloppy);
CHECK_SLOT_KIND(helper, 3, FeedbackSlotKind::kCall);
CHECK_SLOT_KIND(helper, 4, FeedbackSlotKind::kLoadProperty);
}
@@ -711,7 +710,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(3, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
- CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreKeyedSloppy);
+ CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kSetKeyedSloppy);
CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kLoadKeyed);
}
@@ -733,7 +732,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(3, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
- CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreKeyedStrict);
+ CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kSetKeyedStrict);
CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kLoadKeyed);
}
@@ -755,9 +754,9 @@ TEST(ReferenceContextAllocatesNoSlots) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(7, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
- CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreNamedStrict);
- CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kStoreNamedStrict);
- CHECK_SLOT_KIND(helper, 3, FeedbackSlotKind::kStoreNamedStrict);
+ CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kSetNamedStrict);
+ CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kSetNamedStrict);
+ CHECK_SLOT_KIND(helper, 3, FeedbackSlotKind::kSetNamedStrict);
CHECK_SLOT_KIND(helper, 4, FeedbackSlotKind::kBinaryOp);
CHECK_SLOT_KIND(helper, 5, FeedbackSlotKind::kLoadProperty);
CHECK_SLOT_KIND(helper, 6, FeedbackSlotKind::kLoadProperty);
@@ -793,7 +792,7 @@ TEST(VectorStoreICBasic) {
CHECK_EQ(InlineCacheState::MONOMORPHIC, nexus.ic_state());
}
-TEST(StoreOwnIC) {
+TEST(DefineNamedOwnIC) {
if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
FLAG_allow_natives_syntax = true;
@@ -816,7 +815,7 @@ TEST(StoreOwnIC) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(2, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLiteral);
- CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreOwnNamed);
+ CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kDefineNamedOwn);
FeedbackNexus nexus(feedback_vector, helper.slot(1));
CHECK_EQ(InlineCacheState::MONOMORPHIC, nexus.ic_state());
}
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b5047b4d7d..1bee88aa4d 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -66,8 +66,7 @@ static Handle<AccessorPair> CreateAccessorPair(bool with_getter,
// Check cached migration target map after Map::Update() and Map::TryUpdate()
static void CheckMigrationTarget(Isolate* isolate, Map old_map, Map new_map) {
- Map target = TransitionsAccessor(isolate, handle(old_map, isolate))
- .GetMigrationTarget();
+ Map target = TransitionsAccessor(isolate, old_map).GetMigrationTarget();
if (target.is_null()) return;
CHECK_EQ(new_map, target);
CHECK_EQ(MapUpdater::TryUpdateNoLock(isolate, old_map,
@@ -391,10 +390,10 @@ class Expectations {
heap_type);
Handle<String> name = CcTest::MakeName("prop", property_index);
- Map target = TransitionsAccessor(isolate_, map)
- .SearchTransition(*name, PropertyKind::kData, attributes);
+ MaybeHandle<Map> target = TransitionsAccessor::SearchTransition(
+ isolate_, map, *name, PropertyKind::kData, attributes);
CHECK(!target.is_null());
- return handle(target, isolate_);
+ return target.ToHandleChecked();
}
Handle<Map> AddAccessorConstant(Handle<Map> map,
@@ -1752,9 +1751,8 @@ static void TestReconfigureElementsKind_GeneralizeFieldInPlace(
Expectations expectations(isolate, PACKED_SMI_ELEMENTS);
// Create a map, add required properties to it and initialize expectations.
- Handle<Map> initial_map = Map::Create(isolate, 0);
- initial_map->set_instance_type(JS_ARRAY_TYPE);
- initial_map->set_elements_kind(PACKED_SMI_ELEMENTS);
+ Handle<Map> initial_map = isolate->factory()->NewMap(
+ JS_ARRAY_TYPE, JSArray::kHeaderSize, PACKED_SMI_ELEMENTS);
Handle<Map> map = initial_map;
map = expectations.AsElementsKind(map, PACKED_ELEMENTS);
@@ -2065,10 +2063,10 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
}
Handle<String> name = CcTest::MakeName("prop", i);
- Map target = TransitionsAccessor(isolate, map2)
- .SearchTransition(*name, PropertyKind::kData, NONE);
+ MaybeHandle<Map> target = TransitionsAccessor::SearchTransition(
+ isolate, map2, *name, PropertyKind::kData, NONE);
CHECK(!target.is_null());
- map2 = handle(target, isolate);
+ map2 = target.ToHandleChecked();
}
map2 = ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp),
@@ -2090,14 +2088,14 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// Fill in transition tree of |map2| so that it can't have more transitions.
for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions; i++) {
- CHECK(TransitionsAccessor(isolate, map2).CanHaveMoreTransitions());
+ CHECK(TransitionsAccessor::CanHaveMoreTransitions(isolate, map2));
Handle<String> name = CcTest::MakeName("foo", i);
Map::CopyWithField(isolate, map2, name, any_type, NONE,
PropertyConstness::kMutable, Representation::Smi(),
INSERT_TRANSITION)
.ToHandleChecked();
}
- CHECK(!TransitionsAccessor(isolate, map2).CanHaveMoreTransitions());
+ CHECK(!TransitionsAccessor::CanHaveMoreTransitions(isolate, map2));
// Try to update |map|, since there is no place for propX transition at |map2|
// |map| should become normalized.
@@ -3094,7 +3092,7 @@ TEST(DeletePropertyGeneralizesConstness) {
// |new_parent_map| must have exactly one outgoing transition to |new_map|.
{
- TransitionsAccessor ta(isolate, new_parent_map);
+ TransitionsAccessor ta(isolate, *new_parent_map);
CHECK_EQ(ta.NumberOfTransitions(), 1);
CHECK_EQ(ta.GetTarget(0), *new_map);
}
@@ -3114,7 +3112,7 @@ TEST(DeletePropertyGeneralizesConstness) {
std::vector<Handle<Map>> transitions;
Handle<Object> value = handle(Smi::FromInt(0), isolate);
for (int i = 0; i < kPropertyAttributesCombinationsCount; i++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+ auto attributes = PropertyAttributesFromInt(i);
Handle<Map> tmp;
// Add some transitions to "x" and "y".
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index d9efaba7b1..3c9f290eda 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -28,6 +28,7 @@
#include "include/v8-function.h"
#include "include/v8-locker.h"
#include "src/api/api-inl.h"
+#include "src/common/allow-deprecated.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
@@ -36,6 +37,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
+START_ALLOW_USE_DEPRECATED()
+
namespace v8 {
namespace internal {
@@ -708,6 +711,7 @@ TEST(TotalSizeRegularNode) {
}
TEST(TotalSizeTracedNode) {
+ ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
@@ -726,3 +730,5 @@ TEST(TotalSizeTracedNode) {
} // namespace internal
} // namespace v8
+
+END_ALLOW_USE_DEPRECATED()
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 449b09a508..d39629ecec 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2106,6 +2106,7 @@ static int StringCmp(const char* ref, i::String act) {
TEST(GetConstructor) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
CompileRun(
"function Constructor1() {};\n"
@@ -2128,42 +2129,43 @@ TEST(GetConstructor) {
.As<v8::Object>();
i::Handle<i::JSObject> js_obj1 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj1));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj1).is_null());
+ CHECK(!i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj1).is_null());
v8::Local<v8::Object> obj2 = js_global->Get(env.local(), v8_str("obj2"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj2 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj2));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj2).is_null());
+ CHECK(!i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj2).is_null());
v8::Local<v8::Object> obj3 = js_global->Get(env.local(), v8_str("obj3"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj3 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj3));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj3).is_null());
+ CHECK(!i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj3).is_null());
v8::Local<v8::Object> obj4 = js_global->Get(env.local(), v8_str("obj4"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj4 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj4));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj4).is_null());
+ CHECK(!i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj4).is_null());
v8::Local<v8::Object> obj5 = js_global->Get(env.local(), v8_str("obj5"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj5 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj5));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj5).is_null());
+ CHECK(i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj5).is_null());
v8::Local<v8::Object> obj6 = js_global->Get(env.local(), v8_str("obj6"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj6 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj6));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj6).is_null());
+ CHECK(i::V8HeapExplorer::GetConstructor(i_isolate, *js_obj6).is_null());
}
TEST(GetConstructorName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
CompileRun(
"function Constructor1() {};\n"
@@ -2186,43 +2188,43 @@ TEST(GetConstructorName) {
.As<v8::Object>();
i::Handle<i::JSObject> js_obj1 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj1));
- CHECK_EQ(0, StringCmp(
- "Constructor1", i::V8HeapExplorer::GetConstructorName(*js_obj1)));
+ CHECK_EQ(0, StringCmp("Constructor1", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj1)));
v8::Local<v8::Object> obj2 = js_global->Get(env.local(), v8_str("obj2"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj2 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj2));
- CHECK_EQ(0, StringCmp(
- "Constructor2", i::V8HeapExplorer::GetConstructorName(*js_obj2)));
+ CHECK_EQ(0, StringCmp("Constructor2", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj2)));
v8::Local<v8::Object> obj3 = js_global->Get(env.local(), v8_str("obj3"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj3 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj3));
- CHECK_EQ(0, StringCmp("Constructor3",
- i::V8HeapExplorer::GetConstructorName(*js_obj3)));
+ CHECK_EQ(0, StringCmp("Constructor3", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj3)));
v8::Local<v8::Object> obj4 = js_global->Get(env.local(), v8_str("obj4"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj4 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj4));
- CHECK_EQ(0, StringCmp("Constructor4",
- i::V8HeapExplorer::GetConstructorName(*js_obj4)));
+ CHECK_EQ(0, StringCmp("Constructor4", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj4)));
v8::Local<v8::Object> obj5 = js_global->Get(env.local(), v8_str("obj5"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj5 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj5));
- CHECK_EQ(0, StringCmp(
- "Object", i::V8HeapExplorer::GetConstructorName(*js_obj5)));
+ CHECK_EQ(0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj5)));
v8::Local<v8::Object> obj6 = js_global->Get(env.local(), v8_str("obj6"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj6 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj6));
- CHECK_EQ(0, StringCmp(
- "Object", i::V8HeapExplorer::GetConstructorName(*js_obj6)));
+ CHECK_EQ(0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(
+ i_isolate, *js_obj6)));
}
@@ -2772,13 +2774,20 @@ TEST(CheckCodeNames) {
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
- const char* builtin_path1[] = {"::(GC roots)", "::(Builtins)",
- "::(KeyedLoadIC_PolymorphicName builtin)"};
+ const char* builtin_path1[] = {
+ "::(GC roots)", "::(Builtins)",
+#ifdef V8_EXTERNAL_CODE_SPACE
+ "KeyedLoadIC_PolymorphicName::system / CodeDataContainer",
+#endif
+ "::(KeyedLoadIC_PolymorphicName builtin)"};
const v8::HeapGraphNode* node = GetNodeByPath(
env->GetIsolate(), snapshot, builtin_path1, arraysize(builtin_path1));
CHECK(node);
const char* builtin_path2[] = {"::(GC roots)", "::(Builtins)",
+#ifdef V8_EXTERNAL_CODE_SPACE
+ "CompileLazy::system / CodeDataContainer",
+#endif
"::(CompileLazy builtin)"};
node = GetNodeByPath(env->GetIsolate(), snapshot, builtin_path2,
arraysize(builtin_path2));
@@ -2991,7 +3000,8 @@ TEST(TrackBumpPointerAllocations) {
// Now check that not all allocations are tracked if we manually reenable
// inline allocations.
- CHECK(CcTest::heap()->inline_allocation_disabled());
+ CHECK(i::FLAG_single_generation ||
+ !CcTest::heap()->new_space()->IsInlineAllocationEnabled());
CcTest::heap()->EnableInlineAllocation();
CompileRun(inline_heap_allocation_source);
@@ -4125,10 +4135,11 @@ TEST(WeakReference) {
.Build();
CHECK(code->IsCode());
+ // Manually inlined version of FeedbackVector::SetOptimizedCode (needed due
+ // to the FOR_TESTING code kind).
fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(ToCodeT(*code)),
v8::kReleaseStore);
- fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode(
- i::OptimizationTier::kTopTier) |
+ fv->set_flags(i::FeedbackVector::MaybeHasOptimizedCodeBit::encode(true) |
i::FeedbackVector::OptimizationMarkerBits::encode(
i::OptimizationMarker::kNone));
diff --git a/deps/v8/test/cctest/test-helper-riscv64.h b/deps/v8/test/cctest/test-helper-riscv64.h
index 0380f6bd39..79a6dca989 100644
--- a/deps/v8/test/cctest/test-helper-riscv64.h
+++ b/deps/v8/test/cctest/test-helper-riscv64.h
@@ -127,7 +127,6 @@ template <typename OUTPUT_T, typename INPUT_T>
OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, INPUT_T input2,
Func test_generator) {
DCHECK((sizeof(INPUT_T) == 4 || sizeof(INPUT_T) == 8));
- DCHECK(sizeof(OUTPUT_T) == sizeof(INPUT_T));
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index be7f846d86..ed757fc5ee 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -184,6 +184,24 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
+ struct V8_NODISCARD EnableWritePermissionsOnMacArm64Scope {
+#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+ EnableWritePermissionsOnMacArm64Scope() { pthread_jit_write_protect_np(0); }
+ ~EnableWritePermissionsOnMacArm64Scope() {
+ pthread_jit_write_protect_np(1);
+ }
+#pragma clang diagnostic pop
+#else
+ EnableWritePermissionsOnMacArm64Scope() {
+ // Define a constructor to avoid unused variable warnings.
+ }
+#endif
+ };
+
for (int i = 0; i < kNumIterations; ++i) {
auto buffer = AllocateAssemblerBuffer(kBufferSize, nullptr,
VirtualMemory::kMapAsJittable);
@@ -194,19 +212,13 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWriteExecute));
{
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
- // Make sure to switch memory to writable on M1 hardware.
- wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
-#endif
+ EnableWritePermissionsOnMacArm64Scope write_scope;
FloodWithInc(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
{
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
- // Make sure to switch memory to writable on M1 hardware.
- wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
-#endif
+ EnableWritePermissionsOnMacArm64Scope write_scope;
FloodWithNop(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index c82e1d6c4f..7d9f03b1c1 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -46,8 +46,7 @@ Handle<T> GetLexical(const char* name) {
isolate->native_context()->script_context_table(), isolate);
VariableLookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate, *script_contexts, *str_name,
- &lookup_result)) {
+ if (script_contexts->Lookup(str_name, &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 166bcae058..e57c5a6198 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -1068,6 +1068,17 @@ UNINITIALIZED_TEST(ConsoleTimeEvents) {
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
ScopedLoggerInitializer logger(isolate);
+ {
+ // setup console global.
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::String> name = v8::String::NewFromUtf8Literal(
+ isolate, "console", v8::NewStringType::kInternalized);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Value> console = context->GetExtrasBindingObject()
+ ->Get(context, name)
+ .ToLocalChecked();
+ context->Global()->Set(context, name, console).FromJust();
+ }
// Test that console time events are properly logged
const char* source_text =
"console.time();"
@@ -1129,10 +1140,7 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
logger.StopLogging();
- // Ignore all the log entries that happened before warmup
- size_t start = logger.IndexOfLine(
- {"function,first-execution", "warmUpEndMarkerFunction"});
- CHECK(start != std::string::npos);
+ // TODO(cbruni): Reimplement first-execution logging if needed.
std::vector<std::vector<std::string>> lines = {
// Create a new script
{"script,create"},
@@ -1159,23 +1167,17 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
// - execute eager functions.
{"function,parse-function,", ",lazyFunction"},
{"function,interpreter-lazy,", ",lazyFunction"},
- {"function,first-execution,", ",lazyFunction"},
{"function,parse-function,", ",lazyInnerFunction"},
{"function,interpreter-lazy,", ",lazyInnerFunction"},
- {"function,first-execution,", ",lazyInnerFunction"},
-
- {"function,first-execution,", ",eagerFunction"},
{"function,parse-function,", ",Foo"},
{"function,interpreter-lazy,", ",Foo"},
- {"function,first-execution,", ",Foo"},
{"function,parse-function,", ",Foo.foo"},
{"function,interpreter-lazy,", ",Foo.foo"},
- {"function,first-execution,", ",Foo.foo"},
};
- CHECK(logger.ContainsLinesInOrder(lines, start));
+ CHECK(logger.ContainsLinesInOrder(lines));
}
i::FLAG_log_function_events = false;
isolate->Dispose();
@@ -1193,8 +1195,10 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
logger.LogCompiledFunctions();
logger.StopLogging();
- i::Handle<i::Code> builtin = logger.i_isolate()->builtins()->code_handle(
- i::Builtin::kBooleanConstructor);
+ i::Isolate* i_isolate = logger.i_isolate();
+ i::Handle<i::Code> builtin = FromCodeT(
+ i_isolate->builtins()->code_handle(i::Builtin::kBooleanConstructor),
+ i_isolate);
v8::base::EmbeddedVector<char, 100> buffer;
// Should only be logged as "Builtin" with a name, never as "LazyCompile".
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 1190a4afb9..55a0441c52 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -73,7 +73,7 @@ TEST(ExtractLane) {
};
T t;
- __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, r5, lr});
for (int i = 0; i < 4; i++) {
__ mov(r4, Operand(i));
@@ -143,7 +143,7 @@ TEST(ExtractLane) {
}
}
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, r5, pc});
CodeDesc desc;
masm->GetCode(isolate, &desc);
@@ -210,7 +210,7 @@ TEST(ReplaceLane) {
};
T t;
- __ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
+ __ stm(db_w, sp, {r4, r5, r6, r7, lr});
__ veor(q0, q0, q0); // Zero
__ veor(q1, q1, q1); // Zero
@@ -274,7 +274,7 @@ TEST(ReplaceLane) {
__ vst1(Neon8, NeonListOperand(q14), NeonMemOperand(r4));
}
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | pc.bit());
+ __ ldm(ia_w, sp, {r4, r5, r6, r7, pc});
CodeDesc desc;
masm->GetCode(isolate, &desc);
diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
index d3b597a5d7..ee1e58e0f4 100644
--- a/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
@@ -508,7 +508,7 @@ static const std::vector<int64_t> ffint_ftintrz_int64_test_values() {
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
- // clang-off on
+// clang-format on
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
#define FOR_INPUTS(ctype, itype, var, test_vector) \
@@ -616,7 +616,7 @@ TEST(Ffint_d_l_Ftintrz_l_ud) {
CcTest::InitializeVM();
FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) {
int64_t input = *i;
- uint64_t abs_input = (input < 0) ? -input : input;
+ uint64_t abs_input = (input >= 0 || input == INT64_MIN) ? input : -input;
auto fn = [](MacroAssembler* masm) {
__ movgr2fr_d(f8, a0);
__ ffint_d_l(f10, f8);
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 5651364cd7..430c4a31d9 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -102,7 +102,7 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi value) {
__ movl(rax, Immediate(id));
__ Move(rcx, value);
__ Move(rdx, static_cast<intptr_t>(value.ptr()));
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, exit);
}
@@ -487,9 +487,6 @@ TEST(EmbeddedObj) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base));
- if (!V8_EXTERNAL_CODE_SPACE_BOOL) {
- CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base));
- }
} else {
CHECK(RelocInfo::IsFullEmbeddedObject(mode));
CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base));
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index f2b3842774..5501c5f80a 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -3657,7 +3657,6 @@ TEST(MaybeAssignedParameters) {
base::ScopedVector<char> program(Utf8LengthHelper(source) +
Utf8LengthHelper(suffix) + 1);
base::SNPrintF(program, "%s%s", source, suffix);
- std::unique_ptr<i::ParseInfo> info;
printf("%s\n", program.begin());
v8::Local<v8::Value> v = CompileRun(program.begin());
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
@@ -3668,16 +3667,15 @@ TEST(MaybeAssignedParameters) {
i::UnoptimizedCompileFlags flags =
i::UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared);
flags.set_allow_lazy_parsing(allow_lazy);
- info = std::make_unique<i::ParseInfo>(isolate, flags, &state,
- &reusable_state);
- CHECK_PARSE_FUNCTION(info.get(), shared, isolate);
+ i::ParseInfo info(isolate, flags, &state, &reusable_state);
+ CHECK_PARSE_FUNCTION(&info, shared, isolate);
- i::Scope* scope = info->literal()->scope();
+ i::Scope* scope = info.literal()->scope();
CHECK(!scope->AsDeclarationScope()->was_lazily_parsed());
CHECK_NULL(scope->sibling());
CHECK(scope->is_function_scope());
const i::AstRawString* var_name =
- info->ast_value_factory()->GetOneByteString("arg");
+ info.ast_value_factory()->GetOneByteString("arg");
i::Variable* var = scope->LookupForTesting(var_name);
CHECK(var->is_used() || !assigned);
bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
@@ -3708,12 +3706,11 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
flags.set_is_module(module);
flags.set_allow_lazy_parsing(allow_lazy_parsing);
- std::unique_ptr<i::ParseInfo> info =
- std::make_unique<i::ParseInfo>(isolate, flags, &state, &reusable_state);
+ i::ParseInfo info(isolate, flags, &state, &reusable_state);
- CHECK_PARSE_PROGRAM(info.get(), script, isolate);
+ CHECK_PARSE_PROGRAM(&info, script, isolate);
- i::Scope* scope = info->literal()->scope();
+ i::Scope* scope = info.literal()->scope();
CHECK(!scope->AsDeclarationScope()->was_lazily_parsed());
CHECK_NULL(scope->sibling());
CHECK(module ? scope->is_module_scope() : scope->is_script_scope());
@@ -3723,7 +3720,7 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
// Find the variable.
scope = i::ScopeTestHelper::FindScope(scope, input.location);
const i::AstRawString* var_name =
- info->ast_value_factory()->GetOneByteString(variable);
+ info.ast_value_factory()->GetOneByteString(variable);
var = scope->LookupForTesting(var_name);
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 3bc84e4e48..4564afd480 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -477,7 +477,7 @@ TEST(SampleIds) {
// (root)#1 -> aaa #2 -> bbb #4 -> ccc #5 - sample2
// -> ccc #6 -> aaa #7 - sample3
TickSample sample1;
- sample1.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample1.timestamp = v8::base::TimeTicks::Now();
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
@@ -487,7 +487,7 @@ TEST(SampleIds) {
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
TickSample sample2;
- sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample2.timestamp = v8::base::TimeTicks::Now();
sample2.pc = ToPointer(0x1925);
sample2.stack[0] = ToPointer(0x1780);
sample2.stack[1] = ToPointer(0x10000); // non-existent.
@@ -499,7 +499,7 @@ TEST(SampleIds) {
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
TickSample sample3;
- sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample3.timestamp = v8::base::TimeTicks::Now();
sample3.pc = ToPointer(0x1510);
sample3.stack[0] = ToPointer(0x1910);
sample3.stack[1] = ToPointer(0x1610);
@@ -598,7 +598,7 @@ TEST(MaxSamplesCallback) {
CodeMap code_map(storage);
Symbolizer symbolizer(&code_map);
TickSample sample1;
- sample1.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample1.timestamp = v8::base::TimeTicks::Now();
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
@@ -608,7 +608,7 @@ TEST(MaxSamplesCallback) {
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
CHECK_EQ(0, mock_platform->posted_count());
TickSample sample2;
- sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample2.timestamp = v8::base::TimeTicks::Now();
sample2.pc = ToPointer(0x1925);
sample2.stack[0] = ToPointer(0x1780);
sample2.frames_count = 2;
@@ -618,7 +618,7 @@ TEST(MaxSamplesCallback) {
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
CHECK_EQ(1, mock_platform->posted_count());
TickSample sample3;
- sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample3.timestamp = v8::base::TimeTicks::Now();
sample3.pc = ToPointer(0x1510);
sample3.frames_count = 3;
symbolized = symbolizer.SymbolizeTickSample(sample3);
@@ -652,10 +652,9 @@ TEST(NoSamples) {
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
auto symbolized = symbolizer.SymbolizeTickSample(sample1);
- profiles.AddPathToCurrentProfiles(v8::base::TimeTicks::HighResolutionNow(),
- symbolized.stack_trace, symbolized.src_line,
- true, base::TimeDelta(), StateTag::JS,
- EmbedderStateTag::EMPTY);
+ profiles.AddPathToCurrentProfiles(
+ v8::base::TimeTicks::Now(), symbolized.stack_trace, symbolized.src_line,
+ true, base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
CpuProfile* profile = profiles.StopProfiling("");
unsigned nodeId = 1;
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index eaa05af0ca..9c19b399df 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -651,7 +651,8 @@ UNINITIALIZED_TEST(ContextSerializerCustomContext) {
CHECK(context->global_proxy() == *global_proxy);
Handle<String> o = isolate->factory()->NewStringFromAsciiChecked("o");
Handle<JSObject> global_object(context->global_object(), isolate);
- Handle<Object> property = JSReceiver::GetDataProperty(global_object, o);
+ Handle<Object> property =
+ JSReceiver::GetDataProperty(isolate, global_object, o);
CHECK(property.is_identical_to(global_proxy));
v8::Local<v8::Context> v8_context = v8::Utils::ToLocal(context);
@@ -2163,9 +2164,9 @@ TEST(CodeSerializerLargeStrings) {
CHECK_EQ(6 * 1999999, Handle<String>::cast(copy_result)->length());
Handle<Object> property = JSReceiver::GetDataProperty(
- isolate->global_object(), f->NewStringFromAsciiChecked("s"));
+ isolate, isolate->global_object(), f->NewStringFromAsciiChecked("s"));
CHECK(isolate->heap()->InSpace(HeapObject::cast(*property), LO_SPACE));
- property = JSReceiver::GetDataProperty(isolate->global_object(),
+ property = JSReceiver::GetDataProperty(isolate, isolate->global_object(),
f->NewStringFromAsciiChecked("t"));
CHECK(isolate->heap()->InSpace(HeapObject::cast(*property), LO_SPACE));
// Make sure we do not serialize too much, e.g. include the source string.
@@ -3414,8 +3415,8 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
object_template->NewInstance(context).ToLocalChecked();
v8::Local<v8::Object> c =
object_template->NewInstance(context).ToLocalChecked();
- v8::Local<v8::External> null_external =
- v8::External::New(isolate, nullptr);
+ v8::Local<v8::External> resource_external =
+ v8::External::New(isolate, &serializable_one_byte_resource);
v8::Local<v8::External> field_external =
v8::External::New(isolate, &serialized_static_field);
@@ -3426,7 +3427,7 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
b->SetAlignedPointerInInternalField(1, b1);
c->SetAlignedPointerInInternalField(1, c1);
- a->SetInternalField(2, null_external);
+ a->SetInternalField(2, resource_external);
b->SetInternalField(2, field_external);
c->SetInternalField(2, v8_num(35));
CHECK(context->Global()->Set(context, v8_str("a"), a).FromJust());
@@ -3524,7 +3525,8 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
CHECK_EQ(30u, c1->data);
CHECK(a2->IsExternal());
- CHECK_NULL(v8::Local<v8::External>::Cast(a2)->Value());
+ CHECK_EQ(static_cast<void*>(&serializable_one_byte_resource),
+ v8::Local<v8::External>::Cast(a2)->Value());
CHECK(b2->IsExternal());
CHECK_EQ(static_cast<void*>(&serialized_static_field),
v8::Local<v8::External>::Cast(b2)->Value());
@@ -3835,6 +3837,39 @@ UNINITIALIZED_TEST(SnapshotAccessorDescriptors) {
delete[] data1.data;
}
+UNINITIALIZED_TEST(SnapshotObjectDefinePropertyWhenNewGlobalTemplate) {
+ const char* source1 =
+ "Object.defineProperty(this, 'property1', {\n"
+ " value: 42,\n"
+ " writable: false\n"
+ "});\n"
+ "var bValue = 38;\n"
+ "Object.defineProperty(this, 'property2', {\n"
+ " get() { return bValue; },\n"
+ " set(newValue) { bValue = newValue; }\n"
+ "});";
+ v8::StartupData data1 = CreateSnapshotDataBlob(source1);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ params1.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ v8::Isolate* isolate1 = v8::Isolate::New(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate1);
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate1, nullptr, global_template);
+ v8::Context::Scope c_scope(context);
+ ExpectInt32("this.property1", 42);
+ ExpectInt32("this.property2", 38);
+ }
+ isolate1->Dispose();
+ delete[] data1.data;
+}
+
UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
@@ -4307,11 +4342,17 @@ UNINITIALIZED_TEST(ClassFieldsReferenceClassVariable) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
CompileRun(
- "class Klass {"
- " #consturctor = Klass;"
+ "class PrivateFieldClass {"
+ " #consturctor = PrivateFieldClass;"
" func() {"
" return this.#consturctor;"
" }"
+ "}"
+ "class PublicFieldClass {"
+ " ctor = PublicFieldClass;"
+ " func() {"
+ " return this.ctor;"
+ " }"
"}");
creator.SetDefaultContext(context);
}
@@ -4329,7 +4370,8 @@ UNINITIALIZED_TEST(ClassFieldsReferenceClassVariable) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
CHECK(!context.IsEmpty());
v8::Context::Scope context_scope(context);
- ExpectTrue("new Klass().func() === Klass");
+ ExpectTrue("new PrivateFieldClass().func() === PrivateFieldClass");
+ ExpectTrue("new PublicFieldClass().func() === PublicFieldClass");
}
isolate->Dispose();
delete[] blob.data;
diff --git a/deps/v8/test/cctest/test-shared-strings.cc b/deps/v8/test/cctest/test-shared-strings.cc
index e5e32e3ab8..0d60b9336e 100644
--- a/deps/v8/test/cctest/test-shared-strings.cc
+++ b/deps/v8/test/cctest/test-shared-strings.cc
@@ -235,103 +235,142 @@ UNINITIALIZED_TEST(YoungInternalization) {
CHECK_EQ(*two_byte_intern1, *two_byte_intern2);
}
-enum TestHitOrMiss { kTestMiss, kTestHit };
-
-class ConcurrentInternalizationThread final : public v8::base::Thread {
+class ConcurrentStringThreadBase : public v8::base::Thread {
public:
- ConcurrentInternalizationThread(MultiClientIsolateTest* test,
- Handle<FixedArray> shared_strings,
- TestHitOrMiss hit_or_miss,
- base::Semaphore* sema_ready,
- base::Semaphore* sema_execute_start,
- base::Semaphore* sema_execute_complete)
- : v8::base::Thread(
- base::Thread::Options("ConcurrentInternalizationThread")),
+ ConcurrentStringThreadBase(const char* name, MultiClientIsolateTest* test,
+ Handle<FixedArray> shared_strings,
+ base::Semaphore* sema_ready,
+ base::Semaphore* sema_execute_start,
+ base::Semaphore* sema_execute_complete)
+ : v8::base::Thread(base::Thread::Options(name)), // typeid(this).name?
test_(test),
shared_strings_(shared_strings),
- hit_or_miss_(hit_or_miss),
sema_ready_(sema_ready),
sema_execute_start_(sema_execute_start),
sema_execute_complete_(sema_execute_complete) {}
+ virtual void Setup() {}
+ virtual void RunForString(Handle<String> string) = 0;
+ virtual void Teardown() {}
void Run() override {
- v8::Isolate* isolate = test_->NewClientIsolate();
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- Factory* factory = i_isolate->factory();
+ isolate = test_->NewClientIsolate();
+ i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ Setup();
sema_ready_->Signal();
sema_execute_start_->Wait();
HandleScope scope(i_isolate);
-
- Handle<String> manual_thin_actual =
- factory->InternalizeString(factory->NewStringFromAsciiChecked("TODO"));
-
for (int i = 0; i < shared_strings_->length(); i++) {
Handle<String> input_string(String::cast(shared_strings_->get(i)),
i_isolate);
- CHECK(input_string->IsShared());
- if (hit_or_miss_ == kTestMiss) {
- Handle<String> interned = factory->InternalizeString(input_string);
- CHECK_EQ(*input_string, *interned);
- } else {
- // TODO(v8:12007): Make this branch also test InternalizeString. But
- // LookupString needs to be made threadsafe first and restart-aware.
- input_string->MakeThin(i_isolate, *manual_thin_actual);
- CHECK(input_string->IsThinString());
- }
+ RunForString(input_string);
}
sema_execute_complete_->Signal();
+
+ Teardown();
}
- private:
+ protected:
+ v8::Isolate* isolate;
+ Isolate* i_isolate;
MultiClientIsolateTest* test_;
Handle<FixedArray> shared_strings_;
- TestHitOrMiss hit_or_miss_;
base::Semaphore* sema_ready_;
base::Semaphore* sema_execute_start_;
base::Semaphore* sema_execute_complete_;
};
-namespace {
-void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
- if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
- if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
-
- FLAG_shared_string_table = true;
+enum TestHitOrMiss { kTestMiss, kTestHit };
- MultiClientIsolateTest test;
+class ConcurrentInternalizationThread final
+ : public ConcurrentStringThreadBase {
+ public:
+ ConcurrentInternalizationThread(MultiClientIsolateTest* test,
+ Handle<FixedArray> shared_strings,
+ TestHitOrMiss hit_or_miss,
+ base::Semaphore* sema_ready,
+ base::Semaphore* sema_execute_start,
+ base::Semaphore* sema_execute_complete)
+ : ConcurrentStringThreadBase("ConcurrentInternalizationThread", test,
+ shared_strings, sema_ready,
+ sema_execute_start, sema_execute_complete),
+ hit_or_miss_(hit_or_miss) {}
+
+ void Setup() override { factory = i_isolate->factory(); }
+
+ void RunForString(Handle<String> input_string) override {
+ CHECK(input_string->IsShared());
+ Handle<String> interned = factory->InternalizeString(input_string);
+ CHECK(interned->IsShared());
+ if (hit_or_miss_ == kTestMiss) {
+ CHECK_EQ(*input_string, *interned);
+ } else {
+ // TODO(v8:12007): In-place internalization currently do not migrate
+ // shared strings to ThinStrings. This is triviailly threadsafe for
+ // character access but bad for performance, as run-time
+ // internalizations do not speed up comparisons for shared strings.
+ CHECK(!input_string->IsThinString());
+ CHECK_NE(*input_string, *interned);
+ CHECK(String::Equals(i_isolate, input_string, interned));
+ }
+ }
- constexpr int kThreads = 4;
- constexpr int kStrings = 4096;
+ private:
+ TestHitOrMiss hit_or_miss_;
+ Factory* factory;
+};
- v8::Isolate* isolate = test.NewClientIsolate();
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- Factory* factory = i_isolate->factory();
+namespace {
- HandleScope scope(i_isolate);
+Handle<FixedArray> CreateSharedOneByteStrings(Isolate* isolate,
+ Factory* factory, int count,
+ bool internalize) {
Handle<FixedArray> shared_strings =
- factory->NewFixedArray(kStrings, AllocationType::kSharedOld);
- for (int i = 0; i < kStrings; i++) {
+ factory->NewFixedArray(count, AllocationType::kSharedOld);
+ for (int i = 0; i < count; i++) {
char* ascii = new char[i + 3];
- // Don't make single character strings, which might will end up
- // deduplicating to an RO string and mess up the string table hit test.
+ // Don't make single character strings, which will end up deduplicating to
+ // an RO string and mess up the string table hit test.
for (int j = 0; j < i + 2; j++) ascii[j] = 'a';
ascii[i + 2] = '\0';
- if (hit_or_miss == kTestHit) {
+ if (internalize) {
// When testing concurrent string table hits, pre-internalize a string of
// the same contents so all subsequent internalizations are hits.
factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii));
}
Handle<String> string = String::Share(
- i_isolate,
+ isolate,
factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld));
CHECK(string->IsShared());
string->EnsureHash();
shared_strings->set(i, *string);
delete[] ascii;
}
+ return shared_strings;
+}
+
+void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
+ if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
+ if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
+
+ FLAG_shared_string_table = true;
+
+ MultiClientIsolateTest test;
+
+ constexpr int kThreads = 4;
+ constexpr int kStrings = 4096;
+
+ v8::Isolate* isolate = test.NewClientIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ Factory* factory = i_isolate->factory();
+
+ HandleScope scope(i_isolate);
+
+ Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
+ i_isolate, factory, kStrings, hit_or_miss == kTestHit);
base::Semaphore sema_ready(0);
base::Semaphore sema_execute_start(0);
@@ -363,6 +402,82 @@ UNINITIALIZED_TEST(ConcurrentInternalizationHit) {
TestConcurrentInternalization(kTestHit);
}
+class ConcurrentStringTableLookupThread final
+ : public ConcurrentStringThreadBase {
+ public:
+ ConcurrentStringTableLookupThread(MultiClientIsolateTest* test,
+ Handle<FixedArray> shared_strings,
+ base::Semaphore* sema_ready,
+ base::Semaphore* sema_execute_start,
+ base::Semaphore* sema_execute_complete)
+ : ConcurrentStringThreadBase("ConcurrentStringTableLookup", test,
+ shared_strings, sema_ready,
+ sema_execute_start, sema_execute_complete) {}
+
+ void RunForString(Handle<String> input_string) override {
+ CHECK(input_string->IsShared());
+ Object result = Object(StringTable::TryStringToIndexOrLookupExisting(
+ i_isolate, input_string->ptr()));
+ if (result.IsString()) {
+ String internalized = String::cast(result);
+ CHECK(internalized.IsInternalizedString());
+ CHECK_IMPLIES(input_string->IsInternalizedString(),
+ *input_string == internalized);
+ } else {
+ CHECK_EQ(Smi::cast(result).value(), ResultSentinel::kNotFound);
+ }
+ }
+};
+
+UNINITIALIZED_TEST(ConcurrentStringTableLookup) {
+ if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
+ if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
+
+ FLAG_shared_string_table = true;
+
+ MultiClientIsolateTest test;
+
+ constexpr int kTotalThreads = 4;
+ constexpr int kInternalizationThreads = 1;
+ constexpr int kStrings = 4096;
+
+ v8::Isolate* isolate = test.NewClientIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ Factory* factory = i_isolate->factory();
+
+ HandleScope scope(i_isolate);
+
+ Handle<FixedArray> shared_strings =
+ CreateSharedOneByteStrings(i_isolate, factory, kStrings, false);
+
+ base::Semaphore sema_ready(0);
+ base::Semaphore sema_execute_start(0);
+ base::Semaphore sema_execute_complete(0);
+ std::vector<std::unique_ptr<v8::base::Thread>> threads;
+ for (int i = 0; i < kInternalizationThreads; i++) {
+ auto thread = std::make_unique<ConcurrentInternalizationThread>(
+ &test, shared_strings, kTestMiss, &sema_ready, &sema_execute_start,
+ &sema_execute_complete);
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+ for (int i = 0; i < kTotalThreads - kInternalizationThreads; i++) {
+ auto thread = std::make_unique<ConcurrentStringTableLookupThread>(
+ &test, shared_strings, &sema_ready, &sema_execute_start,
+ &sema_execute_complete);
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ for (int i = 0; i < kTotalThreads; i++) sema_ready.Wait();
+ for (int i = 0; i < kTotalThreads; i++) sema_execute_start.Signal();
+ for (int i = 0; i < kTotalThreads; i++) sema_execute_complete.Wait();
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+}
+
namespace {
void CheckSharedStringIsEqualCopy(Handle<String> shared,
Handle<String> original) {
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 80bcd5cf31..40394923af 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1368,11 +1368,6 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
};
TEST(InternalizeExternal) {
-#ifdef ENABLE_MINOR_MC
- // TODO(mlippautz): Remove once we add support for forwarding ThinStrings in
- // minor MC
- if (FLAG_minor_mc) return;
-#endif // ENABLE_MINOR_MC
FLAG_stress_incremental_marking = false;
CcTest::InitializeVM();
i::Isolate* isolate = CcTest::i_isolate();
@@ -1840,13 +1835,13 @@ void TestString(i::Isolate* isolate, const IndexData& data) {
CHECK(s->AsIntegerIndex(&index));
CHECK_EQ(data.integer_index, index);
s->EnsureHash();
- CHECK_EQ(0, s->raw_hash_field() & String::kIsNotIntegerIndexMask);
+ CHECK(String::IsIntegerIndex(s->raw_hash_field()));
CHECK(s->HasHashCode());
}
if (!s->HasHashCode()) s->EnsureHash();
CHECK(s->HasHashCode());
if (!data.is_integer_index) {
- CHECK_NE(0, s->raw_hash_field() & String::kIsNotIntegerIndexMask);
+ CHECK(String::IsHash(s->raw_hash_field()));
}
}
@@ -1858,12 +1853,12 @@ TEST(HashArrayIndexStrings) {
v8::HandleScope scope(CcTest::isolate());
i::Isolate* isolate = CcTest::i_isolate();
- CHECK_EQ(StringHasher::MakeArrayIndexHash(0 /* value */, 1 /* length */) >>
- Name::kHashShift,
+ CHECK_EQ(Name::HashBits::decode(
+ StringHasher::MakeArrayIndexHash(0 /* value */, 1 /* length */)),
isolate->factory()->zero_string()->hash());
- CHECK_EQ(StringHasher::MakeArrayIndexHash(1 /* value */, 1 /* length */) >>
- Name::kHashShift,
+ CHECK_EQ(Name::HashBits::decode(
+ StringHasher::MakeArrayIndexHash(1 /* value */, 1 /* length */)),
isolate->factory()->one_string()->hash());
IndexData tests[] = {
@@ -2136,15 +2131,19 @@ TEST(CheckCachedDataInternalExternalUncachedString) {
// that we indeed cached it.
Handle<ExternalOneByteString> external_string =
Handle<ExternalOneByteString>::cast(string);
- CHECK(external_string->is_uncached());
+ // If sandboxed external pointers are enabled, string objects will always be
+ // cacheable because they are smaller.
+ CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached());
CHECK(external_string->resource()->IsCacheable());
- CHECK_NOT_NULL(external_string->resource()->cached_data());
- CHECK_EQ(external_string->resource()->cached_data(),
- external_string->resource()->data());
+ if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) {
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+ }
}
// Show that we cache the data pointer for internal, external and uncached
-// strings with cacheable resources through MakeExternal. One byte version.
+// strings with cacheable resources through MakeExternal. Two byte version.
TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) {
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
@@ -2175,11 +2174,15 @@ TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) {
// that we indeed cached it.
Handle<ExternalTwoByteString> external_string =
Handle<ExternalTwoByteString>::cast(string);
- CHECK(external_string->is_uncached());
+ // If sandboxed external pointers are enabled, string objects will always be
+ // cacheable because they are smaller.
+ CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached());
CHECK(external_string->resource()->IsCacheable());
- CHECK_NOT_NULL(external_string->resource()->cached_data());
- CHECK_EQ(external_string->resource()->cached_data(),
- external_string->resource()->data());
+ if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) {
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+ }
}
} // namespace test_strings
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
index 863177bb22..abd3580a97 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
@@ -266,7 +266,8 @@ Handle<Code> CSATestRunner::create_find_entry(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return isolate->builtins()->code_handle(Builtin::kIllegal);
+ return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
+ isolate);
}
STATIC_ASSERT(kFindEntryParams == 2); // (table, key)
compiler::CodeAssemblerTester asm_tester(isolate, kFindEntryParams + 1);
@@ -338,7 +339,8 @@ Handle<Code> CSATestRunner::create_delete(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return isolate->builtins()->code_handle(Builtin::kIllegal);
+ return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
+ isolate);
}
STATIC_ASSERT(kDeleteParams == 2); // (table, entry)
compiler::CodeAssemblerTester asm_tester(isolate, kDeleteParams + 1);
@@ -363,7 +365,8 @@ Handle<Code> CSATestRunner::create_add(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return isolate->builtins()->code_handle(Builtin::kIllegal);
+ return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
+ isolate);
}
STATIC_ASSERT(kAddParams == 4); // (table, key, value, details)
compiler::CodeAssemblerTester asm_tester(isolate, kAddParams + 1);
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
index 539d71c823..8c9746bb95 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
@@ -27,8 +27,7 @@ std::vector<PropertyDetails> MakeDistinctDetails() {
if (!configurable) {
attrs |= PropertyAttributes::DONT_DELETE;
}
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(attrs);
+ auto attributes = PropertyAttributesFromInt(attrs);
PropertyDetails details(kind, attributes,
PropertyCellType::kNoCell);
details = details.CopyWithConstness(constness);
@@ -100,15 +99,9 @@ Handle<Name> CreateKeyWithHash(Isolate* isolate, KeyCache& keys,
fake_hash |= swiss_table::H2(override_with);
}
- // Ensure that just doing a shift below is correct.
- static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
- static_assert(Name::kHashNotComputedMask == 1,
- "This test needs updating");
- static_assert(Name::kIsNotIntegerIndexMask == 2,
- "This test needs updating");
-
// Prepare what to put into the hash field.
- uint32_t hash_field = fake_hash << Name::kHashShift;
+ uint32_t hash_field =
+ Name::CreateHashFieldValue(fake_hash, Name::HashFieldType::kHash);
CHECK_NE(hash_field, 0);
key_symbol->set_raw_hash_field(hash_field);
diff --git a/deps/v8/test/cctest/test-temporal-parser.cc b/deps/v8/test/cctest/test-temporal-parser.cc
index a5da7f8462..9b74202b55 100644
--- a/deps/v8/test/cctest/test-temporal-parser.cc
+++ b/deps/v8/test/cctest/test-temporal-parser.cc
@@ -53,19 +53,16 @@ void CheckTimeZoneNumericUTCOffset(const ParsedISO8601Result& actual,
CHECK_EQ(tzuo_nanosecond, actual.tzuo_nanosecond);
}
-#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(R) \
- void VerifyParseTemporal##R##StringSuccess( \
- Isolate* isolate, const char* str, int32_t date_year, \
- int32_t date_month, int32_t date_day, const char* calendar_name) { \
- bool satisfy = false; \
- Handle<String> input = CcTest::MakeString(str); \
- ParsedISO8601Result actual = \
- TemporalParser::ParseTemporal##R##String(isolate, input, &satisfy) \
- .ToChecked(); \
- CHECK(satisfy); \
- CheckDate(actual, date_year, date_month, date_day); \
- CheckCalendar(isolate, input, actual.calendar_name_start, \
- actual.calendar_name_length, calendar_name); \
+#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(R) \
+ void VerifyParseTemporal##R##StringSuccess( \
+ Isolate* isolate, const char* str, int32_t date_year, \
+ int32_t date_month, int32_t date_day, const char* calendar_name) { \
+ Handle<String> input = CcTest::MakeString(str); \
+ ParsedISO8601Result actual = \
+ TemporalParser::ParseTemporal##R##String(isolate, input).ToChecked(); \
+ CheckDate(actual, date_year, date_month, date_day); \
+ CheckCalendar(isolate, input, actual.calendar_name_start, \
+ actual.calendar_name_length, calendar_name); \
}
IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(Date)
@@ -73,22 +70,19 @@ IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(YearMonth)
IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(MonthDay)
IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(RelativeTo)
-#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_TIME_STRING_SUCCESS(R) \
- void VerifyParseTemporal##R##StringSuccess( \
- Isolate* isolate, const char* str, int32_t date_year, \
- int32_t date_month, int32_t date_day, int32_t time_hour, \
- int32_t time_minute, int32_t time_second, int32_t time_nanosecond, \
- const char* calendar_name) { \
- bool satisfy = false; \
- Handle<String> input = CcTest::MakeString(str); \
- ParsedISO8601Result actual = \
- TemporalParser::ParseTemporal##R##String(isolate, input, &satisfy) \
- .ToChecked(); \
- CHECK(satisfy); \
- CheckDate(actual, date_year, date_month, date_day); \
- CheckCalendar(isolate, input, actual.calendar_name_start, \
- actual.calendar_name_length, calendar_name); \
- CheckTime(actual, time_hour, time_minute, time_second, time_nanosecond); \
+#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_TIME_STRING_SUCCESS(R) \
+ void VerifyParseTemporal##R##StringSuccess( \
+ Isolate* isolate, const char* str, int32_t date_year, \
+ int32_t date_month, int32_t date_day, int32_t time_hour, \
+ int32_t time_minute, int32_t time_second, int32_t time_nanosecond, \
+ const char* calendar_name) { \
+ Handle<String> input = CcTest::MakeString(str); \
+ ParsedISO8601Result actual = \
+ TemporalParser::ParseTemporal##R##String(isolate, input).ToChecked(); \
+ CheckDate(actual, date_year, date_month, date_day); \
+ CheckCalendar(isolate, input, actual.calendar_name_start, \
+ actual.calendar_name_length, calendar_name); \
+ CheckTime(actual, time_hour, time_minute, time_second, time_nanosecond); \
}
IMPL_VERIFY_PARSE_TEMPORAL_DATE_TIME_STRING_SUCCESS(DateTime)
@@ -102,12 +96,9 @@ IMPL_VERIFY_PARSE_TEMPORAL_DATE_TIME_STRING_SUCCESS(RelativeTo)
const char* calendar_name, int32_t tzuo_sign, int32_t tzuo_hour, \
int32_t tzuo_minute, int32_t tzuo_second, int32_t tzuo_nanosecond, \
bool utc_designator, const char* tzi_name) { \
- bool satisfy = false; \
Handle<String> input = CcTest::MakeString(str); \
ParsedISO8601Result actual = \
- TemporalParser::ParseTemporal##R##String(isolate, input, &satisfy) \
- .ToChecked(); \
- CHECK(satisfy); \
+ TemporalParser::ParseTemporal##R##String(isolate, input).ToChecked(); \
CheckDate(actual, date_year, date_month, date_day); \
CheckCalendar(isolate, input, actual.calendar_name_start, \
actual.calendar_name_length, calendar_name); \
@@ -129,12 +120,9 @@ void VerifyParseTemporalInstantStringSuccess(
Isolate* isolate, const char* str, bool utc_designator, int32_t tzuo_sign,
int32_t tzuo_hour, int32_t tzuo_minute, int32_t tzuo_second,
int32_t tzuo_nanosecond) {
- bool satisfy = false;
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Result actual =
- TemporalParser::ParseTemporalInstantString(isolate, input, &satisfy)
- .ToChecked();
- CHECK(satisfy);
+ TemporalParser::ParseTemporalInstantString(isolate, input).ToChecked();
CHECK_EQ(utc_designator, actual.utc_designator);
if (!utc_designator) {
CheckTimeZoneNumericUTCOffset(actual, tzuo_sign, tzuo_hour, tzuo_minute,
@@ -144,33 +132,25 @@ void VerifyParseTemporalInstantStringSuccess(
void VerifyParseTemporalCalendarStringSuccess(
Isolate* isolate, const char* str, const std::string& calendar_name) {
- bool satisfy = false;
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Result actual =
- TemporalParser::ParseTemporalCalendarString(isolate, input, &satisfy)
- .ToChecked();
- CHECK(satisfy);
+ TemporalParser::ParseTemporalCalendarString(isolate, input).ToChecked();
CheckCalendar(isolate, input, actual.calendar_name_start,
actual.calendar_name_length, calendar_name);
}
-#define VERIFY_PARSE_FAIL(R, str) \
- do { \
- bool satisfy = false; \
- Handle<String> input = CcTest::MakeString(str); \
- TemporalParser::Parse##R(isolate, input, &satisfy).ToChecked(); \
- CHECK(satisfy == false); \
+#define VERIFY_PARSE_FAIL(R, str) \
+ do { \
+ Handle<String> input = CcTest::MakeString(str); \
+ CHECK(TemporalParser::Parse##R(isolate, input).IsNothing()); \
} while (false)
void VerifyParseTemporalTimeStringSuccess(
Isolate* isolate, const char* str, int32_t time_hour, int32_t time_minute,
int32_t time_second, int32_t time_nanosecond, const char* calendar_name) {
- bool satisfy = false;
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Result actual =
- TemporalParser::ParseTemporalTimeString(isolate, input, &satisfy)
- .ToChecked();
- CHECK(satisfy);
+ TemporalParser::ParseTemporalTimeString(isolate, input).ToChecked();
CheckTime(actual, time_hour, time_minute, time_second, time_nanosecond);
CheckCalendar(isolate, input, actual.calendar_name_start,
actual.calendar_name_length, calendar_name);
@@ -1989,14 +1969,11 @@ void VerifyParseDurationSuccess(Isolate* isolate, const char* str, int64_t sign,
int64_t hours_fraction, int64_t whole_minutes,
int64_t minutes_fraction, int64_t whole_seconds,
int64_t seconds_fraction) {
- bool satisfy = false;
Handle<String> input = CcTest::MakeString(str);
CheckDuration(
- TemporalParser::ParseTemporalDurationString(isolate, input, &satisfy)
- .ToChecked(),
+ TemporalParser::ParseTemporalDurationString(isolate, input).ToChecked(),
sign, years, months, weeks, days, whole_hours, hours_fraction,
whole_minutes, minutes_fraction, whole_seconds, seconds_fraction);
- CHECK(satisfy);
}
void VerifyParseDurationSuccess(Isolate* isolate, const char* str,
@@ -2009,26 +1986,20 @@ void VerifyParseDurationSuccess(Isolate* isolate, const char* str,
}
void VerifyParseDurationWithPositiveSign(Isolate* isolate, const char* str) {
- bool satisfy1 = false;
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Duration expected =
- TemporalParser::ParseTemporalDurationString(isolate, input, &satisfy1)
- .ToChecked();
- CHECK(satisfy1);
+ TemporalParser::ParseTemporalDurationString(isolate, input).ToChecked();
std::string with_sign("+");
with_sign += str;
VerifyParseDurationSuccess(isolate, with_sign.c_str(), expected);
}
void VerifyParseDurationWithMinusSign(Isolate* isolate, const char* str) {
- bool satisfy1 = false;
std::string with_sign("-");
with_sign += str;
Handle<String> input = CcTest::MakeString(with_sign.c_str());
ParsedISO8601Duration expected =
- TemporalParser::ParseTemporalDurationString(isolate, input, &satisfy1)
- .ToChecked();
- CHECK(satisfy1);
+ TemporalParser::ParseTemporalDurationString(isolate, input).ToChecked();
with_sign = u8"\u2212";
with_sign += str;
VerifyParseDurationSuccess(isolate, with_sign.c_str(), expected);
@@ -2039,12 +2010,9 @@ char asciitolower(char in) {
}
void VerifyParseDurationWithLowerCase(Isolate* isolate, const char* str) {
- bool satisfy1 = false;
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Duration expected =
- TemporalParser::ParseTemporalDurationString(isolate, input, &satisfy1)
- .ToChecked();
- CHECK(satisfy1);
+ TemporalParser::ParseTemporalDurationString(isolate, input).ToChecked();
std::string lower(str);
std::transform(lower.begin(), lower.end(), lower.begin(), asciitolower);
VerifyParseDurationSuccess(isolate, lower.c_str(), expected);
@@ -2052,14 +2020,11 @@ void VerifyParseDurationWithLowerCase(Isolate* isolate, const char* str) {
char commatoperiod(char in) { return (in == ',') ? '.' : in; }
void VerifyParseDurationWithComma(Isolate* isolate, const char* str) {
- bool satisfy1 = false;
std::string period(str);
std::transform(period.begin(), period.end(), period.begin(), commatoperiod);
Handle<String> input = CcTest::MakeString(str);
ParsedISO8601Duration expected =
- TemporalParser::ParseTemporalDurationString(isolate, input, &satisfy1)
- .ToChecked();
- CHECK(satisfy1);
+ TemporalParser::ParseTemporalDurationString(isolate, input).ToChecked();
VerifyParseDurationSuccess(isolate, str, expected);
}
@@ -2324,13 +2289,10 @@ TEST(TemporalDurationStringNotSatisfy) {
void VerifyParseTimeZoneNumericUTCOffsetSuccess(
Isolate* isolate, const char* str, int32_t tzuo_sign, int32_t tzuo_hour,
int32_t tzuo_minute, int32_t tzuo_second, int32_t tzuo_nanosecond) {
- bool satisfy = false;
Handle<String> input = CcTest::MakeString(str);
CheckTimeZoneNumericUTCOffset(
- TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, input, &satisfy)
- .ToChecked(),
+ TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, input).ToChecked(),
tzuo_sign, tzuo_hour, tzuo_minute, tzuo_second, tzuo_nanosecond);
- CHECK(satisfy);
}
TEST(TimeZoneNumericUTCOffsetBasic) {
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 30b39a0d73..25352250e9 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -854,6 +854,16 @@ TEST(TerminateConsole) {
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
+ {
+ // setup console global.
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::String> name = v8::String::NewFromUtf8Literal(
+ isolate, "console", v8::NewStringType::kInternalized);
+ v8::Local<v8::Value> console =
+ context->GetExtrasBindingObject()->Get(context, name).ToLocalChecked();
+ context->Global()->Set(context, name, console).FromJust();
+ }
+
CHECK(!isolate->IsExecutionTerminating());
v8::TryCatch try_catch(isolate);
CHECK(!isolate->IsExecutionTerminating());
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 7a2ab7ae09..d9f1fc9fef 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -45,19 +45,22 @@ TEST(TransitionArray_SimpleFieldTransitions) {
CHECK(map0->raw_transitions()->IsSmi());
{
- TestTransitionsAccessor transitions(isolate, map0);
- transitions.Insert(name1, map1, SIMPLE_PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1,
+ SIMPLE_PROPERTY_TRANSITION);
}
{
- TestTransitionsAccessor transitions(isolate, map0);
- CHECK(transitions.IsWeakRefEncoding());
- CHECK_EQ(*map1, transitions.SearchTransition(*name1, PropertyKind::kData,
- attributes));
- CHECK_EQ(1, transitions.NumberOfTransitions());
- CHECK_EQ(*name1, transitions.GetKey(0));
- CHECK_EQ(*map1, transitions.GetTarget(0));
+ {
+ TestTransitionsAccessor transitions(isolate, map0);
+ CHECK(transitions.IsWeakRefEncoding());
+ CHECK_EQ(*map1, transitions.SearchTransition(*name1, PropertyKind::kData,
+ attributes));
+ CHECK_EQ(1, transitions.NumberOfTransitions());
+ CHECK_EQ(*name1, transitions.GetKey(0));
+ CHECK_EQ(*map1, transitions.GetTarget(0));
+ }
- transitions.Insert(name2, map2, SIMPLE_PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name2, map2,
+ SIMPLE_PROPERTY_TRANSITION);
}
{
TestTransitionsAccessor transitions(isolate, map0);
@@ -105,19 +108,22 @@ TEST(TransitionArray_FullFieldTransitions) {
CHECK(map0->raw_transitions()->IsSmi());
{
- TestTransitionsAccessor transitions(isolate, map0);
- transitions.Insert(name1, map1, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name1, map1,
+ PROPERTY_TRANSITION);
}
{
- TestTransitionsAccessor transitions(isolate, map0);
- CHECK(transitions.IsFullTransitionArrayEncoding());
- CHECK_EQ(*map1, transitions.SearchTransition(*name1, PropertyKind::kData,
- attributes));
- CHECK_EQ(1, transitions.NumberOfTransitions());
- CHECK_EQ(*name1, transitions.GetKey(0));
- CHECK_EQ(*map1, transitions.GetTarget(0));
+ {
+ TestTransitionsAccessor transitions(isolate, map0);
+ CHECK(transitions.IsFullTransitionArrayEncoding());
+ CHECK_EQ(*map1, transitions.SearchTransition(*name1, PropertyKind::kData,
+ attributes));
+ CHECK_EQ(1, transitions.NumberOfTransitions());
+ CHECK_EQ(*name1, transitions.GetKey(0));
+ CHECK_EQ(*map1, transitions.GetTarget(0));
+ }
- transitions.Insert(name2, map2, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name2, map2,
+ PROPERTY_TRANSITION);
}
{
TestTransitionsAccessor transitions(isolate, map0);
@@ -166,10 +172,10 @@ TEST(TransitionArray_DifferentFieldNames) {
names[i] = name;
maps[i] = map;
- TransitionsAccessor(isolate, map0).Insert(name, map, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name, map, PROPERTY_TRANSITION);
}
- TransitionsAccessor transitions(isolate, map0);
+ TransitionsAccessor transitions(isolate, *map0);
for (int i = 0; i < PROPS_COUNT; i++) {
CHECK_EQ(*maps[i], transitions.SearchTransition(
*names[i], PropertyKind::kData, attributes));
@@ -205,7 +211,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
// Add transitions for same field name but different attributes.
for (int i = 0; i < ATTRS_COUNT; i++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+ auto attributes = PropertyAttributesFromInt(i);
Handle<Map> map =
Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate),
@@ -214,13 +220,13 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
.ToHandleChecked();
attr_maps[i] = map;
- TransitionsAccessor(isolate, map0).Insert(name, map, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name, map, PROPERTY_TRANSITION);
}
// Ensure that transitions for |name| field are valid.
- TransitionsAccessor transitions(isolate, map0);
+ TransitionsAccessor transitions(isolate, *map0);
for (int i = 0; i < ATTRS_COUNT; i++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+ auto attributes = PropertyAttributesFromInt(i);
CHECK_EQ(*attr_maps[i], transitions.SearchTransition(
*name, PropertyKind::kData, attributes));
// All transitions use the same key, so this check doesn't need to
@@ -258,7 +264,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
names[i] = name;
maps[i] = map;
- TransitionsAccessor(isolate, map0).Insert(name, map, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name, map, PROPERTY_TRANSITION);
}
const int ATTRS_COUNT = (READ_ONLY | DONT_ENUM | DONT_DELETE) + 1;
@@ -268,7 +274,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
// Add transitions for same field name but different attributes.
for (int i = 0; i < ATTRS_COUNT; i++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+ auto attributes = PropertyAttributesFromInt(i);
Handle<Map> map =
Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate),
@@ -277,13 +283,13 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
.ToHandleChecked();
attr_maps[i] = map;
- TransitionsAccessor(isolate, map0).Insert(name, map, PROPERTY_TRANSITION);
+ TransitionsAccessor::Insert(isolate, map0, name, map, PROPERTY_TRANSITION);
}
// Ensure that transitions for |name| field are valid.
- TransitionsAccessor transitions(isolate, map0);
+ TransitionsAccessor transitions(isolate, *map0);
for (int i = 0; i < ATTRS_COUNT; i++) {
- PropertyAttributes attr = static_cast<PropertyAttributes>(i);
+ auto attr = PropertyAttributesFromInt(i);
CHECK_EQ(*attr_maps[i],
transitions.SearchTransition(*name, PropertyKind::kData, attr));
}
diff --git a/deps/v8/test/cctest/test-transitions.h b/deps/v8/test/cctest/test-transitions.h
index e1177aba85..28f055c217 100644
--- a/deps/v8/test/cctest/test-transitions.h
+++ b/deps/v8/test/cctest/test-transitions.h
@@ -12,11 +12,10 @@ namespace internal {
class TestTransitionsAccessor : public TransitionsAccessor {
public:
- TestTransitionsAccessor(Isolate* isolate, Map map,
- DisallowGarbageCollection* no_gc)
- : TransitionsAccessor(isolate, map, no_gc) {}
- TestTransitionsAccessor(Isolate* isolate, Handle<Map> map)
+ TestTransitionsAccessor(Isolate* isolate, Map map)
: TransitionsAccessor(isolate, map) {}
+ TestTransitionsAccessor(Isolate* isolate, Handle<Map> map)
+ : TransitionsAccessor(isolate, *map) {}
// Expose internals for tests.
bool IsUninitializedEncoding() { return encoding() == kUninitialized; }
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index 93fc4d0532..182a13bba0 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -168,7 +168,7 @@ TEST(Unwind_BuiltinPCInMiddle_Success_CodePagesAPI) {
register_state.fp = stack;
// Put the current PC inside of a valid builtin.
- Code builtin = i_isolate->builtins()->code(Builtin::kStringEqual);
+ Code builtin = FromCodeT(i_isolate->builtins()->code(Builtin::kStringEqual));
const uintptr_t offset = 40;
CHECK_LT(offset, builtin.InstructionSize());
register_state.pc =
@@ -225,7 +225,7 @@ TEST(Unwind_BuiltinPCAtStart_Success_CodePagesAPI) {
// Put the current PC at the start of a valid builtin, so that we are setting
// up the frame.
- Code builtin = i_isolate->builtins()->code(Builtin::kStringEqual);
+ Code builtin = FromCodeT(i_isolate->builtins()->code(Builtin::kStringEqual));
register_state.pc = reinterpret_cast<void*>(builtin.InstructionStart());
bool unwound = v8::Unwinder::TryUnwindV8Frames(
@@ -456,7 +456,7 @@ TEST(Unwind_JSEntry_Fail_CodePagesAPI) {
CHECK_LE(pages_length, arraysize(code_pages));
RegisterState register_state;
- Code js_entry = i_isolate->builtins()->code(Builtin::kJSEntry);
+ Code js_entry = FromCodeT(i_isolate->builtins()->code(Builtin::kJSEntry));
byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
register_state.pc = start + 10;
@@ -638,7 +638,7 @@ TEST(PCIsInV8_InJSEntryRange_CodePagesAPI) {
isolate->CopyCodePages(arraysize(code_pages), code_pages);
CHECK_LE(pages_length, arraysize(code_pages));
- Code js_entry = i_isolate->builtins()->code(Builtin::kJSEntry);
+ Code js_entry = FromCodeT(i_isolate->builtins()->code(Builtin::kJSEntry));
byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
size_t length = js_entry.InstructionSize();
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index 1fc0add43f..2b9cec9065 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -232,106 +232,100 @@ bool EqualV8Registers(const RegisterDump* a, const RegisterDump* b) {
RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
int reg_size, int reg_count, RegList allowed) {
- RegList list = 0;
+ RegList list;
int i = 0;
- for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
- if (((1ULL << n) & allowed) != 0) {
- // Only assign allowed registers.
- if (r) {
- r[i] = Register::Create(n, reg_size);
- }
- if (x) {
- x[i] = Register::Create(n, kXRegSizeInBits);
- }
- if (w) {
- w[i] = Register::Create(n, kWRegSizeInBits);
- }
- list |= (1ULL << n);
- i++;
+ // Only assign allowed registers.
+ for (Register reg : allowed) {
+ if (i == reg_count) break;
+ if (r) {
+ r[i] = Register::Create(reg.code(), reg_size);
}
+ if (x) {
+ x[i] = reg.X();
+ }
+ if (w) {
+ w[i] = reg.W();
+ }
+ list.set(reg);
+ i++;
}
// Check that we got enough registers.
- CHECK(CountSetBits(list, kNumberOfRegisters) == reg_count);
+ CHECK_EQ(list.Count(), reg_count);
return list;
}
-RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
- int reg_size, int reg_count, RegList allowed) {
- RegList list = 0;
+DoubleRegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
+ int reg_size, int reg_count,
+ DoubleRegList allowed) {
+ DoubleRegList list;
int i = 0;
- for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
- if (((1ULL << n) & allowed) != 0) {
- // Only assigned allowed registers.
- if (v) {
- v[i] = VRegister::Create(n, reg_size);
- }
- if (d) {
- d[i] = VRegister::Create(n, kDRegSizeInBits);
- }
- if (s) {
- s[i] = VRegister::Create(n, kSRegSizeInBits);
- }
- list |= (1ULL << n);
- i++;
+ // Only assigned allowed registers.
+ for (VRegister reg : allowed) {
+ if (i == reg_count) break;
+ if (v) {
+ v[i] = VRegister::Create(reg.code(), reg_size);
+ }
+ if (d) {
+ d[i] = reg.D();
}
+ if (s) {
+ s[i] = reg.S();
+ }
+ list.set(reg);
+ i++;
}
// Check that we got enough registers.
- CHECK(CountSetBits(list, kNumberOfVRegisters) == reg_count);
+ CHECK_EQ(list.Count(), reg_count);
return list;
}
-
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
Register first = NoReg;
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if (reg_list & (1ULL << i)) {
- Register xn = Register::Create(i, kXRegSizeInBits);
- // We should never write into sp here.
- CHECK_NE(xn, sp);
- if (!xn.IsZero()) {
- if (!first.is_valid()) {
- // This is the first register we've hit, so construct the literal.
- __ Mov(xn, value);
- first = xn;
- } else {
- // We've already loaded the literal, so re-use the value already
- // loaded into the first register we hit.
- __ Mov(xn, first);
- }
+ for (Register reg : reg_list) {
+ Register xn = reg.X();
+ // We should never write into sp here.
+ CHECK_NE(xn, sp);
+ if (!xn.IsZero()) {
+ if (!first.is_valid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Mov(xn, value);
+ first = xn;
+ } else {
+ // We've already loaded the literal, so re-use the value already
+ // loaded into the first register we hit.
+ __ Mov(xn, first);
}
}
}
}
-
-void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
+void ClobberFP(MacroAssembler* masm, DoubleRegList reg_list,
+ double const value) {
VRegister first = NoVReg;
- for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
- if (reg_list & (1ULL << i)) {
- VRegister dn = VRegister::Create(i, kDRegSizeInBits);
- if (!first.is_valid()) {
- // This is the first register we've hit, so construct the literal.
- __ Fmov(dn, value);
- first = dn;
- } else {
- // We've already loaded the literal, so re-use the value already loaded
- // into the first register we hit.
- __ Fmov(dn, first);
- }
+ for (VRegister reg : reg_list) {
+ VRegister dn = reg.D();
+ if (!first.is_valid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Fmov(dn, value);
+ first = dn;
+ } else {
+ // We've already loaded the literal, so re-use the value already loaded
+ // into the first register we hit.
+ __ Fmov(dn, first);
}
}
}
-
void Clobber(MacroAssembler* masm, CPURegList reg_list) {
if (reg_list.type() == CPURegister::kRegister) {
// This will always clobber X registers.
- Clobber(masm, reg_list.list());
+ Clobber(masm, RegList::FromBits(static_cast<uint32_t>(reg_list.bits())));
} else if (reg_list.type() == CPURegister::kVRegister) {
// This will always clobber D registers.
- ClobberFP(masm, reg_list.list());
+ ClobberFP(masm,
+ DoubleRegList::FromBits(static_cast<uint32_t>(reg_list.bits())));
} else {
UNREACHABLE();
}
@@ -340,10 +334,10 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
void RegisterDump::Dump(MacroAssembler* masm) {
// Ensure that we don't unintentionally clobber any registers.
- RegList old_tmp_list = masm->TmpList()->list();
- RegList old_fptmp_list = masm->FPTmpList()->list();
- masm->TmpList()->set_list(0);
- masm->FPTmpList()->set_list(0);
+ uint64_t old_tmp_list = masm->TmpList()->bits();
+ uint64_t old_fptmp_list = masm->FPTmpList()->bits();
+ masm->TmpList()->set_bits(0);
+ masm->FPTmpList()->set_bits(0);
// Preserve some temporary registers.
Register dump_base = x0;
@@ -443,8 +437,8 @@ void RegisterDump::Dump(MacroAssembler* masm) {
__ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSize));
// Restore the MacroAssembler's scratch registers.
- masm->TmpList()->set_list(old_tmp_list);
- masm->FPTmpList()->set_list(old_fptmp_list);
+ masm->TmpList()->set_bits(old_tmp_list);
+ masm->FPTmpList()->set_bits(old_fptmp_list);
completed_ = true;
}
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index 305f6bd938..5143dcf0d8 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -229,8 +229,9 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
int reg_size, int reg_count, RegList allowed);
// As PopulateRegisterArray, but for floating-point registers.
-RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
- int reg_size, int reg_count, RegList allowed);
+DoubleRegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
+ int reg_size, int reg_count,
+ DoubleRegList allowed);
// Ovewrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
@@ -244,7 +245,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list,
uint64_t const value = 0xFEDCBA9876543210UL);
// As Clobber, but for FP registers.
-void ClobberFP(MacroAssembler* masm, RegList reg_list,
+void ClobberFP(MacroAssembler* masm, DoubleRegList reg_list,
double const value = kFP64SignallingNaN);
// As Clobber, but for a CPURegList with either FP or integer registers. When
diff --git a/deps/v8/test/cctest/test-web-snapshots.cc b/deps/v8/test/cctest/test-web-snapshots.cc
index b5e8a128bf..45b278af04 100644
--- a/deps/v8/test/cctest/test-web-snapshots.cc
+++ b/deps/v8/test/cctest/test-web-snapshots.cc
@@ -17,7 +17,7 @@ void TestWebSnapshotExtensive(
const char* snapshot_source, const char* test_source,
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester,
uint32_t string_count, uint32_t map_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count) {
+ uint32_t function_count, uint32_t object_count, uint32_t array_count) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -41,15 +41,16 @@ void TestWebSnapshotExtensive(
CHECK_EQ(context_count, serializer.context_count());
CHECK_EQ(function_count, serializer.function_count());
CHECK_EQ(object_count, serializer.object_count());
+ CHECK_EQ(array_count, serializer.array_count());
}
{
v8::HandleScope scope(isolate);
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
tester(isolate, new_context);
CHECK_EQ(string_count, deserializer.string_count());
@@ -63,7 +64,8 @@ void TestWebSnapshotExtensive(
void TestWebSnapshot(const char* snapshot_source, const char* test_source,
const char* expected_result, uint32_t string_count,
uint32_t map_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count) {
+ uint32_t function_count, uint32_t object_count,
+ uint32_t array_count) {
TestWebSnapshotExtensive(
snapshot_source, test_source,
[test_source, expected_result](v8::Isolate* isolate,
@@ -71,7 +73,8 @@ void TestWebSnapshot(const char* snapshot_source, const char* test_source,
v8::Local<v8::String> result = CompileRun(test_source).As<v8::String>();
CHECK(result->Equals(new_context, v8_str(expected_result)).FromJust());
},
- string_count, map_count, context_count, function_count, object_count);
+ string_count, map_count, context_count, function_count, object_count,
+ array_count);
}
} // namespace
@@ -80,13 +83,15 @@ TEST(Minimal) {
const char* snapshot_source = "var foo = {'key': 'lol'};";
const char* test_source = "foo.key";
const char* expected_result = "lol";
- uint32_t kStringCount = 3; // 'foo', 'key', 'lol'
+ uint32_t kStringCount = 2; // 'foo', 'key'
uint32_t kMapCount = 1;
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kMapCount, kContextCount, kFunctionCount, kObjectCount);
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
}
TEST(EmptyObject) {
@@ -97,6 +102,7 @@ TEST(EmptyObject) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -107,7 +113,7 @@ TEST(EmptyObject) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
TEST(Numbers) {
@@ -125,6 +131,8 @@ TEST(Numbers) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
+
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -161,7 +169,7 @@ TEST(Numbers) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
TEST(Oddballs) {
@@ -177,6 +185,7 @@ TEST(Oddballs) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -191,7 +200,7 @@ TEST(Oddballs) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
TEST(Function) {
@@ -204,8 +213,10 @@ TEST(Function) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 1;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kMapCount, kContextCount, kFunctionCount, kObjectCount);
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
}
TEST(InnerFunctionWithContext) {
@@ -217,14 +228,16 @@ TEST(InnerFunctionWithContext) {
" })()};";
const char* test_source = "foo.key()";
const char* expected_result = "11525";
- // Strings: 'foo', 'key', function source code (inner), 'result', '11525'
- uint32_t kStringCount = 5;
+ // Strings: 'foo', 'key', function source code (inner), 'result'
+ uint32_t kStringCount = 4;
uint32_t kMapCount = 1;
uint32_t kContextCount = 1;
uint32_t kFunctionCount = 1;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kMapCount, kContextCount, kFunctionCount, kObjectCount);
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
}
TEST(InnerFunctionWithContextAndParentContext) {
@@ -242,15 +255,16 @@ TEST(InnerFunctionWithContextAndParentContext) {
" })()};";
const char* test_source = "foo.key()";
const char* expected_result = "11525";
- // Strings: 'foo', 'key', function source code (innerinner), 'part1', 'part2',
- // '11', '525'
- uint32_t kStringCount = 7;
+ // Strings: 'foo', 'key', function source code (innerinner), 'part1', 'part2'.
+ uint32_t kStringCount = 5;
uint32_t kMapCount = 1;
uint32_t kContextCount = 2;
uint32_t kFunctionCount = 1;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kMapCount, kContextCount, kFunctionCount, kObjectCount);
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
}
TEST(RegExp) {
@@ -261,6 +275,7 @@ TEST(RegExp) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -279,7 +294,7 @@ TEST(RegExp) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
TEST(RegExpNoFlags) {
@@ -290,6 +305,7 @@ TEST(RegExpNoFlags) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 0;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -308,7 +324,7 @@ TEST(RegExpNoFlags) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
TEST(SFIDeduplication) {
@@ -343,9 +359,9 @@ TEST(SFIDeduplication) {
{
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
const char* get_inner = "foo.inner";
@@ -399,9 +415,9 @@ TEST(SFIDeduplicationClasses) {
{
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
const char* get_class = "foo.class";
@@ -464,9 +480,9 @@ TEST(SFIDeduplicationAfterBytecodeFlushing) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
const char* get_outer = "foo.outer";
@@ -549,9 +565,9 @@ TEST(SFIDeduplicationAfterBytecodeFlushingClasses) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
const char* get_create = "foo.create";
@@ -626,9 +642,9 @@ TEST(SFIDeduplicationOfFunctionsNotInSnapshot) {
{
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
- snapshot_data.buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
const char* create_new_inner = "foo.outer()";
@@ -678,6 +694,7 @@ TEST(FunctionKinds) {
uint32_t kContextCount = 0;
uint32_t kFunctionCount = 6;
uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
[test_source](v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
v8::Local<v8::Object> result = CompileRun(test_source).As<v8::Object>();
@@ -697,7 +714,7 @@ TEST(FunctionKinds) {
};
TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
kMapCount, kContextCount, kFunctionCount,
- kObjectCount);
+ kObjectCount, kArrayCount);
}
// Test that concatenating JS code to the snapshot works.
@@ -738,8 +755,8 @@ TEST(Concatenation) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(deserializer.UseWebSnapshot(buffer.get(), buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
+ CHECK(deserializer.Deserialize());
CHECK(!deserializer.has_error());
CHECK_EQ(kObjectCount, deserializer.object_count());
@@ -785,9 +802,124 @@ TEST(ConcatenationErrors) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> new_context = CcTest::NewContext();
v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate);
- CHECK(!deserializer.UseWebSnapshot(buffer.get(), buffer_size));
+ WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
+ CHECK(!deserializer.Deserialize());
+ }
+}
+
+TEST(CompactedSourceCode) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+
+ WebSnapshotData snapshot_data;
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ const char* snapshot_source =
+ "function foo() { 'foo' }\n"
+ "function bar() { 'bar' }\n"
+ "function baz() { 'baz' }\n"
+ "let e = [foo, bar, baz]";
+ CompileRun(snapshot_source);
+ v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
+ v8::Local<v8::String> str =
+ v8::String::NewFromUtf8(isolate, "e").ToLocalChecked();
+ exports->Set(isolate, 0, str);
+ WebSnapshotSerializer serializer(isolate);
+ CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
+ CHECK(!serializer.has_error());
+ CHECK_NOT_NULL(snapshot_data.buffer);
}
+
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
+ snapshot_data.buffer_size);
+ CHECK(deserializer.Deserialize());
+ CHECK(!deserializer.has_error());
+
+ const char* get_function = "e[0]";
+
+ // Verify that the source code got compacted.
+ v8::Local<v8::Function> v8_function =
+ CompileRun(get_function).As<v8::Function>();
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_function));
+ Handle<String> function_script_source =
+ handle(String::cast(Script::cast(function->shared().script()).source()),
+ i_isolate);
+ const char* raw_expected_source = "() { 'foo' }() { 'bar' }() { 'baz' }";
+
+ Handle<String> expected_source = Utils::OpenHandle(
+ *v8::String::NewFromUtf8(isolate, raw_expected_source).ToLocalChecked(),
+ i_isolate);
+ CHECK(function_script_source->Equals(*expected_source));
+ }
+}
+
+TEST(InPlaceStringsInArrays) {
+ const char* snapshot_source = "var foo = ['one', 'two', 'three'];";
+ const char* test_source = "foo.join('');";
+ const char* expected_result = "onetwothree";
+ uint32_t kStringCount = 1; // 'foo'; Other strings are in-place.
+ uint32_t kMapCount = 0;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 0;
+ uint32_t kArrayCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
+}
+
+TEST(RepeatedInPlaceStringsInArrays) {
+ const char* snapshot_source = "var foo = ['one', 'two', 'one'];";
+ const char* test_source = "foo.join('');";
+ const char* expected_result = "onetwoone";
+ uint32_t kStringCount = 2; // 'foo', 'one'; Other strings are in-place.
+ uint32_t kMapCount = 0;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 0;
+ uint32_t kArrayCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
+}
+
+TEST(InPlaceStringsInObjects) {
+ const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'three'};";
+ const char* test_source = "foo.a + foo.b + foo.c;";
+ const char* expected_result = "onetwothree";
+ // 'foo', 'a', 'b', 'c'. Other strings are in-place.
+ uint32_t kStringCount = 4;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
+}
+
+TEST(RepeatedInPlaceStringsInObjects) {
+ const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'one'};";
+ const char* test_source = "foo.a + foo.b + foo.c;";
+ const char* expected_result = "onetwoone";
+ // 'foo', 'a', 'b', 'c', 'one'. Other strings are in-place.
+ uint32_t kStringCount = 5;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 1;
+ uint32_t kArrayCount = 0;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount,
+ kArrayCount);
}
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index 96d6fef4f3..04c73c55c2 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -33,7 +33,6 @@ class WasmGCTester {
explicit WasmGCTester(
TestExecutionTier execution_tier = TestExecutionTier::kTurbofan)
: flag_gc(&v8::internal::FLAG_experimental_wasm_gc, true),
- flag_reftypes(&v8::internal::FLAG_experimental_wasm_reftypes, true),
flag_typedfuns(&v8::internal::FLAG_experimental_wasm_typed_funcref,
true),
flag_liftoff(&v8::internal::FLAG_liftoff,
@@ -55,12 +54,13 @@ class WasmGCTester {
byte DefineFunction(FunctionSig* sig, std::initializer_list<ValueType> locals,
std::initializer_list<byte> code) {
- WasmFunctionBuilder* fun = builder_.AddFunction(sig);
- for (ValueType local : locals) {
- fun->AddLocal(local);
- }
- fun->EmitCode(code.begin(), static_cast<uint32_t>(code.size()));
- return fun->func_index();
+ return DefineFunctionImpl(builder_.AddFunction(sig), locals, code);
+ }
+
+ byte DefineFunction(uint32_t sig_index,
+ std::initializer_list<ValueType> locals,
+ std::initializer_list<byte> code) {
+ return DefineFunctionImpl(builder_.AddFunction(sig_index), locals, code);
}
void DefineExportedFunction(const char* name, FunctionSig* sig,
@@ -95,7 +95,9 @@ class WasmGCTester {
supertype);
}
- byte DefineSignature(FunctionSig* sig) { return builder_.AddSignature(sig); }
+ byte DefineSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType) {
+ return builder_.AddSignature(sig, supertype);
+ }
byte DefineTable(ValueType type, uint32_t min_size, uint32_t max_size) {
return builder_.AddTable(type, min_size, max_size);
@@ -176,12 +178,21 @@ class WasmGCTester {
private:
const FlagScope<bool> flag_gc;
- const FlagScope<bool> flag_reftypes;
const FlagScope<bool> flag_typedfuns;
const FlagScope<bool> flag_liftoff;
const FlagScope<bool> flag_liftoff_only;
const FlagScope<bool> flag_tierup;
+ byte DefineFunctionImpl(WasmFunctionBuilder* fun,
+ std::initializer_list<ValueType> locals,
+ std::initializer_list<byte> code) {
+ for (ValueType local : locals) {
+ fun->AddLocal(local);
+ }
+ fun->EmitCode(code.begin(), static_cast<uint32_t>(code.size()));
+ return fun->func_index();
+ }
+
void CheckResultImpl(uint32_t function_index, const FunctionSig* sig,
CWasmArgumentsPacker* packer, int32_t expected) {
CallFunctionImpl(function_index, sig, packer);
@@ -459,35 +470,27 @@ WASM_COMPILED_EXEC_TEST(RefCast) {
WasmGCTester tester(execution_tier);
const byte supertype_index = tester.DefineStruct({F(kWasmI32, true)});
- const byte subtype1_index =
- tester.DefineStruct({F(kWasmI32, true), F(kWasmF32, false)});
- const byte subtype2_index =
- tester.DefineStruct({F(kWasmI32, true), F(kWasmI64, false)});
+ const byte subtype1_index = tester.DefineStruct(
+ {F(kWasmI32, true), F(kWasmF32, false)}, supertype_index);
+ const byte subtype2_index = tester.DefineStruct(
+ {F(kWasmI32, true), F(kWasmI64, false)}, supertype_index);
const byte kTestSuccessful = tester.DefineFunction(
tester.sigs.i_v(), {ValueType::Ref(supertype_index, kNullable)},
{WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype1_index,
- WASM_RTT_SUB(subtype1_index,
- WASM_RTT_CANON(supertype_index)))),
+ subtype1_index, WASM_RTT_CANON(subtype1_index))),
WASM_STRUCT_GET(
subtype1_index, 0,
- WASM_REF_CAST(
- WASM_LOCAL_GET(0),
- WASM_RTT_SUB(subtype1_index, WASM_RTT_CANON(supertype_index)))),
+ WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(subtype1_index))),
WASM_END});
const byte kTestFailed = tester.DefineFunction(
tester.sigs.i_v(), {ValueType::Ref(supertype_index, kNullable)},
{WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype1_index,
- WASM_RTT_SUB(subtype1_index,
- WASM_RTT_CANON(supertype_index)))),
+ subtype1_index, WASM_RTT_CANON(subtype1_index))),
WASM_STRUCT_GET(
subtype2_index, 0,
- WASM_REF_CAST(
- WASM_LOCAL_GET(0),
- WASM_RTT_SUB(subtype2_index, WASM_RTT_CANON(supertype_index)))),
+ WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(subtype2_index))),
WASM_END});
tester.CompileModule();
@@ -498,8 +501,7 @@ WASM_COMPILED_EXEC_TEST(RefCast) {
WASM_COMPILED_EXEC_TEST(RefCastStatic) {
WasmGCTester tester(execution_tier);
- const byte supertype_index =
- tester.DefineStruct({F(kWasmI32, true)}, kGenericSuperType);
+ const byte supertype_index = tester.DefineStruct({F(kWasmI32, true)});
const byte subtype1_index = tester.DefineStruct(
{F(kWasmI32, true), F(kWasmF32, false)}, supertype_index);
const byte subtype2_index = tester.DefineStruct(
@@ -528,8 +530,7 @@ WASM_COMPILED_EXEC_TEST(RefCastStaticNoChecks) {
FlagScope<bool> scope(&FLAG_experimental_wasm_assume_ref_cast_succeeds, true);
WasmGCTester tester(execution_tier);
- const byte supertype_index =
- tester.DefineStruct({F(kWasmI32, true)}, kGenericSuperType);
+ const byte supertype_index = tester.DefineStruct({F(kWasmI32, true)});
const byte subtype1_index = tester.DefineStruct(
{F(kWasmI32, true), F(kWasmF32, false)}, supertype_index);
const byte subtype2_index = tester.DefineStruct(
@@ -560,7 +561,7 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
const byte type_index = tester.DefineStruct({F(kWasmI32, true)});
const byte other_type_index = tester.DefineStruct({F(kWasmF32, true)});
const byte rtt_index =
- tester.AddGlobal(ValueType::Rtt(type_index, 0), false,
+ tester.AddGlobal(ValueType::Rtt(type_index), false,
WasmInitExpr::RttCanon(
static_cast<HeapType::Representation>(type_index)));
const byte kTestStruct = tester.DefineFunction(
@@ -937,12 +938,12 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
kExprEnd});
// Reads and returns an array's length.
- const byte kGetLength = tester.DefineFunction(
- tester.sigs.i_v(), {},
- {WASM_ARRAY_LEN(type_index, WASM_ARRAY_NEW_WITH_RTT(
- type_index, WASM_I32V(0), WASM_I32V(42),
- WASM_RTT_CANON(type_index))),
- kExprEnd});
+ const byte kGetLength =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_ARRAY_LEN(WASM_ARRAY_NEW_WITH_RTT(
+ type_index, WASM_I32V(0), WASM_I32V(42),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
// Create an array of length 2, initialized to [42, 42].
const byte kAllocate = tester.DefineFunction(
@@ -1303,6 +1304,7 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
tester.CheckResult(kZeroLength, 0); // Does not throw.
}
+/* TODO(7748): This test requires for recursive groups.
WASM_COMPILED_EXEC_TEST(NewDefault) {
WasmGCTester tester(execution_tier);
const byte struct_type = tester.DefineStruct(
@@ -1336,19 +1338,20 @@ WASM_COMPILED_EXEC_TEST(NewDefault) {
tester.CheckResult(allocate_struct, 0);
tester.CheckResult(allocate_array, 0);
}
+*/
WASM_COMPILED_EXEC_TEST(BasicRtt) {
WasmGCTester tester(execution_tier);
const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
- const byte subtype_index =
- tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
+ const byte subtype_index = tester.DefineStruct(
+ {F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)}, type_index);
- ValueType kRttTypes[] = {ValueType::Rtt(type_index, 0)};
+ ValueType kRttTypes[] = {ValueType::Rtt(type_index)};
FunctionSig sig_t_v(1, 0, kRttTypes);
- ValueType kRttSubtypes[] = {ValueType::Rtt(subtype_index, 1)};
+ ValueType kRttSubtypes[] = {ValueType::Rtt(subtype_index)};
FunctionSig sig_t2_v(1, 0, kRttSubtypes);
- ValueType kRttTypesDeeper[] = {ValueType::Rtt(type_index, 1)};
+ ValueType kRttTypesDeeper[] = {ValueType::Rtt(type_index)};
FunctionSig sig_t3_v(1, 0, kRttTypesDeeper);
ValueType kRefTypes[] = {ref(type_index)};
FunctionSig sig_q_v(1, 0, kRefTypes);
@@ -1356,8 +1359,7 @@ WASM_COMPILED_EXEC_TEST(BasicRtt) {
const byte kRttCanon = tester.DefineFunction(
&sig_t_v, {}, {WASM_RTT_CANON(type_index), kExprEnd});
const byte kRttSub = tester.DefineFunction(
- &sig_t2_v, {},
- {WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)), kExprEnd});
+ &sig_t2_v, {}, {WASM_RTT_CANON(subtype_index), kExprEnd});
const byte kStructWithRtt = tester.DefineFunction(
&sig_q_v, {},
{WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
@@ -1417,40 +1419,12 @@ WASM_COMPILED_EXEC_TEST(BasicRtt) {
tester.CheckResult(kRefCast, 43);
}
-WASM_COMPILED_EXEC_TEST(RttFreshSub) {
- WasmGCTester tester(execution_tier);
- const byte kType = tester.DefineStruct({F(wasm::kWasmI32, true)});
- HeapType::Representation type_repr =
- static_cast<HeapType::Representation>(kType);
-
- const byte kRtt = tester.AddGlobal(
- ValueType::Rtt(kType, 1), false,
- WasmInitExpr::RttFreshSub(tester.zone(), type_repr,
- WasmInitExpr::RttCanon(type_repr)));
-
- // A struct allocated with a fresh RTT does not match other fresh RTTs
- // created for the same type.
- const byte kRefTest = tester.DefineFunction(
- tester.sigs.i_v(), {optref(kType)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
- kType, WASM_I32V(11),
- WASM_RTT_FRESH_SUB(kType, WASM_RTT_CANON(kType)))),
- WASM_I32_ADD(
- WASM_REF_TEST(WASM_LOCAL_GET(0),
- WASM_RTT_FRESH_SUB(kType, WASM_RTT_CANON(kType))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_GLOBAL_GET(kRtt))),
- kExprEnd});
-
- tester.CompileModule();
- tester.CheckResult(kRefTest, 0);
-}
-
WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
// TODO(7748): Add tests for branch_on_*.
WasmGCTester tester(execution_tier);
byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
- byte subtype_index =
- tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)});
+ byte subtype_index = tester.DefineStruct(
+ {F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)}, type_index);
ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
FunctionSig sig(1, 2, sig_types);
byte sig_index = tester.DefineSignature(&sig);
@@ -1462,14 +1436,6 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
// Upcasts should not be optimized away for structural types.
const byte kRefTestUpcast = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_TEST(
- WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype_index,
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
- WASM_RTT_CANON(type_index)),
- kExprEnd});
- const byte kRefTestUpcastFail = tester.DefineFunction(
- tester.sigs.i_v(), {},
{WASM_REF_TEST(WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
subtype_index, WASM_RTT_CANON(subtype_index)),
WASM_RTT_CANON(type_index)),
@@ -1480,11 +1446,9 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
kExprEnd});
const byte kRefTestUnrelated = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_TEST(
- WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype_index,
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
- WASM_RTT_CANON(sig_index)),
+ {WASM_REF_TEST(WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ subtype_index, WASM_RTT_CANON(subtype_index)),
+ WASM_RTT_CANON(sig_index)),
kExprEnd});
const byte kRefTestUnrelatedNull = tester.DefineFunction(
tester.sigs.i_v(), {},
@@ -1504,11 +1468,10 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
kExprEnd});
const byte kRefCastUpcast = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_IS_NULL(WASM_REF_CAST(
- WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype_index,
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
- WASM_RTT_CANON(type_index))),
+ {WASM_REF_IS_NULL(
+ WASM_REF_CAST(WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ subtype_index, WASM_RTT_CANON(subtype_index)),
+ WASM_RTT_CANON(type_index))),
kExprEnd});
const byte kRefCastUpcastNull = tester.DefineFunction(
tester.sigs.i_v(), {},
@@ -1517,11 +1480,10 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
kExprEnd});
const byte kRefCastUnrelated = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_IS_NULL(WASM_REF_CAST(
- WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- subtype_index,
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
- WASM_RTT_CANON(sig_index))),
+ {WASM_REF_IS_NULL(
+ WASM_REF_CAST(WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ subtype_index, WASM_RTT_CANON(subtype_index)),
+ WASM_RTT_CANON(sig_index))),
kExprEnd});
const byte kRefCastUnrelatedNull = tester.DefineFunction(
tester.sigs.i_v(), {},
@@ -1540,7 +1502,6 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
tester.CheckResult(kRefTestNull, 0);
tester.CheckResult(kRefTestUpcast, 1);
- tester.CheckResult(kRefTestUpcastFail, 0);
tester.CheckResult(kRefTestUpcastNull, 0);
tester.CheckResult(kRefTestUnrelated, 0);
tester.CheckResult(kRefTestUnrelatedNull, 0);
@@ -1557,8 +1518,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
// TODO(7748): Add tests for branch_on_*.
WasmGCTester tester(execution_tier);
- byte type_index =
- tester.DefineStruct({F(wasm::kWasmI32, true)}, kGenericSuperType);
+ byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
byte subtype_index = tester.DefineStruct(
{F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)}, type_index);
ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
@@ -1639,27 +1599,111 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
tester.CheckHasThrown(kRefCastUnrelatedNonNullable);
}
+WASM_COMPILED_EXEC_TEST(TrivialAbstractCasts) {
+ // TODO(7748): Add tests for branch_on_*.
+ WasmGCTester tester(execution_tier);
+ byte type_index = tester.DefineArray(wasm::kWasmI32, true);
+ byte struct_type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
+ ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
+ FunctionSig sig(1, 2, sig_types);
+
+ const byte kIsArrayNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
+ const byte kIsArrayUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ type_index, WASM_I32V(10), WASM_RTT_CANON(type_index))),
+ kExprEnd});
+ const byte kIsArrayUpcastNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(type_index, kNullable)},
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(type_index, WASM_I32V(10),
+ WASM_RTT_CANON(type_index))),
+ WASM_REF_IS_ARRAY(WASM_LOCAL_GET(0)), kExprEnd});
+ const byte kIsArrayUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_REF_NULL(type_index)), kExprEnd});
+ const byte kIsArrayUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(struct_type_index, kNullable)},
+ {WASM_LOCAL_SET(
+ 0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ struct_type_index, WASM_RTT_CANON(struct_type_index))),
+ WASM_REF_IS_ARRAY(WASM_LOCAL_GET(0)), kExprEnd});
+ const byte kIsArrayUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_REF_NULL(kI31RefCode)), kExprEnd});
+ const byte kIsArrayUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_I31_NEW(WASM_I32V(10))), kExprEnd});
+
+ const byte kAsArrayNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_REF_NULL(kAnyRefCode))),
+ kExprEnd});
+ const byte kAsArrayUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ type_index, WASM_I32V(10), WASM_RTT_CANON(type_index)))),
+ kExprEnd});
+ const byte kAsArrayUpcastNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(type_index, kNullable)},
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(type_index, WASM_I32V(10),
+ WASM_RTT_CANON(type_index))),
+ WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_LOCAL_GET(0))), kExprEnd});
+ const byte kAsArrayUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_REF_NULL(type_index))),
+ kExprEnd});
+ const byte kAsArrayUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(struct_type_index, kNullable)},
+ {WASM_LOCAL_SET(
+ 0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ struct_type_index, WASM_RTT_CANON(struct_type_index))),
+ WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_LOCAL_GET(0))), kExprEnd});
+ const byte kAsArrayUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_REF_NULL(kI31RefCode))),
+ kExprEnd});
+ const byte kAsArrayUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_AS_ARRAY(WASM_I31_NEW(WASM_I32V(10)))),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckResult(kIsArrayNull, 0);
+ tester.CheckResult(kIsArrayUpcast, 1);
+ tester.CheckResult(kIsArrayUpcastNullable, 1);
+ tester.CheckResult(kIsArrayUpcastNull, 0);
+ tester.CheckResult(kIsArrayUnrelated, 0);
+ tester.CheckResult(kIsArrayUnrelatedNull, 0);
+ tester.CheckResult(kIsArrayUnrelatedNonNullable, 0);
+
+ tester.CheckHasThrown(kAsArrayNull);
+ tester.CheckResult(kAsArrayUpcast, 0);
+ tester.CheckResult(kAsArrayUpcastNullable, 0);
+ tester.CheckHasThrown(kAsArrayUpcastNull);
+ tester.CheckHasThrown(kAsArrayUnrelated);
+ tester.CheckHasThrown(kAsArrayUnrelatedNull);
+ tester.CheckHasThrown(kAsArrayUnrelatedNonNullable);
+}
+
WASM_EXEC_TEST(NoDepthRtt) {
WasmGCTester tester(execution_tier);
const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
- const byte subtype_index =
- tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
- const byte empty_struct_index = tester.DefineStruct({});
+ const byte subtype_index = tester.DefineStruct(
+ {F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)}, type_index);
ValueType kRttTypeNoDepth = ValueType::Rtt(type_index);
FunctionSig sig_t1_v_nd(1, 0, &kRttTypeNoDepth);
ValueType kRttSubtypeNoDepth = ValueType::Rtt(subtype_index);
FunctionSig sig_t2_v_nd(1, 0, &kRttSubtypeNoDepth);
- const byte kRttTypeCanon = tester.DefineFunction(
- &sig_t1_v_nd, {}, {WASM_RTT_CANON(type_index), kExprEnd});
const byte kRttSubtypeCanon = tester.DefineFunction(
&sig_t2_v_nd, {}, {WASM_RTT_CANON(subtype_index), kExprEnd});
- const byte kRttSubtypeSub = tester.DefineFunction(
- &sig_t2_v_nd, {},
- {WASM_RTT_SUB(subtype_index, WASM_CALL_FUNCTION0(kRttTypeCanon)),
- kExprEnd});
const byte kTestCanon = tester.DefineFunction(
tester.sigs.i_v(), {optref(type_index)},
@@ -1669,56 +1713,9 @@ WASM_EXEC_TEST(NoDepthRtt) {
WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
kExprEnd});
- const byte kTestSub = tester.DefineFunction(
- tester.sigs.i_v(), {optref(type_index)},
- {WASM_LOCAL_SET(
- 0, WASM_STRUCT_NEW_WITH_RTT(
- subtype_index, WASM_I32V(11), WASM_I32V(42),
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
- kExprEnd});
-
- const byte kTestSubVsEmpty = tester.DefineFunction(
- tester.sigs.i_v(), {optref(type_index)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
- subtype_index, WASM_I32V(11), WASM_I32V(42),
- WASM_RTT_SUB(subtype_index,
- WASM_RTT_CANON(empty_struct_index)))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
- kExprEnd});
-
- const byte kTestSubVsCanon = tester.DefineFunction(
- tester.sigs.i_v(), {optref(type_index)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
- subtype_index, WASM_I32V(11), WASM_I32V(42),
- WASM_RTT_CANON(subtype_index))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
- kExprEnd});
-
- const byte kTestCanonVsSub = tester.DefineFunction(
- tester.sigs.i_v(), {optref(type_index)},
- {WASM_LOCAL_SET(
- 0, WASM_STRUCT_NEW_WITH_RTT(
- subtype_index, WASM_I32V(11), WASM_I32V(42),
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
- kExprEnd});
-
- const byte kTestSuperVsSub = tester.DefineFunction(
- tester.sigs.i_v(), {optref(type_index)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
- WASM_RTT_CANON(type_index))),
- WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
- kExprEnd});
-
tester.CompileModule();
tester.CheckResult(kTestCanon, 1);
- tester.CheckResult(kTestSub, 1);
- tester.CheckResult(kTestSubVsEmpty, 0);
- tester.CheckResult(kTestSubVsCanon, 0);
- tester.CheckResult(kTestCanonVsSub, 0);
- tester.CheckResult(kTestSuperVsSub, 0);
}
WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
@@ -1737,7 +1734,7 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
&sig, {},
{WASM_ARRAY_NEW(type_index, WASM_I32V(10), WASM_I32V(42)), kExprEnd});
- ValueType rtt_type = ValueType::Rtt(type_index, 0);
+ ValueType rtt_type = ValueType::Rtt(type_index);
FunctionSig rtt_canon_sig(1, 0, &rtt_type);
const byte kRttCanon = tester.DefineFunction(
&rtt_canon_sig, {}, {WASM_RTT_CANON(type_index), kExprEnd});
@@ -1771,7 +1768,7 @@ WASM_COMPILED_EXEC_TEST(FunctionRefs) {
ValueType func_type = ValueType::Ref(sig_index, kNullable);
FunctionSig sig_func(1, 0, &func_type);
- ValueType rtt0 = ValueType::Rtt(sig_index, 0);
+ ValueType rtt0 = ValueType::Rtt(sig_index);
FunctionSig sig_rtt0(1, 0, &rtt0);
const byte rtt_canon = tester.DefineFunction(
&sig_rtt0, {}, {WASM_RTT_CANON(sig_index), kExprEnd});
@@ -1789,19 +1786,12 @@ WASM_COMPILED_EXEC_TEST(FunctionRefs) {
{WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_RTT_CANON(sig_index)), kExprEnd});
- const byte test_fail_1 = tester.DefineFunction(
+ const byte test_fail = tester.DefineFunction(
tester.sigs.i_v(), {kWasmFuncRef},
{WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_RTT_CANON(other_sig_index)),
kExprEnd});
- const byte test_fail_2 = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmFuncRef},
- {WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
- WASM_REF_TEST(WASM_LOCAL_GET(0),
- WASM_RTT_SUB(sig_index, WASM_RTT_CANON(sig_index))),
- kExprEnd});
-
tester.CompileModule();
Handle<Object> result_canon =
@@ -1827,8 +1817,7 @@ WASM_COMPILED_EXEC_TEST(FunctionRefs) {
cast_function_reference->code().raw_instruction_start());
tester.CheckResult(test, 1);
- tester.CheckResult(test_fail_1, 0);
- tester.CheckResult(test_fail_2, 0);
+ tester.CheckResult(test_fail, 0);
}
WASM_COMPILED_EXEC_TEST(CallRef) {
@@ -1896,9 +1885,10 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
WasmGCTester tester(execution_tier);
byte array_index = tester.DefineArray(kWasmI32, true);
+ byte struct_index = tester.DefineStruct({F(kWasmI32, true)});
byte function_index =
tester.DefineFunction(tester.sigs.v_v(), {}, {kExprEnd});
- byte sig_index = 1;
+ byte sig_index = 2;
// This is just so func_index counts as "declared".
tester.AddGlobal(ValueType::Ref(sig_index, kNullable), false,
@@ -1907,6 +1897,9 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kDataCheckNull = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_IS_DATA(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
+ byte kArrayCheckNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_ARRAY(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
byte kFuncCheckNull = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_IS_FUNC(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
@@ -1918,6 +1911,10 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
tester.DefineFunction(tester.sigs.i_v(), {},
{WASM_REF_AS_DATA(WASM_REF_NULL(kAnyRefCode)),
WASM_DROP, WASM_I32V(1), kExprEnd});
+ byte kArrayCastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_AS_ARRAY(WASM_REF_NULL(kAnyRefCode)),
+ WASM_DROP, WASM_I32V(1), kExprEnd});
byte kFuncCastNull =
tester.DefineFunction(tester.sigs.i_v(), {},
{WASM_REF_AS_FUNC(WASM_REF_NULL(kAnyRefCode)),
@@ -1936,6 +1933,12 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
DATA, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
WASM_RTT_CANON(array_index)));
byte kDataCheckFailure = TYPE_CHECK(DATA, WASM_I31_NEW(WASM_I32V(42)));
+ byte kArrayCheckSuccess = TYPE_CHECK(
+ ARRAY, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kArrayCheckFailure =
+ TYPE_CHECK(ARRAY, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ struct_index, WASM_RTT_CANON(struct_index)));
byte kFuncCheckSuccess = TYPE_CHECK(FUNC, WASM_REF_FUNC(function_index));
byte kFuncCheckFailure = TYPE_CHECK(
FUNC, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
@@ -1956,6 +1959,10 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
DATA, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
WASM_RTT_CANON(array_index)));
byte kDataCastFailure = TYPE_CAST(DATA, WASM_I31_NEW(WASM_I32V(42)));
+ byte kArrayCastSuccess = TYPE_CAST(
+ DATA, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kArrayCastFailure = TYPE_CAST(DATA, WASM_I31_NEW(WASM_I32V(42)));
byte kFuncCastSuccess = TYPE_CAST(FUNC, WASM_REF_FUNC(function_index));
byte kFuncCastFailure = TYPE_CAST(
FUNC, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
@@ -1984,6 +1991,11 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kBrOnDataNotTaken = BR_ON(DATA, Data, WASM_REF_FUNC(function_index));
byte kBrOnFuncTaken = BR_ON(FUNC, Func, WASM_REF_FUNC(function_index));
byte kBrOnFuncNotTaken = BR_ON(FUNC, Func, WASM_I31_NEW(WASM_I32V(42)));
+ byte kBrOnArrayTaken =
+ BR_ON(ARRAY, Array,
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kBrOnArrayNotTaken = BR_ON(ARRAY, Array, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnI31Taken = BR_ON(I31, I31, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnI31NotTaken =
BR_ON(I31, I31,
@@ -2010,6 +2022,12 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kBrOnNonFuncNotTaken =
BR_ON_NON(FUNC, Func, WASM_REF_FUNC(function_index));
byte kBrOnNonFuncTaken = BR_ON_NON(FUNC, Func, WASM_I31_NEW(WASM_I32V(42)));
+ byte kBrOnNonArrayNotTaken =
+ BR_ON_NON(ARRAY, Array,
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kBrOnNonArrayTaken =
+ BR_ON_NON(ARRAY, Array, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnNonI31NotTaken = BR_ON_NON(I31, I31, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnNonI31Taken =
BR_ON_NON(I31, I31,
@@ -2020,26 +2038,32 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
tester.CompileModule();
tester.CheckResult(kDataCheckNull, 0);
+ tester.CheckResult(kArrayCheckNull, 0);
tester.CheckResult(kFuncCheckNull, 0);
tester.CheckResult(kI31CheckNull, 0);
tester.CheckHasThrown(kDataCastNull);
+ tester.CheckHasThrown(kArrayCastNull);
tester.CheckHasThrown(kFuncCastNull);
tester.CheckHasThrown(kI31CastNull);
tester.CheckResult(kDataCheckSuccess, 1);
+ tester.CheckResult(kArrayCheckSuccess, 1);
tester.CheckResult(kFuncCheckSuccess, 1);
tester.CheckResult(kI31CheckSuccess, 1);
tester.CheckResult(kDataCheckFailure, 0);
+ tester.CheckResult(kArrayCheckFailure, 0);
tester.CheckResult(kFuncCheckFailure, 0);
tester.CheckResult(kI31CheckFailure, 0);
tester.CheckResult(kDataCastSuccess, 1);
+ tester.CheckResult(kArrayCastSuccess, 1);
tester.CheckResult(kFuncCastSuccess, 1);
tester.CheckResult(kI31CastSuccess, 1);
tester.CheckHasThrown(kDataCastFailure);
+ tester.CheckHasThrown(kArrayCastFailure);
tester.CheckHasThrown(kFuncCastFailure);
tester.CheckHasThrown(kI31CastFailure);
@@ -2047,6 +2071,8 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
tester.CheckResult(kBrOnDataNotTaken, 0);
tester.CheckResult(kBrOnFuncTaken, 1);
tester.CheckResult(kBrOnFuncNotTaken, 0);
+ tester.CheckResult(kBrOnArrayTaken, 1);
+ tester.CheckResult(kBrOnArrayNotTaken, 0);
tester.CheckResult(kBrOnI31Taken, 1);
tester.CheckResult(kBrOnI31NotTaken, 0);
@@ -2054,6 +2080,8 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
tester.CheckResult(kBrOnNonDataNotTaken, 1);
tester.CheckResult(kBrOnNonFuncTaken, 0);
tester.CheckResult(kBrOnNonFuncNotTaken, 1);
+ tester.CheckResult(kBrOnNonArrayTaken, 0);
+ tester.CheckResult(kBrOnNonArrayNotTaken, 1);
tester.CheckResult(kBrOnNonI31Taken, 0);
tester.CheckResult(kBrOnNonI31NotTaken, 1);
}
@@ -2082,8 +2110,8 @@ WASM_COMPILED_EXEC_TEST(BasicI31) {
WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
WasmGCTester tester(execution_tier);
const byte SuperType = tester.DefineStruct({F(wasm::kWasmI32, true)});
- const byte SubType =
- tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
+ const byte SubType = tester.DefineStruct(
+ {F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)}, SuperType);
ValueType kDataRefNull = ValueType::Ref(HeapType::kData, kNullable);
const byte ListType = tester.DefineArray(kDataRefNull, true);
@@ -2093,15 +2121,12 @@ WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
WasmInitExpr::RefNullConst(
static_cast<HeapType::Representation>(ListType)));
const byte RttSuper = tester.AddGlobal(
- ValueType::Rtt(SuperType, 0), false,
+ ValueType::Rtt(SuperType), false,
WasmInitExpr::RttCanon(static_cast<HeapType::Representation>(SuperType)));
- const byte RttSub = tester.AddGlobal(
- ValueType::Rtt(SubType, 1), false,
- WasmInitExpr::RttSub(tester.zone(),
- static_cast<HeapType::Representation>(SubType),
- WasmInitExpr::GlobalGet(RttSuper)));
+ const byte RttSub = tester.AddGlobal(ValueType::Rtt(SubType), false,
+ WasmInitExpr::RttCanon(SubType));
const byte RttList = tester.AddGlobal(
- ValueType::Rtt(ListType, 0), false,
+ ValueType::Rtt(ListType), false,
WasmInitExpr::RttCanon(static_cast<HeapType::Representation>(ListType)));
const uint32_t kListLength = 1024;
@@ -2197,25 +2222,25 @@ WASM_COMPILED_EXEC_TEST(GCTables) {
WasmGCTester tester(execution_tier);
byte super_struct = tester.DefineStruct({F(kWasmI32, false)});
- byte sub_struct =
- tester.DefineStruct({F(kWasmI32, false), F(kWasmI32, true)});
+ byte sub_struct = tester.DefineStruct({F(kWasmI32, false), F(kWasmI32, true)},
+ super_struct);
FunctionSig* super_sig =
FunctionSig::Build(tester.zone(), {kWasmI32}, {optref(sub_struct)});
byte super_sig_index = tester.DefineSignature(super_sig);
FunctionSig* sub_sig =
FunctionSig::Build(tester.zone(), {kWasmI32}, {optref(super_struct)});
- byte sub_sig_index = tester.DefineSignature(sub_sig);
+ byte sub_sig_index = tester.DefineSignature(sub_sig, super_sig_index);
tester.DefineTable(optref(super_sig_index), 10, 10);
byte super_func = tester.DefineFunction(
- super_sig, {},
+ super_sig_index, {},
{WASM_I32_ADD(WASM_STRUCT_GET(sub_struct, 0, WASM_LOCAL_GET(0)),
WASM_STRUCT_GET(sub_struct, 1, WASM_LOCAL_GET(0))),
WASM_END});
byte sub_func = tester.DefineFunction(
- sub_sig, {},
+ sub_sig_index, {},
{WASM_STRUCT_GET(super_struct, 0, WASM_LOCAL_GET(0)), WASM_END});
byte setup_func = tester.DefineFunction(
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 5ff1a422f6..56619e7763 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -55,6 +55,17 @@ constexpr uint32_t kAvailableBufferSlots = 0;
constexpr uint32_t kBufferSlotStartOffset = 0;
#endif
+void EnsureThreadHasWritePermissions() {
+#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+ pthread_jit_write_protect_np(0);
+#pragma clang diagnostic pop
+#endif
+}
+
Address AllocateJumpTableThunk(
Address jump_target, byte* thunk_slot_buffer,
std::bitset<kAvailableBufferSlots>* used_slots,
@@ -162,7 +173,7 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
__ Ret();
FlushInstructionCache(thunk, kThunkBufferSize);
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// MacOS on arm64 refuses {mprotect} calls to toggle permissions of RWX
// memory. Simply do nothing here, as the space will by default be executable
// and non-writable for the JumpTableRunner.
@@ -203,10 +214,7 @@ class JumpTablePatcher : public v8::base::Thread {
void Run() override {
TRACE("Patcher %p is starting ...\n", this);
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
- // Make sure to switch memory to writable on M1 hardware.
- CodeSpaceWriteScope code_space_write_scope(nullptr);
-#endif
+ EnsureThreadHasWritePermissions();
Address slot_address =
slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
// First, emit code to the two thunks.
@@ -261,16 +269,13 @@ TEST(JumpTablePatchingStress) {
// Iterate through jump-table slots to hammer at different alignments within
// the jump-table, thereby increasing stress for variable-length ISAs.
Address slot_start = reinterpret_cast<Address>(buffer->start());
+ EnsureThreadHasWritePermissions();
for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
std::vector<Address> patcher_thunks;
{
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
- // Make sure to switch memory to writable on M1 hardware.
- CodeSpaceWriteScope code_space_write_scope(nullptr);
-#endif
// Patch the jump table slot to jump to itself. This will later be patched
// by the patchers.
Address slot_addr =
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index 2e3cdf48ca..256092e3e5 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -431,7 +431,7 @@ TEST(Liftoff_debug_side_table_catch_all) {
LiftoffCompileEnvironment env;
TestSignatures sigs;
int ex = env.builder()->AddException(sigs.v_v());
- ValueType exception_type = ValueType::Ref(HeapType::kExtern, kNonNullable);
+ ValueType exception_type = ValueType::Ref(HeapType::kAny, kNonNullable);
auto debug_side_table = env.GenerateDebugSideTable(
{}, {kWasmI32},
{WASM_TRY_CATCH_ALL_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(ex)),
@@ -456,7 +456,7 @@ TEST(Liftoff_debug_side_table_catch_all) {
TEST(Regress1199526) {
EXPERIMENTAL_FLAG_SCOPE(eh);
LiftoffCompileEnvironment env;
- ValueType exception_type = ValueType::Ref(HeapType::kExtern, kNonNullable);
+ ValueType exception_type = ValueType::Ref(HeapType::kAny, kNonNullable);
auto debug_side_table = env.GenerateDebugSideTable(
{}, {},
{kExprTry, kVoidCode, kExprCallFunction, 0, kExprCatchAll, kExprLoop,
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index cd1bf10046..1ecc630916 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -36,7 +36,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name) { \
RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
@@ -65,7 +65,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -93,7 +93,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I32AtomicCompareExchange) {
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 3b54172e83..1c84b38f56 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -36,7 +36,7 @@ void RunU64BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name) { \
RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -65,7 +65,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##32U) { \
RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
@@ -94,7 +94,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -122,7 +122,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicCompareExchange) {
@@ -380,7 +380,7 @@ void RunDropTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##Drop) { \
RunDropTest(execution_tier, kExprI64Atomic##Name, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicSub16UDrop) {
@@ -499,7 +499,7 @@ void RunConvertTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConvert##Name) { \
RunConvertTest(execution_tier, kExprI64Atomic##Name, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
@@ -546,7 +546,7 @@ void RunNonConstIndexTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConstIndex##Name##Narrow) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
// Test a set of Regular operations
@@ -554,7 +554,7 @@ OPERATION_LIST(TEST_OPERATION)
WASM_EXEC_TEST(I64AtomicConstIndex##Name) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name, Name); \
}
-OPERATION_LIST(TEST_OPERATION)
+WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index a56f35b35f..c38d81a049 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -359,17 +359,14 @@ WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom0To0) {
}
WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom3To0) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyInbounds(execution_tier, 3, 0);
}
WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom5To9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyInbounds(execution_tier, 5, 9);
}
WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom6To6) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyInbounds(execution_tier, 6, 6);
}
@@ -413,13 +410,11 @@ void TestTableInitElems(TestExecutionTier execution_tier, int table_index) {
function_indexes.push_back(fn.function_index());
}
- // Passive element segment has [f0, f1, f2, f3, f4, null].
- function_indexes.push_back(WasmModuleBuilder::kNullIndex);
-
// Add 10 function tables, even though we only test one table.
for (int i = 0; i < 10; ++i) {
r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
}
+ // Passive element segment has [f0, f1, f2, f3, f4].
r.builder().AddPassiveElementSegment(function_indexes);
WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
@@ -466,11 +461,9 @@ WASM_COMPILED_EXEC_TEST(TableInitElems0) {
TestTableInitElems(execution_tier, 0);
}
WASM_COMPILED_EXEC_TEST(TableInitElems7) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableInitElems(execution_tier, 7);
}
WASM_COMPILED_EXEC_TEST(TableInitElems9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableInitElems(execution_tier, 9);
}
@@ -543,14 +536,8 @@ void TestTableInitOob(TestExecutionTier execution_tier, int table_index) {
}
WASM_COMPILED_EXEC_TEST(TableInitOob0) { TestTableInitOob(execution_tier, 0); }
-WASM_COMPILED_EXEC_TEST(TableInitOob7) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
- TestTableInitOob(execution_tier, 7);
-}
-WASM_COMPILED_EXEC_TEST(TableInitOob9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
- TestTableInitOob(execution_tier, 9);
-}
+WASM_COMPILED_EXEC_TEST(TableInitOob7) { TestTableInitOob(execution_tier, 7); }
+WASM_COMPILED_EXEC_TEST(TableInitOob9) { TestTableInitOob(execution_tier, 9); }
void TestTableCopyElems(TestExecutionTier execution_tier, int table_dst,
int table_src) {
@@ -619,17 +606,14 @@ WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom0To0) {
}
WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom3To0) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyElems(execution_tier, 3, 0);
}
WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom5To9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyElems(execution_tier, 5, 9);
}
WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom6To6) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyElems(execution_tier, 6, 6);
}
@@ -693,17 +677,14 @@ WASM_COMPILED_EXEC_TEST(TableCopyCallsTo0From0) {
}
WASM_COMPILED_EXEC_TEST(TableCopyCallsTo3From0) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyCalls(execution_tier, 3, 0);
}
WASM_COMPILED_EXEC_TEST(TableCopyCallsTo5From9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyCalls(execution_tier, 5, 9);
}
WASM_COMPILED_EXEC_TEST(TableCopyCallsTo6From6) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyCalls(execution_tier, 6, 6);
}
@@ -768,17 +749,14 @@ WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom0To0) {
}
WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom3To0) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOobWrites(execution_tier, 3, 0);
}
WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom5To9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOobWrites(execution_tier, 5, 9);
}
WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom6To6) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOobWrites(execution_tier, 6, 6);
}
@@ -826,17 +804,14 @@ WASM_COMPILED_EXEC_TEST(TableCopyOob1From0To0) {
}
WASM_COMPILED_EXEC_TEST(TableCopyOob1From3To0) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOob1(execution_tier, 3, 0);
}
WASM_COMPILED_EXEC_TEST(TableCopyOob1From5To9) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOob1(execution_tier, 5, 9);
}
WASM_COMPILED_EXEC_TEST(TableCopyOob1From6To6) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
TestTableCopyOob1(execution_tier, 6, 6);
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index af6510d0cd..cee94c7264 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -565,7 +565,6 @@ WASM_EXEC_TEST(TryCatchTrapRemByZero) {
}
WASM_EXEC_TEST(TryCatchTrapTableFill) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
int table_index = 0;
int length = 10; // OOB.
int start = 10; // OOB.
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 6ca2153365..3ac15d54f4 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -348,20 +348,19 @@ TEST(MemoryGrowInvalidSize) {
TEST(ReferenceTypeLocals) {
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_REF_IS_NULL(WASM_REF_NULL(kExternRefCode)));
+ BUILD(r, WASM_REF_IS_NULL(WASM_REF_NULL(kAnyRefCode)));
CHECK_EQ(1, r.Call());
}
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
- r.AllocateLocal(kWasmExternRef);
+ r.AllocateLocal(kWasmAnyRef);
BUILD(r, WASM_REF_IS_NULL(WASM_LOCAL_GET(0)));
CHECK_EQ(1, r.Call());
}
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
- r.AllocateLocal(kWasmExternRef);
- BUILD(r,
- WASM_REF_IS_NULL(WASM_LOCAL_TEE(0, WASM_REF_NULL(kExternRefCode))));
+ r.AllocateLocal(kWasmAnyRef);
+ BUILD(r, WASM_REF_IS_NULL(WASM_LOCAL_TEE(0, WASM_REF_NULL(kAnyRefCode))));
CHECK_EQ(1, r.Call());
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 81efe93eb6..3aa0fb6383 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -201,6 +201,7 @@ TEST(Run_WasmModule_CompilationHintsNoTiering) {
}
TEST(Run_WasmModule_CompilationHintsTierUp) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
{
EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
@@ -265,6 +266,7 @@ TEST(Run_WasmModule_CompilationHintsTierUp) {
}
TEST(Run_WasmModule_CompilationHintsLazyBaselineEagerTopTier) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
{
EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
index d3a50b0661..4a7cc4bc18 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
@@ -34,7 +34,7 @@ namespace test_run_wasm_relaxed_simd {
void RunWasm_##name##_Impl(TestExecutionTier execution_tier)
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
// Only used for qfma and qfms tests below.
// FMOperation holds the params (a, b, c) for a Multiply-Add or
@@ -122,10 +122,10 @@ bool ExpectFused(TestExecutionTier tier) {
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
- // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32
+ // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
WASM_RELAXED_SIMD_TEST(F32x4Qfma) {
WasmRunner<int32_t, float, float, float> r(execution_tier);
// Set up global to hold mask output.
@@ -222,7 +222,7 @@ WASM_RELAXED_SIMD_TEST(F64x2Qfms) {
}
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
- // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32
+ // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
WASM_RELAXED_SIMD_TEST(F32x4RecipApprox) {
RunF32x4UnOpTest(execution_tier, kExprF32x4RecipApprox, base::Recip,
@@ -234,7 +234,8 @@ WASM_RELAXED_SIMD_TEST(F32x4RecipSqrtApprox) {
false /* !exact */);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
namespace {
// Helper to convert an array of T into an array of uint8_t to be used a v128
// constants.
@@ -385,7 +386,11 @@ WASM_RELAXED_SIMD_TEST(I32x4RelaxedTruncF32x4U) {
IntRelaxedTruncFloatTest<uint32_t, float>(
execution_tier, kExprI32x4RelaxedTruncF32x4U, kExprF32x4Splat);
}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
+ // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_RISCV64
WASM_RELAXED_SIMD_TEST(I8x16RelaxedSwizzle) {
// Output is only defined for indices in the range [0,15].
WasmRunner<int32_t> r(execution_tier);
@@ -407,7 +412,8 @@ WASM_RELAXED_SIMD_TEST(I8x16RelaxedSwizzle) {
CHECK_EQ(LANE(dst, i), i);
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
+ // V8_TARGET_ARCH_RISCV64
#undef WASM_RELAXED_SIMD_TEST
} // namespace test_run_wasm_relaxed_simd
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
index 03f5c4ec6d..5eb495f674 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
@@ -32,12 +32,12 @@ Handle<WasmInstanceObject> CompileModule(Zone* zone, Isolate* isolate,
return maybe_instance.ToHandleChecked();
}
-bool IsGeneric(Code wrapper) {
+bool IsGeneric(CodeT wrapper) {
return wrapper.is_builtin() &&
wrapper.builtin_id() == Builtin::kGenericJSToWasmWrapper;
}
-bool IsSpecific(Code wrapper) {
+bool IsSpecific(CodeT wrapper) {
return wrapper.kind() == CodeKind::JS_TO_WASM_FUNCTION;
}
@@ -153,11 +153,10 @@ TEST(WrapperReplacement) {
// Call the exported Wasm function as many times as required to almost
// exhaust the remaining budget for using the generic wrapper.
- Handle<Code> wrapper_before_call;
+ Handle<CodeT> wrapper_before_call;
for (int i = remaining_budget; i > 0; --i) {
// Verify that the wrapper to be used is the generic one.
- wrapper_before_call =
- Handle<Code>(main_function_data->wrapper_code(), isolate);
+ wrapper_before_call = handle(main_function_data->wrapper_code(), isolate);
CHECK(IsGeneric(*wrapper_before_call));
// Call the function.
Handle<Object> params[1] = {SmiHandle(isolate, i)};
@@ -167,7 +166,7 @@ TEST(WrapperReplacement) {
}
// Get the wrapper-code object after the wrapper replacement.
- Code wrapper_after_call = main_function_data->wrapper_code();
+ CodeT wrapper_after_call = main_function_data->wrapper_code();
// Verify that the budget has been exhausted.
CHECK_EQ(main_function_data->wrapper_budget(), 0);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index f111620844..05cca1be13 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -801,7 +801,6 @@ WASM_EXEC_TEST(Select_s128_parameters) {
}
WASM_EXEC_TEST(SelectWithType_float_parameters) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
WasmRunner<float, float, float, int32_t> r(execution_tier);
BUILD(r,
WASM_SELECT_F(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)));
@@ -819,7 +818,6 @@ WASM_EXEC_TEST(Select) {
}
WASM_EXEC_TEST(SelectWithType) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
WasmRunner<int32_t, int32_t> r(execution_tier);
// return select(11, 22, a);
BUILD(r, WASM_SELECT_I(WASM_I32V_1(11), WASM_I32V_1(22), WASM_LOCAL_GET(0)));
@@ -841,7 +839,6 @@ WASM_EXEC_TEST(Select_strict1) {
}
WASM_EXEC_TEST(SelectWithType_strict1) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
WasmRunner<int32_t, int32_t> r(execution_tier);
// select(a=0, a=1, a=2); return a
BUILD(r,
@@ -866,7 +863,6 @@ WASM_EXEC_TEST(Select_strict2) {
}
WASM_EXEC_TEST(SelectWithType_strict2) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
@@ -894,7 +890,6 @@ WASM_EXEC_TEST(Select_strict3) {
}
WASM_EXEC_TEST(SelectWithType_strict3) {
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 657d2057f0..a6fc58f5d1 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -1236,6 +1236,7 @@ STREAM_TEST(TestModuleWithErrorAfterDataSection) {
// Test that cached bytes work.
STREAM_TEST(TestDeserializationBypassesCompilation) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
StreamTester tester(isolate);
ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
ZoneBuffer module_bytes =
@@ -1251,6 +1252,7 @@ STREAM_TEST(TestDeserializationBypassesCompilation) {
// Test that bad cached bytes don't cause compilation of wire bytes to fail.
STREAM_TEST(TestDeserializationFails) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
StreamTester tester(isolate);
ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
ZoneBuffer module_bytes =
@@ -1294,6 +1296,7 @@ STREAM_TEST(TestFunctionSectionWithoutCodeSection) {
}
STREAM_TEST(TestSetModuleCompiledCallback) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
StreamTester tester(isolate);
bool callback_called = false;
tester.stream()->SetModuleCompiledCallback(
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
index 239140cc83..0153f02f17 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -39,13 +39,14 @@ TEST(CacheHit) {
auto sig = sigs.i_i();
int expected_arity = static_cast<int>(sig->parameter_count());
- WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind,
- sig, expected_arity, &cache_scope);
+ WasmCode* c1 =
+ CompileImportWrapper(module.get(), isolate->counters(), kind, sig,
+ expected_arity, kNoSuspend, &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = cache_scope[{kind, sig, expected_arity}];
+ WasmCode* c2 = cache_scope[{kind, sig, expected_arity, kNoSuspend}];
CHECK_NOT_NULL(c2);
CHECK_EQ(c1, c2);
@@ -65,13 +66,14 @@ TEST(CacheMissSig) {
auto sig2 = sigs.i_ii();
int expected_arity2 = static_cast<int>(sig2->parameter_count());
- WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind,
- sig1, expected_arity1, &cache_scope);
+ WasmCode* c1 =
+ CompileImportWrapper(module.get(), isolate->counters(), kind, sig1,
+ expected_arity1, kNoSuspend, &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2}];
+ WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}];
CHECK_NULL(c2);
}
@@ -89,13 +91,14 @@ TEST(CacheMissKind) {
auto sig = sigs.i_i();
int expected_arity = static_cast<int>(sig->parameter_count());
- WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind1,
- sig, expected_arity, &cache_scope);
+ WasmCode* c1 =
+ CompileImportWrapper(module.get(), isolate->counters(), kind1, sig,
+ expected_arity, kNoSuspend, &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = cache_scope[{kind2, sig, expected_arity}];
+ WasmCode* c2 = cache_scope[{kind2, sig, expected_arity, kNoSuspend}];
CHECK_NULL(c2);
}
@@ -114,27 +117,28 @@ TEST(CacheHitMissSig) {
auto sig2 = sigs.i_ii();
int expected_arity2 = static_cast<int>(sig2->parameter_count());
- WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind,
- sig1, expected_arity1, &cache_scope);
+ WasmCode* c1 =
+ CompileImportWrapper(module.get(), isolate->counters(), kind, sig1,
+ expected_arity1, kNoSuspend, &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2}];
+ WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}];
CHECK_NULL(c2);
c2 = CompileImportWrapper(module.get(), isolate->counters(), kind, sig2,
- expected_arity2, &cache_scope);
+ expected_arity2, kNoSuspend, &cache_scope);
CHECK_NE(c1, c2);
- WasmCode* c3 = cache_scope[{kind, sig1, expected_arity1}];
+ WasmCode* c3 = cache_scope[{kind, sig1, expected_arity1, kNoSuspend}];
CHECK_NOT_NULL(c3);
CHECK_EQ(c1, c3);
- WasmCode* c4 = cache_scope[{kind, sig2, expected_arity2}];
+ WasmCode* c4 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}];
CHECK_NOT_NULL(c4);
CHECK_EQ(c2, c4);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index 04d02342e8..abb3dc9520 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -269,6 +269,7 @@ class MetricsRecorder : public v8::metrics::Recorder {
};
COMPILE_TEST(TestEventMetrics) {
+ FlagScope<bool> no_wasm_dynamic_tiering(&FLAG_wasm_dynamic_tiering, false);
std::shared_ptr<MetricsRecorder> recorder =
std::make_shared<MetricsRecorder>();
reinterpret_cast<v8::Isolate*>(isolate)->SetMetricsRecorder(recorder);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index e61be71fdc..9678a41b3e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -31,7 +31,9 @@ namespace test_wasm_serialization {
// Approximate gtest TEST_F style, in case we adopt gtest.
class WasmSerializationTest {
public:
- WasmSerializationTest() : zone_(&allocator_, ZONE_NAME) {
+ WasmSerializationTest()
+ : zone_(&allocator_, ZONE_NAME),
+ no_wasm_dynamic_tiering_(&FLAG_wasm_dynamic_tiering, false) {
// Don't call here if we move to gtest.
SetUp();
}
@@ -184,6 +186,7 @@ class WasmSerializationTest {
v8::OwnedBuffer data_;
v8::MemorySpan<const uint8_t> wire_bytes_ = {nullptr, 0};
v8::MemorySpan<const uint8_t> serialized_bytes_ = {nullptr, 0};
+ FlagScope<bool> no_wasm_dynamic_tiering_;
};
const char* WasmSerializationTest::kFunctionName = "increment";
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 301b90cdaf..9dedd390db 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -5,7 +5,7 @@
#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/call-site-info-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -91,7 +91,7 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
const ExceptionInfo& topLocation,
const v8::Local<v8::StackFrame> stackFrame) {
MessageLocation loc;
- CHECK(i_isolate->ComputeLocationFromStackTrace(&loc, exc));
+ CHECK(i_isolate->ComputeLocationFromSimpleStackTrace(&loc, exc));
printf("loc start: %d, end: %d\n", loc.start_pos(), loc.end_pos());
Handle<JSMessageObject> message = i_isolate->CreateMessage(exc, nullptr);
printf("msg start: %d, end: %d, line: %d, col: %d\n",
@@ -210,13 +210,13 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
// Extract stack trace from the exception.
Handle<FixedArray> stack_trace_object =
- isolate->GetDetailedStackTrace(Handle<JSObject>::cast(exception));
- CHECK(!stack_trace_object.is_null());
- Handle<StackFrameInfo> stack_frame(
- StackFrameInfo::cast(stack_trace_object->get(0)), isolate);
+ isolate->GetSimpleStackTrace(Handle<JSReceiver>::cast(exception));
+ CHECK_NE(0, stack_trace_object->length());
+ Handle<CallSiteInfo> stack_frame(
+ CallSiteInfo::cast(stack_trace_object->get(0)), isolate);
MaybeHandle<String> maybe_stack_trace_str =
- SerializeStackFrameInfo(isolate, stack_frame);
+ SerializeCallSiteInfo(isolate, stack_frame);
CHECK(!maybe_stack_trace_str.is_null());
Handle<String> stack_trace_str = maybe_stack_trace_str.ToHandleChecked();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 4cb9e7296e..f772b2abf6 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -5,7 +5,7 @@
#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -48,17 +48,16 @@ void CheckExceptionInfos(v8::internal::Isolate* isolate, Handle<Object> exc,
exc->Print();
// Extract stack frame from the exception.
- auto stack = Handle<FixedArray>::cast(JSReceiver::GetDataProperty(
- Handle<JSObject>::cast(exc), isolate->factory()->stack_trace_symbol()));
+ auto stack = isolate->GetSimpleStackTrace(Handle<JSObject>::cast(exc));
CHECK_EQ(N, stack->length());
for (int i = 0; i < N; ++i) {
- Handle<StackFrameInfo> info(StackFrameInfo::cast(stack->get(i)), isolate);
- auto func_name = Handle<String>::cast(StackFrameInfo::GetFunctionName(info))
- ->ToCString();
+ Handle<CallSiteInfo> info(CallSiteInfo::cast(stack->get(i)), isolate);
+ auto func_name =
+ Handle<String>::cast(CallSiteInfo::GetFunctionName(info))->ToCString();
CHECK_CSTREQ(excInfos[i].func_name, func_name.get());
- CHECK_EQ(excInfos[i].line_nr, StackFrameInfo::GetLineNumber(info));
- CHECK_EQ(excInfos[i].column, StackFrameInfo::GetColumnNumber(info));
+ CHECK_EQ(excInfos[i].line_nr, CallSiteInfo::GetLineNumber(info));
+ CHECK_EQ(excInfos[i].column, CallSiteInfo::GetColumnNumber(info));
}
}
diff --git a/deps/v8/test/cctest/wasm/wasm-atomics-utils.h b/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
index c868d3004d..d9f033766f 100644
--- a/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
@@ -13,12 +13,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define OPERATION_LIST(V) \
- V(Add) \
- V(Sub) \
- V(And) \
- V(Or) \
- V(Xor) \
+#define WASM_ATOMIC_OPERATION_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
V(Exchange)
using Uint64BinOp = uint64_t (*)(uint64_t, uint64_t);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index fd454ceb4b..142ba2d8e2 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -73,23 +73,24 @@ TestingModuleBuilder::TestingModuleBuilder(
auto resolved = compiler::ResolveWasmImportCall(
maybe_import->js_function, maybe_import->sig,
instance_object_->module(), enabled_features_);
- compiler::WasmImportCallKind kind = resolved.first;
- Handle<JSReceiver> callable = resolved.second;
+ compiler::WasmImportCallKind kind = resolved.kind;
+ Handle<JSReceiver> callable = resolved.callable;
WasmImportWrapperCache::ModificationScope cache_scope(
native_module_->import_wrapper_cache());
WasmImportWrapperCache::CacheKey key(
kind, maybe_import->sig,
- static_cast<int>(maybe_import->sig->parameter_count()));
+ static_cast<int>(maybe_import->sig->parameter_count()), kNoSuspend);
auto import_wrapper = cache_scope[key];
if (import_wrapper == nullptr) {
CodeSpaceWriteScope write_scope(native_module_);
import_wrapper = CompileImportWrapper(
native_module_, isolate_->counters(), kind, maybe_import->sig,
- static_cast<int>(maybe_import->sig->parameter_count()), &cache_scope);
+ static_cast<int>(maybe_import->sig->parameter_count()), kNoSuspend,
+ &cache_scope);
}
ImportedFunctionEntry(instance_object_, maybe_import_index)
- .SetWasmToJs(isolate_, callable, import_wrapper);
+ .SetWasmToJs(isolate_, callable, import_wrapper, resolved.suspender);
}
if (tier == TestExecutionTier::kInterpreter) {
@@ -326,11 +327,12 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
uint32_t index = static_cast<uint32_t>(test_module_->elem_segments.size());
DCHECK_EQ(index, dropped_elem_segments_.size());
- test_module_->elem_segments.emplace_back(kWasmFuncRef, false);
+ test_module_->elem_segments.emplace_back(
+ kWasmFuncRef, WasmElemSegment::kStatusPassive,
+ WasmElemSegment::kFunctionIndexElements);
auto& elem_segment = test_module_->elem_segments.back();
for (uint32_t entry : entries) {
- elem_segment.entries.push_back(
- WasmElemSegment::Entry(WasmElemSegment::Entry::kRefFuncEntry, entry));
+ elem_segment.entries.emplace_back(ConstantExpression::RefFunc(entry));
}
// The vector pointers may have moved, so update the instance object.
@@ -358,9 +360,12 @@ const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
const bool kUsesLiftoff = true;
+ DynamicTiering dynamic_tiering = FLAG_wasm_dynamic_tiering
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
size_t code_size_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(test_module_.get(),
- kUsesLiftoff);
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ test_module_.get(), kUsesLiftoff, dynamic_tiering);
auto native_module = GetWasmEngine()->NewNativeModule(
isolate_, enabled_features_, test_module_, code_size_estimate);
native_module->SetWireBytes(base::OwnedVector<const uint8_t>());
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index ba021366cb..fbdcbeba2b 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -25,8 +25,8 @@ class TestSignatures {
sig_i_ff(1, 2, kIntFloatTypes4),
sig_i_d(1, 1, kIntDoubleTypes4),
sig_i_dd(1, 2, kIntDoubleTypes4),
- sig_i_e(1, 1, kIntExternRefTypes4),
- sig_i_ee(1, 2, kIntExternRefTypes4),
+ sig_i_a(1, 1, kIntAnyRefTypes4),
+ sig_i_aa(1, 2, kIntAnyRefTypes4),
sig_i_c(1, 1, kIntFuncRefTypes4),
sig_i_s(1, 1, kIntSimd128Types4),
sig_l_v(1, 0, kLongTypes4),
@@ -37,15 +37,15 @@ class TestSignatures {
sig_f_ff(1, 2, kFloatTypes4),
sig_d_d(1, 1, kDoubleTypes4),
sig_d_dd(1, 2, kDoubleTypes4),
- sig_e_v(1, 0, kExternRefTypes4),
+ sig_a_v(1, 0, kAnyRefTypes4),
sig_c_v(1, 0, kFuncTypes4),
- sig_e_e(1, 1, kExternRefTypes4),
+ sig_a_a(1, 1, kAnyRefTypes4),
sig_c_c(1, 1, kFuncTypes4),
sig_v_v(0, 0, kIntTypes4),
sig_v_i(0, 1, kIntTypes4),
sig_v_ii(0, 2, kIntTypes4),
sig_v_iii(0, 3, kIntTypes4),
- sig_v_e(0, 1, kExternRefTypes4),
+ sig_v_a(0, 1, kAnyRefTypes4),
sig_v_c(0, 1, kFuncTypes4),
sig_v_d(0, 1, kDoubleTypes4),
sig_s_i(1, 1, kSimd128IntTypes4),
@@ -58,12 +58,12 @@ class TestSignatures {
for (int i = 0; i < 4; i++) kLongTypes4[i] = kWasmI64;
for (int i = 0; i < 4; i++) kFloatTypes4[i] = kWasmF32;
for (int i = 0; i < 4; i++) kDoubleTypes4[i] = kWasmF64;
- for (int i = 0; i < 4; i++) kExternRefTypes4[i] = kWasmExternRef;
+ for (int i = 0; i < 4; i++) kAnyRefTypes4[i] = kWasmAnyRef;
for (int i = 0; i < 4; i++) kFuncTypes4[i] = kWasmFuncRef;
for (int i = 1; i < 4; i++) kIntLongTypes4[i] = kWasmI64;
for (int i = 1; i < 4; i++) kIntFloatTypes4[i] = kWasmF32;
for (int i = 1; i < 4; i++) kIntDoubleTypes4[i] = kWasmF64;
- for (int i = 1; i < 4; i++) kIntExternRefTypes4[i] = kWasmExternRef;
+ for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef;
for (int i = 1; i < 4; i++) kIntFuncRefTypes4[i] = kWasmFuncRef;
for (int i = 0; i < 4; i++) kSimd128Types4[i] = kWasmS128;
for (int i = 1; i < 4; i++) kIntSimd128Types4[i] = kWasmS128;
@@ -71,7 +71,7 @@ class TestSignatures {
kIntLongTypes4[0] = kWasmI32;
kIntFloatTypes4[0] = kWasmI32;
kIntDoubleTypes4[0] = kWasmI32;
- kIntExternRefTypes4[0] = kWasmI32;
+ kIntAnyRefTypes4[0] = kWasmI32;
kIntFuncRefTypes4[0] = kWasmI32;
kIntSimd128Types4[0] = kWasmI32;
kSimd128IntTypes4[1] = kWasmI32;
@@ -91,8 +91,8 @@ class TestSignatures {
FunctionSig* l_l() { return &sig_l_l; }
FunctionSig* l_ll() { return &sig_l_ll; }
FunctionSig* i_ll() { return &sig_i_ll; }
- FunctionSig* i_e() { return &sig_i_e; }
- FunctionSig* i_ee() { return &sig_i_ee; }
+ FunctionSig* i_a() { return &sig_i_a; }
+ FunctionSig* i_aa() { return &sig_i_aa; }
FunctionSig* i_c() { return &sig_i_c; }
FunctionSig* i_s() { return &sig_i_s; }
@@ -101,16 +101,16 @@ class TestSignatures {
FunctionSig* d_d() { return &sig_d_d; }
FunctionSig* d_dd() { return &sig_d_dd; }
- FunctionSig* e_v() { return &sig_e_v; }
- FunctionSig* a_v() { return &sig_c_v; }
- FunctionSig* e_e() { return &sig_e_e; }
+ FunctionSig* a_v() { return &sig_a_v; }
+ FunctionSig* c_v() { return &sig_c_v; }
+ FunctionSig* a_a() { return &sig_a_a; }
FunctionSig* c_c() { return &sig_c_c; }
FunctionSig* v_v() { return &sig_v_v; }
FunctionSig* v_i() { return &sig_v_i; }
FunctionSig* v_ii() { return &sig_v_ii; }
FunctionSig* v_iii() { return &sig_v_iii; }
- FunctionSig* v_e() { return &sig_v_e; }
+ FunctionSig* v_a() { return &sig_v_a; }
FunctionSig* v_c() { return &sig_v_c; }
FunctionSig* v_d() { return &sig_v_d; }
FunctionSig* s_i() { return &sig_s_i; }
@@ -134,12 +134,12 @@ class TestSignatures {
ValueType kLongTypes4[4];
ValueType kFloatTypes4[4];
ValueType kDoubleTypes4[4];
- ValueType kExternRefTypes4[4];
+ ValueType kAnyRefTypes4[4];
ValueType kFuncTypes4[4];
ValueType kIntLongTypes4[4];
ValueType kIntFloatTypes4[4];
ValueType kIntDoubleTypes4[4];
- ValueType kIntExternRefTypes4[4];
+ ValueType kIntAnyRefTypes4[4];
ValueType kIntFuncRefTypes4[4];
ValueType kSimd128Types4[4];
ValueType kIntSimd128Types4[4];
@@ -154,8 +154,8 @@ class TestSignatures {
FunctionSig sig_i_ff;
FunctionSig sig_i_d;
FunctionSig sig_i_dd;
- FunctionSig sig_i_e;
- FunctionSig sig_i_ee;
+ FunctionSig sig_i_a;
+ FunctionSig sig_i_aa;
FunctionSig sig_i_c;
FunctionSig sig_i_s;
@@ -169,16 +169,16 @@ class TestSignatures {
FunctionSig sig_d_d;
FunctionSig sig_d_dd;
- FunctionSig sig_e_v;
+ FunctionSig sig_a_v;
FunctionSig sig_c_v;
- FunctionSig sig_e_e;
+ FunctionSig sig_a_a;
FunctionSig sig_c_c;
FunctionSig sig_v_v;
FunctionSig sig_v_i;
FunctionSig sig_v_ii;
FunctionSig sig_v_iii;
- FunctionSig sig_v_e;
+ FunctionSig sig_v_a;
FunctionSig sig_v_c;
FunctionSig sig_v_d;
FunctionSig sig_s_i;
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index 115d5a8cc9..721118ea37 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -1440,9 +1440,8 @@ class WasmInterpreterInternals {
val = WasmValue(isolate_->factory()->null_value(), p);
break;
}
- case kRef: // TODO(7748): Implement.
+ case kRef:
case kRtt:
- case kRttWithDepth:
case kVoid:
case kBottom:
case kI8:
@@ -3164,28 +3163,10 @@ class WasmInterpreterInternals {
break;
}
case kRef:
- case kOptRef: {
- switch (sig->GetParam(i).heap_representation()) {
- case HeapType::kExtern:
- case HeapType::kFunc:
- case HeapType::kEq:
- case HeapType::kData:
- case HeapType::kI31:
- case HeapType::kAny: {
- Handle<Object> ref = value.to_ref();
- encoded_values->set(encoded_index++, *ref);
- break;
- }
- case HeapType::kBottom:
- UNREACHABLE();
- default:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- }
+ case kOptRef:
+ case kRtt:
+ encoded_values->set(encoded_index++, *value.to_ref());
break;
- }
- case kRtt: // TODO(7748): Implement.
- case kRttWithDepth:
case kI8:
case kI16:
case kVoid:
@@ -3269,27 +3250,12 @@ class WasmInterpreterInternals {
break;
}
case kRef:
- case kOptRef: {
- switch (sig->GetParam(i).heap_representation()) {
- case HeapType::kExtern:
- case HeapType::kFunc:
- case HeapType::kEq:
- case HeapType::kData:
- case HeapType::kI31:
- case HeapType::kAny: {
- Handle<Object> ref(encoded_values->get(encoded_index++),
- isolate_);
- value = WasmValue(ref, sig->GetParam(i));
- break;
- }
- default:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- }
+ case kOptRef:
+ case kRtt: {
+ Handle<Object> ref(encoded_values->get(encoded_index++), isolate_);
+ value = WasmValue(ref, sig->GetParam(i));
break;
}
- case kRtt: // TODO(7748): Implement.
- case kRttWithDepth:
case kI8:
case kI16:
case kVoid:
@@ -3660,7 +3626,8 @@ class WasmInterpreterInternals {
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kRef:
- case kOptRef: {
+ case kOptRef:
+ case kRtt: {
// TODO(7748): Type checks or DCHECKs for ref types?
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
@@ -3672,8 +3639,6 @@ class WasmInterpreterInternals {
global_buffer->set(global_index, *ref);
break;
}
- case kRtt: // TODO(7748): Implement.
- case kRttWithDepth:
case kI8:
case kI16:
case kVoid:
@@ -4071,25 +4036,17 @@ class WasmInterpreterInternals {
case kVoid:
PrintF("void");
break;
- case kRef:
- case kOptRef: {
- if (val.type().is_reference_to(HeapType::kExtern)) {
- Handle<Object> ref = val.to_ref();
- if (ref->IsNull()) {
- PrintF("ref:null");
- } else {
- PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
- }
- } else {
- // TODO(7748): Implement this properly.
- PrintF("ref/ref null");
+ case kOptRef:
+ if (val.to_ref()->IsNull()) {
+ PrintF("ref:null");
+ break;
}
+ V8_FALLTHROUGH;
+ case kRef:
+ PrintF("ref:0x%" V8PRIxPTR, val.to_ref()->ptr());
break;
- }
case kRtt:
- case kRttWithDepth:
- // TODO(7748): Implement properly.
- PrintF("rtt");
+ PrintF("rtt:0x%" V8PRIxPTR, val.to_ref()->ptr());
break;
case kI8:
case kI16:
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 14755e00cf..ca7e2b172d 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -210,7 +210,7 @@
#define WASM_SELECT_D(tval, fval, cond) \
tval, fval, cond, kExprSelectWithType, U32V_1(1), kF64Code
#define WASM_SELECT_R(tval, fval, cond) \
- tval, fval, cond, kExprSelectWithType, U32V_1(1), kExternRefCode
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kAnyRefCode
#define WASM_SELECT_A(tval, fval, cond) \
tval, fval, cond, kExprSelectWithType, U32V_1(1), kFuncRefCode
@@ -531,17 +531,23 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_REF_IS_FUNC(ref) ref, WASM_GC_OP(kExprRefIsFunc)
#define WASM_REF_IS_DATA(ref) ref, WASM_GC_OP(kExprRefIsData)
+#define WASM_REF_IS_ARRAY(ref) ref, WASM_GC_OP(kExprRefIsArray)
#define WASM_REF_IS_I31(ref) ref, WASM_GC_OP(kExprRefIsI31)
#define WASM_REF_AS_FUNC(ref) ref, WASM_GC_OP(kExprRefAsFunc)
#define WASM_REF_AS_DATA(ref) ref, WASM_GC_OP(kExprRefAsData)
+#define WASM_REF_AS_ARRAY(ref) ref, WASM_GC_OP(kExprRefAsArray)
#define WASM_REF_AS_I31(ref) ref, WASM_GC_OP(kExprRefAsI31)
#define WASM_BR_ON_FUNC(depth) \
WASM_GC_OP(kExprBrOnFunc), static_cast<byte>(depth)
+#define WASM_BR_ON_ARRAY(depth) \
+ WASM_GC_OP(kExprBrOnArray), static_cast<byte>(depth)
#define WASM_BR_ON_DATA(depth) \
WASM_GC_OP(kExprBrOnData), static_cast<byte>(depth)
#define WASM_BR_ON_I31(depth) WASM_GC_OP(kExprBrOnI31), static_cast<byte>(depth)
#define WASM_BR_ON_NON_FUNC(depth) \
WASM_GC_OP(kExprBrOnNonFunc), static_cast<byte>(depth)
+#define WASM_BR_ON_NON_ARRAY(depth) \
+ WASM_GC_OP(kExprBrOnNonArray), static_cast<byte>(depth)
#define WASM_BR_ON_NON_DATA(depth) \
WASM_GC_OP(kExprBrOnNonData), static_cast<byte>(depth)
#define WASM_BR_ON_NON_I31(depth) \
@@ -564,8 +570,8 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
array, index, WASM_GC_OP(kExprArrayGetS), static_cast<byte>(typeidx)
#define WASM_ARRAY_SET(typeidx, array, index, value) \
array, index, value, WASM_GC_OP(kExprArraySet), static_cast<byte>(typeidx)
-#define WASM_ARRAY_LEN(typeidx, array) \
- array, WASM_GC_OP(kExprArrayLen), static_cast<byte>(typeidx)
+#define WASM_ARRAY_LEN(array) \
+ array, WASM_GC_OP(kExprArrayLen), /* dummy index */ 0
#define WASM_ARRAY_COPY(dst_idx, src_idx, dst_array, dst_index, src_array, \
src_index, length) \
dst_array, dst_index, src_array, src_index, length, \
@@ -578,15 +584,9 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
__VA_ARGS__, WASM_GC_OP(kExprArrayInitStatic), static_cast<byte>(index), \
static_cast<byte>(length)
-#define WASM_RTT_WITH_DEPTH(depth, typeidx) \
- kRttWithDepthCode, U32V_1(depth), U32V_1(typeidx)
#define WASM_RTT(typeidx) kRttCode, U32V_1(typeidx)
#define WASM_RTT_CANON(typeidx) \
WASM_GC_OP(kExprRttCanon), static_cast<byte>(typeidx)
-#define WASM_RTT_SUB(typeidx, supertype) \
- supertype, WASM_GC_OP(kExprRttSub), static_cast<byte>(typeidx)
-#define WASM_RTT_FRESH_SUB(typeidx, supertype) \
- supertype, WASM_GC_OP(kExprRttFreshSub), static_cast<byte>(typeidx)
#define WASM_I31_NEW(val) val, WASM_GC_OP(kExprI31New)
#define WASM_I31_GET_S(val) val, WASM_GC_OP(kExprI31GetS)
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 3de55d7623..56b5b2c52a 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -73,7 +73,6 @@ base::OwnedVector<WasmValue> MakeDefaultInterpreterArguments(
break;
case kRef:
case kRtt:
- case kRttWithDepth:
case kI8:
case kI16:
case kVoid:
@@ -108,7 +107,6 @@ base::OwnedVector<Handle<Object>> MakeDefaultArguments(Isolate* isolate,
break;
case kRef:
case kRtt:
- case kRttWithDepth:
case kI8:
case kI16:
case kVoid:
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
index 41f24955a8..e9c996aff6 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
@@ -7,6 +7,7 @@
Debug = debug.Debug
var exception = null;
+var object = {"foo": "bar"};
var object_with_symbol_key = {[Symbol("a")]: 1};
var object_with_callbacks = { toString: () => "string", valueOf: () => 3};
var symbol_for_a = Symbol.for("a");
@@ -14,6 +15,8 @@ var typed_array = new Uint8Array([1, 2, 3]);
var array_buffer = new ArrayBuffer(3);
var data_view = new DataView(new ArrayBuffer(8), 0, 8);
var array = [1,2,3];
+var pure_function = function(x) { return x * x; };
+var unpure_function = function(x) { array.push(x); };
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -219,6 +222,22 @@ function listener(event, exec_state, event_data, data) {
success("a", `Symbol.keyFor(symbol_for_a)`);
success("Symbol(a)", `symbol_for_a.valueOf().toString()`);
success("Symbol(a)", `symbol_for_a[Symbol.toPrimitive]().toString()`);
+
+ // Test Reflect functions.
+ success(4, `Reflect.apply(pure_function, undefined, [2])`);
+ fail(`Reflect.apply(unpure_function, undefined, [2])`);
+ success("foo", `Reflect.construct(String, ["foo"]).toString()`);
+ fail(`Reflect.construct(unpure_function, ["foo"])`);
+ success("bar", `Reflect.getOwnPropertyDescriptor(object, "foo").value`);
+ success(true, `Reflect.getPrototypeOf(object) === Object.prototype`);
+ success(true, `Reflect.has(object, "foo")`);
+ success(true, `Reflect.isExtensible(object)`);
+ success("foo", `Reflect.ownKeys(object)[0]`);
+ fail(`Reflect.defineProperty(object, "baz", {})`);
+ fail(`Reflect.deleteProperty(object, "foo")`);
+ fail(`Reflect.preventExtensions(object)`);
+ fail(`Reflect.set(object, "great", "expectations")`);
+ fail(`Reflect.setPrototypeOf(object, Array.prototype)`);
} catch (e) {
exception = e;
print(e, e.stack);
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
index 7a0f373be7..de3990d09b 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
@@ -11,7 +11,7 @@ success(10, `(function(){
return x;
})()`);
-// StaNamedProperty
+// SetNamedProperty
var a = {name: 'foo'};
function set_name(a) {
a.name = 'bar';
@@ -21,7 +21,7 @@ function set_name(a) {
fail(`set_name(a)`);
success('bar', `set_name({name: 'foo'})`);
-// StaNamedOwnProperty
+// DefineNamedOwnProperty
var name_value = 'value';
function create_object_literal() {
var obj = {name: name_value};
@@ -30,7 +30,7 @@ function create_object_literal() {
success('value', `create_object_literal()`);
-// StaKeyedProperty
+// SetKeyedProperty
var arrayValue = 1;
function create_array_literal() {
return [arrayValue];
@@ -52,7 +52,7 @@ var array = [1,2,3];
fail(`array.length = 2`);
success(2, `[1,2,3].length = 2`);
-// StaDataPropertyInLiteral
+// DefineKeyedOwnPropertyInLiteral
function return_literal_with_data_property(a) {
return {[a] : 1};
}
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 3140b5365f..d4803e0f08 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -136,13 +136,6 @@
}], # not has_webassembly or variant == jitless
##############################################################################
-['variant == turboprop or variant == turboprop_as_toptier', {
- # Deopts differently than TurboFan.
- 'debug/debug-optimize': [SKIP],
- 'debug/debug-compile-optimized': [SKIP],
-}], # variant == turboprop or variant == turboprop_as_toptier
-
-##############################################################################
# Tests requiring Sparkplug.
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, loong64)', {
'regress/regress-crbug-1199681': [SKIP],
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 2eda4fafc9..b266f58ab3 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -23,9 +23,9 @@ FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
v8::V8::InitializeExternalStartupData((*argv)[0]);
platform_ = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform_.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
v8::V8::Initialize();
@@ -106,9 +106,9 @@ bool FuzzerSupport::PumpMessageLoop(
// Explicitly specify some attributes to avoid issues with the linker dead-
// stripping the following function on macOS, as it is not called directly
// by fuzz target. LibFuzzer runtime uses dlsym() to resolve that function.
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
__attribute__((used)) __attribute__((visibility("default")))
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
extern "C" int
LLVMFuzzerInitialize(int* argc, char*** argv) {
v8_fuzzer::FuzzerSupport::InitializeFuzzerSupport(argc, argv);
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index 6e6d44f124..2c74544522 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -26,6 +26,7 @@
'inspector/endless-loop': [SKIP],
'inspector/invalid': [SKIP],
'inspector/regress-1166549': [SKIP],
+ 'inspector/regress-1297964': [SKIP],
}], # third_party_heap
]
diff --git a/deps/v8/test/fuzzer/inspector/regress-1297964 b/deps/v8/test/fuzzer/inspector/regress-1297964
new file mode 100644
index 0000000000..029a76c89d
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector/regress-1297964
@@ -0,0 +1,411 @@
+utils = new Proxy(utils, {
+ get: function(target, prop) {
+ if (prop in target) return target[prop];
+ return i=>i;
+ }
+ });
+
+// Loaded from 'test/inspector/protocol-test.js':
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest = {};
+InspectorTest._dumpInspectorProtocolMessages = false;
+InspectorTest._commandsForLogging = new Set();
+InspectorTest._sessions = new Set();
+
+InspectorTest.log = utils.print.bind(utils);
+InspectorTest.quitImmediately = utils.quit.bind(utils);
+
+InspectorTest.logProtocolCommandCalls = function(command) {
+ InspectorTest._commandsForLbytes;
+}
+
+InspectorTest.trimErrorMessage = function(message) {
+ if (!message.error || !message.error.data)
+ return message;
+ message.error.data = message.error.data.replace(/at position \d+/,
+ 'at <some position>');
+ return message;
+}
+
+InspectorTest.ContextGroup = class {
+ constructor() {
+ this.id = utils.createContextGroup();
+ }
+
+ createContext(name) {
+ utils.createContext(this.id, name || '');
+ }
+
+ schedulePauseOnNextStatement(reason, details) {
+ utils.schedulePauseOnNextStatement(this.id, reason, details);
+ }
+
+ cancelPauseOnNextStatement() {
+ utils.cancelPauseOnNextStatement(this.id);
+ }
+
+ addScript(string, lineOffset, columnOffset, url) {
+ utils.compileAndRunWithOrigin(this.id, string, url || '', lineOffset || 0, columnOffset || 0, false);
+ }
+
+ addInlineScript(string, url) {
+ const match = (new Error().stack).split('\n')[2].match(/([0-9]+):([0-9]+)/);
+ this.addScript(
+ string, match[1] * 1, match[1] * 1 + '.addInlineScript('.length, url);
+ }
+
+ addModule(string, url, lineOffset, columnOffset) {
+ utils.compileAndRunWithOrigin(this.id, string, url, lineOffset || 0, columnOffset || 0, true);
+ }
+
+ loadScript(fileName) {
+ this.addScript(utils.read(fileName));
+ }
+
+ connect() {
+ return new InspectorTest.Session(this);
+ }
+
+ reset() {
+ utils.resetContextGroup(this.id);
+ }
+
+ setupInjectedScriptEnvironment(session) {
+ let scriptSource = '';
+ let getters = ["length","internalConstructorName","subtype","getProperty",
+ "objectHasOwnProperty","nullifyPrototype","primitiveTypes",
+ "closureTypes","prototype","all","RemoteObject","bind",
+ "PropertyDescriptor","object","get","set","value","configurable",
+ "enumerable","symbol","getPrototypeOf","nativeAccessorDescriptor",
+ "isBuiltin","hasGetter","hasSetter","getOwnPropertyDescriptor",
+ "description","isOwn","name",
+ "typedArrayProperties","keys","getOwnPropertyNames",
+ "getOwnPropertySymbols","isPrimitiveValue","com","toLowerCase",
+ "ELEMENT","trim","replace","DOCUMENT","size","byteLength","toString",
+ "stack","substr","message","indexOf","key","type","unserializableValue",
+ "objectId","className","preview","proxyTargetValue","customPreview",
+ "CustomPreview","resolve","then","console","error","header","hasBody",
+ "stringify","ObjectPreview","ObjectPreviewType","properties",
+ "ObjectPreviewSubtype","getInternalProperties","wasThrown","indexes",
+ "overflow","valuePreview","entries"];
+ scriptSource += `(function installSettersAndGetters() {
+ let defineProperty = Object.defineProperty;
+ let ObjectPrototype = Object.prototype;
+ let ArrayPrototype = Array.prototype;
+ defineProperty(ArrayPrototype, 0, {
+ { debugger; throw 42; }, get() { debugger; throw 42; },
+ __proto__: null
+ });`,
+ scriptSource += getters.map(getter => `
+ defineProperty(ObjectPrototype, '${getter}', {
+ set() { debugger; throw 42; }, get() { debugger; throw 42; },
+ __proto__: null
+ });
+ `).join('\n') + '})();';
+ this.addScript(scriptSource);
+
+ if (session) {
+ InspectorTest.log('WARNING: setupInjectedScriptEnvironment with debug flag for debugging only and should not be landed.');
+ session.setupScriptMap();
+ session.Protocol.Debugger.enable();
+ session.Protocol.Debugger.onPaused(message => {
+ let callFrames = message.params.callFrames;
+ session.logSourceLocations(callFrames.map(frame => frame.location));
+ })
+ }
+ }
+};
+
+InspectorTest.Session = class {
+ constructor(contextGroup) {
+ this.contextGroup = contextGroup;
+ this._dispatchTable = new Map();
+ this._eventHandlers = new Map();
+ this._requestId = 0;
+ this.Protocol = this._setupProtocol();
+ InspectorTest._sessions.add(this);
+ this.id = utils.connectSession(contextGroup.id, '', this._dispatchMessage.bind(this));
+ }
+
+ disconnect() {
+ InspectorTest._sessions.delete(this);
+ utils.disconnectSession(this.id);
+ }
+
+ reconnect() {
+ var state = utils.disconnectSession(this.id);
+ this.id = utils.connectSession(this.contextGroup.id, state, this._dispatchMessage.bind(this));
+ }
+
+ async addInspectedObject(serializable) {
+ return this.Protocol.Runtime.evaluate({expression: `inspector.addInspectedObject(${this.id}, ${JSON.stringify(serializable)})`});
+ }
+
+ sendRawCommand(requestId, command, handler) {
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("frontend: " + command);
+ this._dispatchTable.set(requestId, handler);
+ utils.sendMessageToBackend(this.id, command);
+ }
+
+ setupScriptMap() {
+ if (this._scriptMap)
+ return;
+ this._scriptMap = new Map();
+ }
+
+ logCallFrames(callFrames) {
+ for (var frame of callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var scriptId = frame.location ? frame.location.scriptId : frame.scriptId;
+ var url = frame.url ? frame.url : this._scriptMap.get(scriptId).url;
+ var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
+ var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
+ InspectorTest.log(`${functionName} (${url}:${lineNumber}:${columnNumber})`);
+ }
+ }
+
+ async getScriptWithSource(scriptId, forceSourceRequest) {
+ var script = this._scriptMap.get(scriptId);
+ if (forceSourceRequest || !(script.scriptSource || script.bytecode)) {
+ var message = await this.Protocol.Debugger.getScriptSource({ scriptId });
+ script.scriptSource = message.result.scriptSource;
+ if (message.result.bytecode) {
+ script.bytecode = InspectorTest.decodeBase64(message.result.bytecode);
+ }
+ }
+ return script;
+ }
+
+ async logSourceLocation(location, forceSourceRequest) {
+ var scriptId = location.scriptId;
+ if (!this._scriptMap || !this._scriptMap.has(scriptId)) {
+ InspectorTest.log("setupScriptMap should be called before Protocol.Debugger.enable.");
+ InspectorTest.completeTest();
+ }
+ var script = await this.getScriptWithSource(scriptId, forceSourceRequest);
+
+ if (script.bytecode) {
+ if (location.lineNumber != 0) {
+ InspectorTest.log('Unexpected wasm line number: ' + location.lineNumber);
+ }
+ let wasm_opcode = script.bytecode[location.columnNumber];
+ let opcode_str = wasm_opcode.toString(16);
+ if (opcode_str.length % 2) opcode_str = `0${opcode_str}`;
+ if (InspectorTest.getWasmOpcodeName) {
+ opcode_str += ` (${InspectorTest.getWasmOpcodeName(wasm_opcode)})`;
+ }
+ InspectorTest.log(`Script ${script.url} byte offset ${
+ location.columnNumber}: Wasm opcode 0x${opcode_str}`);
+ } else {
+ var lines = script.scriptSource.split('\n');
+ var line = lines[location.lineNumber];
+ line = line.slice(0, location.columnNumber) + '#' + (line.slice(location.columnNumber) || '');
+ lines[location.lineNumber] = line;
+ lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
+ InspectorTest.log(lines.slice(Math.max(location.lineNumber - 1, 0), location.lineNumber + 2).join('\n'));
+ InspectorTest.log('');
+ }
+ }
+
+ logSourceLocations(locations) {
+ if (locations.length == 0) return Promise.resolve();
+ return this.logSourceLocation(locations[0]).then(() => this.logSourceLocations(locations.splice(1)));
+ }
+
+ async logBreakLocations(inputLocations) {
+ let locations = inputLocations.slice();
+ let scriptId = locations[0].scriptId;
+ let script = await this.getScriptWithSource(scriptId);
+ let lines = script.scriptSource.split('\n');
+ locations = locations.sort((loc1, loc2) => {
+ if (loc2.lineNumber !== loc1.lineNumber) return loc2.lineNumber - loc1.lineNumber;
+ return loc2.columnNumber - loc1.columnNumber;
+ });
+ for (let location of locations) {
+ let line = lines[location.lineNumber];
+ line = line.slice(0, location.columnNumber) + locationMark(location.type) + line.slice(location.columnNumber);
+ lines[location.lineNumber] = line;
+ }
+ lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
+ InspectorTest.log(lines.join('\n') + '\n');
+ return inputLocations;
+
+ function locationMark(type) {
+ if (type === 'return') return '|R|';
+ if (type === 'call') return '|C|';
+ if (type === 'debuggerStatement') return '|D|';
+ return '|_|';
+ }
+ }
+
+ async logTypeProfile(typeProfile, source) {
+ let entries = typeProfile.entries;
+
+ // Sort in reverse order so we can replace entries without invalidating
+ // the other offsets.
+ entries = entries.sort((a, b) => b.offset - a.offset);
+
+ for (let entry of entries) {
+ source = source.slice(0, entry.offset) + typeAnnotation(entry.types) +
+ source.slice(entry.offset);
+ }
+ InspectorTest.log(source);
+ return typeProfile;
+
+ function typeAnnotation(types) {
+ return `/*${types.map(t => t.name).join(', ')}*/`;
+ }
+ }
+
+ logAsyncStackTrace(asyncStackTrace) {
+ while (asyncStackTrace) {
+ InspectorTest.log(`-- ${asyncStackTrace.description || '<empty>'} --`);
+ this.logCallFrames(asyncStackTrace.callFrames);
+ if (asyncStackTrace.parentId) InspectorTest.log(' <external stack>');
+ asyncStackTrace = asyncStackTrace.parent;
+ }
+ }
+
+ _sendCommandPromise(method, params) {
+ if (typeof params !== 'object')
+ utils.print(`WARNING: non-object params passed to invocation of method ${method}`);
+ if (InspectorTest._commandsForLogging.has(method))
+ utils.print(method + ' called');
+ var requestId = ++this._requestId;
+ var messageObject = { "id": requestId, "method": method, "params": params };
+ return new Promise(fulfill => this.sendRawCommand(requestId, JSON.stringify(messageObject), fulfill));
+ }
+
+ _setupProtocol() {
+ return new Proxy({}, { get: (target, agentName, receiver) => new Proxy({}, {
+ get: (target, methodName, receiver) => {
+ const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
+ var match = eventPattern.exec(methodName);
+ if (!match)
+ return args => this._sendCommandPromise(`${agentName}.${methodName}`, args || {});
+ var eventName = match[2];
+ eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
+ if (match[1])
+ return numOfEvents => this._waitForEventPromise(
+ `${agentName}.${eventName}`, numOfEvents || 1);
+ return listener => this._eventHandlers.set(`${agentName}.${eventName}`, listener);
+ }
+ })});
+ }
+
+ _dispatchMessage(messageString) {
+ var messageObject = JSON.parse(messageString);
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("backend: " + JSON.stringify(messageObject));
+ const kMethodNotFound = -32601;
+ if (messageObject.error && messageObject.error.code === kMethodNotFound) {
+ InspectorTest.log(`Error: Called non-existent method. ${
+ messageObject.error.message} code: ${messageObject.error.code}`);
+ InspectorTest.completeTest();
+ }
+ try {
+ var messageId = messageObject["id"];
+ if (typeof messageId === "number") {
+ var handler = this._dispatchTable.get(messageId);
+ if (handler) {
+ handler(messageObject);
+ this._dispatchTable.delete(messageId);
+ }
+ } else {
+ var eventName = messageObject["method"];
+ var eventHandler = this._eventHandlers.get(eventName);
+ if (this._scriptMap && eventName === "Debugger.scriptParsed")
+ this._scriptMap.set(messageObject.params.scriptId, JSON.parse(JSON.stringify(messageObject.params)));
+ if (eventName === "Debugger.scriptParsed" && messageObject.params.url === "wait-for-pending-tasks.js")
+ return;
+ if (eventHandler)
+ eventHandler(messageObject);
+ }
+ } catch (e) {
+ InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
+ InspectorTest.completeTest();
+ }
+ };
+
+ _waitForEventPromise(eventName, numOfEvents) {
+ let events = [];
+ return new Promise(fulfill => {
+ this._eventHandlers.set(eventName, result => {
+ --numOfEvents;
+ events.push(result);
+ if (numOfEvents === 0) {
+ delete this._eventHandlers.delete(eventName);
+ fulfill(events.length > 1 ? events : events[0]);
+ }
+ });
+ });
+ }
+};
+
+InspectorTest.runTestSuite = function(testSuite) {
+ function nextTest() {
+ if (!testSuite.length) {
+ InspectorTest.completeTest();
+ return;
+ }
+ var fun = testSuite.shift();
+ InspectorTest.log("\nRunning test: " + fun.name);
+ fun(nextTest);
+ }
+ nextTest();
+}
+
+InspectorTest.runAsyncTestSuite = async function(testSuite) {
+ const selected = testSuite.filter(test => test.name.startsWith('f_'));
+ if (selected.length)
+ testSuite = selected;
+ for (var test of testSuite) {
+ InspectorTest.log("\nRunning test: " + test.name);
+ try {
+ await test();
+ } catch (e) {
+ utils.print(e.stack);
+ }
+ }
+ InspectorTest.completeTest();
+}
+
+InspectorTest.start = function(description) {
+ try {
+ InspectorTest.log(description);
+ var contextGroup = new InspectorTest.ContextGroup();
+ var session = contextGroup.connect();
+ return { session: session, contextGroup: contextGroup, Protocol: session.Protocol };
+ } catch (e) {
+ utils.print(e.stack);
+ }
+}
+
+// Loaded from 'test/inspector/debugger/step-into-break-on-async-call.js':
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test for Debugger.stepInto with breakOnAsyncCall.');
+
+InspectorTest.runAsyncTestSuite([
+ async function testSetTimeout() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.pause();
+ let pausedPromise = Protocol.Debugger.oncePaused();
+ Protocol.Runtime.evaluate({ expression: 'setTimeout(() => 42, 0)//# sourceURL=setTimeout.js' });
+ await pausedPromise;
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ await Protocol.Debugger.oncePaused();
+ },
+
+ async function testPromiseThen() {
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 1});
+ Protocol.Runtime.evaluate({expression: 'Promise.resolve().then(() => 21)//# sourceURL=promiseThen.js'});
+ }
+]);
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 6f25275d7a..dcc6430d07 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -114,43 +114,47 @@ ValueType GetValueTypeHelper(DataRange* data, bool liftoff_as_reference,
NonNullables allow_non_nullable,
PackedTypes include_packed_types,
Generics include_generics) {
- const bool nullable =
- (allow_non_nullable == kAllowNonNullables) ? data->get<bool>() : true;
- const uint32_t num_types =
- nullable ? num_nullable_types : num_non_nullable_types;
-
+ // Non wasm-gc types.
std::vector<ValueType> types{kWasmI32, kWasmI64, kWasmF32, kWasmF64,
kWasmS128};
- if (include_packed_types == kIncludePackedTypes) {
- types.insert(types.end(), {kWasmI8, kWasmI16});
- }
-
if (!liftoff_as_reference) {
return types[data->get<uint8_t>() % types.size()];
}
+ // If {liftoff_as_reference}, include wasm-gc types.
+ if (include_packed_types == kIncludePackedTypes) {
+ types.insert(types.end(), {kWasmI8, kWasmI16});
+ }
+ // Decide if the return type will be nullable or not.
+ const bool nullable =
+ (allow_non_nullable == kAllowNonNullables) ? data->get<bool>() : true;
if (nullable) {
- types.insert(types.end(),
- {ValueType::Ref(HeapType(HeapType::kI31), kNullable),
- kWasmExternRef, kWasmFuncRef});
+ types.insert(types.end(), {kWasmI31Ref, kWasmFuncRef});
}
-
if (include_generics == kIncludeGenerics) {
types.insert(types.end(), {kWasmDataRef, kWasmAnyRef, kWasmEqRef});
}
- uint32_t id = data->get<uint8_t>() % (types.size() + num_types);
+ // The last index of user-defined types allowed is different based on the
+ // nullability of the output.
+ const uint32_t num_user_defined_types =
+ nullable ? num_nullable_types : num_non_nullable_types;
+
+ // Conceptually, user-defined types are added to the end of the list. Pick a
+ // random one among them.
+ uint32_t id = data->get<uint8_t>() % (types.size() + num_user_defined_types);
if (id >= types.size()) {
+ // Return user-defined type.
return ValueType::Ref(id - static_cast<uint32_t>(types.size()),
nullable ? kNullable : kNonNullable);
}
-
+ // If returning a reference type, fix its nullability according to {nullable}.
if (types[id].is_reference()) {
return ValueType::Ref(types[id].heap_type(),
nullable ? kNullable : kNonNullable);
}
-
+ // Otherwise, just return the picked type.
return types[id];
}
@@ -622,7 +626,7 @@ class WasmGenerator {
uint8_t random_byte = data->get<uint8_t>();
int func_index = random_byte % functions_.size();
uint32_t sig_index = functions_[func_index];
- FunctionSig* sig = builder_->builder()->GetSignature(sig_index);
+ const FunctionSig* sig = builder_->builder()->GetSignature(sig_index);
// Generate arguments.
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Generate(sig->GetParam(i), data);
@@ -808,7 +812,8 @@ class WasmGenerator {
control_depth - catch_blocks_[catch_index]);
} else {
int tag = data->get<uint8_t>() % builder_->builder()->NumExceptions();
- FunctionSig* exception_sig = builder_->builder()->GetExceptionType(tag);
+ const FunctionSig* exception_sig =
+ builder_->builder()->GetExceptionType(tag);
base::Vector<const ValueType> exception_types(
exception_sig->parameters().begin(),
exception_sig->parameter_count());
@@ -853,7 +858,7 @@ class WasmGenerator {
bool new_default = data->get<bool>();
if (builder_->builder()->IsStructType(index)) {
- StructType* struct_gen = builder_->builder()->GetStructType(index);
+ const StructType* struct_gen = builder_->builder()->GetStructType(index);
int field_count = struct_gen->field_count();
bool can_be_defaultable = false;
@@ -902,19 +907,14 @@ class WasmGenerator {
builder_->EmitU32V(index);
}
} else {
- DCHECK(builder_->builder()->IsSignature(index));
- int func_size = builder_->builder()->NumFunctions();
- for (int i = 0; i < func_size; i++) {
- WasmFunctionBuilder* func = builder_->builder()->GetFunction(i);
- // TODO(11954): Choose a random function from among those matching the
- // signature (consider function subtyping?).
- if (*(func->signature()) ==
- *(builder_->builder()->GetSignature(index))) {
- builder_->EmitWithU32V(kExprRefFunc, func->func_index());
- return true;
- }
- }
- UNREACHABLE();
+ // Map the type index to a function index.
+ // TODO(11954. 7748): Once we have type canonicalization, choose a random
+ // function from among those matching the signature (consider function
+ // subtyping?).
+ uint32_t func_index = index - (num_arrays_ + num_structs_);
+ DCHECK_EQ(builder_->builder()->GetSignature(index),
+ builder_->builder()->GetFunction(func_index)->signature());
+ builder_->EmitWithU32V(kExprRefFunc, func_index);
}
return true;
@@ -974,10 +974,7 @@ class WasmGenerator {
table_op<kVoid>({kWasmI32, kWasmFuncRef, kWasmI32}, data, kExprTableFill);
}
void table_copy(DataRange* data) {
- ValueType needed_type =
- data->get<bool>()
- ? ValueType::Ref(HeapType(HeapType::kFunc), kNullable)
- : ValueType::Ref(HeapType(HeapType::kExtern), kNullable);
+ ValueType needed_type = data->get<bool>() ? kWasmFuncRef : kWasmAnyRef;
int table_count = builder_->builder()->NumTables();
ZoneVector<uint32_t> table(builder_->builder()->zone());
for (int i = 0; i < table_count; i++) {
@@ -1143,7 +1140,7 @@ class WasmGenerator {
if (num_structs_ > 0) {
int struct_index = data->get<uint8_t>() % num_structs_;
DCHECK(builder->IsStructType(struct_index));
- StructType* struct_type = builder->GetStructType(struct_index);
+ const StructType* struct_type = builder->GetStructType(struct_index);
ZoneVector<uint32_t> field_indices(builder->zone());
for (uint32_t i = 0; i < struct_type->field_count(); i++) {
if (struct_type->mutability(i)) {
@@ -1253,7 +1250,7 @@ class WasmGenerator {
num_structs_(num_structs),
num_arrays_(num_arrays),
liftoff_as_reference_(liftoff_as_reference) {
- FunctionSig* sig = fn->signature();
+ const FunctionSig* sig = fn->signature();
blocks_.emplace_back();
for (size_t i = 0; i < sig->return_count(); ++i) {
blocks_.back().push_back(sig->GetReturn(i));
@@ -2083,7 +2080,7 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
&WasmGenerator::new_object, &WasmGenerator::get_local_ref,
&WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref};
- constexpr GenerateFnWithHeap alternatives_func_extern[] = {
+ constexpr GenerateFnWithHeap alternatives_func_any[] = {
&WasmGenerator::table_get, &WasmGenerator::get_local_ref,
&WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref};
@@ -2094,50 +2091,61 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
switch (type.representation()) {
// For abstract types, sometimes generate one of their subtypes.
case HeapType::kAny: {
- // Note: It is possible we land here even without {liftoff_as_reference_},
- // because we use anyref as a supertype of all reference types. Therefore,
- // we have to generate the correct subtypes based on the value of
- // {liftoff_as_reference_}.
- // Weighed according to the types in the module. If there are D data types
- // and F function types, the relative frequencies for dataref is D, for
- // funcref F, for externref 1, for i31ref 2 if {liftoff_as_reference_}
- // otherwise 0, and for falling back to anyref 2 or 0.
+ // Note: It is possible we land here even without {liftoff_as_reference_}.
+ // In this case, we do not support any subtyping, and just fall back to
+ // directly generating anyref.
+ if (!liftoff_as_reference_) {
+ DCHECK(nullability);
+ GenerateOneOf(alternatives_func_any, type, data, nullability);
+ return;
+ }
+ // Weighed according to the types in the module:
+ // If there are D data types and F function types, the relative
+ // frequencies for dataref is D, for funcref F, and for i31ref and falling
+ // back to anyref 2.
const uint8_t num_data_types = num_structs_ + num_arrays_;
const uint8_t num_function_types = functions_.size();
- const uint8_t emit_externref = (nullability == kNullable) ? 1 : 0;
- const uint8_t emit_i31ref = liftoff_as_reference_ ? 2 : 0;
- const uint8_t fallback_to_anyref = liftoff_as_reference_ ? 2 : 0;
- uint8_t random = data->get<uint8_t>() %
- (num_data_types + num_function_types + emit_externref +
- emit_i31ref + fallback_to_anyref);
+ const uint8_t emit_i31ref = 2;
+ const uint8_t fallback_to_anyref = 2;
+ uint8_t random =
+ data->get<uint8_t>() % (num_data_types + num_function_types +
+ emit_i31ref + fallback_to_anyref);
// We have to compute this first so in case GenerateOneOf fails
// we will continue to fall back on an alternative that is guaranteed
// to generate a value of the wanted type.
// In order to know which alternative to fall back to in case
// GenerateOneOf failed, the random variable is recomputed.
- if (random >=
- num_data_types + num_function_types + emit_externref + emit_i31ref) {
+ if (random >= num_data_types + num_function_types + emit_i31ref) {
DCHECK(liftoff_as_reference_);
- if (GenerateOneOf(alternatives_other, type, data, nullability)) {
+ if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
return;
}
- random = data->get<uint8_t>() % (num_data_types + num_function_types +
- emit_externref + emit_i31ref);
+ random = data->get<uint8_t>() %
+ (num_data_types + num_function_types + emit_i31ref);
}
if (random < num_data_types) {
- DCHECK(liftoff_as_reference_);
GenerateRef(HeapType(HeapType::kData), data, nullability);
} else if (random < num_data_types + num_function_types) {
GenerateRef(HeapType(HeapType::kFunc), data, nullability);
- } else if (random <
- num_data_types + num_function_types + emit_externref) {
- GenerateRef(HeapType(HeapType::kExtern), data, nullability);
} else {
- DCHECK(liftoff_as_reference_);
GenerateRef(HeapType(HeapType::kI31), data, nullability);
}
return;
}
+ case HeapType::kArray: {
+ DCHECK(liftoff_as_reference_);
+ constexpr uint8_t fallback_to_dataref = 1;
+ uint8_t random =
+ data->get<uint8_t>() % (num_arrays_ + fallback_to_dataref);
+ // Try generating one of the alternatives and continue to the rest of the
+ // methods in case it fails.
+ if (random >= num_arrays_) {
+ if (GenerateOneOf(alternatives_other, type, data, nullability)) return;
+ random = data->get<uint8_t>() % num_arrays_;
+ }
+ GenerateRef(HeapType(random), data, nullability);
+ return;
+ }
case HeapType::kData: {
DCHECK(liftoff_as_reference_);
constexpr uint8_t fallback_to_dataref = 2;
@@ -2176,17 +2184,12 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
}
return;
}
- case HeapType::kExtern: {
- DCHECK(nullability);
- GenerateOneOf(alternatives_func_extern, type, data, nullability);
- return;
- }
case HeapType::kFunc: {
uint32_t random = data->get<uint32_t>() % (functions_.size() + 1);
/// Try generating one of the alternatives
// and continue to the rest of the methods in case it fails.
if (random >= functions_.size()) {
- if (GenerateOneOf(alternatives_func_extern, type, data, nullability)) {
+ if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
return;
}
random = data->get<uint32_t>() % functions_.size();
@@ -2348,6 +2351,26 @@ FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
ValueType type,
+ uint32_t num_struct_and_array_types);
+
+WasmInitExpr GenerateStructNewInitExpr(Zone* zone, WasmModuleBuilder* builder,
+ uint32_t index,
+ uint32_t num_struct_and_array_types) {
+ const StructType* struct_type = builder->GetStructType(index);
+ ZoneVector<WasmInitExpr>* elements =
+ zone->New<ZoneVector<WasmInitExpr>>(zone);
+ int field_count = struct_type->field_count();
+ for (int field_index = 0; field_index < field_count; field_index++) {
+ elements->push_back(GenerateInitExpr(zone, builder,
+ struct_type->field(field_index),
+ num_struct_and_array_types));
+ }
+ elements->push_back(WasmInitExpr::RttCanon(index));
+ return WasmInitExpr::StructNewWithRtt(index, elements);
+}
+
+WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
+ ValueType type,
uint32_t num_struct_and_array_types) {
switch (type.kind()) {
case kOptRef:
@@ -2367,50 +2390,47 @@ WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
return WasmInitExpr(s128_const);
}
case kRef: {
- HeapType::Representation representation =
- type.heap_type().representation();
- int index = 0;
- if (type.has_index()) {
- index = type.ref_index();
- }
- if (representation == HeapType::kData ||
- representation == HeapType::kAny || representation == HeapType::kEq ||
- builder->IsStructType(index)) {
- // We materialize all these types with a struct because they are all its
- // supertypes.
- DCHECK(builder->IsStructType(index));
- ZoneVector<WasmInitExpr>* elements =
- zone->New<ZoneVector<WasmInitExpr>>(zone);
- int field_count = builder->GetStructType(index)->field_count();
- for (int field_index = 0; field_index < field_count; field_index++) {
- elements->push_back(GenerateInitExpr(
- zone, builder, builder->GetStructType(index)->field(field_index),
- num_struct_and_array_types));
+ switch (type.heap_type().representation()) {
+ case HeapType::kData:
+ case HeapType::kAny:
+ case HeapType::kEq: {
+ // We materialize all these types with a struct because they are all
+ // its supertypes.
+ DCHECK(builder->IsStructType(0));
+ return GenerateStructNewInitExpr(zone, builder, 0,
+ num_struct_and_array_types);
+ }
+ case HeapType::kFunc:
+ // We just pick the function at index 0.
+ DCHECK_GT(builder->NumFunctions(), 0);
+ return WasmInitExpr::RefFuncConst(0);
+ default: {
+ uint32_t index = type.ref_index();
+ if (builder->IsStructType(index)) {
+ return GenerateStructNewInitExpr(zone, builder, index,
+ num_struct_and_array_types);
+ }
+ if (builder->IsArrayType(index)) {
+ ZoneVector<WasmInitExpr>* elements =
+ zone->New<ZoneVector<WasmInitExpr>>(zone);
+ elements->push_back(GenerateInitExpr(
+ zone, builder, builder->GetArrayType(index)->element_type(),
+ num_struct_and_array_types));
+ elements->push_back(WasmInitExpr::RttCanon(index));
+ return WasmInitExpr::ArrayInit(index, elements);
+ }
+ if (builder->IsSignature(index)) {
+ // Transform from signature index to function index.
+ return WasmInitExpr::RefFuncConst(index -
+ num_struct_and_array_types);
+ }
+ UNREACHABLE();
}
- elements->push_back(WasmInitExpr::RttCanon(index));
- return WasmInitExpr::StructNewWithRtt(index, elements);
- }
- DCHECK(type.has_index());
- if (representation == HeapType::kFunc) {
- return WasmInitExpr::RefFuncConst(index);
- }
- if (builder->IsArrayType(index)) {
- ZoneVector<WasmInitExpr>* elements =
- zone->New<ZoneVector<WasmInitExpr>>(zone);
- elements->push_back(GenerateInitExpr(
- zone, builder, builder->GetArrayType(index)->element_type(),
- num_struct_and_array_types));
- elements->push_back(WasmInitExpr::RttCanon(index));
- return WasmInitExpr::ArrayInit(index, elements);
- }
- if (builder->IsSignature(index)) {
- // Transform from signature index to function specific index.
- index -= num_struct_and_array_types;
- return WasmInitExpr::RefFuncConst(index);
}
- UNREACHABLE();
}
- default:
+ case kVoid:
+ case kRtt:
+ case kBottom:
UNREACHABLE();
}
}
@@ -2449,13 +2469,23 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
uint8_t num_fields = range.get<uint8_t>() % (kMaxStructFields + 1);
StructType::Builder struct_builder(zone, num_fields);
for (int field_index = 0; field_index < num_fields; field_index++) {
- // We exclude generics for struct 0, because in GenerateInitExpr we
- // generate it by default for kAny, kData and kEq.
- // Allowing generic types in struct 0's fields would produce
- // a recursive infinite loop.
+ // Notes:
+ // - We allow a type to only have non-nullable fields of types that
+ // are defined earlier. This way we avoid infinite non-nullable
+ // constructions. Also relevant for arrays and functions.
+ // - Currently, we also allow nullable fields to only reference types
+ // that are defined earlier. The reason is that every type can only
+ // reference types in its own or earlier recursive groups, and we do
+ // not support recursive groups yet. Also relevant for arrays and
+ // functions. TODO(7748): Change the number of nullable types once
+ // we support rec. groups.
+ // - We exclude the generics types anyref, dataref, and eqref from the
+ // fields of struct 0. This is because in GenerateInitExpr we
+ // materialize these types with (ref 0), and having such fields in
+ // struct 0 would produce an infinite recursion.
ValueType type = GetValueTypeHelper(
- &range, true, num_types, builder.NumTypes(), kAllowNonNullables,
- kIncludePackedTypes,
+ &range, true, builder.NumTypes(), builder.NumTypes(),
+ kAllowNonNullables, kIncludePackedTypes,
struct_index != 0 ? kIncludeGenerics : kExcludeGenerics);
bool mutability = range.get<bool>();
@@ -2467,18 +2497,19 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
for (int array_index = 0; array_index < num_arrays; array_index++) {
ValueType type = GetValueTypeHelper(
- &range, true, num_types, builder.NumTypes(), kAllowNonNullables,
- kIncludePackedTypes, kIncludeGenerics);
+ &range, true, builder.NumTypes(), builder.NumTypes(),
+ kAllowNonNullables, kIncludePackedTypes, kIncludeGenerics);
ArrayType* array_fuz = zone->New<ArrayType>(type, true);
builder.AddArrayType(array_fuz);
}
}
+ // We keep the signature for the first (main) function constant.
function_signatures.push_back(builder.ForceAddSignature(sigs.i_iii()));
- for (int i = 1; i < num_functions; ++i) {
+ for (uint8_t i = 1; i < num_functions; i++) {
FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig,
- liftoff_as_reference, num_types);
+ liftoff_as_reference, builder.NumTypes());
uint32_t signature_index = builder.ForceAddSignature(sig);
function_signatures.push_back(signature_index);
}
@@ -2493,9 +2524,17 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
// Generate function declarations before tables. This will be needed once we
// have typed-function tables.
std::vector<WasmFunctionBuilder*> functions;
- for (int i = 0; i < num_functions; ++i) {
- FunctionSig* sig = builder.GetSignature(function_signatures[i]);
- functions.push_back(builder.AddFunction(sig));
+ for (uint8_t i = 0; i < num_functions; i++) {
+ const FunctionSig* sig = builder.GetSignature(function_signatures[i]);
+ // If we are using wasm-gc, we cannot allow signature normalization
+ // performed by adding a function by {FunctionSig}, because we emit
+ // everything in one recursive group which blocks signature
+ // canonicalization.
+ // TODO(7748): Relax this when we implement type canonicalization and
+ // proper recursive-group support.
+ functions.push_back(liftoff_as_reference
+ ? builder.AddFunction(function_signatures[i])
+ : builder.AddFunction(sig));
}
int num_globals = range.get<uint8_t>() % (kMaxGlobals + 1);
@@ -2537,7 +2576,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
// other table indices.
// TODO(11954): Support typed function tables.
bool use_funcref = i == 0 || range.get<bool>();
- ValueType type = use_funcref ? kWasmFuncRef : kWasmExternRef;
+ ValueType type = use_funcref ? kWasmFuncRef : kWasmAnyRef;
uint32_t table_index = builder.AddTable(type, min_size, max_size);
if (type == kWasmFuncRef) {
// For function tables, initialize them with functions from the program.
@@ -2562,7 +2601,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmGenerator gen(f, function_signatures, globals, mutable_globals,
num_structs, num_arrays, &function_range,
liftoff_as_reference);
- FunctionSig* sig = f->signature();
+ const FunctionSig* sig = f->signature();
base::Vector<const ValueType> return_types(sig->returns().begin(),
sig->return_count());
gen.Generate(return_types, &function_range);
@@ -2572,8 +2611,6 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
}
builder.SetMaxMemorySize(32);
- // We enable shared memory to be able to test atomics.
- builder.SetHasSharedMemory();
builder.WriteTo(buffer);
return true;
}
@@ -2581,7 +2618,6 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
constexpr bool require_valid = true;
- EXPERIMENTAL_FLAG_SCOPE(reftypes);
EXPERIMENTAL_FLAG_SCOPE(typed_funcref);
EXPERIMENTAL_FLAG_SCOPE(gc);
EXPERIMENTAL_FLAG_SCOPE(simd);
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 3d4297b7e6..885ff9c99d 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -193,16 +193,41 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
}
namespace {
+
struct PrintSig {
const size_t num;
const std::function<ValueType(size_t)> getter;
};
+
PrintSig PrintParameters(const FunctionSig* sig) {
return {sig->parameter_count(), [=](size_t i) { return sig->GetParam(i); }};
}
+
PrintSig PrintReturns(const FunctionSig* sig) {
return {sig->return_count(), [=](size_t i) { return sig->GetReturn(i); }};
}
+
+std::string HeapTypeToConstantName(HeapType heap_type) {
+ switch (heap_type.representation()) {
+ case HeapType::kFunc:
+ return "kWasmFuncRef";
+ case HeapType::kEq:
+ return "kWasmEqRef";
+ case HeapType::kI31:
+ return "kWasmI31Ref";
+ case HeapType::kData:
+ return "kWasmDataRef";
+ case HeapType::kArray:
+ return "kWasmArrayRef";
+ case HeapType::kAny:
+ return "kWasmAnyRef";
+ case HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ return std::to_string(heap_type.ref_index());
+ }
+}
+
std::string ValueTypeToConstantName(ValueType type) {
switch (type.kind()) {
case kI8:
@@ -221,63 +246,27 @@ std::string ValueTypeToConstantName(ValueType type) {
return "kWasmS128";
case kOptRef:
switch (type.heap_representation()) {
- case HeapType::kExtern:
- return "kWasmExternRef";
case HeapType::kFunc:
return "kWasmFuncRef";
case HeapType::kEq:
return "kWasmEqRef";
case HeapType::kAny:
return "kWasmAnyRef";
- case HeapType::kData:
- return "wasmOptRefType(kWasmDataRef)";
- case HeapType::kI31:
- return "wasmOptRefType(kWasmI31Ref)";
case HeapType::kBottom:
- default:
- return "wasmOptRefType(" + std::to_string(type.ref_index()) + ")";
- }
- case kRef:
- switch (type.heap_representation()) {
- case HeapType::kExtern:
- return "wasmRefType(kWasmExternRef)";
- case HeapType::kFunc:
- return "wasmRefType(kWasmFuncRef)";
- case HeapType::kEq:
- return "wasmRefType(kWasmEqRef)";
- case HeapType::kAny:
- return "wasmRefType(kWasmAnyRef)";
+ UNREACHABLE();
case HeapType::kData:
- return "wasmRefType(kWasmDataRef)";
+ case HeapType::kArray:
case HeapType::kI31:
- return "wasmRefType(kWasmI31Ref)";
- case HeapType::kBottom:
default:
- return "wasmRefType(" + std::to_string(type.ref_index()) + ")";
+ return "wasmOptRefType(" + HeapTypeToConstantName(type.heap_type()) +
+ ")";
}
- default:
- UNREACHABLE();
- }
-}
-
-std::string HeapTypeToConstantName(HeapType heap_type) {
- switch (heap_type.representation()) {
- case HeapType::kFunc:
- return "kWasmFuncRef";
- case HeapType::kExtern:
- return "kWasmExternRef";
- case HeapType::kEq:
- return "kWasmEqRef";
- case HeapType::kI31:
- return "kWasmI31Ref";
- case HeapType::kData:
- return "kWasmDataRef";
- case HeapType::kAny:
- return "kWasmAnyRef";
- case HeapType::kBottom:
+ case kRef:
+ return "wasmRefType(" + HeapTypeToConstantName(type.heap_type()) + ")";
+ case kRtt:
+ case kVoid:
+ case kBottom:
UNREACHABLE();
- default:
- return std::to_string(heap_type.ref_index());
}
}
@@ -295,23 +284,7 @@ struct PrintName {
: name(wire_bytes.GetNameOrNull(ref)) {}
};
std::ostream& operator<<(std::ostream& os, const PrintName& name) {
- return os.write(name.name.begin(), name.name.size());
-}
-
-std::ostream& operator<<(std::ostream& os, WasmElemSegment::Entry entry) {
- os << "WasmInitExpr.";
- switch (entry.kind) {
- case WasmElemSegment::Entry::kGlobalGetEntry:
- os << "GlobalGet(" << entry.index;
- break;
- case WasmElemSegment::Entry::kRefFuncEntry:
- os << "RefFunc(" << entry.index;
- break;
- case WasmElemSegment::Entry::kRefNullEntry:
- os << "RefNull(" << HeapType(entry.index).name().c_str();
- break;
- }
- return os << ")";
+ return os.put('\'').write(name.name.begin(), name.name.size()).put('\'');
}
// An interface for WasmFullDecoder used to decode initializer expressions. As
@@ -366,6 +339,12 @@ class InitExprInterface {
result->init_expr = WasmInitExpr(imm.value);
}
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
+ const Value& rhs, Value* result) {
+ // TODO(12089): Implement.
+ UNIMPLEMENTED();
+ }
+
void RefNull(FullDecoder* decoder, ValueType type, Value* result) {
result->init_expr = WasmInitExpr::RefNullConst(type.heap_representation());
}
@@ -418,14 +397,17 @@ class InitExprInterface {
: WasmInitExpr::ArrayInit(imm.index, args);
}
- void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
- result->init_expr = WasmInitExpr::RttCanon(type_index);
+ void ArrayInitFromData(FullDecoder* decoder,
+ const ArrayIndexImmediate<validate>& array_imm,
+ const IndexImmediate<validate>& data_segment_imm,
+ const Value& offset_value, const Value& length_value,
+ const Value& rtt, Value* result) {
+ // TODO(7748): Implement.
+ UNIMPLEMENTED();
}
- void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
- Value* result, WasmRttSubMode mode) {
- result->init_expr =
- WasmInitExpr::RttSub(zone_, type_index, parent.init_expr);
+ void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
+ result->init_expr = WasmInitExpr::RttCanon(type_index);
}
void DoReturn(FullDecoder* decoder, uint32_t /*drop_values*/) {
@@ -505,14 +487,6 @@ void AppendInitExpr(std::ostream& os, const WasmInitExpr& expr) {
case WasmInitExpr::kRttCanon:
os << "RttCanon(" << expr.immediate().index;
break;
- case WasmInitExpr::kRttSub:
- os << "RttSub(" << expr.immediate().index << ", ";
- AppendInitExpr(os, (*expr.operands())[0]);
- break;
- case WasmInitExpr::kRttFreshSub:
- os << "RttFreshSub(" << expr.immediate().index << ", ";
- AppendInitExpr(os, (*expr.operands())[0]);
- break;
}
if (append_operands) {
@@ -529,18 +503,36 @@ void AppendInitExpr(std::ostream& os, const WasmInitExpr& expr) {
void DecodeAndAppendInitExpr(StdoutStream& os, Zone* zone,
const WasmModule* module,
- ModuleWireBytes module_bytes, WireBytesRef init,
- ValueType expected) {
- FunctionBody body(FunctionSig::Build(zone, {expected}, {}), init.offset(),
- module_bytes.start() + init.offset(),
- module_bytes.start() + init.end_offset());
- WasmFeatures detected;
- WasmFullDecoder<Decoder::kFullValidation, InitExprInterface, kInitExpression>
- decoder(zone, module, WasmFeatures::All(), &detected, body, zone);
-
- decoder.DecodeFunctionBody();
-
- AppendInitExpr(os, decoder.interface().result());
+ ModuleWireBytes module_bytes,
+ ConstantExpression init, ValueType expected) {
+ switch (init.kind()) {
+ case ConstantExpression::kEmpty:
+ UNREACHABLE();
+ case ConstantExpression::kI32Const:
+ AppendInitExpr(os, WasmInitExpr(init.i32_value()));
+ break;
+ case ConstantExpression::kRefNull:
+ AppendInitExpr(os, WasmInitExpr::RefNullConst(init.repr()));
+ break;
+ case ConstantExpression::kRefFunc:
+ AppendInitExpr(os, WasmInitExpr::RefFuncConst(init.index()));
+ break;
+ case ConstantExpression::kWireBytesRef: {
+ WireBytesRef ref = init.wire_bytes_ref();
+ auto sig = FixedSizeSignature<ValueType>::Returns(expected);
+ FunctionBody body(&sig, ref.offset(), module_bytes.start() + ref.offset(),
+ module_bytes.start() + ref.end_offset());
+ WasmFeatures detected;
+ WasmFullDecoder<Decoder::kFullValidation, InitExprInterface,
+ kInitExpression>
+ decoder(zone, module, WasmFeatures::All(), &detected, body, zone);
+
+ decoder.DecodeFunctionBody();
+
+ AppendInitExpr(os, decoder.interface().result());
+ break;
+ }
+ }
}
} // namespace
@@ -584,35 +576,6 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"\n"
"const builder = new WasmModuleBuilder();\n";
- if (module->has_memory) {
- os << "builder.addMemory(" << module->initial_pages;
- if (module->has_maximum_pages) {
- os << ", " << module->maximum_pages;
- } else {
- os << ", undefined";
- }
- os << ", " << (module->mem_export ? "true" : "false");
- if (module->has_shared_memory) {
- os << ", true";
- }
- os << ");\n";
- }
-
- for (WasmGlobal& global : module->globals) {
- os << "builder.addGlobal(" << ValueTypeToConstantName(global.type) << ", "
- << global.mutability << ", ";
- DecodeAndAppendInitExpr(os, &zone, module, wire_bytes, global.init,
- global.type);
- os << ");\n";
- }
-
-#if DEBUG
- for (uint8_t kind : module->type_kinds) {
- DCHECK(kWasmArrayTypeCode == kind || kWasmStructTypeCode == kind ||
- kWasmFunctionTypeCode == kind);
- }
-#endif
-
for (int i = 0; i < static_cast<int>(module->types.size()); i++) {
if (module->has_struct(i)) {
const StructType* struct_type = module->types[i].struct_type;
@@ -638,6 +601,56 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
+ for (WasmImport imported : module->import_table) {
+ // TODO(wasm): Support other imports when needed.
+ CHECK_EQ(kExternalFunction, imported.kind);
+ auto module_name = PrintName(wire_bytes, imported.module_name);
+ auto field_name = PrintName(wire_bytes, imported.field_name);
+ int sig_index = module->functions[imported.index].sig_index;
+ os << "builder.addImport(" << module_name << ", " << field_name << ", "
+ << sig_index << " /* sig */);\n";
+ }
+
+ if (module->has_memory) {
+ os << "builder.addMemory(" << module->initial_pages;
+ if (module->has_maximum_pages) {
+ os << ", " << module->maximum_pages;
+ } else {
+ os << ", undefined";
+ }
+ os << ", " << (module->mem_export ? "true" : "false");
+ if (module->has_shared_memory) {
+ os << ", true";
+ }
+ os << ");\n";
+ }
+
+ for (WasmDataSegment segment : module->data_segments) {
+ base::Vector<const uint8_t> data = wire_bytes.module_bytes().SubVector(
+ segment.source.offset(), segment.source.end_offset());
+ if (segment.active) {
+ // TODO(wasm): Add other expressions when needed.
+ CHECK_EQ(ConstantExpression::kI32Const, segment.dest_addr.kind());
+ os << "builder.addDataSegment(" << segment.dest_addr.i32_value() << ", ";
+ } else {
+ os << "builder.addPassiveDataSegment(";
+ }
+ os << "[";
+ if (!data.empty()) {
+ os << unsigned{data[0]};
+ for (unsigned byte : data + 1) os << ", " << byte;
+ }
+ os << "]);\n";
+ }
+
+ for (WasmGlobal& global : module->globals) {
+ os << "builder.addGlobal(" << ValueTypeToConstantName(global.type) << ", "
+ << global.mutability << ", ";
+ DecodeAndAppendInitExpr(os, &zone, module, wire_bytes, global.init,
+ global.type);
+ os << ");\n";
+ }
+
Zone tmp_zone(isolate->allocator(), ZONE_NAME);
// TODO(9495): Add support for tables with explicit initializers.
@@ -664,10 +677,19 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
os << "[";
for (uint32_t i = 0; i < elem_segment.entries.size(); i++) {
- os << elem_segment.entries[i];
+ if (elem_segment.element_type == WasmElemSegment::kExpressionElements) {
+ DecodeAndAppendInitExpr(os, &zone, module, wire_bytes,
+ elem_segment.entries[i], elem_segment.type);
+ } else {
+ os << elem_segment.entries[i].index();
+ }
if (i < elem_segment.entries.size() - 1) os << ", ";
}
- os << "], " << ValueTypeToConstantName(elem_segment.type) << ");\n";
+ os << "], "
+ << (elem_segment.element_type == WasmElemSegment::kExpressionElements
+ ? ValueTypeToConstantName(elem_segment.type)
+ : "undefined")
+ << ");\n";
}
for (const WasmTag& tag : module->tags) {
@@ -676,6 +698,8 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
for (const WasmFunction& func : module->functions) {
+ if (func.imported) continue;
+
base::Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
os << "// Generate function " << (func.func_index + 1) << " (out of "
<< module->functions.size() << ").\n";
@@ -713,7 +737,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
for (WasmExport& exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
- os << "builder.addExport('" << PrintName(wire_bytes, exp.name) << "', "
+ os << "builder.addExport(" << PrintName(wire_bytes, exp.name) << ", "
<< exp.index << ");\n";
}
@@ -791,15 +815,15 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
}
// Note: After dividing by 3 for 4 times, configuration_byte is within [0, 3].
- // Control whether Liftoff or the interpreter will be used as the reference
- // tier.
- // TODO(thibaudm): Port nondeterminism detection to arm.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_X86)
+// Control whether Liftoff or the interpreter will be used as the reference
+// tier.
+// TODO(thibaudm): Port nondeterminism detection to arm.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_X86) || \
+ defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
bool liftoff_as_reference = configuration_byte & 1;
#else
bool liftoff_as_reference = false;
#endif
-
FlagScope<bool> turbo_mid_tier_regalloc(&FLAG_turbo_force_mid_tier_regalloc,
configuration_byte == 0);
diff --git a/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1
index 3f67560ae4..014a7710ec 100644
--- a/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1
+++ b/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1
@@ -1 +1 @@
-a1493bb387aed38462c49f4351dc47e79e4b784b \ No newline at end of file
+5e785f96e6a9bb5a1fb54cc8649a551569aeb31d \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
index a37b08bab7..001f393148 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
@@ -9,12 +9,12 @@ test (test.js:21:2)
foo (test.js:10:2)
-- Promise.then --
-test (test.js:12:14)
+test (test.js:19:14)
(anonymous) (expr1.js:0:0)
foo (test.js:12:2)
-- Promise.then --
-test (test.js:12:14)
+test (test.js:19:14)
(anonymous) (expr1.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation.js b/deps/v8/test/inspector/debugger/async-instrumentation.js
index 6de2ce7d2f..44116637c0 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation.js
+++ b/deps/v8/test/inspector/debugger/async-instrumentation.js
@@ -4,7 +4,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks async instrumentation enabled in the middle.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function foo() {
// asyncTaskStarted
debugger;
@@ -20,9 +21,8 @@ function test() {
resolve1(); // asyncTaskScheduled
debugger;
return p2;
-}
-
-//# sourceURL=test.js`, 7, 26);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
index a2b4b96439..ab08d3d69b 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
@@ -1,16 +1,16 @@
Checks async stack for late .then handlers with gc
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:10:14)
+test (test.js:18:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:14:14)
+test (test.js:22:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:16:14)
+test (test.js:24:14)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then.js b/deps/v8/test/inspector/debugger/async-promise-late-then.js
index cad3c7ed86..9efa3fb98f 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then.js
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then.js
@@ -5,7 +5,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks async stack for late .then handlers with gc');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function foo1() {
gc();
debugger;
@@ -24,8 +25,8 @@ function test() {
var p4 = p1.then(foo1);
gc();
return Promise.all([p2,p3,p4]);
-}
-//# sourceURL=test.js`, 8, 26);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt b/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
index c7cbea72c5..80cbb1f317 100644
--- a/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
@@ -2,10 +2,10 @@ Checks that async stack contains setTimeout
inner1 (test.js:11:4)
foo1 (test.js:14:2)
-- setTimeout --
-inner2 (test.js:11:4)
-foo2 (test.js:13:2)
+inner2 (test.js:18:4)
+foo2 (test.js:20:2)
-- setTimeout --
-inner3 (test.js:18:4)
-foo3 (test.js:20:2)
+inner3 (test.js:25:4)
+foo3 (test.js:27:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-set-timeout.js b/deps/v8/test/inspector/debugger/async-set-timeout.js
index 30096b637f..ff1d69a7b9 100644
--- a/deps/v8/test/inspector/debugger/async-set-timeout.js
+++ b/deps/v8/test/inspector/debugger/async-set-timeout.js
@@ -4,7 +4,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async stack contains setTimeout');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
var resolveCallback;
function foo1() {
function inner1() {
@@ -26,8 +27,8 @@ function foo3() {
}
inner3();
return promise;
-}
-//# sourceURL=test.js`, 7, 26);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
index 176ed99f2a..21e7dc1632 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
@@ -3,78 +3,78 @@ Checks created frame for async call chain
Running test: testPromise
foo1 (test.js:10:2)
-- Promise.then --
-promise (test.js:12:14)
+promise (test.js:20:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThen
foo1 (test.js:10:2)
-- Promise.then --
-promiseThen (test.js:20:14)
+promiseThen (test.js:28:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
-- Promise.then --
-promiseThen (test.js:21:14)
+promiseThen (test.js:29:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThenThen
foo1 (test.js:10:2)
-- Promise.then --
-promiseThenThen (test.js:29:14)
+promiseThenThen (test.js:37:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:10:2)
-- Promise.then --
-promiseThenThen (test.js:30:14)
+promiseThenThen (test.js:38:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
-- Promise.then --
-promiseThenThen (test.js:29:25)
+promiseThenThen (test.js:37:25)
(anonymous) (expr.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:10:2)
-- Promise.then --
-promiseResolve (test.js:36:27)
+promiseResolve (test.js:44:27)
(anonymous) (expr.js:0:0)
Running test: testPromiseReject
foo1 (test.js:10:2)
-- Promise.catch --
-promiseReject (test.js:40:31)
+promiseReject (test.js:48:31)
(anonymous) (expr.js:0:0)
Running test: testPromiseAll
foo1 (test.js:10:2)
-- Promise.then --
-promiseAll (test.js:44:44)
+promiseAll (test.js:52:44)
(anonymous) (expr.js:0:0)
Running test: testPromiseRace
foo1 (test.js:10:2)
-- Promise.then --
-promiseRace (test.js:48:45)
+promiseRace (test.js:56:45)
(anonymous) (expr.js:0:0)
Running test: testThenableJob1
foo1 (test.js:10:2)
-- Promise.then --
-thenableJob1 (test.js:52:72)
+thenableJob1 (test.js:60:72)
(anonymous) (expr.js:0:0)
Running test: testThenableJob2
foo1 (test.js:10:2)
-- Promise.then --
-thenableJob2 (test.js:56:57)
+thenableJob2 (test.js:64:57)
(anonymous) (expr.js:0:0)
@@ -82,10 +82,10 @@ Running test: testSetTimeouts
foo1 (test.js:10:2)
(anonymous) (test.js:72:25)
-- setTimeout --
-(anonymous) (test.js:64:6)
+(anonymous) (test.js:72:6)
-- setTimeout --
-(anonymous) (test.js:63:4)
+(anonymous) (test.js:71:4)
-- setTimeout --
-setTimeouts (test.js:62:2)
+setTimeouts (test.js:70:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame.js b/deps/v8/test/inspector/debugger/async-stack-created-frame.js
index 0f2c7a1e78..c65c5499b4 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame.js
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame.js
@@ -5,7 +5,7 @@
// TODO(kozyatinskiy): fix this test.
let {session, contextGroup, Protocol} = InspectorTest.start('Checks created frame for async call chain');
-contextGroup.addScript(
+contextGroup.addInlineScript(
`
function foo1() {
debugger;
@@ -72,10 +72,8 @@ function setTimeouts() {
setTimeout(() =>
setTimeout(() => { foo1(); resolve(); }, 0), 0), 0);
return p;
-}
-
-//# sourceURL=test.js`,
- 8, 4);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
index 4de838252e..86860fdb39 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
@@ -3,94 +3,94 @@ Checks that async chains for promises are correct.
Running test: testPromise
foo1 (test.js:9:2)
-- Promise.then --
-promise (test.js:12:14)
+promise (test.js:19:14)
(anonymous) (testPromise.js:0:0)
Running test: testPromiseResolvedBySetTimeout
foo1 (test.js:9:2)
-- Promise.then --
-promiseResolvedBySetTimeout (test.js:20:14)
+promiseResolvedBySetTimeout (test.js:27:14)
(anonymous) (testPromiseResolvedBySetTimeout.js:0:0)
Running test: testPromiseAll
foo1 (test.js:9:2)
-- Promise.then --
-promiseAll (test.js:30:35)
+promiseAll (test.js:37:35)
(anonymous) (testPromiseAll.js:0:0)
Running test: testPromiseAllReverseOrder
foo1 (test.js:9:2)
-- Promise.then --
-promiseAllReverseOrder (test.js:41:35)
+promiseAllReverseOrder (test.js:48:35)
(anonymous) (testPromiseAllReverseOrder.js:0:0)
Running test: testPromiseRace
foo1 (test.js:9:2)
-- Promise.then --
-promiseRace (test.js:52:36)
+promiseRace (test.js:59:36)
(anonymous) (testPromiseRace.js:0:0)
Running test: testTwoChainedCallbacks
foo1 (test.js:9:2)
-- Promise.then --
-twoChainedCallbacks (test.js:61:14)
+twoChainedCallbacks (test.js:68:14)
(anonymous) (testTwoChainedCallbacks.js:0:0)
foo2 (test.js:13:2)
-- Promise.then --
-twoChainedCallbacks (test.js:61:25)
+twoChainedCallbacks (test.js:68:25)
(anonymous) (testTwoChainedCallbacks.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:9:2)
-- Promise.then --
-promiseResolve (test.js:67:27)
+promiseResolve (test.js:74:27)
(anonymous) (testPromiseResolve.js:0:0)
foo2 (test.js:13:2)
-- Promise.then --
-promiseResolve (test.js:67:38)
+promiseResolve (test.js:74:38)
(anonymous) (testPromiseResolve.js:0:0)
Running test: testThenableJobResolvedInSetTimeout
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedInSetTimeout (test.js:79:40)
+thenableJobResolvedInSetTimeout (test.js:86:40)
(anonymous) (testThenableJobResolvedInSetTimeout.js:0:0)
Running test: testThenableJobResolvedInSetTimeoutWithStack
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedInSetTimeoutWithStack (test.js:97:40)
+thenableJobResolvedInSetTimeoutWithStack (test.js:104:40)
(anonymous) (testThenableJobResolvedInSetTimeoutWithStack.js:0:0)
Running test: testThenableJobResolvedByPromise
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedByPromise (test.js:111:40)
+thenableJobResolvedByPromise (test.js:118:40)
(anonymous) (testThenableJobResolvedByPromise.js:0:0)
Running test: testThenableJobResolvedByPromiseWithStack
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedByPromiseWithStack (test.js:129:40)
+thenableJobResolvedByPromiseWithStack (test.js:136:40)
(anonymous) (testThenableJobResolvedByPromiseWithStack.js:0:0)
Running test: testLateThenCallback
foo1 (test.js:9:2)
-- Promise.then --
-lateThenCallback (test.js:138:12)
+lateThenCallback (test.js:145:12)
(anonymous) (testLateThenCallback.js:0:0)
@@ -98,36 +98,36 @@ Running test: testComplex
inner1 (test.js:154:6)
foo1 (test.js:156:4)
-- Promise.then --
-complex (test.js:195:5)
+complex (test.js:202:5)
(anonymous) (testComplex.js:0:0)
(anonymous) (test.js:207:8)
-- Promise.then --
-(anonymous) (test.js:199:8)
+(anonymous) (test.js:206:8)
-- Promise.then --
-(anonymous) (test.js:198:6)
+(anonymous) (test.js:205:6)
-- setTimeout --
-complex (test.js:197:2)
+complex (test.js:204:2)
(anonymous) (testComplex.js:0:0)
Running test: testReject
foo1 (test.js:9:2)
-- Promise.catch --
-reject (test.js:210:31)
+reject (test.js:217:31)
(anonymous) (testReject.js:0:0)
Running test: testFinally1
foo1 (test.js:9:2)
-- Promise.finally --
-finally1 (test.js:214:33)
+finally1 (test.js:221:33)
(anonymous) (testFinally1.js:0:0)
Running test: testFinally2
foo1 (test.js:9:2)
-- Promise.finally --
-finally2 (test.js:218:34)
+finally2 (test.js:225:34)
(anonymous) (testFinally2.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise.js b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
index 79c3261263..6aaba6dba1 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise.js
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
@@ -4,7 +4,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async chains for promises are correct.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function foo1() {
debugger;
}
@@ -223,8 +224,8 @@ function finally1() {
function finally2() {
return Promise.resolve().finally(foo1);
-}
-//# sourceURL=test.js`, 7, 26);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
index 80225b1618..27ee79fb33 100644
--- a/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
@@ -1,4 +1,6 @@
Tests super long async stacks.
+
+Running test: test
(anonymous) (expr.js:0:26)
callWithAsyncStack (utils.js:3:4)
call1 (wrapper.js:0:20)
diff --git a/deps/v8/test/inspector/debugger/async-stack-load-more.js b/deps/v8/test/inspector/debugger/async-stack-load-more.js
index 3aaaa13076..dcc3c6c798 100644
--- a/deps/v8/test/inspector/debugger/async-stack-load-more.js
+++ b/deps/v8/test/inspector/debugger/async-stack-load-more.js
@@ -15,7 +15,9 @@ function callWithAsyncStack(f, depth) {
}
//# sourceURL=utils.js`);
-(async function test() {
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([async function test() {
Protocol.Debugger.enable();
Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 4});
Protocol.Runtime.evaluate({
@@ -40,5 +42,4 @@ function callWithAsyncStack(f, depth) {
break;
}
}
- InspectorTest.completeTest();
-})()
+}]);
diff --git a/deps/v8/test/inspector/debugger/call-frame-url-expected.txt b/deps/v8/test/inspector/debugger/call-frame-url-expected.txt
index b27b40dd3d..e19908fefb 100644
--- a/deps/v8/test/inspector/debugger/call-frame-url-expected.txt
+++ b/deps/v8/test/inspector/debugger/call-frame-url-expected.txt
@@ -4,12 +4,12 @@ Tests url in Debugger.CallFrame.
url :
}
[1] : {
- url : source-url.js
+ url :
}
[2] : {
- url : test.js
+ url :
}
[3] : {
- url : expr.js
+ url :
}
]
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
index 19a944eebd..21da2d844c 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
@@ -1,7 +1,7 @@
Test private class methods
Running test: testScopesPaused
-privateProperties on the base class instance
+private properties on the base class instance
[
[0] : {
name : #inc
@@ -54,6 +54,42 @@ privateProperties on the base class instance
}
}
]
+private accessors properties on the base class instance
+[
+ [0] : {
+ name : #writeOnly
+ set : {
+ className : Function
+ description : set #writeOnly(val) { this.#field = val; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [1] : {
+ get : {
+ className : Function
+ description : get #readOnly() { return this.#field; }
+ objectId : <objectId>
+ type : function
+ }
+ name : #readOnly
+ }
+ [2] : {
+ get : {
+ className : Function
+ description : get #accessor() { return this.#field; }
+ objectId : <objectId>
+ type : function
+ }
+ name : #accessor
+ set : {
+ className : Function
+ description : set #accessor(val) { this.#field = val; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+]
Evaluating private methods
{
result : {
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt
new file mode 100644
index 0000000000..4e1b681a19
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt
@@ -0,0 +1,66 @@
+Test getting private class methods from an instance that calls nested super()
+
+Running test: testScopesPaused
+properties after super() is called in IIFE
+[
+ [0] : {
+ name : #b
+ value : {
+ className : Function
+ description : #b() {}
+ objectId : <objectId>
+ type : function
+ }
+ }
+]
+privateProperties after super() is called in arrow function
+[
+ [0] : {
+ name : #b
+ value : {
+ className : Function
+ description : #b() {}
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [1] : {
+ get : {
+ className : Function
+ description : get #c() {}
+ objectId : <objectId>
+ type : function
+ }
+ name : #c
+ }
+]
+privateProperties after super() is called in eval()
+[
+ [0] : {
+ name : #b
+ value : {
+ className : Function
+ description : #b() {}
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [1] : {
+ get : {
+ className : Function
+ description : get #c() {}
+ objectId : <objectId>
+ type : function
+ }
+ name : #c
+ }
+ [2] : {
+ name : #d
+ set : {
+ className : Function
+ description : set #d(val) {}
+ objectId : <objectId>
+ type : function
+ }
+ }
+]
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js b/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js
new file mode 100644
index 0000000000..5a8452f55c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js
@@ -0,0 +1,79 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Test getting private class methods from an instance that calls nested super()"
+);
+
+contextGroup.addScript(`
+function run() {
+ class A {}
+ class B extends A {
+ #b() {}
+ constructor() {
+ (() => super())();
+ }
+ test() { debugger; }
+ };
+ (new B()).test();
+
+ class C extends B {
+ get #c() {}
+ constructor() {
+ const callSuper = () => super();
+ callSuper();
+ }
+ test() { debugger; }
+ };
+ (new C()).test();
+
+ class D extends C {
+ set #d(val) {}
+ constructor(str) {
+ eval(str);
+ }
+ test() { debugger; }
+ };
+ (new D('super();')).test();
+}`);
+
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({ expression: "run()" });
+
+ let {
+ params: { callFrames }
+ } = await Protocol.Debugger.oncePaused(); // inside B constructor
+ let frame = callFrames[0];
+ let { result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ });
+
+ InspectorTest.log('properties after super() is called in IIFE');
+ InspectorTest.logMessage(result.privateProperties);
+ Protocol.Debugger.resume();
+
+ ({ params: { callFrames } }
+ = await Protocol.Debugger.oncePaused()); // inside C constructor
+ frame = callFrames[0];
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ }));
+ InspectorTest.log('privateProperties after super() is called in arrow function');
+ InspectorTest.logMessage(result.privateProperties);
+ Protocol.Debugger.resume();
+ ({ params: { callFrames } }
+ = await Protocol.Debugger.oncePaused()); // inside D constructor
+ frame = callFrames[0];
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ }));
+ InspectorTest.log('privateProperties after super() is called in eval()');
+ InspectorTest.logMessage(result.privateProperties);
+ Protocol.Debugger.resume();
+
+ Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/class-private-methods.js b/deps/v8/test/inspector/debugger/class-private-methods.js
index f86ea6dee1..86839f87d6 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods.js
@@ -56,7 +56,15 @@ InspectorTest.runAsyncTestSuite([
objectId: frame.this.objectId
});
- InspectorTest.log('privateProperties on the base class instance');
+ InspectorTest.log('private properties on the base class instance');
+ InspectorTest.logMessage(result.privateProperties);
+
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId,
+ accessorPropertiesOnly: true,
+ }));
+
+ InspectorTest.log('private accessors properties on the base class instance');
InspectorTest.logMessage(result.privateProperties);
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
index a3b6826878..3c541d3496 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
@@ -8,9 +8,9 @@ asyncFact (test.js:9:2)
asyncFact (test.js:11:2)
-- await --
-asyncFact (test.js:3:20)
-asyncFact (test.js:3:20)
-asyncFact (test.js:3:20)
+asyncFact (test.js:10:20)
+asyncFact (test.js:10:20)
+asyncFact (test.js:10:20)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js
index c33ff6b93c..02962781f8 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js
@@ -4,7 +4,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Check that continue-to-location works with different strategies.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
async function asyncFact(n) {
if (n == 0) return 1;
let r = n * await asyncFact(n - 1);
@@ -20,15 +21,18 @@ function fact(n) {
}
function topLevel() {
- eval(` + '`' + `
+ eval(` +
+ '`' +
+ `
var a = 1;
var b = 2;
fact(3);
console.log(a + b);
- ` + '`' + `);
-}
-
-//# sourceURL=test.js`, 7, 26);
+ ` +
+ '`' +
+ `);
+}`,
+ 'test.js');
session.setupScriptMap();
InspectorTest.runAsyncTestSuite([
diff --git a/deps/v8/test/inspector/debugger/destroy-in-break-program2.js b/deps/v8/test/inspector/debugger/destroy-in-break-program2.js
index 94df496520..b0a6566905 100644
--- a/deps/v8/test/inspector/debugger/destroy-in-break-program2.js
+++ b/deps/v8/test/inspector/debugger/destroy-in-break-program2.js
@@ -27,6 +27,7 @@ const {session, contextGroup, Protocol} = InspectorTest.start(
const contextGroup = new InspectorTest.ContextGroup();
const session2 = contextGroup.connect();
const Protocol2 = session2.Protocol;
+ session2.setupScriptMap();
Protocol2.Runtime.enable();
Protocol2.Debugger.enable();
@@ -40,7 +41,8 @@ const {session, contextGroup, Protocol} = InspectorTest.start(
});
const paused = (await Protocol2.Debugger.oncePaused()).params;
- InspectorTest.log(`paused in: ${paused.callFrames[0].url}`);
+ InspectorTest.log(
+ `paused in: ${session2.getCallFrameUrl(paused.callFrames[0])}`);
// Now if we're paused in the wrong place, we will likely crash.
session2.disconnect();
diff --git a/deps/v8/test/inspector/debugger/external-stack-trace.js b/deps/v8/test/inspector/debugger/external-stack-trace.js
index 78725937c2..fe1a0346c7 100644
--- a/deps/v8/test/inspector/debugger/external-stack-trace.js
+++ b/deps/v8/test/inspector/debugger/external-stack-trace.js
@@ -11,6 +11,9 @@ let contextGroup2 = new InspectorTest.ContextGroup();
let session2 = contextGroup2.connect();
let Protocol2 = session2.Protocol;
+session1.setupScriptMap();
+session2.setupScriptMap();
+
let utilsScript = `
function store(description) {
let buffer = inspector.storeCurrentStackTrace(description);
@@ -23,11 +26,10 @@ function started(id) {
function finished(id) {
inspector.externalAsyncTaskFinished(Int32Array.from(JSON.parse(id)).buffer);
-}
-//# sourceURL=utils.js`;
+}`;
-contextGroup1.addScript(utilsScript);
-contextGroup2.addScript(utilsScript);
+contextGroup1.addScript(utilsScript, 0, 0, 'utils.js');
+contextGroup2.addScript(utilsScript, 0, 0, 'utils.js');
InspectorTest.runAsyncTestSuite([
async function testDebuggerId() {
diff --git a/deps/v8/test/inspector/debugger/framework-break-expected.txt b/deps/v8/test/inspector/debugger/framework-break-expected.txt
index b8469f4ecb..414a72a4ac 100644
--- a/deps/v8/test/inspector/debugger/framework-break-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-break-expected.txt
@@ -20,7 +20,7 @@ throwUncaughtError (framework.js:21:2)
Running test: testUncaughtExceptionWithInlinedFrame
> mixed top frame in framework:
-throwUserException (user.js:66:2)
+throwUserException (user.js:76:2)
inlinedWrapper (framework.js:56:4)
throwInlinedUncaughtError (framework.js:60:2)
(anonymous) (framework.js:0:0)
@@ -50,7 +50,7 @@ syncDOMBreakpoint (framework.js:33:12)
Running test: testSyncDOMBreakpointWithInlinedUserFrame
> mixed, top frame in framework:
syncDOMBreakpoint (framework.js:33:12)
-userFunction (user.js:70:2)
+userFunction (user.js:80:2)
inlinedWrapper (framework.js:65:4)
syncDOMBreakpointWithInlinedUserFrame (framework.js:69:2)
(anonymous) (framework.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/framework-break.js b/deps/v8/test/inspector/debugger/framework-break.js
index 45cdf5c2b2..04455d9b0f 100644
--- a/deps/v8/test/inspector/debugger/framework-break.js
+++ b/deps/v8/test/inspector/debugger/framework-break.js
@@ -5,7 +5,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that breaks in framework code correctly processed.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function frameworkAssert() {
console.assert(false);
}
@@ -67,20 +68,19 @@ function syncDOMBreakpointWithInlinedUserFrame() {
%PrepareFunctionForOptimization(inlinedWrapper);
%OptimizeFunctionOnNextCall(inlinedWrapper);
inlinedWrapper();
-}
-
-//# sourceURL=framework.js`, 8, 26);
+}`,
+ 'framework.js');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function throwUserException() {
throw new Error();
}
function userFunction() {
syncDOMBreakpoint();
-}
-
-//# sourceURL=user.js`, 64, 26)
+}`,
+ 'user.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
index 3e6299154d..721097d84f 100644
--- a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
+++ b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
@@ -4,7 +4,8 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks nested scheduled break in framework code.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function frameworkCall(callback) {
inspector.callWithScheduledBreak(doFrameworkWork.bind(null, callback),
'top-framework-scheduled-break',
@@ -18,11 +19,11 @@ function doFrameworkWork(callback) {
function doFrameworkBreak() {
inspector.breakProgram('framework-break', JSON.stringify({ data: 'data for framework-break' }));
-}
-
-//# sourceURL=framework.js`, 7, 26);
+}`,
+ 'framework.js');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function testFunction() {
inspector.callWithScheduledBreak(frameworkCall.bind(null, callback),
'top-scheduled-break', '');
@@ -31,9 +32,8 @@ function testFunction() {
function callback() {
inspector.breakProgram('user-break', JSON.stringify({ data: 'data for user-break' }));
return 42;
-}
-
-//# sourceURL=user.js`, 25, 26);
+}`,
+ 'user.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/framework-precise-ranges.js b/deps/v8/test/inspector/debugger/framework-precise-ranges.js
index 4f76033a96..1ecd1703e8 100644
--- a/deps/v8/test/inspector/debugger/framework-precise-ranges.js
+++ b/deps/v8/test/inspector/debugger/framework-precise-ranges.js
@@ -4,7 +4,7 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks framework debugging with blackboxed ranges.');
-contextGroup.addScript(
+contextGroup.addInlineScript(
`
function foo() {
return boo();
@@ -14,9 +14,8 @@ function boo() {
}
function testFunction() {
foo();
-}
-//# sourceURL=test.js`,
- 7, 26);
+}`,
+ 'test.js');
session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
diff --git a/deps/v8/test/inspector/debugger/framework-stepping-expected.txt b/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
index fa2bb35f5a..346ca93766 100644
--- a/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
@@ -5,21 +5,21 @@ Running test: testStepIntoFromUser
Executing stepInto...
Executing stepInto...
-userFoo (user.js:23:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userFoo (user.js:21:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing stepInto...
Executing stepInto...
-userBoo (user.js:27:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userBoo (user.js:25:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing stepInto...
Executing stepInto...
-testStepFromUser (user.js:32:0)
+testStepFromUser (user.js:30:0)
(anonymous) (expr.js:0:0)
Executing resume...
@@ -29,21 +29,21 @@ Running test: testStepOverFromUser
Executing stepInto...
Executing stepInto...
-userFoo (user.js:23:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userFoo (user.js:21:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing stepOver...
Executing stepOver...
-userBoo (user.js:27:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userBoo (user.js:25:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing stepOver...
Executing stepOver...
-testStepFromUser (user.js:32:0)
+testStepFromUser (user.js:30:0)
(anonymous) (expr.js:0:0)
Executing resume...
@@ -53,50 +53,50 @@ Running test: testStepOutFromUser
Executing stepInto...
Executing stepInto...
-userFoo (user.js:23:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userFoo (user.js:21:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing stepOut...
-userBoo (user.js:27:2)
-frameworkCall (framework.js:10:23)
-testStepFromUser (user.js:31:2)
+userBoo (user.js:25:2)
+frameworkCall (framework.js:9:23)
+testStepFromUser (user.js:29:2)
(anonymous) (expr.js:0:0)
Executing resume...
Running test: testStepIntoFromFramework
-frameworkBreakAndCall (framework.js:14:12)
-testStepFromFramework (user.js:35:2)
+frameworkBreakAndCall (framework.js:13:12)
+testStepFromFramework (user.js:33:2)
(anonymous) (expr.js:0:0)
Executing stepInto...
-userFoo (user.js:23:2)
-frameworkBreakAndCall (framework.js:15:23)
-testStepFromFramework (user.js:35:2)
+userFoo (user.js:21:2)
+frameworkBreakAndCall (framework.js:14:23)
+testStepFromFramework (user.js:33:2)
(anonymous) (expr.js:0:0)
Executing resume...
Running test: testStepOverFromFramework
-frameworkBreakAndCall (framework.js:14:12)
-testStepFromFramework (user.js:35:2)
+frameworkBreakAndCall (framework.js:13:12)
+testStepFromFramework (user.js:33:2)
(anonymous) (expr.js:0:0)
Executing stepOver...
-testStepFromFramework (user.js:36:0)
+testStepFromFramework (user.js:34:0)
(anonymous) (expr.js:0:0)
Executing resume...
Running test: testStepOutFromFramework
-frameworkBreakAndCall (framework.js:14:12)
-testStepFromFramework (user.js:35:2)
+frameworkBreakAndCall (framework.js:13:12)
+testStepFromFramework (user.js:33:2)
(anonymous) (expr.js:0:0)
Executing stepOut...
-testStepFromFramework (user.js:36:0)
+testStepFromFramework (user.js:34:0)
(anonymous) (expr.js:0:0)
Executing resume...
diff --git a/deps/v8/test/inspector/debugger/framework-stepping.js b/deps/v8/test/inspector/debugger/framework-stepping.js
index f91c06ba52..3ce20d8801 100644
--- a/deps/v8/test/inspector/debugger/framework-stepping.js
+++ b/deps/v8/test/inspector/debugger/framework-stepping.js
@@ -4,7 +4,7 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks stepping with blackboxed frames on stack');
-contextGroup.addScript(
+contextGroup.addInlineScript(
`
function frameworkCall(funcs) {
for (var f of funcs) f();
@@ -13,11 +13,10 @@ function frameworkCall(funcs) {
function frameworkBreakAndCall(funcs) {
inspector.breakProgram('', '');
for (var f of funcs) f();
-}
-//# sourceURL=framework.js`,
- 8, 4);
+}`,
+ 'framework.js');
-contextGroup.addScript(
+contextGroup.addInlineScript(
`
function userFoo() {
return 1;
@@ -33,9 +32,8 @@ function testStepFromUser() {
function testStepFromFramework() {
frameworkBreakAndCall([userFoo, userBoo]);
-}
-//# sourceURL=user.js`,
- 21, 4);
+}`,
+ 'user.js');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
index 6968ed3eab..4362e8dd6a 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
@@ -1,7 +1,5 @@
Checks if we keep alive breakpoint information for top-level functions when calling getPossibleBreakpoints.
-Result of get possible breakpoints in topLevel.js
-[{"scriptId":"3","lineNumber":0,"columnNumber":0},{"scriptId":"3","lineNumber":0,"columnNumber":8,"type":"call"},{"scriptId":"3","lineNumber":0,"columnNumber":43,"type":"return"}]
Result of get possible breakpoints in moduleFunc.js
-[{"scriptId":"5","lineNumber":0,"columnNumber":22},{"scriptId":"5","lineNumber":0,"columnNumber":30,"type":"call"},{"scriptId":"5","lineNumber":0,"columnNumber":63,"type":"return"},{"scriptId":"5","lineNumber":0,"columnNumber":64,"type":"return"}]
+[{"scriptId":"3","lineNumber":0,"columnNumber":22},{"scriptId":"3","lineNumber":0,"columnNumber":30,"type":"call"},{"scriptId":"3","lineNumber":0,"columnNumber":63,"type":"return"},{"scriptId":"3","lineNumber":0,"columnNumber":64,"type":"return"}]
Result of get possible breakpoints in mixedFunctions.js
-[{"scriptId":"7","lineNumber":0,"columnNumber":15,"type":"return"},{"scriptId":"7","lineNumber":1,"columnNumber":2},{"scriptId":"7","lineNumber":1,"columnNumber":10,"type":"call"},{"scriptId":"7","lineNumber":2,"columnNumber":0,"type":"return"}]
+[{"scriptId":"5","lineNumber":0,"columnNumber":15,"type":"return"},{"scriptId":"5","lineNumber":1,"columnNumber":2},{"scriptId":"5","lineNumber":1,"columnNumber":10,"type":"call"},{"scriptId":"5","lineNumber":2,"columnNumber":0,"type":"return"}]
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
index 097d0b99af..da281a47fc 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
@@ -17,7 +17,6 @@ const callGarbageCollector = `
%CollectGarbage("");
`;
-const topLevelFunction = `console.log('This is a top level function')`;
const moduleFunction =
`function testFunc() { console.log('This is a module function') }`;
let mixedFunctions = ` function A() {}
@@ -34,8 +33,6 @@ function onDebuggerEnabled() {
async function onExecutionContextCreated(messageObject) {
executionContextId = messageObject.params.context.id;
await testGetPossibleBreakpoints(
- executionContextId, topLevelFunction, 'topLevel.js');
- await testGetPossibleBreakpoints(
executionContextId, moduleFunction, 'moduleFunc.js');
await testGetPossibleBreakpoints(
executionContextId, mixedFunctions, 'mixedFunctions.js');
diff --git a/deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt b/deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt
index e5dc5542f2..475fa67d5a 100644
--- a/deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt
+++ b/deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt
@@ -1,23 +1,37 @@
Test that all 'other' reasons are explicitly encoded on a pause event if they overlap with another reason
Running test: testBreakpointPauseReason
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"foo.js","scriptId":"3"}},{"reason":"other"}]}.
+Paused with reason instrumentation, data {"scriptId":"3","url":"foo.js"} and scriptId: 3.
+Paused with reason other, data {} and scriptId: 3.
Running test: testTriggeredPausePauseReason
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"foo.js","scriptId":"4"}},{"reason":"other"}]}.
+Paused with reason instrumentation, data {"scriptId":"4","url":"foo.js"} and scriptId: 4.
+Paused with reason other, data {} and scriptId: 4.
Running test: testSteppingPauseReason
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"foo.js","scriptId":"5"}},{"reason":"other"}]}.
-Paused with reason: other and data: {}.
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"bar.js","scriptId":"6"}},{"reason":"other"}]}.
+Paused with reason instrumentation, data {"scriptId":"5","url":"foo.js"} and scriptId: 5.
+Paused with reason other, data {} and scriptId: 5.
+Paused with reason other, data {} and scriptId: 5.
+Paused with reason instrumentation, data {"scriptId":"6","url":"bar.js"} and scriptId: 6.
+Paused with reason other, data {} and scriptId: 6.
Running test: testOnlyReportOtherWithEmptyDataOnce
-Paused with reason: other and data: {}.
+Paused with reason other, data {} and scriptId: 7.
Running test: testDebuggerStatementReason
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"foo.js","scriptId":"8"}},{"reason":"other"}]}.
+Paused with reason instrumentation, data {"scriptId":"8","url":"foo.js"} and scriptId: 8.
+Paused with reason other, data {} and scriptId: 8.
Running test: testAsyncSteppingPauseReason
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"foo.js","scriptId":"9"}},{"reason":"other"}]}.
-Paused with reason: other and data: {}.
-Paused with reason: ambiguous and data: {"reasons":[{"reason":"instrumentation","auxData":{"url":"bar.js","scriptId":"10"}},{"reason":"other"}]}.
+Paused with reason instrumentation, data {"scriptId":"9","url":"foo.js"} and scriptId: 9.
+Paused with reason other, data {} and scriptId: 9.
+Paused with reason other, data {} and scriptId: 9.
+Paused with reason instrumentation, data {"scriptId":"10","url":"bar.js"} and scriptId: 10.
+Paused with reason other, data {} and scriptId: 10.
+Paused with reason other, data {} and scriptId: 10.
+
+Running test: testSteppingOutPauseReason
+Paused with reason instrumentation, data {"scriptId":"11","url":"foo.js"} and scriptId: 11.
+Paused with reason other, data {} and scriptId: 11.
+Paused with reason instrumentation, data {"scriptId":"12","url":"bar.js"} and scriptId: 12.
+Paused with reason other, data {} and scriptId: 11.
diff --git a/deps/v8/test/inspector/debugger/other-pause-reasons.js b/deps/v8/test/inspector/debugger/other-pause-reasons.js
index 41520ae734..b307bd516b 100644
--- a/deps/v8/test/inspector/debugger/other-pause-reasons.js
+++ b/deps/v8/test/inspector/debugger/other-pause-reasons.js
@@ -5,12 +5,22 @@
const { session, contextGroup, Protocol } = InspectorTest.start(
`Test that all 'other' reasons are explicitly encoded on a pause event if they overlap with another reason`);
-function resumeOnPause({params: {reason, data}}) {
- InspectorTest.log(`Paused with reason: ${reason} and data: ${
- data ? JSON.stringify(data) : '{}'}.`)
- Protocol.Debugger.resume();
+function handlePause(
+ noInstrumentationStepAction, options,
+ {params: {reason, data, callFrames}}) {
+ const scriptId = callFrames[0].functionLocation.scriptId;
+ InspectorTest.log(`Paused with reason ${reason}, data ${
+ data ? JSON.stringify(data) : '{}'} and scriptId: ${scriptId}.`);
+
+ if (reason === 'instrumentation') {
+ Protocol.Debugger.resume();
+ } else {
+ Protocol.Debugger[noInstrumentationStepAction](options);
+ }
}
+const resumeOnPause = handlePause.bind(null, 'resume', null);
+
async function setUpEnvironment() {
await Protocol.Debugger.enable();
await Protocol.Runtime.enable();
@@ -53,17 +63,6 @@ InspectorTest.runAsyncTestSuite([
await setUpEnvironment();
await Protocol.Debugger.setInstrumentationBreakpoint(
{instrumentation: 'beforeScriptExecution'});
- const stepOnPause = (({params: {reason, data}}) => {
- InspectorTest.log(`Paused with reason: ${reason} and data: ${
- data ? JSON.stringify(data) : '{}'}.`);
- if (reason === 'instrumentation') {
- Protocol.Debugger.resume();
- } else {
- Protocol.Debugger.stepInto();
- }
- });
- Protocol.Debugger.onPaused(stepOnPause);
-
const {result: {scriptId}} = await Protocol.Runtime.compileScript({
expression: `setTimeout('console.log(3);//# sourceURL=bar.js', 0);`,
sourceURL: 'foo.js',
@@ -74,7 +73,17 @@ InspectorTest.runAsyncTestSuite([
url: 'foo.js',
});
- await Protocol.Runtime.runScript({scriptId});
+ const runPromise = Protocol.Runtime.runScript({scriptId});
+ // Pausing 5 times:
+ // 2x instrumentation breaks,
+ // 1x breakpoint,
+ // 2x step ins: end of setTimeout function, start of inner script.
+ for (var i = 0; i < 5; ++i) {
+ const msg = await Protocol.Debugger.oncePaused();
+ handlePause('stepInto', null, msg);
+ }
+
+ await runPromise;
await tearDownEnvironment();
},
async function testOnlyReportOtherWithEmptyDataOnce() {
@@ -110,17 +119,41 @@ InspectorTest.runAsyncTestSuite([
await setUpEnvironment();
await Protocol.Debugger.setInstrumentationBreakpoint(
{instrumentation: 'beforeScriptExecution'});
- const stepOnPause = (({params: {reason, data}}) => {
- InspectorTest.log(`Paused with reason: ${reason} and data: ${
- data ? JSON.stringify(data) : '{}'}.`);
- Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- });
- Protocol.Debugger.onPaused(stepOnPause);
const expression =
`debugger; setTimeout('console.log(3);//# sourceURL=bar.js', 0);`;
const {result: {scriptId}} = await Protocol.Runtime.compileScript(
{expression, sourceURL: 'foo.js', persistScript: true});
- await Protocol.Runtime.runScript({scriptId});
+ const runPromise = Protocol.Runtime.runScript({scriptId});
+ // Pausing 6 times:
+ // 2x instrumentation breaks,
+ // 1x debugger statement,
+ // 3x steps in: start of setTimeout, start of inner script, end of inner script.
+ for (var i = 0; i < 6; ++i) {
+ const msg = await Protocol.Debugger.oncePaused();
+ handlePause('stepInto', {breakOnAsyncCall: true}, msg);
+ }
+ await runPromise;
+ await tearDownEnvironment();
+ },
+ async function testSteppingOutPauseReason() {
+ await setUpEnvironment();
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const expression = `
+ function test() {
+ debugger;
+ eval('console.log(3);//# sourceURL=bar.js');
+ }
+ test();
+ `
+ const {result: {scriptId}} = await Protocol.Runtime.compileScript(
+ {expression, sourceURL: 'foo.js', persistScript: true});
+
+ const runPromise = Protocol.Runtime.runScript({scriptId});
+ const stepOutOnPause = handlePause.bind(this, 'stepOut', null);
+ Protocol.Debugger.onPaused(stepOutOnPause);
+
+ await runPromise;
await tearDownEnvironment();
},
]);
diff --git a/deps/v8/test/inspector/debugger/pause-at-negative-offset.js b/deps/v8/test/inspector/debugger/pause-at-negative-offset.js
index 4d0928b2ec..a709b48eea 100644
--- a/deps/v8/test/inspector/debugger/pause-at-negative-offset.js
+++ b/deps/v8/test/inspector/debugger/pause-at-negative-offset.js
@@ -8,7 +8,7 @@ let {session, contextGroup, Protocol} =
(async function test() {
session.setupScriptMap();
await Protocol.Debugger.enable();
- contextGroup.addScript(`debugger;//# sourceURL=test.js`, -3, -3);
+ contextGroup.addScript(`debugger;`, -3, -3, 'test.js');
let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
session.logCallFrames(callFrames);
InspectorTest.completeTest();
diff --git a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized-expected.txt b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized-expected.txt
index af9afabce4..2f2791e7e5 100644
--- a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized-expected.txt
@@ -1,4 +1,6 @@
Checks pause inside blackboxed optimized function call.
+
+Running test: test
bar (test.js:2:4)
foo (framework.js:2:15)
(anonymous) (expr.js:1:12)
diff --git a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
index 36f523d8ad..2d66f08300 100644
--- a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
+++ b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
@@ -26,7 +26,9 @@ contextGroup.addScript(`
//# sourceURL=test.js
`);
-(async function test(){
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([async function test() {
Protocol.Debugger.enable();
Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
Protocol.Runtime.evaluate({expression: `
@@ -35,5 +37,4 @@ contextGroup.addScript(`
`});
const {params:{callFrames}} = await Protocol.Debugger.oncePaused();
session.logCallFrames(callFrames);
- InspectorTest.completeTest();
-})();
+}]);
diff --git a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
index 9e26f2bdd4..c0be81b257 100644
--- a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
+++ b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
@@ -124,7 +124,7 @@ Test runtime stack trace:
}
Test debugger stack trace:
[
- [0] : prefix://url
- [1] : boo.js
- [2] : prefix://url
+ [0] :
+ [1] :
+ [2] :
]
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt
index 773f69990e..512ae69fcb 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt
@@ -1,5 +1,5 @@
Checks if we keep alive breakpoint information for top-level functions.
Result of setting breakpoint in topLevel.js
-[{"scriptId":"3","lineNumber":0,"columnNumber":0}]
+[]
Result of setting breakpoint in moduleFunc.js
-[{"scriptId":"5","lineNumber":0,"columnNumber":22}] \ No newline at end of file
+[{"scriptId":"5","lineNumber":0,"columnNumber":22}]
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation-expected.txt
new file mode 100644
index 0000000000..bade1a9791
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation-expected.txt
@@ -0,0 +1,26 @@
+Test if breakpoints are hit that are set on instrumentation pause in js
+
+Running test: testSetBreakpointOnInstrumentationPause
+set breakpoint and evaluate script..
+Setting breakpoint at instrumentation break location
+Paused at foo.js with reason "instrumentation".
+Hit breakpoints: []
+Paused at foo.js with reason "other".
+Hit breakpoints: ["4:0:20:3"]
+Done.
+
+Running test: testSetConditionalBreakpointTrueConditionOnInstrumentationPause
+set breakpoint and evaluate script..
+Setting breakpoint at instrumentation break location
+Paused at foo.js with reason "instrumentation".
+Hit breakpoints: []
+Paused at foo.js with reason "other".
+Hit breakpoints: ["4:0:20:4"]
+Done.
+
+Running test: testSetConditionalBreakpointFalseConditionOnInstrumentationPause
+set breakpoint and evaluate script..
+Setting breakpoint at instrumentation break location
+Paused at foo.js with reason "instrumentation".
+Hit breakpoints: []
+Done.
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation.js b/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation.js
new file mode 100644
index 0000000000..41f885a06e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation.js
@@ -0,0 +1,87 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test if breakpoints are hit that are set on instrumentation pause in js');
+
+session.setupScriptMap();
+function setBreakpoint(msg, condition) {
+ const reason = msg.params.reason;
+ if (reason === 'instrumentation') {
+ const top_frame = msg.params.callFrames[0];
+ const scriptId = top_frame.location.scriptId;
+ const columnNumber = top_frame.location.columnNumber;
+
+ InspectorTest.log('Setting breakpoint at instrumentation break location');
+ const breakpoint_info = {
+ 'location': {scriptId, 'lineNumber': 0, columnNumber}
+ };
+ if (condition) {
+ breakpoint_info.condition = condition;
+ }
+ return Protocol.Debugger.setBreakpoint(breakpoint_info);
+ }
+ return Promise.resolve();
+}
+
+function handlePause(msg) {
+ const top_frame = msg.params.callFrames[0];
+ const reason = msg.params.reason;
+ const url = session.getCallFrameUrl(top_frame);
+ InspectorTest.log(`Paused at ${url} with reason "${reason}".`);
+ InspectorTest.log(
+ `Hit breakpoints: ${JSON.stringify(msg.params.hitBreakpoints)}`)
+ return Protocol.Debugger.resume();
+};
+
+// Helper function to check if we can successfully set and evaluate breakpoints
+// on an instrumentation pause.
+async function runSetBreakpointOnInstrumentationTest(condition) {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+
+ InspectorTest.log('set breakpoint and evaluate script..');
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const runPromise =
+ Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+
+ // First pause: instrumentation
+ const msg = await Protocol.Debugger.oncePaused();
+ await setBreakpoint(msg, condition);
+ await handlePause(msg);
+
+ // Second pause: if condition evaluates to true
+ if (!condition || eval(condition)) {
+ await handlePause(await Protocol.Debugger.oncePaused());
+ }
+
+ InspectorTest.log('Done.');
+ await runPromise;
+ await Protocol.Runtime.disable();
+ await Protocol.Debugger.disable();
+}
+
+InspectorTest.runAsyncTestSuite([
+ // Test if we can set a breakpoint on the first breakable location (which is
+ // the same location as where the instrumentation breakpoint hits) and
+ // successfully hit the breakpoint.
+ async function testSetBreakpointOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest();
+ },
+
+ // Test if we can set a conditional breakpoint on the first breakable location
+ // and successfully hit the breakpoint.
+ async function
+ testSetConditionalBreakpointTrueConditionOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest('4 > 3');
+ },
+
+ // Test if we can set a conditional breakpoint on the first breakable location
+ // which evaluates to false, and therefore does not trigger a pause.
+ async function
+ testSetConditionalBreakpointFalseConditionOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest('3 > 4');
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt
index 94f58aacd1..c0fb6f18a4 100644
--- a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt
@@ -87,3 +87,40 @@ paused with reason: instrumentation
sourceMapURL : boo.js
url : foo.js
}
+
+Running test: testRemoveAfterCompile
+set breakpoint..
+compile script..
+Remove instrumentation breakpoint..
+evaluate script..
+no breakpoint was hit
+
+Running test: testRemoveBeforeEvaluate
+set breakpoint..
+Remove instrumentation breakpoint..
+evaluate script..
+no breakpoint was hit
+
+Running test: testRemoveAfterOnePause
+set breakpoint..
+evaluate script..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ url : foo.js
+}
+Remove instrumentation breakpoint..
+evaluate another script..
+no breakpoint was hit
+
+Running test: testInstrumentationCoincideWithScheduledPauseOnNextStatement
+set breakpoint..
+set instrumentation
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ url :
+}
+paused with reason: instrumentation:scriptFirstStatement
+{
+}
diff --git a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js
index 3c52bdf16d..c5939bdab0 100644
--- a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js
@@ -127,5 +127,100 @@ InspectorTest.runAsyncTestSuite([
}
await Protocol.Debugger.disable();
await Protocol.Runtime.disable();
+ },
+
+ async function testRemoveAfterCompile() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint..');
+ const { result : {breakpointId} } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+ InspectorTest.log('compile script..');
+ const { result: { scriptId } } = await Protocol.Runtime.compileScript({
+ expression: 'console.log(3)', sourceURL: 'foo.js', persistScript: true });
+
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+
+ InspectorTest.log('evaluate script..');
+ await Protocol.Runtime.runScript({ scriptId });
+ InspectorTest.log('no breakpoint was hit');
+
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testRemoveBeforeEvaluate() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint..');
+ const { result : {breakpointId} } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+
+ InspectorTest.log('evaluate script..');
+ await Protocol.Runtime.evaluate({expression: 'console.log(3) //# sourceURL=foo.js'});
+ InspectorTest.log('no breakpoint was hit');
+
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testRemoveAfterOnePause() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint..');
+ const { result : {breakpointId} } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+
+ InspectorTest.log('evaluate script..');
+ Protocol.Runtime.evaluate({expression: 'console.log(3) //# sourceURL=foo.js'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+
+ InspectorTest.log('evaluate another script..');
+ await Protocol.Runtime.evaluate({expression: 'console.log(3) //# sourceURL=foo.js'});
+ InspectorTest.log('no breakpoint was hit');
+
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testInstrumentationCoincideWithScheduledPauseOnNextStatement() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint..');
+ InspectorTest.log('set instrumentation');
+ await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+ contextGroup.schedulePauseOnNextStatement('instrumentation:scriptFirstStatement', '{}');
+ const runPromise = Protocol.Runtime.evaluate({expression: 'console.log(3)'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ Protocol.Debugger.resume();
+ }
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ Protocol.Debugger.resume();
+ }
+ await runPromise;
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
}
]);
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
index 417f4a3936..b859d03924 100644
--- a/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
@@ -5,6 +5,8 @@
let {session, contextGroup, Protocol} =
InspectorTest.start('Test for Debugger.stepInto with breakOnAsyncCall.');
+session.setupScriptMap();
+
InspectorTest.runAsyncTestSuite([
async function testSetTimeout() {
Protocol.Debugger.enable();
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
index ce82054f06..b2e7e3e86b 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
@@ -1,4 +1,6 @@
Test for step-into remote async task
+
+Running test: test
Setup debugger agents..
Pause before stack trace is captured..
Run stepInto with breakOnAsyncCall flag
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task.js b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
index 59b78c1630..30ef3bc321 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task.js
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
@@ -1,6 +1,8 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --no-compilation-cache
InspectorTest.log('Test for step-into remote async task');
@@ -18,11 +20,8 @@ function store(description) {
}
//# sourceURL=utils.js`;
-// TODO(rmcilroy): This has to be in this order since the i::Script object gets
-// reused via the CompilationCache, and we want OnAfterCompile to be called
-// for contextGroup1 last on this script.
-contextGroup2.addScript(utilsScript);
contextGroup1.addScript(utilsScript);
+contextGroup2.addScript(utilsScript);
let frameworkScript = `
function call(id, f) {
@@ -38,7 +37,7 @@ contextGroup2.addScript(frameworkScript);
session1.setupScriptMap();
session2.setupScriptMap();
-(async function test() {
+InspectorTest.runAsyncTestSuite([async function test() {
InspectorTest.log('Setup debugger agents..');
let debuggerId1 = (await Protocol1.Debugger.enable()).result.debuggerId;
let debuggerId2 = (await Protocol2.Debugger.enable()).result.debuggerId;
@@ -77,7 +76,7 @@ session2.setupScriptMap();
let debuggers = new Map(
[[debuggerId1, Protocol1.Debugger], [debuggerId2, Protocol2.Debugger]]);
let sessions = new Map([[debuggerId1, session1], [debuggerId2, session2]]);
- let currentDebuggerId = debuggerId1;
+ let currentDebuggerId = debuggerId2;
while (true) {
sessions.get(currentDebuggerId).logCallFrames(callFrames);
if (asyncStackTraceId) {
@@ -98,6 +97,4 @@ session2.setupScriptMap();
Protocol2.Debugger.setAsyncCallStackDepth({maxDepth: 0});
await Protocol1.Debugger.disable();
await Protocol2.Debugger.disable();
-
- InspectorTest.completeTest();
-})()
+}]);
diff --git a/deps/v8/test/inspector/debugger/step-into-next-script-expected.txt b/deps/v8/test/inspector/debugger/step-into-next-script-expected.txt
index b0e34cceec..1e1a7e73ed 100644
--- a/deps/v8/test/inspector/debugger/step-into-next-script-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-next-script-expected.txt
@@ -8,7 +8,7 @@ test (foo.js:12:2)
(anonymous) (timeout1.js:0:8)
-foo (timeout2.js:2:2)
+foo (timeout2.js:19:2)
(anonymous) (timeout3.js:0:8)
@@ -28,9 +28,9 @@ test (foo.js:13:0)
(anonymous) (timeout1.js:0:35)
-foo (timeout2.js:2:2)
+foo (timeout2.js:19:2)
-foo (timeout2.js:2:12)
+foo (timeout2.js:19:12)
(anonymous) (timeout3.js:0:8)
@@ -61,9 +61,9 @@ test (foo.js:13:0)
(anonymous) (timeout1.js:0:35)
-foo (timeout2.js:2:2)
+foo (timeout2.js:19:2)
-foo (timeout2.js:2:12)
+foo (timeout2.js:19:12)
(anonymous) (timeout3.js:0:8)
diff --git a/deps/v8/test/inspector/debugger/step-into-next-script.js b/deps/v8/test/inspector/debugger/step-into-next-script.js
index 80e9a9180f..7624536444 100644
--- a/deps/v8/test/inspector/debugger/step-into-next-script.js
+++ b/deps/v8/test/inspector/debugger/step-into-next-script.js
@@ -4,20 +4,22 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Debugger breaks in next script after stepOut from previous one.');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function test() {
setTimeout('var a = 1;//# sourceURL=timeout1.js', 0);
setTimeout(foo, 0);
setTimeout('var a = 3;//# sourceURL=timeout3.js', 0);
debugger;
-}
-//# sourceURL=foo.js`, 7, 26);
+}`,
+ 'foo.js');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function foo() {
return 42;
-}
-//# sourceURL=timeout2.js`)
+}`,
+ 'timeout2.js');
session.setupScriptMap();
var stepAction;
diff --git a/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt b/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
index 83efd83046..2294d1df57 100644
--- a/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
@@ -1,6 +1,6 @@
StepOut from return position of async function.
-Running test: testStepInto
+Running test: testStepIntoAtReturnPosition
p.then(() => 1);
#debugger;
return p;
@@ -13,12 +13,12 @@ Running test: testStepInto
return p;#
}
- await p;
- p.then(() => #1);
- debugger;
+ await foo();
+ #}
-Running test: testStepOver
+
+Running test: testStepOverAtReturnPosition
p.then(() => 1);
#debugger;
return p;
@@ -31,20 +31,12 @@ Running test: testStepOver
return p;#
}
- await p;
- p.then(() => #1);
- debugger;
-
- await p;
- p.then(() => 1#);
- debugger;
-
await foo();
#}
-Running test: testStepOut
+Running test: testStepOutAtReturnPosition
p.then(() => 1);
#debugger;
return p;
@@ -57,9 +49,15 @@ Running test: testStepOut
return p;#
}
- await p;
- p.then(() => #1);
- debugger;
+ await foo();
+ #}
+
+
+
+Running test: testStepOut
+ p.then(() => 1);
+ #debugger;
+ return p;
await foo();
#}
diff --git a/deps/v8/test/inspector/debugger/step-out-async-await.js b/deps/v8/test/inspector/debugger/step-out-async-await.js
index 4164aa2871..1fd8a98eff 100644
--- a/deps/v8/test/inspector/debugger/step-out-async-await.js
+++ b/deps/v8/test/inspector/debugger/step-out-async-await.js
@@ -2,10 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(kozyatinskiy): on StepOut and probably StepOver at return position
-// of async generator we should break at next instruction of resumed generator
-// instead of next scheduled microtask.
-
let {session, contextGroup, Protocol} = InspectorTest.start('StepOut from return position of async function.');
contextGroup.addScript(`
@@ -22,48 +18,90 @@ contextGroup.addScript(`
`);
session.setupScriptMap();
-Protocol.Debugger.enable();
+
InspectorTest.runAsyncTestSuite([
- async function testStepInto() {
- Protocol.Runtime.evaluate({expression: 'testFunction()'});
+ async function testStepIntoAtReturnPosition() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ ]);
+ const evalPromise =
+ Protocol.Runtime.evaluate({expression: 'testFunction()'});
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.resume();
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Debugger.disable(),
+ Protocol.Runtime.disable(),
+ ]);
},
- async function testStepOver() {
- Protocol.Runtime.evaluate({expression: 'testFunction()'});
- await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ async function testStepOverAtReturnPosition() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ ]);
+ const evalPromise =
+ Protocol.Runtime.evaluate({expression: 'testFunction()'});
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepOver();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepOver();
+ await Protocol.Debugger.stepOver();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepOver();
- await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.resume();
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Debugger.disable(),
+ Protocol.Runtime.disable(),
+ ]);
},
- async function testStepOut() {
- Protocol.Runtime.evaluate({expression: 'testFunction()'});
+ async function testStepOutAtReturnPosition() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ ]);
+ const evalPromise =
+ Protocol.Runtime.evaluate({expression: 'testFunction()'});
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepInto();
+ await Protocol.Debugger.stepInto();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepOut();
+ await Protocol.Debugger.stepOut();
+ await logPauseLocation(await Protocol.Debugger.oncePaused());
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Debugger.disable(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testStepOut() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ ]);
+ const evalPromise =
+ Protocol.Runtime.evaluate({expression: 'testFunction()'});
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.stepOut();
+ await Protocol.Debugger.stepOut();
await logPauseLocation(await Protocol.Debugger.oncePaused());
- Protocol.Debugger.resume();
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Debugger.disable(),
+ Protocol.Runtime.disable(),
+ ]);
},
]);
diff --git a/deps/v8/test/inspector/debugger/wasm-externref-global.js b/deps/v8/test/inspector/debugger/wasm-externref-global.js
index 8daa70033e..5c8ff40b0e 100644
--- a/deps/v8/test/inspector/debugger/wasm-externref-global.js
+++ b/deps/v8/test/inspector/debugger/wasm-externref-global.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes
-
utils.load('test/inspector/wasm-inspector-test.js');
let {session, contextGroup, Protocol} =
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
index df1078822d..ea687a1716 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
@@ -3,12 +3,12 @@ Tests GC object inspection.
Running test: test
Instantiating.
Waiting for wasm script (ignoring first non-wasm script).
-Setting breakpoint at offset 107 on script wasm://wasm/151aafd6
+Setting breakpoint at offset 109 on script wasm://wasm/b18cf04a
Calling main()
Paused:
-Script wasm://wasm/151aafd6 byte offset 107: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/b18cf04a byte offset 109: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at $main (0:107):
+at $main (0:109):
- scope (wasm-expression-stack):
0: Array ((ref $ArrC))
object details:
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
index 4306e20ec5..724e86080a 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
@@ -14,8 +14,10 @@ const module_bytes = [
0x00, 0x61, 0x73, 0x6d, 1, 0, 0, 0, // wasm magic
0x01, // type section
- 0x16, // section length
- 0x04, // number of types
+ 0x18, // section length
+ 0x01, // number of type section entries
+ 0x4f, // recursive type group
+ 0x04, // number of types in the recursive group
// type 0: struct $StrA (field ($byte i8) ($word i16) ($pointer (ref $StrB)))
0x5f, // struct
0x03, // field count
@@ -172,7 +174,7 @@ Protocol.Debugger.onPaused(async msg => {
var lineNumber = frame.location.lineNumber;
var columnNumber = frame.location.columnNumber;
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
- if (!/^wasm/.test(frame.url)) {
+ if (!/^wasm/.test(session.getCallFrameUrl(frame))) {
InspectorTest.log(' -- skipped');
continue;
}
@@ -219,7 +221,7 @@ InspectorTest.runAsyncTestSuite([
// Ignore javascript and full module wasm script, get scripts for functions.
const [, {params: wasm_script}] =
await Protocol.Debugger.onceScriptParsed(2);
- let offset = 107; // "local.set $varC" at the end.
+ let offset = 109; // "local.set $varC" at the end.
await setBreakpoint(offset, wasm_script.scriptId, wasm_script.url);
InspectorTest.log('Calling main()');
await WasmInspectorTest.evalWithUrl('instance.exports.main()', 'runWasm');
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
index 2d1669e3f6..bb3a01e096 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
@@ -4,7 +4,7 @@ Running test: test
Script wasm://wasm/38e28046 byte offset 51: Wasm opcode 0x20 (kExprLocalGet)
GC triggered
Debugger.resume
-Hello World (v8://test/instantiate:11:36)
+log: Hello World (v8://test/instantiate:11:36)
at bar (v8://test/instantiate:11:36)
at $wasm_A (wasm://wasm/38e28046:1:54)
at test (test.js:4:20)
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
index 90680a7b4a..9f8c252870 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --expose-gc
+// Flags: --expose-gc
utils.load('test/inspector/wasm-inspector-test.js');
let {session, contextGroup, Protocol} =
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
index 01d4b3018a..ebb627ae4d 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
@@ -82,3 +82,69 @@ Paused at wasm://wasm/c8e3a856 with reason "instrumentation".
Script wasm://wasm/c8e3a856 byte offset 33: Wasm opcode 0x01 (kExprNop)
Hit breakpoints: []
Done.
+
+Running test: testRemoveBeforeCompile
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Remove instrumentation breakpoint..
+Compiling wasm module.
+Instantiating module should not trigger a break.
+Done.
+
+Running test: testRemoveBeforeInstantiate
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Remove instrumentation breakpoint..
+Instantiating module should not trigger a break.
+Done.
+
+Running test: testRemoveAfterOnePause
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Instantiating module should trigger a break.
+Paused at v8://test/instantiate with reason "instrumentation".
+Hit breakpoints: []
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: []
+Remove instrumentation breakpoint..
+Compiling another wasm module.
+Instantiating module should not trigger a break.
+Done.
+
+Running test: testDisableEnable
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Disable debugger..
+Enable debugger
+Instantiating module should not trigger a break.
+Done.
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
index 35b528f250..8eca2fd304 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
@@ -12,8 +12,9 @@ Protocol.Debugger.onPaused(async msg => {
let top_frame = msg.params.callFrames[0];
let reason = msg.params.reason;
let hitBreakpoints = msg.params.hitBreakpoints;
- InspectorTest.log(`Paused at ${top_frame.url} with reason "${reason}".`);
- if (!top_frame.url.startsWith('v8://test/')) {
+ const url = session.getCallFrameUrl(top_frame);
+ InspectorTest.log(`Paused at ${url} with reason "${reason}".`);
+ if (!url.startsWith('v8://test/')) {
await session.logSourceLocation(top_frame.location);
}
// Report the hit breakpoints to make sure that it is empty, as
@@ -74,8 +75,7 @@ InspectorTest.runAsyncTestSuite([
async function testBreakInExportedFunction() {
const builder = new WasmModuleBuilder();
- const func =
- builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
await Protocol.Runtime.enable();
await Protocol.Debugger.enable();
@@ -99,8 +99,7 @@ InspectorTest.runAsyncTestSuite([
async function testBreakOnlyWithSourceMap() {
const builder = new WasmModuleBuilder();
- const func =
- builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
const bytes_no_source_map = builder.toArray();
builder.addCustomSection('sourceMappingURL', [3, 97, 98, 99]);
const bytes_with_source_map = builder.toArray();
@@ -129,4 +128,108 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Runtime.disable();
},
+ async function testRemoveBeforeCompile() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ const addMsg = await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'})
+ InspectorTest.logMessage(addMsg);
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint(
+ {breakpointId: addMsg.result.breakpointId});
+ InspectorTest.log('Compiling wasm module.');
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Instantiating module should not trigger a break.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testRemoveBeforeInstantiate() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ const addMsg = await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'})
+ InspectorTest.logMessage(addMsg);
+ InspectorTest.log('Compiling wasm module.');
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint(
+ {breakpointId: addMsg.result.breakpointId});
+ InspectorTest.log('Instantiating module should not trigger a break.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testRemoveAfterOnePause() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ const addMsg = await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'})
+ InspectorTest.logMessage(addMsg);
+ InspectorTest.log('Compiling wasm module.');
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Instantiating module should trigger a break.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log('Remove instrumentation breakpoint..');
+ await Protocol.Debugger.removeBreakpoint(
+ {breakpointId: addMsg.result.breakpointId});
+
+ InspectorTest.log('Compiling another wasm module.');
+ builder.addFunction('end', kSig_v_v).addBody([kExprNop]);
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Instantiating module should not trigger a break.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testDisableEnable() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ const addMsg = await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'})
+ InspectorTest.logMessage(addMsg);
+ InspectorTest.log('Compiling wasm module.');
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Disable debugger..');
+ await Protocol.Debugger.disable();
+ InspectorTest.log('Enable debugger');
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating module should not trigger a break.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ },
+
]);
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation-expected.txt
new file mode 100644
index 0000000000..6aaa1a05b5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation-expected.txt
@@ -0,0 +1,49 @@
+Test if breakpoints are hit that are set on instrumentation pause in wasm.
+
+Running test: testSetBreakpointOnInstrumentationPause
+Setting instrumentation breakpoint
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Instantiating module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Hit breakpoints: []
+Setting breakpoint at instrumentation break location
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: []
+Paused at wasm://wasm/20da547a with reason "other".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: ["4:0:26:4"]
+Done.
+
+Running test: testSetConditionalBreakpointTrueConditionOnInstrumentationPause
+Setting instrumentation breakpoint
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Instantiating module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Hit breakpoints: []
+Setting breakpoint at instrumentation break location
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: []
+Paused at wasm://wasm/20da547a with reason "other".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: ["4:0:26:4"]
+Done.
+
+Running test: testSetConditionalBreakpointFalseConditionOnInstrumentationPause
+Setting instrumentation breakpoint
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Hit breakpoints: []
+Instantiating module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Hit breakpoints: []
+Setting breakpoint at instrumentation break location
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Hit breakpoints: []
+Done.
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation.js
new file mode 100644
index 0000000000..c8a4f582ae
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation.js
@@ -0,0 +1,106 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test if breakpoints are hit that are set on instrumentation pause in wasm.');
+session.setupScriptMap();
+
+function setBreakpoint(msg, condition) {
+ const top_frame = msg.params.callFrames[0];
+ const reason = msg.params.reason;
+ const url = session.getCallFrameUrl(top_frame);
+ if (reason === 'instrumentation' && url.startsWith('wasm://')) {
+ const scriptId = top_frame.location.scriptId;
+ const columnNumber = top_frame.location.columnNumber;
+
+ InspectorTest.log('Setting breakpoint at instrumentation break location');
+ const breakpoint_info = {
+ 'location': {scriptId, 'lineNumber': 0, columnNumber}
+ };
+ if (condition) {
+ breakpoint_info.condition = condition;
+ }
+ return Protocol.Debugger.setBreakpoint(breakpoint_info);
+ }
+ return Promise.resolve();
+}
+
+async function handlePause(msg) {
+ const top_frame = msg.params.callFrames[0];
+ const reason = msg.params.reason;
+ const url = session.getCallFrameUrl(top_frame);
+ InspectorTest.log(`Paused at ${url} with reason "${reason}".`);
+ if (!url.startsWith('v8://test/')) {
+ await session.logSourceLocation(top_frame.location);
+ }
+ InspectorTest.log(
+ `Hit breakpoints: ${JSON.stringify(msg.params.hitBreakpoints)}`)
+ return Protocol.Debugger.resume();
+};
+
+// Helper function to run tests to check if we can successfully set and evaluate
+// breakpoints on an instrumentation pause.
+async function runSetBreakpointOnInstrumentationTest(condition) {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ InspectorTest.log('Compiling wasm module.');
+ WasmInspectorTest.compile(builder.toArray());
+
+ // First pause: compile script.
+ await handlePause(await Protocol.Debugger.oncePaused());
+
+ InspectorTest.log('Instantiating module.');
+ const evalPromise = WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+
+ // Second pause: instantiate script.
+ await handlePause(await Protocol.Debugger.oncePaused());
+
+ // Third pause: wasm script. This will set a breakpoint. Pass on a condition.
+ const msg = await Protocol.Debugger.oncePaused();
+ await setBreakpoint(msg, condition);
+ await handlePause(msg);
+
+ // Fourth pause: wasm script, if condition evaluates to true.
+ if (!condition || eval(condition)) {
+ await handlePause(await Protocol.Debugger.oncePaused());
+ }
+
+ InspectorTest.log('Done.');
+ await evalPromise;
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+}
+
+InspectorTest.runAsyncTestSuite([
+ // Test if we can set a breakpoint on the first breakable location (which is
+ // the same location as where the instrumentation breakpoint hits) and
+ // successfully hit the breakpoint.
+ async function testSetBreakpointOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest();
+ },
+
+ // Test if we can set a conditional breakpoint on the first breakable location
+ // and successfully hit the breakpoint.
+ async function
+ testSetConditionalBreakpointTrueConditionOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest('3 < 5');
+ },
+
+ // Test if we can set a conditional breakpoint on the first breakable location
+ // which evaluates to false, and therefore does not trigger a pause.
+ async function
+ testSetConditionalBreakpointFalseConditionOnInstrumentationPause() {
+ await runSetBreakpointOnInstrumentationTest('3 > 5');
+ },
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index a5bc8b333e..7f5bd096d9 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -58,7 +58,8 @@ Protocol.Debugger.onPaused(async msg => {
var lineNumber = frame.location.lineNumber;
var columnNumber = frame.location.columnNumber;
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
- if (!/^wasm/.test(frame.url)) {
+ var url = session.getCallFrameUrl(frame);
+ if (!/^wasm/.test(url)) {
InspectorTest.log(' -- skipped');
continue;
}
@@ -73,10 +74,10 @@ Protocol.Debugger.onPaused(async msg => {
if (first_iteration && loc.columnNumber == func_a.body_offset) {
// Check that setting breakpoints on active instances of A and B takes
// effect immediately.
- setBreakpoint(func_a.body_offset + 1, loc.scriptId, frame.url);
+ setBreakpoint(func_a.body_offset + 1, loc.scriptId, url);
// All of the following breakpoints are in reachable code, except offset 17.
for (offset of [18, 17, 11, 10, 8, 6, 2, 4]) {
- setBreakpoint(func_b.body_offset + offset, loc.scriptId, frame.url);
+ setBreakpoint(func_b.body_offset + offset, loc.scriptId, url);
}
first_iteration = false;
}
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap.js b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
index 182b2a16b9..9244bee77b 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
@@ -34,7 +34,8 @@ Protocol.Debugger.onPaused(async msg => {
for (let [nr, frame] of msg.params.callFrames.entries()) {
InspectorTest.log(`--- ${nr} ---`);
await session.logSourceLocation(frame.location);
- if (/^wasm/.test(frame.url)) await printLocalScope(frame);
+ if (/^wasm/.test(session.getCallFrameUrl(frame)))
+ await printLocalScope(frame);
}
InspectorTest.log('-------------');
let action = actions.shift();
diff --git a/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs-expected.txt b/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs-expected.txt
new file mode 100644
index 0000000000..b6aa513a42
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs-expected.txt
@@ -0,0 +1,7 @@
+Tests weakness of edges from JSWeakRef and WeakCell.
+
+Running test: testHeapSnapshotJSWeakRefs
+WeakRef target edge type: weak
+WeakCell target edge type: weak
+WeakCell holdings edge type: hidden
+WeakCell unregister token edge type: weak
diff --git a/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs.js b/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs.js
new file mode 100644
index 0000000000..37a08776dd
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs.js
@@ -0,0 +1,129 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests weakness of edges from JSWeakRef and WeakCell.');
+
+const kNodeName = 1;
+const kNodeEdgeCount = 4;
+const kNodeSize = 7;
+const kEdgeType = 0;
+const kEdgeName = 1;
+const kEdgeTarget = 2;
+const kEdgeSize = 3;
+
+function EdgeName(snapshot, edgeIndex) {
+ return snapshot['strings'][snapshot['edges'][edgeIndex + kEdgeName]];
+}
+
+function EdgeTarget(snapshot, edgeIndex) {
+ return snapshot['edges'][edgeIndex + kEdgeTarget];
+}
+
+function EdgeType(snapshot, edgeIndex) {
+ return snapshot['edges'][edgeIndex + kEdgeType];
+}
+
+function EdgeCount(snapshot, nodeIndex) {
+ return snapshot['nodes'][nodeIndex + kNodeEdgeCount];
+}
+
+function NodeName(snapshot, nodeIndex) {
+ return snapshot['strings'][snapshot['nodes'][nodeIndex + kNodeName]];
+}
+
+function NodeEdges(snapshot, nodeIndex) {
+ let startEdgeIndex = 0;
+ for (let i = 0; i < nodeIndex; i += kNodeSize) {
+ startEdgeIndex += EdgeCount(snapshot, i);
+ }
+ let endEdgeIndex = startEdgeIndex + EdgeCount(snapshot, nodeIndex);
+ let result = [];
+ for (let i = startEdgeIndex; i < endEdgeIndex; ++i) {
+ result.push(i * kEdgeSize);
+ }
+ return result;
+}
+
+function NodeByName(snapshot, name, start = 0) {
+ let count = snapshot['nodes'].length / kNodeSize;
+ for (let i = start; i < count; i++) {
+ if (NodeName(snapshot, i * kNodeSize) == name) return i * kNodeSize;
+ }
+ InspectorTest.log(`Cannot find node ${name}`);
+ return 0;
+}
+
+function FindEdge(snapshot, sourceIndex, targetName) {
+ let edges = NodeEdges(snapshot, sourceIndex);
+ for (let edge of edges) {
+ let target = EdgeTarget(snapshot, edge);
+ if (NodeName(snapshot, target) == targetName) return edge;
+ }
+ InspectorTest.log(
+ `Cannot find edge between ${sourceIndex} and ${targetName}`);
+ return 0;
+}
+
+function EdgeByName(snapshot, name, start = 0) {
+ let count = snapshot.edges.length / kEdgeSize;
+ for (let i = start; i < count; i++) {
+ if (EdgeName(snapshot, i * kEdgeSize) == name) return i * kEdgeSize;
+ }
+ InspectorTest.log(`Cannot find edge ${name}`);
+ return 0;
+}
+
+function EdgeTypeString(snapshot, edgeIndex) {
+ return snapshot.snapshot.meta.edge_types[0][EdgeType(snapshot, edgeIndex)];
+}
+
+contextGroup.addScript(`
+class Class1 {}
+class Class2 {}
+class Class3 {}
+class Class4 {}
+var class1Instance = new Class1();
+var class2Instance = new Class2();
+var class3Instance = new Class3();
+var class4Instance = new Class4();
+var weakRef = new WeakRef(class1Instance);
+var finalizationRegistry = new FinalizationRegistry(()=>{});
+finalizationRegistry.register(class2Instance, class3Instance, class4Instance);
+//# sourceURL=test.js`);
+
+Protocol.HeapProfiler.enable();
+
+InspectorTest.runAsyncTestSuite([
+ async function testHeapSnapshotJSWeakRefs() {
+ let snapshot_string = '';
+ function onChunk(message) {
+ snapshot_string += message['params']['chunk'];
+ }
+ Protocol.HeapProfiler.onAddHeapSnapshotChunk(onChunk)
+ await Protocol.HeapProfiler.takeHeapSnapshot({ reportProgress: false })
+ let snapshot = JSON.parse(snapshot_string);
+
+ // There should be a single edge named "weakRef", representing the global
+ // variable of that name. It contains a weak ref to an instance of Class1.
+ let weakRef = EdgeTarget(snapshot, EdgeByName(snapshot, "weakRef"));
+ let edge = FindEdge(snapshot, weakRef, "Class1");
+ let edgeType = EdgeTypeString(snapshot, edge);
+ InspectorTest.log(`WeakRef target edge type: ${edgeType}`);
+
+ // There should be a WeakCell representing the item registered in the
+ // FinalizationRegistry. It retains the holdings strongly, but has weak
+ // references to the target and unregister token.
+ let weakCell = NodeByName(snapshot, "system / WeakCell");
+ edge = FindEdge(snapshot, weakCell, "Class2");
+ edgeType = EdgeTypeString(snapshot, edge);
+ InspectorTest.log(`WeakCell target edge type: ${edgeType}`);
+ edge = FindEdge(snapshot, weakCell, "Class3");
+ edgeType = EdgeTypeString(snapshot, edge);
+ InspectorTest.log(`WeakCell holdings edge type: ${edgeType}`);
+ edge = FindEdge(snapshot, weakCell, "Class4");
+ edgeType = EdgeTypeString(snapshot, edge);
+ InspectorTest.log(`WeakCell unregister token edge type: ${edgeType}`);
+ }
+]);
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 81395445ac..6c74b36a7e 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -418,6 +418,33 @@ bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
return accessing_context.IsEmpty();
}
+class ConsoleExtension : public InspectorIsolateData::SetupGlobalTask {
+ public:
+ ~ConsoleExtension() override = default;
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ v8::Local<v8::String> name =
+ v8::String::NewFromUtf8Literal(isolate, "console");
+ global->SetAccessor(name, &ConsoleGetterCallback, nullptr, {}, v8::DEFAULT,
+ v8::DontEnum);
+ }
+
+ private:
+ static void ConsoleGetterCallback(
+ v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::String> name =
+ v8::String::NewFromUtf8Literal(isolate, "console");
+ v8::Local<v8::Object> console = context->GetExtrasBindingObject()
+ ->Get(context, name)
+ .ToLocalChecked()
+ .As<v8::Object>();
+ info.GetReturnValue().Set(console);
+ }
+};
+
class InspectorExtension : public InspectorIsolateData::SetupGlobalTask {
public:
~InspectorExtension() override = default;
@@ -750,9 +777,9 @@ int InspectorTestMain(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<Platform> platform(platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
FLAG_abort_on_contradictory_flags = true;
@@ -777,6 +804,7 @@ int InspectorTestMain(int argc, char* argv[]) {
{
InspectorIsolateData::SetupGlobalTasks frontend_extensions;
frontend_extensions.emplace_back(new UtilsExtension());
+ frontend_extensions.emplace_back(new ConsoleExtension());
TaskRunner frontend_runner(std::move(frontend_extensions),
kFailOnUncaughtExceptions, &ready_semaphore,
startup_data.data ? &startup_data : nullptr,
@@ -791,6 +819,7 @@ int InspectorTestMain(int argc, char* argv[]) {
InspectorIsolateData::SetupGlobalTasks backend_extensions;
backend_extensions.emplace_back(new SetTimeoutExtension());
+ backend_extensions.emplace_back(new ConsoleExtension());
backend_extensions.emplace_back(new InspectorExtension());
TaskRunner backend_runner(
std::move(backend_extensions), kStandardPropagateUncaughtExceptions,
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index c74a09a3de..b2e8be8ea8 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -187,6 +187,10 @@
'cpu-profiler/coverage-block': [SKIP],
'runtime/internal-properties-entries': [SKIP],
+ # TODO(v8:12590): the test became too slow with external code space enabled.
+ # Skip it for now.
+ 'debugger/wasm-step-a-lot': [SKIP],
+
# Skip tests that might fail with concurrent allocation
'debugger/pause-on-oom-wide': [SKIP],
}], # stress_concurrent_allocation
@@ -515,10 +519,9 @@
}], # third_party_heap
##############################################################################
-['variant == turboprop or variant == turboprop_as_toptier or variant == future or (tsan and not concurrent_marking)', {
-
+['tsan and not concurrent_marking', {
'cpu-profiler/coverage-block': [SKIP],
-}], # variant == turboprop or variant = turboprop_as_toptier
+}], # tsan and not concurrent_marking
##############################################################################
['no_i18n == True', {
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 976a862907..e698ac45dd 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -456,6 +456,25 @@ void InspectorIsolateData::consoleAPIMessage(
const v8_inspector::StringView& url, unsigned lineNumber,
unsigned columnNumber, v8_inspector::V8StackTrace* stack) {
if (!log_console_api_message_calls_) return;
+ switch (level) {
+ case v8::Isolate::kMessageLog:
+ fprintf(stdout, "log: ");
+ break;
+ case v8::Isolate::kMessageDebug:
+ fprintf(stdout, "debug: ");
+ break;
+ case v8::Isolate::kMessageInfo:
+ fprintf(stdout, "info: ");
+ break;
+ case v8::Isolate::kMessageError:
+ fprintf(stdout, "error: ");
+ break;
+ case v8::Isolate::kMessageWarning:
+ fprintf(stdout, "warning: ");
+ break;
+ case v8::Isolate::kMessageAll:
+ break;
+ }
Print(isolate_.get(), message);
fprintf(stdout, " (");
Print(isolate_.get(), url);
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index ac6e1405f4..c0f744c41c 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -264,11 +264,15 @@ InspectorTest.Session = class {
this._scriptMap = new Map();
}
+ getCallFrameUrl(frame) {
+ const {scriptId} = frame.location ? frame.location : frame;
+ return (this._scriptMap.get(scriptId) ?? frame).url;
+ }
+
logCallFrames(callFrames) {
for (var frame of callFrames) {
var functionName = frame.functionName || '(anonymous)';
- var scriptId = frame.location ? frame.location.scriptId : frame.scriptId;
- var url = frame.url ? frame.url : this._scriptMap.get(scriptId).url;
+ var url = this.getCallFrameUrl(frame);
var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
InspectorTest.log(`${functionName} (${url}:${lineNumber}:${columnNumber})`);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1220203-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1220203-expected.txt
new file mode 100644
index 0000000000..8db19670c8
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1220203-expected.txt
@@ -0,0 +1,8 @@
+Regression test for crbug.com/1220203.
+
+Running test: testBreakOnUncaughtException
+Uncaught exception at
+function throwError() {
+ #throw new Error();
+}
+
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1220203.js b/deps/v8/test/inspector/regress/regress-crbug-1220203.js
new file mode 100644
index 0000000000..dcaa5bfef0
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1220203.js
@@ -0,0 +1,42 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Regression test for crbug.com/1220203.');
+
+contextGroup.addScript(`
+async function *generatorFunction() {
+ await 1;
+ throwError();
+}
+
+function throwError() {
+ throw new Error();
+}
+
+async function main() {
+ for await (const value of generatorFunction()) {}
+}`);
+
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testBreakOnUncaughtException() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ Protocol.Debugger.setPauseOnExceptions({state: 'uncaught'}),
+ ]);
+ const pausedPromise = Protocol.Debugger.oncePaused();
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'main()', awaitPromise: true});
+ const {params: {callFrames, data}} = await pausedPromise;
+ InspectorTest.log(`${data.uncaught ? 'Uncaught' : 'Caught'} exception at`);
+ await session.logSourceLocation(callFrames[0].location);
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Runtime.disable(),
+ Protocol.Debugger.disable(),
+ ]);
+ },
+]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1281031-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1281031-expected.txt
new file mode 100644
index 0000000000..dea907b6de
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1281031-expected.txt
@@ -0,0 +1,2 @@
+Did not crash upon invalid non-dictionary state passed to utils.connectSession()
+
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1281031.js b/deps/v8/test/inspector/regress/regress-crbug-1281031.js
new file mode 100644
index 0000000000..a58d1d1e02
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1281031.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const contextGroupId = utils.createContextGroup();
+const sessionId = utils.connectSession(contextGroupId, '0', () => {});
+utils.disconnectSession(sessionId);
+utils.print('Did not crash upon invalid non-dictionary state passed to utils.connectSession()');
+utils.quit();
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1283049-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1283049-expected.txt
new file mode 100644
index 0000000000..9d5665256e
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1283049-expected.txt
@@ -0,0 +1,5 @@
+Regression test for crbug/1283049
+
+Running test: test
+foo (foo.js:0:17)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1283049.js b/deps/v8/test/inspector/regress/regress-crbug-1283049.js
new file mode 100644
index 0000000000..a9798460e1
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1283049.js
@@ -0,0 +1,29 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Regression test for crbug/1283049');
+
+session.setupScriptMap();
+
+contextGroup.addInlineScript(
+ `function foo() { debugger; }
+//# sourceURL=foo.js`,
+ 'regress-crbug-1283049.js');
+
+InspectorTest.runAsyncTestSuite([async function test() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ ]);
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'foo()'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Promise.all([
+ Protocol.Debugger.resume(),
+ evalPromise,
+ Protocol.Runtime.disable(),
+ Protocol.Debugger.disable(),
+ ]);
+}]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1290861-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1290861-expected.txt
new file mode 100644
index 0000000000..ec6f20d0bc
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1290861-expected.txt
@@ -0,0 +1,4 @@
+Ensure that catch prediction is correct for [[Reject]] handlers.
+
+Running test: test
+Uncaught exception in rejectHandler
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1290861.js b/deps/v8/test/inspector/regress/regress-crbug-1290861.js
new file mode 100644
index 0000000000..dd1e1b73f5
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1290861.js
@@ -0,0 +1,39 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Ensure that catch prediction is correct for [[Reject]] handlers.');
+
+contextGroup.addScript(`function throwInPromiseCaughtAndRethrown() {
+ var reject;
+ var promise = new Promise(function(res, rej) { reject = rej; }).catch(
+ function rejectHandler(e) {
+ throw e;
+ }
+ );
+ reject(new Error());
+ return promise;
+}`);
+
+Protocol.Debugger.onPaused(({params: {callFrames, data}}) => {
+ InspectorTest.log(`${data.uncaught ? 'Uncaught' : 'Caught'} exception in ${
+ callFrames[0].functionName}`);
+ Protocol.Debugger.resume();
+});
+
+InspectorTest.runAsyncTestSuite([async function test() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ Protocol.Debugger.setPauseOnExceptions({state: 'uncaught'}),
+ ]);
+ await Protocol.Runtime.evaluate({
+ awaitPromise: true,
+ expression: 'throwInPromiseCaughtAndRethrown()',
+ });
+ await Promise.all([
+ Protocol.Runtime.disable(),
+ Protocol.Debugger.disable(),
+ ]);
+}]);
diff --git a/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt b/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt
index 699b390a8d..440771946d 100644
--- a/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt
+++ b/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt
@@ -1,7 +1,7 @@
Checks that we passed correct arguments in V8InspectorClient::consoleAPIMessage. Note: lines and columns are 1-based.
-42 (:1:9)
+log: 42 (:1:9)
at (anonymous function) (:1:9)
-239 (:13:15)
+info: 239 (:13:15)
at b (:13:15)
at a (:15:5)
at consoleTrace (:17:3)
diff --git a/deps/v8/test/inspector/runtime/console-context-expected.txt b/deps/v8/test/inspector/runtime/console-context-expected.txt
index 658238aaa2..8a66dca15e 100644
--- a/deps/v8/test/inspector/runtime/console-context-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-context-expected.txt
@@ -10,28 +10,28 @@ console.context description:
}
console.context() methods:
[
- [0] : debug
- [1] : error
- [2] : info
- [3] : log
- [4] : warn
+ [0] : assert
+ [1] : clear
+ [2] : count
+ [3] : countReset
+ [4] : debug
[5] : dir
[6] : dirXml
- [7] : table
- [8] : trace
- [9] : group
- [10] : groupCollapsed
- [11] : groupEnd
- [12] : clear
- [13] : count
- [14] : countReset
- [15] : assert
- [16] : profile
- [17] : profileEnd
+ [7] : error
+ [8] : group
+ [9] : groupCollapsed
+ [10] : groupEnd
+ [11] : info
+ [12] : log
+ [13] : profile
+ [14] : profileEnd
+ [15] : table
+ [16] : time
+ [17] : timeEnd
[18] : timeLog
- [19] : time
- [20] : timeEnd
- [21] : timeStamp
+ [19] : timeStamp
+ [20] : trace
+ [21] : warn
]
Running test: testDefaultConsoleContext
diff --git a/deps/v8/test/inspector/runtime/console-context.js b/deps/v8/test/inspector/runtime/console-context.js
index 74996ae595..6d076357c5 100644
--- a/deps/v8/test/inspector/runtime/console-context.js
+++ b/deps/v8/test/inspector/runtime/console-context.js
@@ -11,9 +11,14 @@ InspectorTest.runAsyncTestSuite([
expression: 'console.context'});
InspectorTest.logMessage(result);
+ // Enumerate the methods alpha-sorted to make the test
+ // independent of the (unspecified) enumeration order
+ // of console.context() methods.
InspectorTest.log('console.context() methods:');
- var {result:{result:{value}}} = await Protocol.Runtime.evaluate({
- expression: 'Object.keys(console.context())', returnByValue: true});
+ var {result: {result: {value}}} = await Protocol.Runtime.evaluate({
+ expression: 'Object.keys(console.context()).sort()',
+ returnByValue: true
+ });
InspectorTest.logMessage(value);
},
diff --git a/deps/v8/test/inspector/runtime/console-formatter-expected.txt b/deps/v8/test/inspector/runtime/console-formatter-expected.txt
new file mode 100644
index 0000000000..02a25d5a0d
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-formatter-expected.txt
@@ -0,0 +1,700 @@
+Test for console formatting
+
+Running test: testFloatFormatter
+Testing console.debug('%f', 3.1415)...
+debug[
+ [0] : {
+ type : string
+ value : %f
+ }
+ [1] : {
+ description : 3.1415
+ type : number
+ value : 3.1415
+ }
+]
+Testing console.error('%f', '3e2')...
+error[
+ [0] : {
+ type : string
+ value : %f
+ }
+ [1] : {
+ description : 300
+ type : number
+ value : 300
+ }
+]
+Testing console.info('%f', Symbol('1.1'))...
+info[
+ [0] : {
+ type : string
+ value : %f
+ }
+ [1] : {
+ description : NaN
+ type : number
+ unserializableValue : NaN
+ }
+]
+Testing console.log('%f', {toString() { return '42'; }})...
+log[
+ [0] : {
+ type : string
+ value : %f
+ }
+ [1] : {
+ description : 42
+ type : number
+ value : 42
+ }
+]
+Testing console.trace('%f', {[Symbol.toPrimitive]() { return 2.78; }})...
+trace[
+ [0] : {
+ type : string
+ value : %f
+ }
+ [1] : {
+ description : 2.78
+ type : number
+ value : 2.78
+ }
+]
+Testing console.warn('%f', {toString() { throw new Error(); }})...
+{
+ columnNumber : 33
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:40) at parseFloat (<anonymous>) at console.warn (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 39
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+
+Running test: testIntegerFormatter
+Testing console.debug('%d', 42)...
+debug[
+ [0] : {
+ type : string
+ value : %d
+ }
+ [1] : {
+ description : 42
+ type : number
+ value : 42
+ }
+]
+Testing console.error('%i', '987654321')...
+error[
+ [0] : {
+ type : string
+ value : %i
+ }
+ [1] : {
+ description : 987654321
+ type : number
+ value : 987654321
+ }
+]
+Testing console.info('%d', Symbol('12345'))...
+info[
+ [0] : {
+ type : string
+ value : %d
+ }
+ [1] : {
+ description : NaN
+ type : number
+ unserializableValue : NaN
+ }
+]
+Testing console.log('%i', {toString() { return '42'; }})...
+log[
+ [0] : {
+ type : string
+ value : %i
+ }
+ [1] : {
+ description : 42
+ type : number
+ value : 42
+ }
+]
+Testing console.trace('%d', {[Symbol.toPrimitive]() { return 256; }})...
+trace[
+ [0] : {
+ type : string
+ value : %d
+ }
+ [1] : {
+ description : 256
+ type : number
+ value : 256
+ }
+]
+Testing console.warn('%i', {toString() { throw new Error(); }})...
+{
+ columnNumber : 33
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:40) at parseInt (<anonymous>) at console.warn (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 39
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+
+Running test: testStringFormatter
+Testing console.debug('%s', 42)...
+debug[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : 42
+ }
+]
+Testing console.error('%s', 'Test string')...
+error[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : Test string
+ }
+]
+Testing console.info('%s', Symbol('Test symbol'))...
+info[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : Symbol(Test symbol)
+ }
+]
+Testing console.log('%s', {toString() { return 'Test object'; }})...
+log[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : Test object
+ }
+]
+Testing console.trace('%s', {[Symbol.toPrimitive]() { return true; }})...
+trace[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : true
+ }
+]
+Testing console.warn('%s', {toString() { throw new Error(); }})...
+{
+ columnNumber : 33
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:40) at String (<anonymous>) at console.warn (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 39
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+
+Running test: testOtherFormatters
+Testing console.debug('%c', 'color:red')...
+debug[
+ [0] : {
+ type : string
+ value : %c
+ }
+ [1] : {
+ type : string
+ value : color:red
+ }
+]
+Testing console.error('%o', {toString() { throw new Error(); }})...
+error[
+ [0] : {
+ type : string
+ value : %o
+ }
+ [1] : {
+ className : Object
+ description : Object
+ objectId : 1.1.7
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : toString
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+]
+Testing console.info('%O', {toString() { throw new Error(); }})...
+info[
+ [0] : {
+ type : string
+ value : %O
+ }
+ [1] : {
+ className : Object
+ description : Object
+ objectId : 1.1.8
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : toString
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+]
+Testing console.log('We have reached 100% of our users', 'with this!')...
+log[
+ [0] : {
+ type : string
+ value : We have reached 100% of our users
+ }
+ [1] : {
+ type : string
+ value : with this!
+ }
+]
+
+Running test: testMultipleFormatters
+Testing console.debug('%s%some Text%i', '', 'S', 1)...
+debug[
+ [0] : {
+ type : string
+ value : %s%some Text%i
+ }
+ [1] : {
+ type : string
+ value :
+ }
+ [2] : {
+ type : string
+ value : S
+ }
+ [3] : {
+ description : 1
+ type : number
+ value : 1
+ }
+]
+Testing console.error('%c%i%c%s', 'color:red', 42, 'color:green', 'Message!')...
+error[
+ [0] : {
+ type : string
+ value : %c%i%c%s
+ }
+ [1] : {
+ type : string
+ value : color:red
+ }
+ [2] : {
+ description : 42
+ type : number
+ value : 42
+ }
+ [3] : {
+ type : string
+ value : color:green
+ }
+ [4] : {
+ type : string
+ value : Message!
+ }
+]
+Testing console.info('%s', {toString() { return '%i% %s %s'; }}, {toString() { return '100'; }}, 'more', 'arguments')...
+info[
+ [0] : {
+ type : string
+ value : %s
+ }
+ [1] : {
+ type : string
+ value : %i% %s %s
+ }
+ [2] : {
+ description : 100
+ type : number
+ value : 100
+ }
+ [3] : {
+ type : string
+ value : more
+ }
+ [4] : {
+ type : string
+ value : arguments
+ }
+]
+Testing console.log('%s %s', {toString() { return 'Too %s %s'; }}, 'many', 'specifiers')...
+log[
+ [0] : {
+ type : string
+ value : %s %s
+ }
+ [1] : {
+ type : string
+ value : Too %s %s
+ }
+ [2] : {
+ type : string
+ value : many
+ }
+ [3] : {
+ type : string
+ value : specifiers
+ }
+]
+Testing console.trace('%s %f', {toString() { return '%s'; }}, {[Symbol.toPrimitive]() { return 'foo'; }}, 1, 'Test')...
+trace[
+ [0] : {
+ type : string
+ value : %s %f
+ }
+ [1] : {
+ type : string
+ value : %s
+ }
+ [2] : {
+ type : string
+ value : foo
+ }
+ [3] : {
+ description : 1
+ type : number
+ value : 1
+ }
+ [4] : {
+ type : string
+ value : Test
+ }
+]
+
+Running test: testAssert
+Testing console.assert(true, '%s', {toString() { throw new Error(); }})...
+Testing console.assert(false, '%s %i', {toString() { return '%s'; }}, {[Symbol.toPrimitive]() { return 1; }}, 1, 'Test')...
+assert[
+ [0] : {
+ type : string
+ value : %s %i
+ }
+ [1] : {
+ type : string
+ value : %s
+ }
+ [2] : {
+ type : string
+ value : 1
+ }
+ [3] : {
+ description : 1
+ type : number
+ value : 1
+ }
+ [4] : {
+ type : string
+ value : Test
+ }
+]
+Testing console.assert(false, '%s', {toString() { throw new Error(); }})...
+{
+ columnNumber : 42
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:49) at String (<anonymous>) at console.assert (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 48
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+
+Running test: testGroup
+Testing console.group('%s', {toString() { throw new Error(); }})...
+{
+ columnNumber : 34
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:41) at String (<anonymous>) at console.group (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 40
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+Testing console.group('%s%i', 'Gruppe', {[Symbol.toPrimitive]() { return 1; }})...
+startGroup[
+ [0] : {
+ type : string
+ value : %s%i
+ }
+ [1] : {
+ type : string
+ value : Gruppe
+ }
+ [2] : {
+ description : 1
+ type : number
+ value : 1
+ }
+]
+Testing console.groupEnd()...
+endGroup[
+ [0] : {
+ type : string
+ value : console.groupEnd
+ }
+]
+
+Running test: testGroupCollapsed
+Testing console.groupCollapsed('%d', {toString() { throw new Error(); }})...
+{
+ columnNumber : 43
+ exception : {
+ className : Error
+ description : Error at Object.toString (<anonymous>:1:50) at parseInt (<anonymous>) at console.groupCollapsed (<anonymous>) at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 49
+ functionName : toString
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+}
+Testing console.groupCollapsed('%s%f', {[Symbol.toPrimitive]() { return 'Gruppe'; }}, 3.1415)...
+startGroupCollapsed[
+ [0] : {
+ type : string
+ value : %s%f
+ }
+ [1] : {
+ type : string
+ value : Gruppe
+ }
+ [2] : {
+ description : 3.1415
+ type : number
+ value : 3.1415
+ }
+]
+Testing console.groupEnd()...
+endGroup[
+ [0] : {
+ type : string
+ value : console.groupEnd
+ }
+]
+
+Running test: testNonStandardFormatSpecifiers
+Testing console.log('%_ %s', {toString() { throw new Error(); }}, {toString() { return 'foo'; }})...
+log[
+ [0] : {
+ type : string
+ value : %_ %s
+ }
+ [1] : {
+ className : Object
+ description : Object
+ objectId : 1.1.15
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : toString
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ [2] : {
+ type : string
+ value : foo
+ }
+]
+Testing console.log('%%s', {toString() { throw new Error(); }})...
+log[
+ [0] : {
+ type : string
+ value : %%s
+ }
+ [1] : {
+ className : Object
+ description : Object
+ objectId : 1.1.16
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : toString
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+]
diff --git a/deps/v8/test/inspector/runtime/console-formatter.js b/deps/v8/test/inspector/runtime/console-formatter.js
new file mode 100644
index 0000000000..34d4b4a368
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-formatter.js
@@ -0,0 +1,144 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test for console formatting');
+
+Protocol.Runtime.onConsoleAPICalled(({params: {args, type}}) => {
+ InspectorTest.logObject(args, type);
+});
+
+async function test(expression) {
+ InspectorTest.logMessage(`Testing ${expression}...`);
+ const {result} = await Protocol.Runtime.evaluate({expression});
+ if ('exceptionDetails' in result) {
+ InspectorTest.logMessage(result.exceptionDetails);
+ }
+}
+
+InspectorTest.runAsyncTestSuite([
+ async function testFloatFormatter() {
+ await Protocol.Runtime.enable();
+ await test(`console.debug('%f', 3.1415)`);
+ await test(`console.error('%f', '3e2')`);
+ await test(`console.info('%f', Symbol('1.1'))`);
+ await test(`console.log('%f', {toString() { return '42'; }})`);
+ await test(
+ `console.trace('%f', {[Symbol.toPrimitive]() { return 2.78; }})`);
+ await test(`console.warn('%f', {toString() { throw new Error(); }})`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testIntegerFormatter() {
+ await Protocol.Runtime.enable();
+ await test(`console.debug('%d', 42)`);
+ await test(`console.error('%i', '987654321')`);
+ await test(`console.info('%d', Symbol('12345'))`);
+ await test(`console.log('%i', {toString() { return '42'; }})`);
+ await test(`console.trace('%d', {[Symbol.toPrimitive]() { return 256; }})`);
+ await test(`console.warn('%i', {toString() { throw new Error(); }})`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testStringFormatter() {
+ await Protocol.Runtime.enable();
+ await test(`console.debug('%s', 42)`);
+ await test(`console.error('%s', 'Test string')`);
+ await test(`console.info('%s', Symbol('Test symbol'))`);
+ await test(`console.log('%s', {toString() { return 'Test object'; }})`);
+ await test(
+ `console.trace('%s', {[Symbol.toPrimitive]() { return true; }})`);
+ await test(`console.warn('%s', {toString() { throw new Error(); }})`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testOtherFormatters() {
+ await Protocol.Runtime.enable();
+ await test(`console.debug('%c', 'color:red')`);
+ await test(`console.error('%o', {toString() { throw new Error(); }})`);
+ await test(`console.info('%O', {toString() { throw new Error(); }})`);
+ await test(
+ `console.log('We have reached 100% of our users', 'with this!')`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testMultipleFormatters() {
+ await Protocol.Runtime.enable();
+ await test(`console.debug('%s%some Text%i', '', 'S', 1)`);
+ await test(
+ `console.error('%c%i%c%s', 'color:red', 42, 'color:green', 'Message!')`);
+ await test(
+ `console.info('%s', {toString() { return '%i% %s %s'; }}, {toString() { return '100'; }}, 'more', 'arguments')`);
+ await test(
+ `console.log('%s %s', {toString() { return 'Too %s %s'; }}, 'many', 'specifiers')`);
+ await test(
+ `console.trace('%s %f', {toString() { return '%s'; }}, {[Symbol.toPrimitive]() { return 'foo'; }}, 1, 'Test')`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testAssert() {
+ await Protocol.Runtime.enable();
+ await test(
+ `console.assert(true, '%s', {toString() { throw new Error(); }})`);
+ await test(
+ `console.assert(false, '%s %i', {toString() { return '%s'; }}, {[Symbol.toPrimitive]() { return 1; }}, 1, 'Test')`);
+ await test(
+ `console.assert(false, '%s', {toString() { throw new Error(); }})`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testGroup() {
+ await Protocol.Runtime.enable();
+ await test(`console.group('%s', {toString() { throw new Error(); }})`);
+ await test(
+ `console.group('%s%i', 'Gruppe', {[Symbol.toPrimitive]() { return 1; }})`);
+ await test(`console.groupEnd()`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testGroupCollapsed() {
+ await Protocol.Runtime.enable();
+ await test(
+ `console.groupCollapsed('%d', {toString() { throw new Error(); }})`);
+ await test(
+ `console.groupCollapsed('%s%f', {[Symbol.toPrimitive]() { return 'Gruppe'; }}, 3.1415)`);
+ await test(`console.groupEnd()`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testNonStandardFormatSpecifiers() {
+ await Protocol.Runtime.enable();
+ await test(
+ `console.log('%_ %s', {toString() { throw new Error(); }}, {toString() { return 'foo'; }})`);
+ await test(`console.log('%%s', {toString() { throw new Error(); }})`);
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/console-message-before-enable-expected.txt b/deps/v8/test/inspector/runtime/console-message-before-enable-expected.txt
new file mode 100644
index 0000000000..33fa13f023
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-message-before-enable-expected.txt
@@ -0,0 +1,93 @@
+Checks that console messages before Runtime.enable include a single stack frame
+
+Running test: testEnable
+{
+ args : [
+ [0] : {
+ type : string
+ value : Error on toplevel
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 10
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : error
+}
+{
+ args : [
+ [0] : {
+ type : string
+ value : Hello from foo!
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : foo
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+}
+{
+ args : [
+ [0] : {
+ type : string
+ value : Hello from bar!
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : bar
+ lineNumber : 6
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : trace
+}
+{
+ args : [
+ [0] : {
+ type : string
+ value : Hello from foo!
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : foo
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+}
+
+Running test: testEnableAfterDiscard
diff --git a/deps/v8/test/inspector/runtime/console-message-before-enable.js b/deps/v8/test/inspector/runtime/console-message-before-enable.js
new file mode 100644
index 0000000000..0dbe8058ae
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-message-before-enable.js
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks that console messages before Runtime.enable include a single stack frame');
+
+contextGroup.addScript(`
+function foo() {
+ console.log("Hello from foo!");
+}
+
+function bar() {
+ console.trace("Hello from bar!");
+ foo();
+}
+
+console.error('Error on toplevel');
+foo();
+bar();
+//# sourceURL=test.js`);
+
+Protocol.Runtime.onConsoleAPICalled(
+ ({params}) => InspectorTest.logMessage(params));
+
+InspectorTest.runAsyncTestSuite([
+ async function testEnable() {
+ await Protocol.Runtime.enable();
+ await Protocol.Runtime.disable();
+ },
+
+ async function testEnableAfterDiscard() {
+ await Protocol.Runtime.discardConsoleEntries();
+ await Protocol.Runtime.enable();
+ await Protocol.Runtime.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/error-stack-expected.txt b/deps/v8/test/inspector/runtime/error-stack-expected.txt
new file mode 100644
index 0000000000..27b1a0b8d8
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-stack-expected.txt
@@ -0,0 +1,420 @@
+Checks that error.stack works correctly
+
+Running test: testErrorStackWithRuntimeDisabled
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testErrorStackWithRuntimeEnabled
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [10] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [11] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [12] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [13] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [14] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [15] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [16] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [17] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [18] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [19] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [20] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [21] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [22] : {
+ columnNumber : 2
+ functionName : foo
+ lineNumber : 7
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [23] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/error-stack-trace-limit-expected.txt b/deps/v8/test/inspector/runtime/error-stack-trace-limit-expected.txt
new file mode 100644
index 0000000000..33a137e3b1
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-stack-trace-limit-expected.txt
@@ -0,0 +1,820 @@
+Checks that Error.stackTraceLimit works correctly
+
+Running test: testErrorStackTraceLimitWithRuntimeDisabled
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testErrorStackTraceLimitWithRuntimeEnabled
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [10] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [11] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [12] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [13] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [14] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [15] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [16] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [17] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [18] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [19] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [20] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [21] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [22] : {
+ columnNumber : 2
+ functionName : foo
+ lineNumber : 7
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [23] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo! at test.js:9:11 at recurse (test.js:4:10) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23) at recurse (test.js:3:23)
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testErrorStackTraceLimitNonNumber
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [10] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [11] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [12] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [13] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [14] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [15] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [16] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [17] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [18] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [19] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [20] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [21] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [22] : {
+ columnNumber : 2
+ functionName : foo
+ lineNumber : 7
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [23] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testErrorStackTraceLimitDeleted
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 8
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName :
+ lineNumber : 8
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 9
+ functionName : recurse
+ lineNumber : 3
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [2] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [3] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [4] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [5] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [6] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [7] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [8] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [9] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [10] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [11] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [12] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [13] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [14] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [15] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [16] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [17] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [18] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [19] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [20] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [21] : {
+ columnNumber : 22
+ functionName : recurse
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [22] : {
+ columnNumber : 2
+ functionName : foo
+ lineNumber : 7
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [23] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Thrown from foo!
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/error-stack-trace-limit.js b/deps/v8/test/inspector/runtime/error-stack-trace-limit.js
new file mode 100644
index 0000000000..d30315a672
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-stack-trace-limit.js
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks that Error.stackTraceLimit works correctly');
+
+contextGroup.addScript(`
+function recurse(f, n) {
+ if (n-- > 0) return recurse(f, n);
+ return f();
+}
+
+function foo() {
+ recurse(() => {
+ throw new Error('Thrown from foo!');
+ }, 20);
+}
+//# sourceURL=test.js
+`);
+
+InspectorTest.runAsyncTestSuite([
+ async function testErrorStackTraceLimitWithRuntimeDisabled() {
+ await Protocol.Runtime.evaluate({expression: 'Error.stackTraceLimit = 2'});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ },
+
+ async function testErrorStackTraceLimitWithRuntimeEnabled() {
+ await Protocol.Runtime.enable();
+ await Protocol.Runtime.evaluate({expression: 'Error.stackTraceLimit = 2'});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ for (let size = 0; size <= 10; size += 5) {
+ await Protocol.Runtime.evaluate(
+ {expression: `Error.stackTraceLimit = ${size}`});
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ }
+ await Protocol.Runtime.disable();
+ },
+
+ async function testErrorStackTraceLimitNonNumber() {
+ await Protocol.Runtime.enable();
+ await Protocol.Runtime.evaluate(
+ {expression: 'Error.stackTraceLimit = "Invalid"'});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ await Protocol.Runtime.disable();
+ },
+
+ async function testErrorStackTraceLimitDeleted() {
+ await Protocol.Runtime.enable();
+ await Protocol.Runtime.evaluate(
+ {expression: 'delete Error.stackTraceLimit'});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ await Protocol.Runtime.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/error-stack.js b/deps/v8/test/inspector/runtime/error-stack.js
new file mode 100644
index 0000000000..22bf331531
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-stack.js
@@ -0,0 +1,39 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks that error.stack works correctly');
+
+contextGroup.addScript(`
+function recurse(f, n) {
+ if (n-- > 0) return recurse(f, n);
+ return f();
+}
+
+function foo() {
+ recurse(() => {
+ throw new Error('Thrown from foo!');
+ }, 20);
+}
+//# sourceURL=test.js
+`);
+
+InspectorTest.runAsyncTestSuite([
+ async function testErrorStackWithRuntimeDisabled() {
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ },
+
+ async function testErrorStackWithRuntimeEnabled() {
+ await Protocol.Runtime.enable();
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ for (let size = 0; size <= 10; size += 5) {
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size});
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'foo()'}));
+ }
+ await Protocol.Runtime.disable();
+ },
+]);
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 2be82c8b90..386b5ede34 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -123,7 +123,7 @@ console.log(239)
this : {
type : undefined
}
- url : module3
+ url :
}
]
hitBreakpoints : [
diff --git a/deps/v8/test/inspector/runtime/get-exception-details-expected.txt b/deps/v8/test/inspector/runtime/get-exception-details-expected.txt
new file mode 100644
index 0000000000..e4f70db4ce
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-exception-details-expected.txt
@@ -0,0 +1,112 @@
+Tests that Runtime.getExceptionDetails works
+
+Running test: itShouldReturnExceptionDetailsForJSErrorObjects
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 9
+ exception : {
+ className : Error
+ description : Error: error 1 at foo (<anonymous>:3:10) at <anonymous>:5:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 2
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 9
+ functionName : foo
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 4
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Error: error 1
+ }
+ }
+}
+
+Running test: itShouldReturnIncompleteDetailsForJSErrorWithNoStack
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : -1
+ exception : {
+ className : Error
+ description : Error: error 1 at foo (<anonymous>:3:10) at <anonymous>:5:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : -1
+ scriptId : <scriptId>
+ text : Error: error 1
+ }
+ }
+}
+
+Running test: itShouldReportAnErrorForNonJSErrorObjects
+{
+ error : {
+ code : -32000
+ message : errorObjectId is not a JS error object
+ }
+ id : <messageId>
+}
+
+Running test: itShouldIncludeMetaData
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 9
+ exception : {
+ className : Error
+ description : Error: myerror at foo (<anonymous>:3:10) at <anonymous>:5:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ exceptionMetaData : {
+ foo : bar
+ }
+ lineNumber : 2
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 9
+ functionName : foo
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 4
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Error: myerror
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/get-exception-details.js b/deps/v8/test/inspector/runtime/get-exception-details.js
new file mode 100644
index 0000000000..76bcd8c7f2
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-exception-details.js
@@ -0,0 +1,49 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests that Runtime.getExceptionDetails works');
+
+const expression = `
+function foo() {
+ return new Error("error 1");
+}
+foo();
+`;
+
+const expressionWithMeta = `
+function foo() {
+ return new inspector.newExceptionWithMetaData('myerror', 'foo', 'bar');
+}
+foo();
+`;
+
+InspectorTest.runAsyncTestSuite([
+ async function itShouldReturnExceptionDetailsForJSErrorObjects() {
+ await Protocol.Runtime.enable(); // Enable detailed stacktrace capturing.
+ const {result} = await Protocol.Runtime.evaluate({expression});
+ InspectorTest.logMessage(await Protocol.Runtime.getExceptionDetails(
+ {errorObjectId: result.result.objectId}));
+ },
+
+ async function itShouldReturnIncompleteDetailsForJSErrorWithNoStack() {
+ await Protocol.Runtime.disable(); // Disable detailed stacktrace capturing.
+ const {result} = await Protocol.Runtime.evaluate({expression});
+ InspectorTest.logMessage(await Protocol.Runtime.getExceptionDetails(
+ {errorObjectId: result.result.objectId}));
+ },
+
+ async function itShouldReportAnErrorForNonJSErrorObjects() {
+ const {result} = await Protocol.Runtime.evaluate({expression: '() =>({})'});
+ InspectorTest.logMessage(await Protocol.Runtime.getExceptionDetails(
+ {errorObjectId: result.result.objectId}));
+ },
+
+ async function itShouldIncludeMetaData() {
+ await Protocol.Runtime.enable(); // Enable detailed stacktrace capturing.
+ const {result} = await Protocol.Runtime.evaluate({expression: expressionWithMeta});
+ InspectorTest.logMessage(await Protocol.Runtime.getExceptionDetails(
+ {errorObjectId: result.result.objectId}));
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/set-max-call-stack-size-expected.txt b/deps/v8/test/inspector/runtime/set-max-call-stack-size-expected.txt
index 31a42be067..2a5680c593 100644
--- a/deps/v8/test/inspector/runtime/set-max-call-stack-size-expected.txt
+++ b/deps/v8/test/inspector/runtime/set-max-call-stack-size-expected.txt
@@ -1,4 +1,42 @@
Checks Runtime.setMaxCallStackSizeToCapture.
+
+Running test: testBeforeEnable
+{
+ code : -32000
+ message : Runtime agent is not enabled
+}
+
+Running test: testNegativeSize
+{
+ code : -32000
+ message : maxCallStackSizeToCapture should be non-negative
+}
+
+Running test: testConsoleLogBeforeEnable
+{
+ args : [
+ [0] : {
+ type : string
+ value : Log message.
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testConsoleLog
+ lineNumber : 2
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+}
+
+Running test: testConsoleTrace
Test with max size 0.
{
args : [
@@ -23,9 +61,9 @@ Test with max size 1.
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 10
+ columnNumber : 12
functionName : bar
- lineNumber : 2
+ lineNumber : 7
scriptId : <scriptId>
url : test.js
}
@@ -33,9 +71,9 @@ Test with max size 1.
parent : {
callFrames : [
[0] : {
- columnNumber : 2
- functionName : test
- lineNumber : 10
+ columnNumber : 4
+ functionName : executor
+ lineNumber : 16
scriptId : <scriptId>
url : test.js
}
@@ -58,16 +96,16 @@ Test with max size 2.
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 10
+ columnNumber : 12
functionName : bar
- lineNumber : 2
+ lineNumber : 7
scriptId : <scriptId>
url : test.js
}
[1] : {
- columnNumber : 2
+ columnNumber : 4
functionName : foo
- lineNumber : 6
+ lineNumber : 12
scriptId : <scriptId>
url : test.js
}
@@ -75,18 +113,18 @@ Test with max size 2.
parent : {
callFrames : [
[0] : {
- columnNumber : 2
- functionName : test
- lineNumber : 10
+ columnNumber : 4
+ functionName : executor
+ lineNumber : 16
scriptId : <scriptId>
url : test.js
}
[1] : {
- columnNumber : 0
- functionName :
- lineNumber : 0
+ columnNumber : 9
+ functionName : testConsoleTrace
+ lineNumber : 15
scriptId : <scriptId>
- url : expr.js
+ url : test.js
}
]
description : setTimeout
@@ -95,3 +133,79 @@ Test with max size 2.
timestamp : <timestamp>
type : trace
}
+
+Running test: testException
+Test with max size 0.
+{
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error at bar (test.js:23:11) at foo (test.js:27:5) at testThrow (test.js:30:3) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 22
+ scriptId : <scriptId>
+ text : Uncaught
+}
+Test with max size 1.
+{
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error at bar (test.js:23:11) at foo (test.js:27:5) at testThrow (test.js:30:3) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 22
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : bar
+ lineNumber : 22
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+}
+Test with max size 2.
+{
+ columnNumber : 4
+ exception : {
+ className : Error
+ description : Error at bar (test.js:23:11) at foo (test.js:27:5) at testThrow (test.js:30:3) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 22
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : bar
+ lineNumber : 22
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 4
+ functionName : foo
+ lineNumber : 26
+ scriptId : <scriptId>
+ url : test.js
+ }
+ ]
+ }
+ text : Uncaught
+}
diff --git a/deps/v8/test/inspector/runtime/set-max-call-stack-size.js b/deps/v8/test/inspector/runtime/set-max-call-stack-size.js
index b1872407c3..c2a0463791 100644
--- a/deps/v8/test/inspector/runtime/set-max-call-stack-size.js
+++ b/deps/v8/test/inspector/runtime/set-max-call-stack-size.js
@@ -4,34 +4,98 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks Runtime.setMaxCallStackSizeToCapture.');
-Protocol.Runtime.enable();
Protocol.Runtime.onConsoleAPICalled(
- message => InspectorTest.logMessage(message.params));
+ ({params}) => InspectorTest.logMessage(params));
contextGroup.addScript(`
-function bar() {
- console.trace("Nested call.");
+function testConsoleLog() {
+ console.log("Log message.");
}
-function foo() {
- bar();
+function testConsoleTrace() {
+ function bar(callback) {
+ console.trace("Nested call.");
+ callback();
+ }
+
+ function foo(callback) {
+ bar(callback);
+ }
+
+ return new Promise(function executor(resolve) {
+ setTimeout(foo.bind(undefined, resolve), 0);
+ });
}
-async function test() {
- setTimeout(foo, 0);
+function testThrow() {
+ function bar() {
+ throw new Error();
+ }
+
+ function foo() {
+ bar();
+ }
+
+ foo();
}
//# sourceURL=test.js`);
-Protocol.Runtime.setAsyncCallStackDepth({maxDepth: 10});
-(async function test() {
- await Protocol.Runtime.setMaxCallStackSizeToCapture({size: 0});
- InspectorTest.log('Test with max size 0.');
- await Protocol.Runtime.evaluate({ expression: 'test()//# sourceURL=expr.js'});
- await Protocol.Runtime.setMaxCallStackSizeToCapture({size: 1});
- InspectorTest.log('Test with max size 1.');
- await Protocol.Runtime.evaluate({ expression: 'test()//# sourceURL=expr.js'});
- await Protocol.Runtime.setMaxCallStackSizeToCapture({size: 2});
- InspectorTest.log('Test with max size 2.');
- await Protocol.Runtime.evaluate({ expression: 'test()//# sourceURL=expr.js'});
- InspectorTest.completeTest();
-})();
+InspectorTest.runAsyncTestSuite([
+ async function testBeforeEnable() {
+ const {error} =
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size: 0});
+ InspectorTest.logMessage(error);
+ },
+
+ async function testNegativeSize() {
+ await Protocol.Runtime.enable();
+ const {error} =
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size: -42});
+ InspectorTest.logMessage(error);
+ await Protocol.Runtime.disable();
+ },
+
+ async function testConsoleLogBeforeEnable() {
+ await Protocol.Runtime.evaluate({expression: 'testConsoleLog()'});
+ await Protocol.Runtime.enable();
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testConsoleTrace() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Runtime.setAsyncCallStackDepth({maxDepth: 10}),
+ ]);
+ for (let size = 0; size <= 2; ++size) {
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size});
+ InspectorTest.log(`Test with max size ${size}.`);
+ await Protocol.Runtime.evaluate(
+ {expression: 'testConsoleTrace()', awaitPromise: true});
+ }
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ },
+
+ async function testException() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Runtime.setAsyncCallStackDepth({maxDepth: 0}),
+ ]);
+ for (let size = 0; size <= 2; ++size) {
+ await Protocol.Runtime.setMaxCallStackSizeToCapture({size});
+ InspectorTest.log(`Test with max size ${size}.`);
+ const {result: {exceptionDetails}} =
+ await Protocol.Runtime.evaluate({expression: 'testThrow()'});
+ InspectorTest.logMessage(exceptionDetails);
+ }
+ await Promise.all([
+ Protocol.Runtime.discardConsoleEntries(),
+ Protocol.Runtime.disable(),
+ ]);
+ }
+])
diff --git a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
index de81967eb0..d6944a5fbd 100644
--- a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
@@ -35,12 +35,12 @@ Evaluating debugger in 1
Paused in 1:
reason: other
hit breakpoints:
- location: bar@25
+ location: bar@26
data: null
Paused in 2:
reason: other
hit breakpoints:
- location: bar@25
+ location: bar@26
data: null
Resuming in 2
Resumed in 1
@@ -77,12 +77,12 @@ Evaluating debugger in 2
Paused in 1:
reason: other
hit breakpoints:
- location: bar@25
+ location: bar@26
data: null
Paused in 2:
reason: other
hit breakpoints:
- location: bar@25
+ location: bar@26
data: null
Resuming in 2
Resumed in 1
@@ -187,7 +187,7 @@ Evaluating bar() in 2
Paused in 1:
reason: other
hit breakpoints:
- location: bar@25
+ location: bar@26
data: null
Resuming in 1
Resumed in 1
diff --git a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints.js b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints.js
index 64a60fd1dd..2f72169358 100644
--- a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints.js
+++ b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints.js
@@ -6,7 +6,8 @@ InspectorTest.log('Tests how multiple sessions interact while pausing, stepping,
var contextGroup = new InspectorTest.ContextGroup();
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function foo() {
return 1;
}
@@ -17,14 +18,15 @@ function stepping() {
debugger;
var a = 1;
var b = 1;
-}
-//# sourceURL=test.js`, 9, 25);
+}`,
+ 'test.js');
-contextGroup.addScript(`
+contextGroup.addInlineScript(
+ `
function bar() {
debugger;
-}
-//# sourceURL=test2.js`, 23, 25);
+}`,
+ 'test2.js');
(async function test() {
InspectorTest.log('Connecting session 1');
diff --git a/deps/v8/test/intl/enumeration/calendar-sorted.js b/deps/v8/test/intl/enumeration/calendar-sorted.js
index 11e19c06f8..41f6ec10f7 100644
--- a/deps/v8/test/intl/enumeration/calendar-sorted.js
+++ b/deps/v8/test/intl/enumeration/calendar-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of calendar is sorted
let name = "calendar";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/enumeration/callendar-syntax-valid.js b/deps/v8/test/intl/enumeration/callendar-syntax-valid.js
index 881c7e603c..0b0b6fd483 100644
--- a/deps/v8/test/intl/enumeration/callendar-syntax-valid.js
+++ b/deps/v8/test/intl/enumeration/callendar-syntax-valid.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of calendar fit 'type'
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
Intl.supportedValuesOf("calendar").forEach(
diff --git a/deps/v8/test/intl/enumeration/collation-sorted.js b/deps/v8/test/intl/enumeration/collation-sorted.js
index d40e9be3bb..65251e0a95 100644
--- a/deps/v8/test/intl/enumeration/collation-sorted.js
+++ b/deps/v8/test/intl/enumeration/collation-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of collation is sorted
let name = "collation";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/enumeration/collation-syntax-valid.js b/deps/v8/test/intl/enumeration/collation-syntax-valid.js
index e68d565fa8..c41e52ec69 100644
--- a/deps/v8/test/intl/enumeration/collation-syntax-valid.js
+++ b/deps/v8/test/intl/enumeration/collation-syntax-valid.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of collation fit 'type'
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
Intl.supportedValuesOf("collation").forEach(
diff --git a/deps/v8/test/intl/enumeration/currency-sorted.js b/deps/v8/test/intl/enumeration/currency-sorted.js
index 55ff1bc611..a206750cca 100644
--- a/deps/v8/test/intl/enumeration/currency-sorted.js
+++ b/deps/v8/test/intl/enumeration/currency-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of currency is sorted
let name = "currency";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/enumeration/currency-syntax-valid.js b/deps/v8/test/intl/enumeration/currency-syntax-valid.js
index cce3c612bd..da21545f9d 100644
--- a/deps/v8/test/intl/enumeration/currency-syntax-valid.js
+++ b/deps/v8/test/intl/enumeration/currency-syntax-valid.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of currency fit 'type'
let regex = /^[A-Z]{3}$/;
Intl.supportedValuesOf("currency").forEach(
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js b/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js
index bf8c1a11eb..e38267ac0c 100644
--- a/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js
+++ b/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Chrome filter out data of algorithm numberingSystems so we need to test none
// of them got returned.
let name = "numberingSystem";
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-sorted.js b/deps/v8/test/intl/enumeration/numberingSystem-sorted.js
index 7cd0d85052..4bd9c877d3 100644
--- a/deps/v8/test/intl/enumeration/numberingSystem-sorted.js
+++ b/deps/v8/test/intl/enumeration/numberingSystem-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of numberingSystem is sorted
let name = "numberingSystem";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js b/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js
index d80976d519..285a50f54f 100644
--- a/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js
+++ b/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of numberingSystem fit 'type'
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
Intl.supportedValuesOf("numberingSystem").forEach(
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js b/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js
index 27651df74c..08a3594e56 100644
--- a/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js
+++ b/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test invalid keys
["calendars", "collations", "currencies", "numberingSystems", "timeZones", "units",
1, 0.3, true, false, {}, [] ].forEach(
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-name.js b/deps/v8/test/intl/enumeration/supported-values-of-name.js
index a0cbfd5333..7d94529632 100644
--- a/deps/v8/test/intl/enumeration/supported-values-of-name.js
+++ b/deps/v8/test/intl/enumeration/supported-values-of-name.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
assertEquals("supportedValuesOf", Intl.supportedValuesOf.name);
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-property.js b/deps/v8/test/intl/enumeration/supported-values-of-property.js
index 52b0778b54..6dd46406cc 100644
--- a/deps/v8/test/intl/enumeration/supported-values-of-property.js
+++ b/deps/v8/test/intl/enumeration/supported-values-of-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
let descriptor = Object.getOwnPropertyDescriptor(
Intl, "supportedValuesOf");
assertTrue(descriptor.writable);
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js b/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js
index ac5b4d8e5d..a3f40c6432 100644
--- a/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js
+++ b/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test valid keys
["calendar", "collation", "currency", "numberingSystem", "timeZone", "unit"].forEach(
function(key) {
diff --git a/deps/v8/test/intl/enumeration/timeZone-sorted.js b/deps/v8/test/intl/enumeration/timeZone-sorted.js
index e3b5b484e2..971150c1b8 100644
--- a/deps/v8/test/intl/enumeration/timeZone-sorted.js
+++ b/deps/v8/test/intl/enumeration/timeZone-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of timeZone is sorted
let name = "timeZone";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/enumeration/unit-sorted.js b/deps/v8/test/intl/enumeration/unit-sorted.js
index 08dd1d93e8..dbca115f08 100644
--- a/deps/v8/test/intl/enumeration/unit-sorted.js
+++ b/deps/v8/test/intl/enumeration/unit-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_enumeration
-
// Test the return items of unit is sorted
let name = "unit";
let items = Intl.supportedValuesOf(name);
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 4350e57bca..6e6eb6a439 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -28,8 +28,9 @@
[
################################################################################
[ALWAYS, {
-# TODO(jochen): The following test is flaky.
+# TODO(ftang,jshin): The following test is flaky.
'overrides/caching': [PASS, FAIL],
+ 'number-format/rounding-increment-resolved-match-v3': [FAIL],
}], # ALWAYS
################################################################################
diff --git a/deps/v8/test/intl/locale/locale-calendars.js b/deps/v8/test/intl/locale/locale-calendars.js
index 620440b01e..e3dde03b20 100644
--- a/deps/v8/test/intl/locale/locale-calendars.js
+++ b/deps/v8/test/intl/locale/locale-calendars.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Test the return items of calendars fit 'type'
let a_to_z = "abcdefghijklmnopqrstuvwxyz";
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
diff --git a/deps/v8/test/intl/locale/locale-collations.js b/deps/v8/test/intl/locale/locale-collations.js
index 97eccaa3a9..cc8bdbaeb8 100644
--- a/deps/v8/test/intl/locale/locale-collations.js
+++ b/deps/v8/test/intl/locale/locale-collations.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Test the return items of collations fit 'type'
let a_to_z = "abcdefghijklmnopqrstuvwxyz";
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
diff --git a/deps/v8/test/intl/locale/locale-info-check-property.js b/deps/v8/test/intl/locale/locale-info-check-property.js
index 77c676e6ec..76d219b361 100644
--- a/deps/v8/test/intl/locale/locale-info-check-property.js
+++ b/deps/v8/test/intl/locale/locale-info-check-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Check getter properties against the spec.
function checkProperties(property) {
let desc = Object.getOwnPropertyDescriptor(Intl.Locale.prototype, property);
diff --git a/deps/v8/test/intl/locale/locale-info-check-return-types.js b/deps/v8/test/intl/locale/locale-info-check-return-types.js
index 61f9c537de..a2b9033c13 100644
--- a/deps/v8/test/intl/locale/locale-info-check-return-types.js
+++ b/deps/v8/test/intl/locale/locale-info-check-return-types.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
function checkLocale(locale) {
let l = new Intl.Locale(locale);
assertTrue(Array.isArray(l.calendars));
diff --git a/deps/v8/test/intl/locale/locale-info-ext.js b/deps/v8/test/intl/locale/locale-info-ext.js
index 647e6c8a4e..c0ca81a3be 100644
--- a/deps/v8/test/intl/locale/locale-info-ext.js
+++ b/deps/v8/test/intl/locale/locale-info-ext.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Test Unicode extension.
function testExt(base, ukey, uvalue, property) {
let baseLocale = new Intl.Locale(base);
diff --git a/deps/v8/test/intl/locale/locale-info-no-undefined.js b/deps/v8/test/intl/locale/locale-info-no-undefined.js
index f9ccd3a1a9..1284be7f20 100644
--- a/deps/v8/test/intl/locale/locale-info-no-undefined.js
+++ b/deps/v8/test/intl/locale/locale-info-no-undefined.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Check the return array has no undefined
function checkNoUndefined(l, items) {
items.forEach( function(item) {
diff --git a/deps/v8/test/intl/locale/locale-info-timezones-sorted.js b/deps/v8/test/intl/locale/locale-info-timezones-sorted.js
index 004ed894b2..60f1ffe818 100644
--- a/deps/v8/test/intl/locale/locale-info-timezones-sorted.js
+++ b/deps/v8/test/intl/locale/locale-info-timezones-sorted.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Check the return array are sorted
function checkSortedArray(l, name, items) {
assertEquals([...items].sort(), items,
diff --git a/deps/v8/test/intl/locale/locale-numberingSystems.js b/deps/v8/test/intl/locale/locale-numberingSystems.js
index ab65f06538..82941647e1 100644
--- a/deps/v8/test/intl/locale/locale-numberingSystems.js
+++ b/deps/v8/test/intl/locale/locale-numberingSystems.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony_intl_locale_info
-
// Test the return items of numberingSystems fit 'type'
let a_to_z = "abcdefghijklmnopqrstuvwxyz";
let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
diff --git a/deps/v8/test/intl/number-format/format-range-v3.js b/deps/v8/test/intl/number-format/format-range-v3.js
new file mode 100644
index 0000000000..4ca9f80c92
--- /dev/null
+++ b/deps/v8/test/intl/number-format/format-range-v3.js
@@ -0,0 +1,158 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+const validRanges = [[-12345, -5678], [-12345, 56789], [12345, 56789]];
+
+const nf = new Intl.NumberFormat("en", {signDisplay: "exceptZero"});
+['formatRange', 'formatRangeToParts'].forEach(function(method) {
+ assertEquals("function", typeof nf[method]);
+
+ // 2. Perform ? RequireInternalSlot(nf, [[InitializedNumberFormat]]).
+ // Assert if called without nf
+ let f = nf[method];
+ assertThrows(() => { f(1, 23) }, TypeError);
+
+ // Assert normal call success
+ assertDoesNotThrow(() => nf[method](1, 23));
+
+ // 3. If start is undefined ..., throw a TypeError exception.
+ assertThrows(() => { nf[method](undefined, 23) }, TypeError);
+ // 3. If ... end is undefined, throw a TypeError exception.
+ assertThrows(() => { nf[method](1, undefined) }, TypeError);
+
+ // 4. Let x be ? ToNumeric(start).
+ // Verify it won't throw error
+ assertDoesNotThrow(() => nf[method](null, 23));
+ assertDoesNotThrow(() => nf[method](false, 23));
+ assertDoesNotThrow(() => nf[method](true, 23));
+ assertDoesNotThrow(() => nf[method](12, 23));
+ assertDoesNotThrow(() => nf[method](12n, 23));
+ // Verify it will throw error
+ assertThrows(() => { nf[method](Symbol(12), 23) }, TypeError);
+
+ // 5. Let y be ? ToNumeric(end).
+ // Verify it won't throw error
+ assertDoesNotThrow(() => nf[method](-12, null));
+ assertDoesNotThrow(() => nf[method](-12, false));
+ assertDoesNotThrow(() => nf[method](-12, true));
+ assertDoesNotThrow(() => nf[method](12, 23));
+ assertDoesNotThrow(() => nf[method](12, 23n));
+
+ // Verify it will throw error
+ assertThrows(() => { nf[method](12, Symbol(23)) }, TypeError);
+
+ // 6. If x is NaN ..., throw a RangeError exception.
+ assertThrows(() => { nf[method](NaN, 23) }, RangeError);
+
+ // 6. If ... y is NaN, throw a RangeError exception.
+ assertThrows(() => { nf[method](12, NaN) }, RangeError);
+
+ // 8. If x is greater than y, throw a RangeError exception.
+ // neither x nor y are bigint.
+ assertThrows(() => { nf[method](23, 12) }, RangeError);
+ assertDoesNotThrow(() => nf[method](12, 23));
+ // x is not bigint but y is.
+ assertThrows(() => { nf[method](23, 12n) }, RangeError);
+ assertDoesNotThrow(() => nf[method](12, 23n));
+ // x is bigint but y is not.
+ assertThrows(() => { nf[method](23n, 12) }, RangeError);
+ assertDoesNotThrow(() => nf[method](12n, 23));
+ // both x and y are bigint.
+ assertThrows(() => { nf[method](23n, 12n) }, RangeError);
+ assertDoesNotThrow(() => nf[method](12n, 23n));
+
+ validRanges.forEach(
+ function([x, y]) {
+ const X = BigInt(x);
+ const Y = BigInt(y);
+ const formatted_x_y = nf[method](x, y);
+ const formatted_X_y = nf[method](X, y);
+ const formatted_x_Y = nf[method](x, Y);
+ const formatted_X_Y = nf[method](X, Y);
+ assertEquals(formatted_x_y, formatted_X_y);
+ assertEquals(formatted_x_y, formatted_x_Y);
+ assertEquals(formatted_x_y, formatted_X_Y);
+
+ });
+});
+
+// Check the number of part with type: "plusSign" and "minusSign" are corre
+validRanges.forEach(
+ function([x, y]) {
+ const expectedPlus = (x > 0) ? ((y > 0) ? 2 : 1) : ((y > 0) ? 1 : 0);
+ const expectedMinus = (x < 0) ? ((y < 0) ? 2 : 1) : ((y < 0) ? 1 : 0);
+ let actualPlus = 0;
+ let actualMinus = 0;
+ const parts = nf.formatRangeToParts(x, y);
+ parts.forEach(function(part) {
+ if (part.type == "plusSign") actualPlus++;
+ if (part.type == "minusSign") actualMinus++;
+ });
+ const method = "formatRangeToParts(" + x + ", " + y + "): ";
+ assertEquals(expectedPlus, actualPlus,
+ method + "Number of type: 'plusSign' in parts is incorrect");
+ assertEquals(expectedMinus, actualMinus,
+ method + "Number of type: 'minusSign' in parts is incorrect");
+ });
+
+// From https://github.com/tc39/proposal-intl-numberformat-v3#formatrange-ecma-402-393
+const nf2 = new Intl.NumberFormat("en-US", {
+ style: "currency",
+ currency: "EUR",
+ maximumFractionDigits: 0,
+});
+
+// README.md said it expect "€3–5"
+assertEquals("€3 – €5", nf2.formatRange(3, 5));
+
+const nf3 = new Intl.NumberFormat("en-US", {
+ style: "currency",
+ currency: "EUR",
+ maximumFractionDigits: 0,
+});
+const actual3 = nf3.formatRangeToParts(3, 5);
+/*
+[
+ {type: "currency", value: "€", source: "startRange"}
+ {type: "integer", value: "3", source: "startRange"}
+ {type: "literal", value: "–", source: "shared"}
+ {type: "integer", value: "5", source: "endRange"}
+]
+*/
+assertEquals(5, actual3.length);
+assertEquals("currency", actual3[0].type);
+assertEquals("€", actual3[0].value);
+assertEquals("startRange", actual3[0].source);
+assertEquals("integer", actual3[1].type);
+assertEquals("3", actual3[1].value);
+assertEquals("startRange", actual3[1].source);
+assertEquals("literal", actual3[2].type);
+assertEquals(" – ", actual3[2].value);
+assertEquals("shared", actual3[2].source);
+assertEquals("currency", actual3[3].type);
+assertEquals("€", actual3[3].value);
+assertEquals("endRange", actual3[3].source);
+assertEquals("integer", actual3[4].type);
+assertEquals("5", actual3[4].value);
+assertEquals("endRange", actual3[4].source);
+
+const nf4 = new Intl.NumberFormat("en-US", {
+ style: "currency",
+ currency: "EUR",
+ maximumFractionDigits: 0,
+});
+assertEquals("~€3", nf4.formatRange(2.9, 3.1));
+
+const nf5 = new Intl.NumberFormat("en-US", {
+ style: "currency",
+ currency: "EUR",
+ signDisplay: "always",
+});
+assertEquals("~+€3.00", nf5.formatRange(2.999, 3.001));
+
+const nf6 = new Intl.NumberFormat("en");
+assertEquals("3–∞", nf6.formatRange(3, 1/0));
+assertThrows(() => { nf6.formatRange(3, 0/0); }, RangeError);
diff --git a/deps/v8/test/intl/number-format/rounding-increment-resolved-match-v3.js b/deps/v8/test/intl/number-format/rounding-increment-resolved-match-v3.js
new file mode 100644
index 0000000000..c76e48954a
--- /dev/null
+++ b/deps/v8/test/intl/number-format/rounding-increment-resolved-match-v3.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let validRoundingIncrements = [
+ 1, 2, 5, 10, 20, 25, 50, 100, 200, 250, 500, 1000, 2000, 2500, 5000];
+
+validRoundingIncrements.forEach(function(roundingIncrement) {
+ let nf = new Intl.NumberFormat(undefined, {roundingIncrement});
+ assertEquals(roundingIncrement, nf.resolvedOptions().roundingIncrement);
+});
diff --git a/deps/v8/test/intl/number-format/rounding-increment-v3.js b/deps/v8/test/intl/number-format/rounding-increment-v3.js
new file mode 100644
index 0000000000..8b30a7a38b
--- /dev/null
+++ b/deps/v8/test/intl/number-format/rounding-increment-v3.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let validRoundingIncrements = [
+ 1, 2, 5, 10, 20, 25, 50, 100, 200, 250, 500, 1000, 2000, 2500, 5000];
+
+let invalidRoundingIncrements = [
+ -1, -5, 0, 3, 1001, 1100, 5500, 10000, 20000, 25000, 100000, 200000, 500005, 10000000
+];
+
+validRoundingIncrements.forEach(function(roundingIncrement) {
+ assertDoesNotThrow(() => {
+ new Intl.NumberFormat(undefined, {roundingIncrement})});
+});
+
+invalidRoundingIncrements.forEach(function(roundingIncrement) {
+ assertThrows(() => {
+ let nf = new Intl.NumberFormat(undefined, {roundingIncrement})},
+ RangeError);
+});
diff --git a/deps/v8/test/intl/number-format/rounding-increment-value-v3.js b/deps/v8/test/intl/number-format/rounding-increment-value-v3.js
new file mode 100644
index 0000000000..a6693fb155
--- /dev/null
+++ b/deps/v8/test/intl/number-format/rounding-increment-value-v3.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let penny = new Intl.NumberFormat(
+ "en", { minimumFractionDigits: 2, maximumFractionDigits: 2, roundingIncrement: 1 });
+let nickel = new Intl.NumberFormat(
+ "en", { minimumFractionDigits: 2, maximumFractionDigits: 2, roundingIncrement: 5 });
+let dime = new Intl.NumberFormat(
+ "en", { minimumFractionDigits: 2, maximumFractionDigits: 2, roundingIncrement: 10 });
+
+// https://necs.com/knowledgebase/sysprefs_prc_mod_roundmeth.htm
+assertEquals("10.15", penny.format(10.154));
+assertEquals("10.16", penny.format(10.155));
+assertEquals("10.10", nickel.format(10.124));
+assertEquals("10.15", nickel.format(10.125));
+assertEquals("10.40", dime.format(10.444));
+// mistake in the above page, the result should be 10.40 not 10.50
+// assertEquals("10.50", dime.format(10.445));
+assertEquals("10.40", dime.format(10.445));
+assertEquals("10.50", dime.format(10.45));
diff --git a/deps/v8/test/intl/number-format/rounding-mode-table-v3.js b/deps/v8/test/intl/number-format/rounding-mode-table-v3.js
new file mode 100644
index 0000000000..72af49eb9d
--- /dev/null
+++ b/deps/v8/test/intl/number-format/rounding-mode-table-v3.js
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+// Check the rounding behavior.
+// Based on https://tc39.es/proposal-intl-numberformat-v3/out/numberformat/diff.html#table-intl-rounding-modes
+let inputs = [-1.5, 0.4, 0.5, 0.6, 1.5];
+let expectations = {
+ "ceil": ["-1", "1", "1", "1", "2"],
+ "floor": ["-2", "0", "0", "0", "1"],
+ "expand": ["-2", "1", "1", "1", "2"],
+ "trunc": ["-1", "0", "0", "0", "1"],
+ "halfCeil": ["-1", "0", "1", "1", "2"],
+ "halfFloor": ["-2", "0", "0", "1", "1"],
+ "halfExpand": ["-2", "0", "1", "1", "2"],
+ "halfTrunc": ["-1", "0", "0", "1", "1"],
+ "halfEven": ["-2", "0", "0", "1", "2"],
+};
+Object.keys(expectations).forEach(function(roundingMode) {
+ let exp = expectations[roundingMode];
+ let idx = 0;
+ let nf = new Intl.NumberFormat("en", {roundingMode, maximumFractionDigits: 0});
+ assertEquals(roundingMode, nf.resolvedOptions().roundingMode);
+ inputs.forEach(function(input) {
+ let msg = "input: " + input + " with roundingMode: " + roundingMode;
+ assertEquals(exp[idx++], nf.format(input), msg);
+ })
+});
diff --git a/deps/v8/test/intl/number-format/rounding-mode-v3.js b/deps/v8/test/intl/number-format/rounding-mode-v3.js
new file mode 100644
index 0000000000..0a1425ced0
--- /dev/null
+++ b/deps/v8/test/intl/number-format/rounding-mode-v3.js
@@ -0,0 +1,60 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let validRoundingMode = [
+ "ceil",
+ "floor",
+ "expand",
+ "halfCeil",
+ "halfExpand",
+ "halfFloor",
+ "halfTrunc",
+ "halfEven",
+ "trunc",
+];
+
+let invalidRoundingMode = [
+ "ceiling",
+ "down",
+ "Down",
+ "flooring",
+ "halfDown",
+ "halfUp",
+ "halfup",
+ "halfeven",
+ "halfdown",
+ "half-up",
+ "half-even",
+ "half-down",
+ "up",
+ "Up",
+];
+
+validRoundingMode.forEach(function(roundingMode) {
+ let nf = new Intl.NumberFormat(undefined, {roundingMode});
+ assertEquals(roundingMode, nf.resolvedOptions().roundingMode);
+});
+
+invalidRoundingMode.forEach(function(roundingMode) {
+ assertThrows(() => {
+ let nf = new Intl.NumberFormat(undefined, {roundingMode}); });
+});
+
+// Check default is "halfExpand"
+assertEquals("halfExpand", (new Intl.NumberFormat().resolvedOptions().roundingMode));
+assertEquals("halfExpand", (new Intl.NumberFormat(
+ undefined, {roundingMode: undefined}).resolvedOptions().roundingMode));
+
+// Check roundingMode is read once after reading signDisplay
+
+let read = [];
+let options = {
+ get signDisplay() { read.push('signDisplay'); return undefined; },
+ get roundingMode() { read.push('roundingMode'); return undefined; },
+};
+
+assertDoesNotThrow(() => new Intl.NumberFormat(undefined, options));
+assertEquals("signDisplay,roundingMode", read.join(","));
diff --git a/deps/v8/test/intl/number-format/sign-display-v3.js b/deps/v8/test/intl/number-format/sign-display-v3.js
new file mode 100644
index 0000000000..6345517ec4
--- /dev/null
+++ b/deps/v8/test/intl/number-format/sign-display-v3.js
@@ -0,0 +1,29 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-intl-number-format-v3
+
+// Test default.
+let nf = new Intl.NumberFormat();
+assertEquals("auto", nf.resolvedOptions().signDisplay);
+
+nf = new Intl.NumberFormat("en");
+assertEquals("auto", nf.resolvedOptions().signDisplay);
+
+const testData = [
+ ["auto", "-123", "-0", "0", "123"],
+ ["always", "-123", "-0", "+0", "+123"],
+ ["never", "123", "0", "0", "123"],
+ ["exceptZero", "-123", "0", "0", "+123"],
+ ["negative", "-123", "0", "0", "123"],
+];
+
+for (const [signDisplay, neg, negZero, zero, pos] of testData) {
+ nf = new Intl.NumberFormat("en", {signDisplay});
+ assertEquals(signDisplay, nf.resolvedOptions().signDisplay);
+ assertEquals(neg, nf.format(-123));
+ assertEquals(negZero, nf.format(-0));
+ assertEquals(zero, nf.format(0));
+ assertEquals(pos, nf.format(123));
+}
diff --git a/deps/v8/test/intl/number-format/trailing-zero-display-resolved-options-v3.js b/deps/v8/test/intl/number-format/trailing-zero-display-resolved-options-v3.js
new file mode 100644
index 0000000000..d21f682f91
--- /dev/null
+++ b/deps/v8/test/intl/number-format/trailing-zero-display-resolved-options-v3.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let defaultFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2 });
+let autoFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2,
+ trailingZeroDisplay: 'auto'});
+let stripIfIntegerFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2,
+ trailingZeroDisplay: 'stripIfInteger'});
+
+assertEquals("auto", defaultFmt.resolvedOptions().trailingZeroDisplay);
+assertEquals("auto", autoFmt.resolvedOptions().trailingZeroDisplay);
+assertEquals("stripIfInteger",
+ stripIfIntegerFmt.resolvedOptions().trailingZeroDisplay);
diff --git a/deps/v8/test/intl/number-format/trailing-zero-display-v3.js b/deps/v8/test/intl/number-format/trailing-zero-display-v3.js
new file mode 100644
index 0000000000..ef3d03ef6e
--- /dev/null
+++ b/deps/v8/test/intl/number-format/trailing-zero-display-v3.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let defaultFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2 });
+let autoFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2,
+ trailingZeroDisplay: 'auto'});
+let stripIfIntegerFmt = new Intl.NumberFormat("en",
+ { minimumFractionDigits: 2, maximumFractionDigits: 2,
+ trailingZeroDisplay: 'stripIfInteger'});
+
+assertEquals("3.14", defaultFmt.format(3.1411));
+assertEquals("3.14", autoFmt.format(3.1411));
+assertEquals("3.14", stripIfIntegerFmt.format(3.1411));
+assertEquals("3.00", defaultFmt.format(3.001411));
+assertEquals("3.00", autoFmt.format(3.001411));
+assertEquals("3", stripIfIntegerFmt.format(3.001411));
+assertEquals("3.00", defaultFmt.format(2.999411));
+assertEquals("3.00", autoFmt.format(2.999411));
+assertEquals("3", stripIfIntegerFmt.format(2.999411));
diff --git a/deps/v8/test/intl/number-format/use-grouping-v3.js b/deps/v8/test/intl/number-format/use-grouping-v3.js
new file mode 100644
index 0000000000..2fc833cf37
--- /dev/null
+++ b/deps/v8/test/intl/number-format/use-grouping-v3.js
@@ -0,0 +1,114 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+
+let validUseGrouping = [
+ "min2",
+ "auto",
+ "always",
+ false,
+];
+
+let invalidUseGrouping = [
+ "min-2",
+ "true",
+];
+
+validUseGrouping.forEach(function(useGrouping) {
+ let nf = new Intl.NumberFormat(undefined, {useGrouping});
+ assertEquals(useGrouping, nf.resolvedOptions().useGrouping);
+});
+
+invalidUseGrouping.forEach(function(useGrouping) {
+ assertThrows(() => {
+ let nf = new Intl.NumberFormat(undefined, {useGrouping}); });
+});
+
+// useGrouping: undefined get "auto"
+assertEquals("auto",
+ (new Intl.NumberFormat()).resolvedOptions().useGrouping);
+assertEquals("auto",
+ (new Intl.NumberFormat(undefined, {useGrouping: undefined}))
+ .resolvedOptions().useGrouping);
+
+// useGrouping: true get "always"
+assertEquals("always",
+ (new Intl.NumberFormat(undefined, {useGrouping: true}))
+ .resolvedOptions().useGrouping);
+
+// useGrouping: false get false
+// useGrouping: "" get false
+assertEquals(false,
+ (new Intl.NumberFormat(undefined, {useGrouping: false}))
+ .resolvedOptions().useGrouping);
+assertEquals(false,
+ (new Intl.NumberFormat(undefined, {useGrouping: ""}))
+ .resolvedOptions().useGrouping);
+
+// Some locales with default minimumGroupingDigits
+let mgd1 = ["en"];
+// Some locales with default minimumGroupingDigits{"2"}
+let mgd2 = ["es", "pl", "lv"];
+let all = mgd1.concat(mgd2);
+
+// Check "always"
+all.forEach(function(locale) {
+ let off = new Intl.NumberFormat(locale, {useGrouping: false});
+ let msg = "locale: " + locale + " useGrouping: false";
+ // In useGrouping: false, no grouping.
+ assertEquals(3, off.format(123).length, msg);
+ assertEquals(4, off.format(1234).length, msg);
+ assertEquals(5, off.format(12345).length, msg);
+ assertEquals(6, off.format(123456).length, msg);
+ assertEquals(7, off.format(1234567).length, msg);
+});
+
+// Check false
+all.forEach(function(locale) {
+ let always = new Intl.NumberFormat(locale, {useGrouping: "always"});
+ let msg = "locale: " + locale + " useGrouping: 'always'";
+ assertEquals(3, always.format(123).length);
+ // In useGrouping: "always", has grouping when more than 3 digits..
+ assertEquals(4 + 1, always.format(1234).length, msg);
+ assertEquals(5 + 1, always.format(12345).length, msg);
+ assertEquals(6 + 1, always.format(123456).length, msg);
+ assertEquals(7 + 2, always.format(1234567).length, msg);
+});
+
+// Check "min2"
+all.forEach(function(locale) {
+ let always = new Intl.NumberFormat(locale, {useGrouping: "min2"});
+ let msg = "locale: " + locale + " useGrouping: 'min2'";
+ assertEquals(3, always.format(123).length);
+ // In useGrouping: "min2", no grouping for 4 digits but has grouping
+ // when more than 4 digits..
+ assertEquals(4, always.format(1234).length, msg);
+ assertEquals(5 + 1, always.format(12345).length, msg);
+ assertEquals(6 + 1, always.format(123456).length, msg);
+ assertEquals(7 + 2, always.format(1234567).length, msg);
+});
+
+// Check "auto"
+mgd1.forEach(function(locale) {
+ let auto = new Intl.NumberFormat(locale, {useGrouping: "auto"});
+ let msg = "locale: " + locale + " useGrouping: 'auto'";
+ assertEquals(3, auto.format(123).length, msg);
+ assertEquals(4 + 1, auto.format(1234).length, msg);
+ assertEquals(5 + 1, auto.format(12345).length, msg);
+ assertEquals(6 + 1, auto.format(123456).length, msg);
+ assertEquals(7 + 2, auto.format(1234567).length, msg);
+});
+mgd2.forEach(function(locale) {
+ let auto = new Intl.NumberFormat(locale, {useGrouping: "auto"});
+ let msg = "locale: " + locale + " useGrouping: 'auto'";
+ assertEquals(3, auto.format(123).length, msg);
+ // In useGrouping: "auto", since these locales has
+ // minimumGroupingDigits{"2"}, no grouping for 4 digits but has grouping
+ // when more than 4 digits..
+ assertEquals(4, auto.format(1234).length, msg);
+ assertEquals(5 + 1, auto.format(12345).length, msg);
+ assertEquals(6 + 1, auto.format(123456).length, msg);
+ assertEquals(7 + 2, auto.format(1234567).length, msg);
+});
diff --git a/deps/v8/test/intl/plural-rules/select-range.js b/deps/v8/test/intl/plural-rules/select-range.js
new file mode 100644
index 0000000000..0b65174230
--- /dev/null
+++ b/deps/v8/test/intl/plural-rules/select-range.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-number-format-v3
+const pl = new Intl.PluralRules("sl");
+assertEquals("few", pl.selectRange(102, 201));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index f04bf19c8c..c324304ac9 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -72,11 +72,13 @@ class TestCase(testcase.D8TestCase):
return self._env
def _get_files_params(self):
- files = map(lambda f: os.path.join(self.suite.root, f), [
+ files = [
+ os.path.join(self.suite.root, f) for f in [
'assert.js',
'utils.js',
self.path + self._get_suffix(),
- ])
+ ]
+ ]
if self._test_config.isolates:
files += ['--isolate'] + files
diff --git a/deps/v8/test/js-perf-test/Array/includes.js b/deps/v8/test/js-perf-test/Array/includes.js
new file mode 100644
index 0000000000..5be93443d9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/includes.js
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+ function make_includes() {
+ return new Function('result = array.includes(target)');
+ }
+
+ createSuite('SmiIncludes', 1000, make_includes(), SmiIncludesSetup);
+ createSuite('SparseSmiIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
+ createSuite('DoubleIncludes', 1000, make_includes(), SmiIncludesSetup);
+ createSuite('SparseDoubleIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
+ createSuite('ObjectIncludes', 1000, make_includes(), SmiIncludesSetup);
+ createSuite('SparseObjectIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
+ createSuite('StringIncludes', 1000, make_includes(), StringIncludesSetup);
+ createSuite('SparseStringIncludes', 1000, make_includes(), SparseStringIncludesSetup);
+
+ function SmiIncludesSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = i;
+ target = array[array_size-1];
+ }
+
+ function SparseSmiIncludesSetup() {
+ SmiIncludesSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function StringIncludesSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = `Item no. ${i}`;
+ target = array[array_size-1];
+ }
+
+ function SparseStringIncludesSetup() {
+ StringIncludesSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function DoubleIncludesSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = i;
+ target = array[array_size-1];
+ }
+
+ function SparseDoubleIncludesSetup() {
+ DoubleIncludesSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function ObjectIncludesSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = {i};
+ target = array[array_size-1];
+ }
+
+ function SparseObjectIncludesSetup() {
+ ObjectIncludesSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ })();
diff --git a/deps/v8/test/js-perf-test/Array/index-of.js b/deps/v8/test/js-perf-test/Array/index-of.js
new file mode 100644
index 0000000000..5e606382b1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/index-of.js
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+ function make_indexOf() {
+ return new Function('result = array.indexOf(target)');
+ }
+
+ createSuite('SmiIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
+ createSuite('SparseSmiIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
+ createSuite('DoubleIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
+ createSuite('SparseDoubleIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
+ createSuite('ObjectIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
+ createSuite('SparseObjectIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
+ createSuite('StringIndexOf', 1000, make_indexOf(), StringIndexOfSetup);
+ createSuite('SparseStringIndexOf', 1000, make_indexOf(), SparseStringIndexOfSetup);
+
+ function SmiIndexOfSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = i;
+ target = array[array_size-1];
+ }
+
+ function SparseSmiIndexOfSetup() {
+ SmiIndexOfSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function StringIndexOfSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = `Item no. ${i}`;
+ target = array[array_size-1];
+ }
+
+ function SparseStringIndexOfSetup() {
+ StringIndexOfSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function DoubleIndexOfSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = i;
+ target = array[array_size-1];
+ }
+
+ function SparseDoubleIndexOfSetup() {
+ DoubleIndexOfSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ function ObjectIndexOfSetup() {
+ array = new Array();
+ for (let i = 0; i < array_size; ++i) array[i] = {i};
+ target = array[array_size-1];
+ }
+
+ function SparseObjectIndexOfSetup() {
+ ObjectIndexOfSetup();
+ array.length = array.length * 2;
+ target = array[array_size-1];
+ }
+
+ })();
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index c73f5531a2..a7b48f0b17 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -10,6 +10,7 @@ let array;
let func = 0;
let this_arg;
let result;
+let target;
const array_size = 100;
const max_index = array_size - 1;
// Matches what {FastSetup} below produces.
@@ -34,34 +35,46 @@ function MakeHoley(array) {
}
function SmiSetup() {
- array = Array.from({ length: array_size }, (_, i) => i);
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(i);
+ // TODO(v8:10105): May still create holey arrays (allocation sites?).
+ // assert(%HasFastPackedElements(array));
assert(%HasSmiElements(array));
}
function HoleySmiSetup() {
- SmiSetup();
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(i);
MakeHoley(array);
assert(%HasSmiElements(array));
}
function DoubleSetup() {
- array = Array.from({ length: array_size }, (_, i) => i + 0.5);
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(i + 0.5);
+ // TODO(v8:10105): May still create holey arrays (allocation sites?).
+ // assert(%HasFastPackedElements(array));
assert(%HasDoubleElements(array));
}
function HoleyDoubleSetup() {
- DoubleSetup();
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(i + 0.5);
MakeHoley(array);
assert(%HasDoubleElements(array));
}
function FastSetup() {
- array = Array.from({ length: array_size }, (_, i) => `value ${i}`);
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(`value ${i}`);
+ // TODO(v8:10105): May still create holey arrays (allocation sites?).
+ // assert(%HasFastPackedElements(array));
assert(%HasObjectElements(array));
}
function HoleyFastSetup() {
- FastSetup();
+ array = [];
+ for (let i = 0; i < array_size; i++) array.push(`value ${i}`);
MakeHoley(array);
assert(%HasObjectElements(array));
}
@@ -129,6 +142,8 @@ d8.file.execute('join.js');
d8.file.execute('to-string.js');
d8.file.execute('slice.js');
d8.file.execute('copy-within.js');
+d8.file.execute('index-of.js')
+d8.file.execute('includes.js')
var success = true;
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js b/deps/v8/test/js-perf-test/BytecodeHandlers/GetKeyedProperty.js
index b0da481a4c..b0da481a4c 100644
--- a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/GetKeyedProperty.js
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js b/deps/v8/test/js-perf-test/BytecodeHandlers/GetNamedProperty.js
index c924db84c6..c924db84c6 100644
--- a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/GetNamedProperty.js
diff --git a/deps/v8/test/js-perf-test/JSTests2.json b/deps/v8/test/js-perf-test/JSTests2.json
index 0933c7da07..0ef7e4cc04 100644
--- a/deps/v8/test/js-perf-test/JSTests2.json
+++ b/deps/v8/test/js-perf-test/JSTests2.json
@@ -60,7 +60,8 @@
"resources": [
"filter.js", "map.js", "every.js", "join.js", "some.js", "reduce.js",
"reduce-right.js", "to-string.js", "find.js", "find-index.js",
- "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js"
+ "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js",
+ "index-of.js", "includes.js"
],
"flags": [
"--allow-natives-syntax"
@@ -181,7 +182,23 @@
{"name": "SmiCopyWithin"},
{"name": "StringCopyWithin"},
{"name": "SparseSmiCopyWithin"},
- {"name": "SparseStringCopyWithin"}
+ {"name": "SparseStringCopyWithin"},
+ {"name": "SmiIndexOf"},
+ {"name": "SparseSmiIndexOf"},
+ {"name": "DoubleIndexOf"},
+ {"name": "SparseDoubleIndexOf"},
+ {"name": "ObjectIndexOf"},
+ {"name": "SparseObjectIndexOf"},
+ {"name": "StringIndexOf"},
+ {"name": "SparseStringIncludes"},
+ {"name": "SmiIncludes"},
+ {"name": "SparseSmiIncludes"},
+ {"name": "DoubleIncludes"},
+ {"name": "SparseDoubleIncludes"},
+ {"name": "ObjectIncludes"},
+ {"name": "SparseObjectIncludes"},
+ {"name": "StringIncludes"},
+ {"name": "SparseStringIncludes"}
]
}
]
diff --git a/deps/v8/test/js-perf-test/JSTests3.json b/deps/v8/test/js-perf-test/JSTests3.json
index 98b24028ae..244a7e728d 100644
--- a/deps/v8/test/js-perf-test/JSTests3.json
+++ b/deps/v8/test/js-perf-test/JSTests3.json
@@ -422,10 +422,10 @@
]
},
{
- "name": "LdaNamedProperty",
+ "name": "GetNamedProperty",
"main": "run.js",
- "resources": [ "LdaNamedProperty.js" ],
- "test_flags": [ "LdaNamedProperty" ],
+ "resources": [ "GetNamedProperty.js" ],
+ "test_flags": [ "GetNamedProperty" ],
"results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
"tests": [
{"name": "Smi-Value"},
@@ -433,10 +433,10 @@
]
},
{
- "name": "LdaKeyedProperty",
+ "name": "GetKeyedProperty",
"main": "run.js",
- "resources": [ "LdaKeyedProperty.js" ],
- "test_flags": [ "LdaKeyedProperty" ],
+ "resources": [ "GetKeyedProperty.js" ],
+ "test_flags": [ "GetKeyedProperty" ],
"results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
"tests": [
{"name": "Object-Lookup-String-Constant"},
diff --git a/deps/v8/test/message/README.md b/deps/v8/test/message/README.md
index ba36b14bfe..6cab8b0cf8 100644
--- a/deps/v8/test/message/README.md
+++ b/deps/v8/test/message/README.md
@@ -11,10 +11,6 @@ foo.js
foo.out
```
-**All tests must end with an exception**. The test runner does not
-handle output from multiple runs, e.g., `--stress-opt`. Without an exception,
-the output will be generated several times and the comparison will fail.
-
You can use a regex in the expected output instead of the exact
path:
diff --git a/deps/v8/test/message/asm-assignment-undeclared.js b/deps/v8/test/message/asm-assignment-undeclared.js
index f7f530b5a2..28a2373893 100644
--- a/deps/v8/test/message/asm-assignment-undeclared.js
+++ b/deps/v8/test/message/asm-assignment-undeclared.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-function-mismatch-def.js b/deps/v8/test/message/asm-function-mismatch-def.js
index 84b46af397..e464bff60d 100644
--- a/deps/v8/test/message/asm-function-mismatch-def.js
+++ b/deps/v8/test/message/asm-function-mismatch-def.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because use of {g} in {f} has return type different from {g}.
diff --git a/deps/v8/test/message/asm-function-mismatch-use.js b/deps/v8/test/message/asm-function-mismatch-use.js
index 0f0935af88..2aa37a8a45 100644
--- a/deps/v8/test/message/asm-function-mismatch-use.js
+++ b/deps/v8/test/message/asm-function-mismatch-use.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because {g} has return type different from use of {g} in {f}.
diff --git a/deps/v8/test/message/asm-function-redefined.js b/deps/v8/test/message/asm-function-redefined.js
index 77f6aac4d5..fdc1b54581 100644
--- a/deps/v8/test/message/asm-function-redefined.js
+++ b/deps/v8/test/message/asm-function-redefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because symbol {f} is defined as module function twice.
diff --git a/deps/v8/test/message/asm-function-undefined.js b/deps/v8/test/message/asm-function-undefined.js
index ce39409963..b1e789b2dc 100644
--- a/deps/v8/test/message/asm-function-undefined.js
+++ b/deps/v8/test/message/asm-function-undefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-function-variable-collision.js b/deps/v8/test/message/asm-function-variable-collision.js
index fbea44b1ad..c6f3814d2a 100644
--- a/deps/v8/test/message/asm-function-variable-collision.js
+++ b/deps/v8/test/message/asm-function-variable-collision.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because symbol {f} is used as module variable and function.
diff --git a/deps/v8/test/message/asm-import-wrong-annotation.js b/deps/v8/test/message/asm-import-wrong-annotation.js
index 0b57c1a986..ab59ec54e7 100644
--- a/deps/v8/test/message/asm-import-wrong-annotation.js
+++ b/deps/v8/test/message/asm-import-wrong-annotation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module(stdlib, foreign, heap) {
"use asm"
diff --git a/deps/v8/test/message/asm-import-wrong-object.js b/deps/v8/test/message/asm-import-wrong-object.js
index d077e04d91..99e9686061 100644
--- a/deps/v8/test/message/asm-import-wrong-object.js
+++ b/deps/v8/test/message/asm-import-wrong-object.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module(stdlib, foreign, heap) {
"use asm"
diff --git a/deps/v8/test/message/asm-linking-bogus-heap.js b/deps/v8/test/message/asm-linking-bogus-heap.js
index a520dfb282..004a90e77f 100644
--- a/deps/v8/test/message/asm-linking-bogus-heap.js
+++ b/deps/v8/test/message/asm-linking-bogus-heap.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module(stdlib, foreign, heap) {
"use asm"
diff --git a/deps/v8/test/message/asm-linking-bogus-stdlib.js b/deps/v8/test/message/asm-linking-bogus-stdlib.js
index 0a64422440..60cdf45f20 100644
--- a/deps/v8/test/message/asm-linking-bogus-stdlib.js
+++ b/deps/v8/test/message/asm-linking-bogus-stdlib.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module(stdlib, foreign, heap) {
"use asm"
diff --git a/deps/v8/test/message/asm-linking-missing-heap.js b/deps/v8/test/message/asm-linking-missing-heap.js
index a33b59ad19..6e5e9a5856 100644
--- a/deps/v8/test/message/asm-linking-missing-heap.js
+++ b/deps/v8/test/message/asm-linking-missing-heap.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module(stdlib, foreign, heap) {
"use asm"
diff --git a/deps/v8/test/message/asm-missing-parameter-annotation.js b/deps/v8/test/message/asm-missing-parameter-annotation.js
index 64a57a0c03..588b0cc566 100644
--- a/deps/v8/test/message/asm-missing-parameter-annotation.js
+++ b/deps/v8/test/message/asm-missing-parameter-annotation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-missing-return-annotation.js b/deps/v8/test/message/asm-missing-return-annotation.js
index 0f57df8c27..60971ab2cf 100644
--- a/deps/v8/test/message/asm-missing-return-annotation.js
+++ b/deps/v8/test/message/asm-missing-return-annotation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-mismatch-def.js b/deps/v8/test/message/asm-table-mismatch-def.js
index 47e692cc38..607743f57d 100644
--- a/deps/v8/test/message/asm-table-mismatch-def.js
+++ b/deps/v8/test/message/asm-table-mismatch-def.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js {funTable} definition doesn't match the use in {f}.
diff --git a/deps/v8/test/message/asm-table-mismatch-use.js b/deps/v8/test/message/asm-table-mismatch-use.js
index 7615ee4456..6d95bc4a72 100644
--- a/deps/v8/test/message/asm-table-mismatch-use.js
+++ b/deps/v8/test/message/asm-table-mismatch-use.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js {funTable} use in {f} doesn't match its use in {g}.
diff --git a/deps/v8/test/message/asm-table-redefined.js b/deps/v8/test/message/asm-table-redefined.js
index bac6d67e5a..8c9e134788 100644
--- a/deps/v8/test/message/asm-table-redefined.js
+++ b/deps/v8/test/message/asm-table-redefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-undefined.js b/deps/v8/test/message/asm-table-undefined.js
index 8092f56657..7137c96e41 100644
--- a/deps/v8/test/message/asm-table-undefined.js
+++ b/deps/v8/test/message/asm-table-undefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-variable-collision.js b/deps/v8/test/message/asm-table-variable-collision.js
index da8e6195b8..0e7ca5d009 100644
--- a/deps/v8/test/message/asm-table-variable-collision.js
+++ b/deps/v8/test/message/asm-table-variable-collision.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+// Flags: --validate-asm --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/fail/console.js b/deps/v8/test/message/fail/console.js
index d8cbfa28c9..4bd1ccf235 100644
--- a/deps/v8/test/message/fail/console.js
+++ b/deps/v8/test/message/fail/console.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
console.time();
console.timeEnd();
diff --git a/deps/v8/test/message/fail/console.out b/deps/v8/test/message/fail/console.out
index ab1e412be3..92c8c35eb2 100644
--- a/deps/v8/test/message/fail/console.out
+++ b/deps/v8/test/message/fail/console.out
@@ -6,11 +6,11 @@ log more
console.warn: warn 2
console.debug: debug
console.info: info
-console.info: *%(basename)s:24: Error: exception
+console.info: *%(basename)s:22: Error: exception
console.info({ toString: () => {throw new Error("exception");} })
^
Error: exception
- at Object.toString (*%(basename)s:24:39)
+ at Object.toString (*%(basename)s:22:39)
at console.info (<anonymous>)
- at *%(basename)s:24:9
+ at *%(basename)s:22:9
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-1.js b/deps/v8/test/message/fail/data-view-invalid-length-1.js
new file mode 100644
index 0000000000..47a645b853
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-1.js
@@ -0,0 +1,6 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+let t = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]);
+let t2 = new DataView(t.buffer, 8, 1);
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-1.out b/deps/v8/test/message/fail/data-view-invalid-length-1.out
new file mode 100644
index 0000000000..71fff30c85
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-1.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: RangeError: Invalid DataView length 1
+let t2 = new DataView(t.buffer, 8, 1);
+ ^
+RangeError: Invalid DataView length 1
+ at new DataView (<anonymous>)
+ at *%(basename)s:6:10
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-2.js b/deps/v8/test/message/fail/data-view-invalid-length-2.js
new file mode 100644
index 0000000000..53975f92e6
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-2.js
@@ -0,0 +1,6 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+let t = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]);
+let t2 = new DataView(t.buffer, 7, -1);
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-2.out b/deps/v8/test/message/fail/data-view-invalid-length-2.out
new file mode 100644
index 0000000000..b7cc2391dd
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-2.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: RangeError: Invalid DataView length -1
+let t2 = new DataView(t.buffer, 7, -1);
+ ^
+RangeError: Invalid DataView length -1
+ at new DataView (<anonymous>)
+ at *%(basename)s:6:10
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-3.js b/deps/v8/test/message/fail/data-view-invalid-length-3.js
new file mode 100644
index 0000000000..892846e987
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-3.js
@@ -0,0 +1,6 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+let t = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]);
+let t2 = new DataView(t.buffer, 7, {valueOf() { return -1; }});
diff --git a/deps/v8/test/message/fail/data-view-invalid-length-3.out b/deps/v8/test/message/fail/data-view-invalid-length-3.out
new file mode 100644
index 0000000000..76a8b9e7ca
--- /dev/null
+++ b/deps/v8/test/message/fail/data-view-invalid-length-3.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: RangeError: Invalid DataView length -1
+let t2 = new DataView(t.buffer, 7, {valueOf() { return -1; }});
+ ^
+RangeError: Invalid DataView length -1
+ at new DataView (<anonymous>)
+ at *%(basename)s:6:10
diff --git a/deps/v8/test/message/fail/settimeout.js b/deps/v8/test/message/fail/settimeout.js
index 59ed1c6517..6f3066447b 100644
--- a/deps/v8/test/message/fail/settimeout.js
+++ b/deps/v8/test/message/fail/settimeout.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
var state = 0;
function inc() {
console.log("increment state");
diff --git a/deps/v8/test/message/fail/settimeout.out b/deps/v8/test/message/fail/settimeout.out
index 7951436fdf..8b9822daa5 100644
--- a/deps/v8/test/message/fail/settimeout.out
+++ b/deps/v8/test/message/fail/settimeout.out
@@ -6,8 +6,8 @@ increment state
current state: 2
increment state
current state: 3
-*%(basename)s:19: Error
+*%(basename)s:17: Error
setTimeout(function() { throw new Error(); });
^
Error
- at *%(basename)s:19:35
+ at *%(basename)s:17:35
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 0cd2932535..8f7bbed28e 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -37,12 +37,11 @@
'fail/modules-skip*': [SKIP],
}], # ALWAYS
-# Liftoff is currently only sufficiently implemented on x64, ia32, arm, arm64.
-# TODO(clemensb): Implement on all other platforms (crbug.com/v8/6600).
-['arch != x64 and arch != ia32 and arch != arm64 and arch != arm', {
+# Skip Liftoff tests on platforms that don't support Liftoff.
+['arch != x64 and arch != ia32 and arch != arm64 and arch != arm and arch != s390x', {
'wasm-trace-memory-liftoff': [SKIP],
'wasm-trace-liftoff': [SKIP],
-}], # arch != x64 and arch != ia32 and arch != arm64 and arch != arm
+}], # arch != x64 and arch != ia32 and arch != arm64 and arch != arm and arch != s390x
['variant == code_serializer', {
# Code serializer output is incompatible with all message tests
diff --git a/deps/v8/test/message/mjsunit/fail/assert_not_same.out b/deps/v8/test/message/mjsunit/fail/assert_not_same.out
index f0000f5ed3..ebab9fc418 100644
--- a/deps/v8/test/message/mjsunit/fail/assert_not_same.out
+++ b/deps/v8/test/message/mjsunit/fail/assert_not_same.out
@@ -3,7 +3,7 @@ test/mjsunit/mjsunit.js:{NUMBER}: Failure: expected <not same as 1> found <1>
Stack: MjsUnitAssertionError
at assertNotSame *mjsunit.js {NUMBER}:{NUMBER}
at *%(basename)s 7:1
- throw new MjsUnitAssertionError(message);
+ throw new MjsUnitAssertionError(
^
MjsUnitAssertionError
at assertNotSame *mjsunit.js {NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/mjsunit/fail/assert_true.out b/deps/v8/test/message/mjsunit/fail/assert_true.out
index 055cac4cde..a2898798fc 100644
--- a/deps/v8/test/message/mjsunit/fail/assert_true.out
+++ b/deps/v8/test/message/mjsunit/fail/assert_true.out
@@ -3,7 +3,7 @@ test/mjsunit/mjsunit.js:{NUMBER}: Failure: expected <true> found <false>
Stack: MjsUnitAssertionError
at assertTrue *mjsunit.js {NUMBER}:{NUMBER}
at *%(basename)s 7:1
- throw new MjsUnitAssertionError(message);
+ throw new MjsUnitAssertionError(
^
MjsUnitAssertionError
at assertTrue *mjsunit.js {NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/mjsunit/success/assert-promise-result-rejects.js b/deps/v8/test/message/mjsunit/success/assert-promise-result-rejects.js
index cab77e1643..31626f5e39 100644
--- a/deps/v8/test/message/mjsunit/success/assert-promise-result-rejects.js
+++ b/deps/v8/test/message/mjsunit/success/assert-promise-result-rejects.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
d8.file.execute("test/mjsunit/mjsunit.js");
let obj = {f: 6254};
diff --git a/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves-empty.js b/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves-empty.js
index a49a42fe65..558359d270 100644
--- a/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves-empty.js
+++ b/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves-empty.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
d8.file.execute("test/mjsunit/mjsunit.js");
let resolve_handler;
diff --git a/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves.js b/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves.js
index 105aa2abd5..efd591167b 100644
--- a/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves.js
+++ b/deps/v8/test/message/mjsunit/success/assert-promise-result-resolves.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
d8.file.execute("test/mjsunit/mjsunit.js");
let obj = {f: 1254};
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 7bc5ddf52f..6801c76c5d 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -53,7 +53,8 @@ class TestCase(testcase.D8TestCase):
self._base_path = os.path.join(self.suite.root, self.path)
source = self.get_source()
self._source_files = self._parse_source_files(source)
- self._source_flags = self._parse_source_flags(source)
+ # Do not stress-opt message tests, since that changes the output.
+ self._source_flags = ['--no-stress-opt'] + self._parse_source_flags(source)
def _parse_source_files(self, source):
files = []
diff --git a/deps/v8/test/message/wasm-finish-compilation.js b/deps/v8/test/message/wasm-finish-compilation.js
index a85808dc02..55096b2cf4 100644
--- a/deps/v8/test/message/wasm-finish-compilation.js
+++ b/deps/v8/test/message/wasm-finish-compilation.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt
-
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
// Test that d8 does not terminate until wasm compilation has finished and the
diff --git a/deps/v8/test/message/wasm-function-name-async.js b/deps/v8/test/message/wasm-function-name-async.js
index e10ed54511..4496c434cc 100644
--- a/deps/v8/test/message/wasm-function-name-async.js
+++ b/deps/v8/test/message/wasm-function-name-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --no-stress-opt
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/mjsunit.js');
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/message/wasm-function-name-streaming.js b/deps/v8/test/message/wasm-function-name-streaming.js
index 5537193c10..fdad9a06ec 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.js
+++ b/deps/v8/test/message/wasm-function-name-streaming.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --wasm-test-streaming --no-stress-opt
+// Flags: --expose-wasm --wasm-test-streaming
d8.file.execute("test/message/wasm-function-name-async.js");
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.js b/deps/v8/test/message/wasm-module-and-function-name-async.js
index 4eec0398d1..e461e81b31 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.js
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --no-stress-opt
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/mjsunit.js');
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.js b/deps/v8/test/message/wasm-module-and-function-name-streaming.js
index 675d253a92..a9f6b04015 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.js
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --wasm-test-streaming --no-stress-opt
+// Flags: --expose-wasm --wasm-test-streaming
d8.file.execute("test/message/wasm-module-and-function-name-async.js");
diff --git a/deps/v8/test/message/wasm-module-name-async.js b/deps/v8/test/message/wasm-module-name-async.js
index a483f8a7fe..b43a47bb2e 100644
--- a/deps/v8/test/message/wasm-module-name-async.js
+++ b/deps/v8/test/message/wasm-module-name-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --no-stress-opt
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/mjsunit.js');
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/message/wasm-module-name-streaming.js b/deps/v8/test/message/wasm-module-name-streaming.js
index a86b55134a..1898b585dc 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.js
+++ b/deps/v8/test/message/wasm-module-name-streaming.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --wasm-test-streaming --no-stress-opt
+// Flags: --expose-wasm --wasm-test-streaming
d8.file.execute("test/message/wasm-module-name-async.js");
diff --git a/deps/v8/test/message/wasm-no-name-async.js b/deps/v8/test/message/wasm-no-name-async.js
index b470b94397..655a38b216 100644
--- a/deps/v8/test/message/wasm-no-name-async.js
+++ b/deps/v8/test/message/wasm-no-name-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --no-stress-opt
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/mjsunit.js');
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/message/wasm-no-name-streaming.js b/deps/v8/test/message/wasm-no-name-streaming.js
index 8841f7b0e5..fbccc30011 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.js
+++ b/deps/v8/test/message/wasm-no-name-streaming.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --wasm-test-streaming --no-stress-opt
+// Flags: --expose-wasm --wasm-test-streaming
d8.file.execute("test/message/wasm-no-name-async.js");
diff --git a/deps/v8/test/message/wasm-trace-liftoff.js b/deps/v8/test/message/wasm-trace-liftoff.js
index 1618395c64..4cf5d88f78 100644
--- a/deps/v8/test/message/wasm-trace-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-liftoff.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --trace-wasm --no-wasm-tier-up --liftoff --no-stress-opt
+// Flags: --trace-wasm --no-wasm-tier-up --liftoff
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.js b/deps/v8/test/message/wasm-trace-memory-liftoff.js
index d6d5689312..6261278486 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --trace-wasm-memory --liftoff
+// Flags: --trace-wasm-memory --liftoff
// Flags: --no-wasm-tier-up --experimental-wasm-simd
// Force enable sse3 and sse4-1, since that will determine which execution tier
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index cfab758b75..670192e204 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --trace-wasm-memory --no-liftoff
+// Flags: --trace-wasm-memory --no-liftoff
// Flags: --experimental-wasm-simd
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/message/wasm-trace-turbofan.js b/deps/v8/test/message/wasm-trace-turbofan.js
index ab1cb7e372..a7f21e2150 100644
--- a/deps/v8/test/message/wasm-trace-turbofan.js
+++ b/deps/v8/test/message/wasm-trace-turbofan.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --trace-wasm --no-liftoff --no-stress-opt
+// Flags: --trace-wasm --no-liftoff
d8.file.execute('test/message/wasm-trace-liftoff.js')
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.js b/deps/v8/test/message/weakref-finalizationregistry-error.js
index 42f5eb3bc0..369f78ffc2 100644
--- a/deps/v8/test/message/weakref-finalizationregistry-error.js
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --expose-gc --noincremental-marking
-// Flags: --no-stress-opt
// Since cleanup tasks are top-level tasks, errors thrown from them don't stop
// future cleanup tasks from running.
diff --git a/deps/v8/test/mjsunit/BUILD.gn b/deps/v8/test/mjsunit/BUILD.gn
index 7b828aaa36..b749aa43df 100644
--- a/deps/v8/test/mjsunit/BUILD.gn
+++ b/deps/v8/test/mjsunit/BUILD.gn
@@ -13,14 +13,15 @@ group("v8_mjsunit") {
data = [
"./",
"../../tools/arguments.mjs",
- "../../tools/clusterfuzz/v8_mock.js",
- "../../tools/clusterfuzz/v8_mock_archs.js",
- "../../tools/clusterfuzz/v8_mock_webassembly.js",
+ "../../tools/clusterfuzz/foozzie/v8_mock.js",
+ "../../tools/clusterfuzz/foozzie/v8_mock_archs.js",
+ "../../tools/clusterfuzz/foozzie/v8_mock_webassembly.js",
"../../tools/codemap.mjs",
"../../tools/sourcemap.mjs",
"../../tools/consarray.mjs",
"../../tools/csvparser.mjs",
"../../tools/dumpcpp.mjs",
+ "../../tools/js/helper.mjs",
"../../tools/logreader.mjs",
"../../tools/profile.mjs",
"../../tools/profile_view.mjs",
diff --git a/deps/v8/test/mjsunit/asm/asm-validation.js b/deps/v8/test/mjsunit/asm/asm-validation.js
index c15e019932..b682af9f65 100644
--- a/deps/v8/test/mjsunit/asm/asm-validation.js
+++ b/deps/v8/test/mjsunit/asm/asm-validation.js
@@ -8,7 +8,7 @@
// valid asm.js and then break them with invalid instantiation arguments. If
// this script is run more than once (e.g. --stress-opt) then modules remain
// broken in the second run and assertions would fail. We prevent re-runs.
-// Flags: --nostress-opt
+// Flags: --no-stress-opt
function assertValidAsm(func) {
assertTrue(%IsAsmWasmCode(func));
@@ -533,3 +533,29 @@ function assertValidAsm(func) {
/Uint8Array is not a constructor/);
assertFalse(%IsAsmWasmCode(regress1068355));
})();
+
+(function TestTooManyParametersToImport() {
+ function MakeModule(num_arguments) {
+ let template = `
+ 'use asm';
+ var imported = foreign.imported;
+ function main() {
+ imported(ARGS);
+ }
+
+ return main;
+ `;
+ let args = new Array(num_arguments).fill('0').join(', ');
+ return new Function('stdlib', 'foreign', template.replace('ARGS', args));
+ }
+
+ // V8 has an internal limit of 1000 parameters (see wasm-limits.h).
+ let Module1000Params = MakeModule(1000);
+ let Module1001Params = MakeModule(1001);
+
+ Module1000Params({}, {imported: i => i});
+ Module1001Params({}, {imported: i => i});
+
+ assertTrue(%IsAsmWasmCode(Module1000Params));
+ assertFalse(%IsAsmWasmCode(Module1001Params));
+})();
diff --git a/deps/v8/test/mjsunit/baseline/batch-compilation.js b/deps/v8/test/mjsunit/baseline/batch-compilation.js
index 31aaa3f240..e739c177be 100644
--- a/deps/v8/test/mjsunit/baseline/batch-compilation.js
+++ b/deps/v8/test/mjsunit/baseline/batch-compilation.js
@@ -5,7 +5,7 @@
// Flags: --sparkplug --no-always-sparkplug --sparkplug-filter="test*"
// Flags: --allow-natives-syntax --expose-gc --no-always-opt
// Flags: --baseline-batch-compilation --baseline-batch-compilation-threshold=200
-// Flags: --scale-factor-for-feedback-allocation=4
+// Flags: --interrupt-budget-factor-for-feedback-allocation=4
// Flags: --no-concurrent-sparkplug
// Flags to drive Fuzzers into the right direction
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
index f230dad80a..1388de7c10 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
@@ -14,7 +14,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertFalse(foo());
})();
@@ -28,7 +28,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertFalse(foo());
})();
@@ -42,7 +42,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertFalse(foo());
})();
@@ -56,7 +56,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertFalse(foo());
})();
@@ -70,7 +70,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertFalse(foo());
})();
@@ -86,7 +86,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -107,7 +107,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -128,7 +128,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -149,7 +149,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(null));
@@ -173,7 +173,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(null));
diff --git a/deps/v8/test/mjsunit/compiler/bound-functions-serialize.js b/deps/v8/test/mjsunit/compiler/bound-functions-serialize.js
index 319c9aa738..0f55cc920c 100644
--- a/deps/v8/test/mjsunit/compiler/bound-functions-serialize.js
+++ b/deps/v8/test/mjsunit/compiler/bound-functions-serialize.js
@@ -1,9 +1,9 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --opt --noalways-opt
-// Flags: --allow-natives-syntax --concurrent-inlining
-// Flags: --opt --noalways-opt --noturboprop
class C {};
const c = new C;
const getPrototypeOf = Object.getPrototypeOf;
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
index 143233fc39..8a2fd22729 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
@@ -38,7 +38,7 @@
assertTrue(sum_js_got_interpreted);
// The protector should be invalidated, which prevents inlining.
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('AxB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
index 4173efd690..7ffa65af88 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
@@ -38,7 +38,7 @@
assertEquals('AundefinedB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('AundefinedB', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
assertOptimized(foo);
@@ -49,7 +49,7 @@
// Now the call will not be inlined.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('AxB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
index fb3e11d591..5614a3530c 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
@@ -42,7 +42,7 @@
assertTrue(sum_js_got_interpreted);
// Compile function foo; inlines 'sum_js' into 'foo'.
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(78, foo(26, 6, 46, null));
assertOptimized(foo);
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
index c8c012320b..0004efaeb3 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
@@ -49,8 +49,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionForTopTier(log);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(log);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(42, foo());
// The call with spread should not have been inlined, because of the
// generator/iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
index 1dedca93f0..179fb51681 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
@@ -49,8 +49,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionForTopTier(log);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(log);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(42, foo());
// The call with spread should not have been inlined, because of the
// generator/iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
index f41c9333b7..c73ed3c2ab 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
@@ -41,8 +41,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionForTopTier(log);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(log);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
// The call with spread should have been inlined.
assertFalse(log_got_interpreted);
@@ -63,7 +63,7 @@
// Recompile 'foo'.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(42, foo());
// The call with spread will not be inlined because we have redefined the
// array iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
index a97984aa0d..7ac66876cd 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
@@ -34,7 +34,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertOptimized(foo);
@@ -74,7 +74,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('AundefinedB', foo('A', 'B'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('AundefinedB', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -96,7 +96,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals(45.31, foo(16.11, 26.06));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
// This is expected to deoptimize
@@ -107,7 +107,7 @@
// Optimize again
%PrepareFunctionForOptimization(foo);
assertEquals(45.31, foo(16.11, 26.06));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
// This should stay optimized, but with the call not inlined.
@@ -134,7 +134,7 @@
%PrepareFunctionForOptimization(foo);
// Here array size changes.
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
// Here it should deoptimize.
assertEquals('abc', foo('a', 'b', 'c'));
@@ -142,7 +142,7 @@
assertTrue(sum_js_got_interpreted);
// Now speculation mode prevents the optimization.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('abc', foo('a', 'b', 'c'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
@@ -163,7 +163,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals(56.34, foo(11.03, 16.11, 26.06));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals(56.34, foo(11.03, 16.11, 26.06));
assertFalse(sum_js_got_interpreted);
@@ -185,7 +185,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(42, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(got_interpreted);
assertEquals(42, foo());
assertFalse(got_interpreted);
@@ -212,7 +212,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(44, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(got_interpreted);
assertEquals(44, foo());
assertTrue(got_interpreted);
@@ -239,7 +239,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -261,7 +261,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(42, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(got_interpreted);
assertEquals(42, foo());
assertFalse(got_interpreted);
@@ -288,7 +288,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(44, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(got_interpreted);
assertEquals(44, foo());
assertTrue(got_interpreted);
@@ -316,7 +316,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -339,7 +339,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -365,7 +365,7 @@
%PrepareFunctionForOptimization(max);
%PrepareFunctionForOptimization(foo);
assertEquals(5, foo(1, 2, 3));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(max_got_interpreted);
assertEquals(5, foo(1, 2, 3));
assertTrue(max_got_interpreted);
@@ -394,7 +394,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abccba', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abccba', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -421,7 +421,7 @@
len = 0;
%PrepareFunctionForOptimization(foo);
assertEquals(3, foo(1, 2, 3));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(3, foo(1, 2, 3));
assertOptimized(foo);
// Deoptimize when input of Math.max is not number
@@ -432,7 +432,7 @@
len = 2;
%PrepareFunctionForOptimization(foo1);
assertEquals(3, foo1(1, 2, 3));
- %OptimizeFunctionForTopTier(foo1);
+ %OptimizeFunctionOnNextCall(foo1);
assertEquals(3, foo1(1, 2, 3));
//Deoptimize when array length changes
assertUnoptimized(foo1);
@@ -458,7 +458,7 @@
len = 0;
%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1, 2, 3));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(1, 2, 3));
assertOptimized(foo);
// Deoptimzie when input of Math.max is not number
@@ -469,7 +469,7 @@
len = 2;
%PrepareFunctionForOptimization(foo1);
assertEquals(3, foo1(1, 2, 3));
- %OptimizeFunctionForTopTier(foo1);
+ %OptimizeFunctionOnNextCall(foo1);
assertEquals(3, foo1(1, 2, 3));
assertOptimized(foo1);
// No Deoptimization when array length changes
@@ -497,8 +497,8 @@
%PrepareFunctionForOptimization(foo_closure);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionForTopTier(foo_closure);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo_closure);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
@@ -523,7 +523,7 @@
assertEquals(166, foo(40, 42, 44));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(166, foo(40, 42, 44));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -547,7 +547,7 @@
assertEquals(166, foo(40, 42, 44));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(166, foo(40, 42, 44));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -570,7 +570,7 @@
assertEquals('42abc', foo('a', 'b', 'c'));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('42abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -593,7 +593,7 @@
assertEquals('45abc', foo('a', 'b', 'c'));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('45abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -614,7 +614,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('ABundefined3', foo('A', 'B'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('ABundefined3', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -636,7 +636,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -659,7 +659,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('ABundefined3', foo('A', 'B'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('ABundefined3', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -682,7 +682,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -704,7 +704,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abcde', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abcde', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -729,7 +729,7 @@
assertTrue(sum_js_got_interpreted);
// The call is not inlined with CreateArguments.
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals('abc', foo('a', 'b', 'c'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
@@ -757,7 +757,7 @@
assertTrue(sum_js_got_interpreted);
// Optimization also works if the call is in an inlined function.
- %OptimizeFunctionForTopTier(bar);
+ %OptimizeFunctionOnNextCall(bar);
assertEquals('cba', bar('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/compiler/catch-block-load.js b/deps/v8/test/mjsunit/compiler/catch-block-load.js
index 573195d44e..c753b2aaa0 100644
--- a/deps/v8/test/mjsunit/compiler/catch-block-load.js
+++ b/deps/v8/test/mjsunit/compiler/catch-block-load.js
@@ -31,7 +31,7 @@ function boom() {
foo();
foo();
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
foo();
})();
@@ -62,6 +62,6 @@ function boom() {
foo();
foo();
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
foo();
})();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
index b5cdafd26f..9cbdbc863f 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
@@ -22,5 +22,5 @@ function foo() { return %TurbofanStaticAssert(bar(global)); }
bar({gaga() {}});
foo();
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
index 4c35303986..e3e63d195c 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
@@ -22,5 +22,5 @@ function foo(obj) { obj.gaga; %TurbofanStaticAssert(bar(obj)); }
bar({gaga() {}});
foo(global);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(global);
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
index 875f9be756..e824cabda6 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
@@ -12,6 +12,6 @@ function foo(x) {
%PrepareFunctionForOptimization(foo);
foo(121);
foo(122);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(123);
})();
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
index 15c5aab560..0f8891769b 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
@@ -12,7 +12,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a.length = 1;
@@ -28,7 +28,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -44,7 +44,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -58,7 +58,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -72,7 +72,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -87,7 +87,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -101,7 +101,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -115,7 +115,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
diff --git a/deps/v8/test/mjsunit/compiler/construct-bound-function.js b/deps/v8/test/mjsunit/compiler/construct-bound-function.js
index 6f7f5696ff..94abd80cfb 100644
--- a/deps/v8/test/mjsunit/compiler/construct-bound-function.js
+++ b/deps/v8/test/mjsunit/compiler/construct-bound-function.js
@@ -30,5 +30,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/construct-object.js b/deps/v8/test/mjsunit/compiler/construct-object.js
index 5d3b8a7952..f074781bfc 100644
--- a/deps/v8/test/mjsunit/compiler/construct-object.js
+++ b/deps/v8/test/mjsunit/compiler/construct-object.js
@@ -26,5 +26,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/construct-receiver.js b/deps/v8/test/mjsunit/compiler/construct-receiver.js
index be937748fd..e030745a25 100644
--- a/deps/v8/test/mjsunit/compiler/construct-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/construct-receiver.js
@@ -25,5 +25,5 @@ new class extends C { constructor() { super(); this.c = 1 } }
new class extends C { constructor() { super(); this.d = 1 } }
foo();
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-pretenure.js b/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
index 7823dc94fa..b5a49a6ef2 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
// Flags: --allocation-site-pretenuring --stress-gc-during-compilation
-// Flags: --stress-scavenge=0
+// Flags: --stress-scavenge=0 --gc-interval=-1
// Flags: --max-optimized-bytecode-size=132000
function CheckOptimizationStatus(func, expectedOptimizationStatus) {
diff --git a/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
index 06390565fb..4bce475c0d 100644
--- a/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
+++ b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
@@ -18,5 +18,5 @@ function foo(cond, v1, v2) {
%PrepareFunctionForOptimization(foo);
foo(1, 10, 20); foo(2, 30, 40);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(1, 10, 20); foo(2, 30, 40);
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls-wasm.js b/deps/v8/test/mjsunit/compiler/fast-api-calls-wasm.js
new file mode 100644
index 0000000000..8f01bf8940
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls-wasm.js
@@ -0,0 +1,141 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-fast-api-calls --expose-fast-api
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+assertThrows(() => d8.test.FastCAPI());
+const fast_c_api = new d8.test.FastCAPI();
+
+function buildWasm(name, sig, body) {
+ const builder = new WasmModuleBuilder();
+ const add_all_no_options = builder.addImport(
+ 'fast_c_api',
+ 'add_all_no_options',
+ makeSig(
+ [kWasmI32, kWasmI32, kWasmI32, kWasmI64, kWasmI64, kWasmF32, kWasmF64],
+ [kWasmF64],
+ ),
+ );
+ const add_all_no_options_mismatch = builder.addImport(
+ 'fast_c_api',
+ 'add_all_no_options',
+ makeSig(
+ [kWasmI32, kWasmI32, kWasmI32, kWasmI64, kWasmF32, kWasmI64, kWasmF64],
+ [kWasmF64],
+ ),
+ );
+ const add_all_nested_bound = builder.addImport(
+ 'fast_c_api',
+ 'add_all_nested_bound',
+ makeSig(
+ [kWasmI32, kWasmI32, kWasmI32, kWasmI64, kWasmI64, kWasmF32, kWasmF64],
+ [kWasmF64],
+ ),
+ );
+ builder
+ .addFunction(name, sig)
+ .addBody(body({
+ add_all_no_options,
+ add_all_no_options_mismatch,
+ add_all_nested_bound,
+ }))
+ .exportFunc();
+ const x = {};
+ const module = builder.instantiate({
+ fast_c_api: {
+ add_all_no_options: fast_c_api.add_all_no_options.bind(fast_c_api),
+ add_all_no_options_mismatch: fast_c_api.add_all_no_options.bind(fast_c_api),
+ add_all_nested_bound: fast_c_api.add_all_no_options
+ .bind(fast_c_api)
+ .bind(x),
+ },
+ });
+ return module.exports[name];
+}
+
+// ----------- add_all -----------
+// `add_all` has the following signature:
+// double add_all(bool /*should_fallback*/, int32_t, uint32_t,
+// int64_t, uint64_t, float, double)
+
+const max_safe_float = 2**24 - 1;
+const add_all_result = -42 + 45 + Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
+ max_safe_float * 0.5 + Math.PI;
+
+const add_all_wasm = buildWasm(
+ 'add_all_wasm', makeSig([], [kWasmF64]),
+ ({ add_all_no_options }) => [
+ ...wasmI32Const(0),
+ ...wasmI32Const(-42),
+ ...wasmI32Const(45),
+ kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x70, // Number.MIN_SAFE_INTEGER
+ kExprI64Const, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, // Number.MAX_SAFE_INTEGER
+ ...wasmF32Const(max_safe_float * 0.5),
+ ...wasmF64Const(Math.PI),
+ kExprCallFunction, add_all_no_options,
+ kExprReturn,
+ ],
+);
+
+if (fast_c_api.supports_fp_params) {
+ // Test wasm hits fast path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all_wasm());
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(0, fast_c_api.slow_call_count());
+} else {
+ // Test wasm hits slow path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all_wasm());
+ assertEquals(0, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+}
+
+// ----------- Test add_all signature mismatch -----------
+
+const add_all_mismatch_wasm = buildWasm(
+ 'add_all_mismatch_wasm', makeSig([], [kWasmF64]),
+ ({ add_all_no_options_mismatch }) => [
+ ...wasmI32Const(0),
+ ...wasmI32Const(45),
+ ...wasmI32Const(-42),
+ kExprI64Const, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, // Number.MAX_SAFE_INTEGER
+ ...wasmF32Const(max_safe_float * 0.5),
+ kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x70, // Number.MIN_SAFE_INTEGER
+ ...wasmF64Const(Math.PI),
+ kExprCallFunction, add_all_no_options_mismatch,
+ kExprReturn,
+ ],
+);
+
+// Test that wasm takes slow path.
+fast_c_api.reset_counts();
+add_all_mismatch_wasm();
+assertEquals(0, fast_c_api.fast_call_count());
+assertEquals(1, fast_c_api.slow_call_count());
+
+// ----------- Test add_all nested bound function -----------
+
+const add_all_nested_bound_wasm = buildWasm(
+ 'add_all_nested_bound_wasm', makeSig([], [kWasmF64]),
+ ({ add_all_nested_bound }) => [
+ ...wasmI32Const(0),
+ ...wasmI32Const(-42),
+ ...wasmI32Const(45),
+ kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x70, // Number.MIN_SAFE_INTEGER
+ kExprI64Const, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, // Number.MAX_SAFE_INTEGER
+ ...wasmF32Const(max_safe_float * 0.5),
+ ...wasmF64Const(Math.PI),
+ kExprCallFunction, add_all_nested_bound,
+ kExprReturn,
+ ],
+);
+
+// Test wasm hits slow path.
+fast_c_api.reset_counts();
+assertEquals(add_all_result, add_all_nested_bound_wasm());
+assertEquals(0, fast_c_api.fast_call_count());
+assertEquals(1, fast_c_api.slow_call_count());
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
index deb914dc20..65cb16ffd9 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
@@ -19,7 +19,7 @@ inline_polymorphic(make_closure());
// Compile using top tier since we need value numbering phase for the
// TurbofanStaticAssert to deduce answer is 42 at compile time. In Turboprop
// this phase is disabled.
-%OptimizeFunctionForTopTier(inline_polymorphic);
+%OptimizeFunctionOnNextCall(inline_polymorphic);
inline_polymorphic(make_closure());
try {
diff --git a/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js b/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
index 269a1c184e..cc93eede86 100644
--- a/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
+++ b/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
@@ -13,5 +13,5 @@ function foo() { return %IsBeingInterpreted(); }
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionForTopTier(bar);
+%OptimizeFunctionOnNextCall(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js b/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
index 534b50871d..9d996eb94a 100644
--- a/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
+++ b/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
@@ -13,5 +13,5 @@ function foo() { return %IsBeingInterpreted(); }
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionForTopTier(bar);
+%OptimizeFunctionOnNextCall(bar);
assertTrue(bar());
diff --git a/deps/v8/test/mjsunit/compiler/js-create-arguments.js b/deps/v8/test/mjsunit/compiler/js-create-arguments.js
index dc2f8911b4..e37ac06a55 100644
--- a/deps/v8/test/mjsunit/compiler/js-create-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/js-create-arguments.js
@@ -37,5 +37,5 @@ function main() {
main();
main();
-%OptimizeFunctionForTopTier(main);
+%OptimizeFunctionOnNextCall(main);
main();
diff --git a/deps/v8/test/mjsunit/compiler/js-create.js b/deps/v8/test/mjsunit/compiler/js-create.js
index 88eff498e8..6ddc1d164c 100644
--- a/deps/v8/test/mjsunit/compiler/js-create.js
+++ b/deps/v8/test/mjsunit/compiler/js-create.js
@@ -29,5 +29,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
index 811dc753cc..0f215c2d80 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -30,7 +30,7 @@
%PrepareFunctionForOptimization(lit_const_smi);
lit_const_smi(); lit_const_smi();
- %OptimizeFunctionForTopTier(lit_const_smi); lit_const_smi();
+ %OptimizeFunctionOnNextCall(lit_const_smi); lit_const_smi();
function lit_const_object() {
@@ -46,7 +46,7 @@
%PrepareFunctionForOptimization(lit_const_object);
lit_const_object(); lit_const_object();
- %OptimizeFunctionForTopTier(lit_const_object); lit_const_object();
+ %OptimizeFunctionOnNextCall(lit_const_object); lit_const_object();
function lit_computed_smi(k) {
@@ -62,11 +62,11 @@
%PrepareFunctionForOptimization(lit_computed_smi);
lit_computed_smi(1); lit_computed_smi(2);
- %OptimizeFunctionForTopTier(lit_computed_smi); lit_computed_smi(3);
+ %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3);
// TODO(bmeurer): Fix const tracking for double fields in object literals
// lit_computed_smi(1.1); lit_computed_smi(2.2);
- // %OptimizeFunctionForTopTier(lit_computed_smi); lit_computed_smi(3.3);
+ // %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3.3);
function lit_param_object(k) {
@@ -81,7 +81,7 @@
%PrepareFunctionForOptimization(lit_param_object);
lit_param_object({x: 1}); lit_param_object({x: 2});
- %OptimizeFunctionForTopTier(lit_param_object); lit_param_object({x: 3});
+ %OptimizeFunctionOnNextCall(lit_param_object); lit_param_object({x: 3});
function nested_lit_param(k) {
@@ -96,11 +96,11 @@
%PrepareFunctionForOptimization(nested_lit_param);
nested_lit_param(1); nested_lit_param(2);
- %OptimizeFunctionForTopTier(nested_lit_param); nested_lit_param(3);
+ %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3);
// TODO(bmeurer): Fix const tracking for double fields in object literals
// nested_lit_param(1.1); nested_lit_param(2.2);
- // %OptimizeFunctionForTopTier(nested_lit_param); nested_lit_param(3.3);
+ // %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3.3);
function nested_lit_param_object(k) {
@@ -115,7 +115,7 @@
%PrepareFunctionForOptimization(nested_lit_param_object);
nested_lit_param_object({x: 1}); nested_lit_param_object({x: 2});
- %OptimizeFunctionForTopTier(nested_lit_param_object);
+ %OptimizeFunctionOnNextCall(nested_lit_param_object);
nested_lit_param_object({x: 3});
@@ -138,16 +138,16 @@
%PrepareFunctionForOptimization(inst_param);
inst_param(1); inst_param(2);
- %OptimizeFunctionForTopTier(inst_param); inst_param(3);
+ %OptimizeFunctionOnNextCall(inst_param); inst_param(3);
// TODO(gsps): Reenable once we fully support const field information
// tracking in the presence of pointer compression.
// inst_param(1.1); inst_param(2.2);
- // %OptimizeFunctionForTopTier(inst_param); inst_param(3.3);
+ // %OptimizeFunctionOnNextCall(inst_param); inst_param(3.3);
%PrepareFunctionForOptimization(inst_param);
inst_param({x: 1}); inst_param({x: 2});
- %OptimizeFunctionForTopTier(inst_param); inst_param({x: 3});
+ %OptimizeFunctionOnNextCall(inst_param); inst_param({x: 3});
function inst_computed(k) {
@@ -168,9 +168,9 @@
%PrepareFunctionForOptimization(inst_computed);
inst_computed(1); inst_computed(2);
- %OptimizeFunctionForTopTier(inst_computed); inst_computed(3);
+ %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3);
%PrepareFunctionForOptimization(inst_computed);
inst_computed(1.1); inst_computed(2.2);
- %OptimizeFunctionForTopTier(inst_computed); inst_computed(3.3);
+ %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3.3);
})();
diff --git a/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js b/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
index b8d1e93602..380a6ceac2 100644
--- a/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
+++ b/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
@@ -22,7 +22,7 @@ function TestFunctionPrototypeApply(x) {
%PrepareFunctionForOptimization(TestFunctionPrototypeApply);
assertEquals(TestFunctionPrototypeApply(-13), 13);
assertEquals(TestFunctionPrototypeApply(42), 42);
-%OptimizeFunctionForTopTier(TestFunctionPrototypeApply);
+%OptimizeFunctionOnNextCall(TestFunctionPrototypeApply);
assertEquals(TestFunctionPrototypeApply(-13), 13);
assertOptimized(TestFunctionPrototypeApply);
TestFunctionPrototypeApply("abc");
@@ -39,7 +39,7 @@ function TestFunctionPrototypeApplyReceiver(func, x, y) {
%PrepareFunctionForOptimization(TestFunctionPrototypeApplyReceiver);
assertEquals(-13, TestFunctionPrototypeApplyReceiver(MathMin, -13, 42));
assertEquals(-4, TestFunctionPrototypeApplyReceiver(MathMin, 3, -4));
-%OptimizeFunctionForTopTier(TestFunctionPrototypeApplyReceiver);
+%OptimizeFunctionOnNextCall(TestFunctionPrototypeApplyReceiver);
assertEquals(7, TestFunctionPrototypeApplyReceiver(MathMin, 7, 9));
assertOptimized(TestFunctionPrototypeApplyReceiver);
TestFunctionPrototypeApplyReceiver(MathMin, "abc");
@@ -60,14 +60,14 @@ assertUnoptimized(TestFunctionPrototypeApplyReceiver);
%PrepareFunctionForOptimization(foo);
%PrepareFunctionForOptimization(test);
assertEquals(-13, test(-13, 42));
- %OptimizeFunctionForTopTier(test);
+ %OptimizeFunctionOnNextCall(test);
assertEquals(-13, test(-13, 42));
assertOptimized(test);
%PrepareFunctionForOptimization(test);
F = Math.max;
assertEquals(42, test(-13, 42));
assertUnoptimized(test);
- %OptimizeFunctionForTopTier(test);
+ %OptimizeFunctionOnNextCall(test);
assertEquals(42, test(-13, 42));
F = Math.min;
assertEquals(-13, test(-13, 42));
@@ -82,7 +82,7 @@ function TestFunctionPrototypeCall(x) {
%PrepareFunctionForOptimization(TestFunctionPrototypeCall);
TestFunctionPrototypeCall(42);
TestFunctionPrototypeCall(52);
-%OptimizeFunctionForTopTier(TestFunctionPrototypeCall);
+%OptimizeFunctionOnNextCall(TestFunctionPrototypeCall);
TestFunctionPrototypeCall(12);
assertOptimized(TestFunctionPrototypeCall);
TestFunctionPrototypeCall("abc");
@@ -97,7 +97,7 @@ function TestArrayForEach(x) {
%PrepareFunctionForOptimization(TestArrayForEach);
TestArrayForEach([1, 3, -4]);
TestArrayForEach([-9, 9, 0]);
-%OptimizeFunctionForTopTier(TestArrayForEach);
+%OptimizeFunctionOnNextCall(TestArrayForEach);
TestArrayForEach([1, 3, -4]);
assertOptimized(TestArrayForEach);
TestArrayForEach(["abc", "xy"]);
@@ -112,7 +112,7 @@ function TestArrayReduce(x) {
%PrepareFunctionForOptimization(TestArrayReduce);
assertEquals(TestArrayReduce([1, 2, -3, 4]), -24);
assertEquals(TestArrayReduce([3, 5, 7]), 105);
-%OptimizeFunctionForTopTier(TestArrayReduce);
+%OptimizeFunctionOnNextCall(TestArrayReduce);
assertEquals(TestArrayReduce([1, 2, -3, 4]), -24);
assertOptimized(TestArrayReduce);
TestArrayReduce(["abc", "xy"]);
@@ -127,7 +127,7 @@ function TestArrayReduceRight(x) {
%PrepareFunctionForOptimization(TestArrayReduceRight);
assertEquals(TestArrayReduceRight([1, 2, -3, 4]), -24);
assertEquals(TestArrayReduceRight([3, 5, 7]), 105);
-%OptimizeFunctionForTopTier(TestArrayReduceRight);
+%OptimizeFunctionOnNextCall(TestArrayReduceRight);
assertEquals(TestArrayReduceRight([1, 2, -3, 4]), -24);
assertOptimized(TestArrayReduceRight);
TestArrayReduceRight(["abc", "xy"]);
@@ -142,7 +142,7 @@ function TestArrayMap(x) {
%PrepareFunctionForOptimization(TestArrayMap);
assertEquals(TestArrayMap([1, -2, -3, 4]), [1, 2, 3, 4]);
assertEquals(TestArrayMap([5, -5, 5, -5]), [5, 5, 5, 5]);
-%OptimizeFunctionForTopTier(TestArrayMap);
+%OptimizeFunctionOnNextCall(TestArrayMap);
assertEquals(TestArrayMap([1, -2, 3, -4]), [1, 2, 3, 4]);
assertOptimized(TestArrayMap);
TestArrayMap(["abc", "xy"]);
@@ -157,7 +157,7 @@ function TestArrayFilter(x) {
%PrepareFunctionForOptimization(TestArrayFilter);
assertEquals(TestArrayFilter([-2, 0, 3, -4]), [-2, 3, -4]);
assertEquals(TestArrayFilter([0, 1, 1, 0]), [1, 1]);
-%OptimizeFunctionForTopTier(TestArrayFilter);
+%OptimizeFunctionOnNextCall(TestArrayFilter);
assertEquals(TestArrayFilter([-2, 0, 3, -4]), [-2, 3, -4]);
assertOptimized(TestArrayFilter);
TestArrayFilter(["abc", "xy"]);
@@ -172,7 +172,7 @@ function TestArrayFind(x) {
%PrepareFunctionForOptimization(TestArrayFind);
assertEquals(TestArrayFind([0, 0, -3, 12]), -3);
assertEquals(TestArrayFind([0, -18]), -18);
-%OptimizeFunctionForTopTier(TestArrayFind);
+%OptimizeFunctionOnNextCall(TestArrayFind);
assertEquals(TestArrayFind([0, 0, -3, 12]), -3);
assertOptimized(TestArrayFind);
TestArrayFind(["", "abc", "xy"]);
@@ -187,7 +187,7 @@ function TestArrayFindIndex(x) {
%PrepareFunctionForOptimization(TestArrayFindIndex);
assertEquals(TestArrayFindIndex([0, 0, -3, 12]), 2);
assertEquals(TestArrayFindIndex([0, -18]), 1);
-%OptimizeFunctionForTopTier(TestArrayFindIndex);
+%OptimizeFunctionOnNextCall(TestArrayFindIndex);
assertEquals(TestArrayFindIndex([0, 0, -3, 12]), 2);
assertOptimized(TestArrayFindIndex);
TestArrayFindIndex(["", "abc", "xy"]);
@@ -202,7 +202,7 @@ function TestArrayEvery(x) {
%PrepareFunctionForOptimization(TestArrayEvery);
assertEquals(TestArrayEvery([3, 0, -9]), false);
assertEquals(TestArrayEvery([2, 12, -1]), true);
-%OptimizeFunctionForTopTier(TestArrayEvery);
+%OptimizeFunctionOnNextCall(TestArrayEvery);
assertEquals(TestArrayEvery([3, 0, -9]), false);
assertOptimized(TestArrayEvery);
TestArrayEvery(["abc", "xy"]);
@@ -217,7 +217,7 @@ function TestArraySome(x) {
%PrepareFunctionForOptimization(TestArraySome);
assertEquals(TestArraySome([3, 0, -9]), true);
assertEquals(TestArraySome([0, 0]), false);
-%OptimizeFunctionForTopTier(TestArraySome);
+%OptimizeFunctionOnNextCall(TestArraySome);
assertEquals(TestArraySome([3, 0, -9]), true);
assertOptimized(TestArraySome);
TestArraySome(["abc", "xy"]);
@@ -233,7 +233,7 @@ function TestJSCallWithJSFunction(x) {
%PrepareFunctionForOptimization(TestJSCallWithJSFunction);
assertEquals(TestJSCallWithJSFunction(-14), 42);
assertEquals(TestJSCallWithJSFunction(14), -42);
-%OptimizeFunctionForTopTier(TestJSCallWithJSFunction);
+%OptimizeFunctionOnNextCall(TestJSCallWithJSFunction);
assertEquals(TestJSCallWithJSFunction(-14), 42);
assertOptimized(TestJSCallWithJSFunction);
TestJSCallWithJSFunction("abc");
@@ -248,7 +248,7 @@ function TestJSCallWithJSBoundFunction(x) {
%PrepareFunctionForOptimization(TestJSCallWithJSBoundFunction);
assertEquals(TestJSCallWithJSBoundFunction(-14), 42);
assertEquals(TestJSCallWithJSBoundFunction(14), -42);
-%OptimizeFunctionForTopTier(TestJSCallWithJSBoundFunction);
+%OptimizeFunctionOnNextCall(TestJSCallWithJSBoundFunction);
assertEquals(TestJSCallWithJSBoundFunction(-14), 42);
assertOptimized(TestJSCallWithJSBoundFunction);
TestJSCallWithJSBoundFunction("abc");
@@ -268,7 +268,7 @@ function TestReflectApply(x) {
%PrepareFunctionForOptimization(TestReflectApply);
assertEquals(TestReflectApply(-9), 9);
assertEquals(TestReflectApply(7), 7);
-%OptimizeFunctionForTopTier(TestReflectApply);
+%OptimizeFunctionOnNextCall(TestReflectApply);
assertEquals(TestReflectApply(-9), 9);
assertOptimized(TestReflectApply);
TestReflectApply("abc");
@@ -288,7 +288,7 @@ function TestCallWithSpread(x) {
%PrepareFunctionForOptimization(TestCallWithSpread);
assertEquals(TestCallWithSpread(-13), 169);
assertEquals(TestCallWithSpread(7), 49);
-%OptimizeFunctionForTopTier(TestCallWithSpread);
+%OptimizeFunctionOnNextCall(TestCallWithSpread);
assertEquals(TestCallWithSpread(-13), 169);
assertOptimized(TestCallWithSpread);
TestCallWithSpread("abc");
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
index 6c86d9327c..ae7f92a33d 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -21,7 +21,7 @@
%PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertInstanceof(foo(), Promise);
assertOptimized(foo);
@@ -57,7 +57,7 @@
%PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
- %OptimizeFunctionForTopTier(foo);
+ %OptimizeFunctionOnNextCall(foo);
assertInstanceof(foo(), Promise);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1225607.js b/deps/v8/test/mjsunit/compiler/regress-1225607.js
index 398d49c473..cdb7d40482 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1225607.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1225607.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --interrupt-budget=1024 --concurrent-inlining
+// Flags: --interrupt-budget=1024
const v2 = {};
const v4 = {a:42};
diff --git a/deps/v8/test/mjsunit/compiler/regress-1226988.js b/deps/v8/test/mjsunit/compiler/regress-1226988.js
deleted file mode 100644
index 1bd073e76f..0000000000
--- a/deps/v8/test/mjsunit/compiler/regress-1226988.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --turboprop
-var __v_0 = {
- x: 2,
- y: 1
-};
-function __f_0() {}
-function __f_1() {
- for (var __v_1 = 0; __v_1 < 100000; __v_1++) {
- var __v_2 = __v_0.x + __f_0();
- }
- var __v_3 = [{
- x: 2.5,
- y: 1
- }];
-}
-__f_1();
-__f_1();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1227324.js b/deps/v8/test/mjsunit/compiler/regress-1227324.js
index ac9d33256f..f830ff85a3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1227324.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1227324.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --concurrent-inlining
+// Flags: --allow-natives-syntax
(function() {
var use_symbol = {
diff --git a/deps/v8/test/mjsunit/compiler/regress-9945-1.js b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
index 3206d44627..8bd826a1a9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9945-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
@@ -47,7 +47,7 @@ assertOptimized(bar);
// Instead we trigger optimization of foo, which will inline bar (this time
// based on the new PACKED_ELEMENTS map.
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(a);
assertOptimized(foo);
%PrepareFunctionForOptimization(foo);
@@ -63,6 +63,6 @@ assertOptimized(bar);
// Now ensure there is no deopt-loop. There used to be a deopt-loop because, as
// a result of over-eager checkpoint elimination, we used to deopt into foo
// (right before the call to bar) rather than into bar (right before the load).
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(b);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9945-2.js b/deps/v8/test/mjsunit/compiler/regress-9945-2.js
index 67f4350d42..005553a3ee 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9945-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9945-2.js
@@ -33,7 +33,7 @@ foo(a);
foo(a);
// Trigger optimization of bar, based on PACKED_SMI_ELEMENTS feedback.
-%OptimizeFunctionForTopTier(bar);
+%OptimizeFunctionOnNextCall(bar);
bar(a);
assertOptimized(bar);
%PrepareFunctionForOptimization(bar);
@@ -49,7 +49,7 @@ assertOptimized(bar);
// Instead we trigger optimization of foo, which will inline bar (this time
// based on the new PACKED_ELEMENTS map.
assertOptimized(bar);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
assertOptimized(bar);
foo(a);
assertOptimized(bar);
@@ -66,6 +66,6 @@ assertOptimized(bar);
// Now ensure there is no deopt-loop. There used to be a deopt-loop because, as
// a result of over-eager checkpoint elimination, we used to deopt into foo
// (right before the call to bar) rather than into bar (right before the load).
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(b);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-store-store-elim.js b/deps/v8/test/mjsunit/compiler/regress-store-store-elim.js
new file mode 100644
index 0000000000..e084bd1a31
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-store-store-elim.js
@@ -0,0 +1,29 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap --turbo-store-elimination
+
+// Check that transitioning stores are not eliminated.
+
+let obj = { a: 42 }
+
+function foo() {
+ // Force GC on the next allocation to trigger heap verification.
+ %SimulateNewspaceFull();
+
+ // Transitioning store. Must not be eliminated.
+ this.f = obj;
+
+ this.f = {
+ a: 43
+ };
+}
+
+%PrepareFunctionForOptimization(foo);
+var a;
+a = new foo();
+a = new foo();
+%OptimizeFunctionOnNextCall(foo);
+a = new foo();
+assertEquals(43, a.f.a);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-accessors.js b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
index 2042b7f66a..27aff13d6d 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-accessors.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
@@ -38,5 +38,5 @@ function foo() {
foo();
foo();
expect_interpreted = false;
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/serializer-apply.js b/deps/v8/test/mjsunit/compiler/serializer-apply.js
index 8f438b751c..20154b09ba 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-apply.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-apply.js
@@ -23,5 +23,5 @@ function bar() {
%PrepareFunctionForOptimization(apply);
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionForTopTier(bar);
+%OptimizeFunctionOnNextCall(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/serializer-call.js b/deps/v8/test/mjsunit/compiler/serializer-call.js
index 2c62d3e361..d4299a6880 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-call.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-call.js
@@ -23,5 +23,5 @@ function bar() {
%PrepareFunctionForOptimization(call);
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionForTopTier(bar);
+%OptimizeFunctionOnNextCall(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
index e10520da2b..3367a08e3e 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
@@ -39,5 +39,5 @@ assertTrue(main(true, true));
assertTrue(main(true, true));
assertTrue(main(false, true));
assertTrue(main(false, true));
-%OptimizeFunctionForTopTier(main);
+%OptimizeFunctionOnNextCall(main);
assertFalse(main(false));
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
index b5f311ab4d..3f24649f04 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
@@ -37,5 +37,5 @@ assertTrue(main(true, true));
assertTrue(main(true, true));
assertTrue(main(false, true));
assertTrue(main(false, true));
-%OptimizeFunctionForTopTier(main);
+%OptimizeFunctionOnNextCall(main);
assertFalse(main(false));
diff --git a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
index ab20f06b8a..13e88639db 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
@@ -23,5 +23,5 @@ bar({bla: 1});
bar({blu: 1});
bar({blo: 1});
foo(obj);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(obj);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
index f4669bff76..5622719f7d 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
@@ -28,5 +28,5 @@ bar({bla: 1});
bar({blu: 1});
bar({blo: 1});
foo(obj);
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo(obj);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
index b2e2ee9163..f759dffa0c 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
@@ -55,5 +55,5 @@ var g = new G;
foo();
foo();
expect_interpreted = false;
-%OptimizeFunctionForTopTier(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/store-data-property-in-literal-private.js b/deps/v8/test/mjsunit/compiler/store-data-property-in-literal-private.js
index ab4b9142e3..07c2f4844a 100644
--- a/deps/v8/test/mjsunit/compiler/store-data-property-in-literal-private.js
+++ b/deps/v8/test/mjsunit/compiler/store-data-property-in-literal-private.js
@@ -10,8 +10,9 @@ let privateName = %CreatePrivateNameSymbol("privateName");
function test() {
"use strict";
- // These computed properties are translated into JSStoreDataPropertyInLiteral
- // ops, and AccessInfoFactory::ComputePropertyAccessInfo should find a
+ // These computed properties are translated into
+ // JSDefineKeyedOwnPropertyInLiteral ops,
+ // and AccessInfoFactory::ComputePropertyAccessInfo should find a
// suitable map transition when optimizing. Even if the implementation details
// are ignored, we still want to assert that these properties are installed as
// non-enumerable, due to being private symbols.
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js
deleted file mode 100644
index 552d2564cb..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function f(o) {
- return o.b;
-}
-
-var o = {a:10, b:20};
-var o1 = {a:10, b:20};
-var o2 = {a:10, b:20};
-var o3 = {a:10, b:20, c:30};
-%PrepareFunctionForOptimization(f);
-// Transition IC state to polymorphic.
-f(o);
-f(o3);
-%OptimizeFunctionOnNextCall(f);
-f(o);
-assertOptimized(f);
-f(o);
-
-// Deprecates O's map.
-o1.b = 10.23;
-// Deoptimizes but retains code.
-f(o1);
-assertOptimized(f);
-
-// Continues to use optimized code since deprecated map is still in the
-// feedback. ICs don't drop deprecated maps in the polymoprhic case.
-f(o);
-f(o);
-assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js
deleted file mode 100644
index ee5abffe99..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function f(o) {
- return o.b;
-}
-
-var o = {a:10, b:20};
-var o1 = {a:10, b:20};
-var o2 = {a:10, b:20};
-%PrepareFunctionForOptimization(f);
-f(o);
-%OptimizeFunctionOnNextCall(f);
-f(o);
-assertOptimized(f);
-
-// Deprecates o's map.
-o1.b = 10.23;
-
-// Bails out but retains code.
-f(o1);
-assertOptimized(f);
-
-// Passing in original object should not cause any deopts.
-f(o);
-f(o);
-assertOptimized(f);
-
-// o and o2 have the same Map, so there should be no deopts.
-f(o2);
-f(o2);
-assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js
deleted file mode 100644
index d3c201bc9d..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks
-// Flags: --opt --no-always-opt --deopt-every-n-times=0
-
-function b(a) { return a; }
-
-function f(o, should_bailout) {
- b(o.a);
- let did_bailout = (%GetOptimizationStatus(f) &
- V8OptimizationStatus.kTopmostFrameIsTurboFanned) == 0;
- assertEquals(should_bailout, did_bailout);
-}
-
-var o = {a:10, b:20, c:30};
-var o1 = {a:10, b:20, c:30};
-var o2 = {a:10, b:20, c:30};
-%PrepareFunctionForOptimization(f);
-f(o, true);
-%OptimizeFunctionOnNextCall(f);
-f(o, false);
-assertOptimized(f);
-
-// Transition o to a new map and deprecate the old one (which is embedded in the
-// optimized code for the dynamic map check).
-o.b = 10.23;
-f(o, true);
-f(o1, false);
-f(o2, false);
-assertOptimized(f);
-
-// Deprecate o's new map again and update the feedback vector but don't migrate
-// o.
-o1.c = 20.23;
-f(o1, true);
-assertOptimized(f);
-
-// We should migrates o's map with a bailout, but then should not bailout after
-// migrating.
-f(o, true);
-f(o, false);
-f(o1, false);
-f(o2, false);
-assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps3.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps3.js
deleted file mode 100644
index fa6dc826f0..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps3.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks
-// Flags: --opt --no-always-opt --deopt-every-n-times=0
-
-function b(a) { return a; }
-
-function f(o, should_bailout) {
- b(o.a);
- let did_bailout = (%GetOptimizationStatus(f) &
- V8OptimizationStatus.kTopmostFrameIsTurboFanned) == 0;
- assertEquals(should_bailout, did_bailout);
-}
-
-var o = {a:10, b:20, c:30};
-var o1 = {a:10, b:20, c:30};
-var o2 = {a:10, b:20, c:30};
-
-// Make o's map a migration target.
-o1.b = 10.23;
-o.a;
-
-%PrepareFunctionForOptimization(f);
-f(o, true);
-%OptimizeFunctionOnNextCall(f);
-f(o, false);
-assertOptimized(f);
-
-// Deprecate o's new map again and update the feedback vector but don't migrate
-// o.
-o1.c = 20.23;
-f(o1, true);
-assertOptimized(f);
-
-// We migrates o's map without deopting or bailing out.
-f(o, false);
-f(o1, false);
-f(o2, false);
-assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js
deleted file mode 100644
index 9058fc00b2..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function load(obj){
- return obj.x;
-}
-
-var o = {x:20, y:30};
-var o1 = {x:20, y:30, z:40};
-
-%PrepareFunctionForOptimization(load);
-load(o);
-load(o1);
-
-%OptimizeFunctionOnNextCall(load);
-load(o);
-load(o1);
-assertOptimized(load);
-
-// deprecate maps in IC
-o.x = 21.32;
-o1.x = 21.32;
-
-// transition poly -> mono
-var o2 = {y:20, x:20};
-// This bails out to interpreter and updates the IC state
-load(o2);
-// Optimized code sees monomorphic and should deopt.
-load(o2);
-// should deptimize since we wouldn't generate checks for monomorphic when
-// starting off with polymorphic
-assertUnoptimized(load);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js
deleted file mode 100644
index 0de7ae0c86..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function load(obj){
- return obj.x;
-}
-
-%PrepareFunctionForOptimization(load);
-obj = {};
-obj.x = 1;
-
-//get mono feedback
-load(obj);
-
-// optimize as mono
-%OptimizeFunctionOnNextCall(load);
-load(obj);
-assertOptimized(load);
-load(obj);
-
-// change the object's representation.
-obj.x = 2.3;
-load(obj);
-// deoptimizes on a wrong map but retains the code
-assertOptimized(load);
-
-// deoptimizes on the wrong handler.
-load(obj);
-assertUnoptimized(load);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js
deleted file mode 100644
index 4f938f6744..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function load(obj){
- return obj.x;
-}
-
-var o = {x: 10, y:20};
-var o1 = {x:10, y:20, z:30};
-
-%PrepareFunctionForOptimization(load);
-// polymorphic with same handler
-load(o);
-load(o1);
-
-%OptimizeFunctionOnNextCall(load);
-load(o);
-load(o1);
-assertOptimized(load);
-
-var o2 = {y: 10, x:20};
-// deopts but stays optimized
-load(o2);
-assertOptimized(load);
-
-// deopts and discard code on wrong handler
-load(o2);
-assertUnoptimized(load);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js
deleted file mode 100644
index f3c5289dbe..0000000000
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks --opt
-// Flags: --no-always-opt
-
-function load(obj){
- return obj.x;
-}
-
-%PrepareFunctionForOptimization(load);
-obj = {};
-obj.x = 1;
-
-//get mono feedback
-load(obj);
-load(obj);
-
-// optimize as mono
-%OptimizeFunctionOnNextCall(load);
-load(obj);
-assertOptimized(load);
-load(obj);
-
-// transition to poly - should retain optimized code
-obj.y = 2;
-load(obj);
-assertOptimized(load);
-load(obj);
-
-// transition to more polymorphic
-obj.z = 3;
-load(obj);
-obj.q =4;
-load(obj);
-
-// transition to megamorphic
-assertOptimized(load);
-obj.r = 5;
-load(obj);
-obj.s = 6;
-load(obj);
-assertUnoptimized(load);
-load(obj);
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
index 2ca1f8a7d9..bb12ed3702 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --concurrent-recompilation
-// Flags: --no-stress-opt --no-always-opt --no-turboprop
+// Flags: --no-stress-opt --no-always-opt
//
// --nostress-opt is in place because this particular optimization
// (guaranteeing that the Array prototype chain has no elements) is
diff --git a/deps/v8/test/mjsunit/const-field-tracking-2.js b/deps/v8/test/mjsunit/const-field-tracking-2.js
deleted file mode 100644
index 34511cc836..0000000000
--- a/deps/v8/test/mjsunit/const-field-tracking-2.js
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// TODO(gsathya): This test will probably break when v8 tiers up to turbofan
-// from turboprop.
-//
-// Flags: --allow-natives-syntax --opt --no-always-opt --turboprop
-// Flags: --turbo-dynamic-map-checks
-
-var global = this;
-var unique_id = 0;
-// Creates a function with unique SharedFunctionInfo to ensure the feedback
-// vector is unique for each test case.
-function MakeFunctionWithUniqueSFI(...args) {
- assertTrue(args.length > 0);
- var body = `/* Unique comment: ${unique_id++} */ ` + args.pop();
- return new Function(...args, body);
-}
-
-
-//
-// Load constant field from constant object directly.
-//
-function TestLoadFromConstantFieldOfAConstantObject(the_value, other_value) {
- function A(v) { this.v = v; }
- function O() { this.a = new A(the_value); }
- var the_object = new O();
-
- // Ensure that {the_object.a}'s map is not stable to complicate compiler's
- // life.
- new A(the_value).blah = 0;
-
- // Ensure that constant tracking is enabled for {contant_object}.
- delete global.constant_object;
- global.constant_object = the_object;
- assertEquals(the_object, constant_object);
-
- assertTrue(%HasFastProperties(the_object));
-
- // {constant_object} is known to the compiler via global property cell
- // tracking.
- var load = MakeFunctionWithUniqueSFI("return constant_object.a.v;");
- %PrepareFunctionForOptimization(load);
- load();
- load();
- %OptimizeFunctionOnNextCall(load);
- assertEquals(the_value, load());
- assertOptimized(load);
- var a = new A(other_value);
- assertTrue(%HaveSameMap(a, the_object.a));
- // Make constant field mutable by assigning another value
- // to some other instance of A.
- new A(the_value).v = other_value;
- assertTrue(%HaveSameMap(a, new A(the_value)));
- assertTrue(%HaveSameMap(a, the_object.a));
- assertOptimized(load);
- assertEquals(the_value, load());
- assertOptimized(load);
- assertEquals(the_value, load());
-}
-
-//Test constant tracking with Smi value.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with double value.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42;
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with function value.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with heap object value.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-
-//
-// Load constant field from a prototype.
-//
-function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
- function Proto() { this.v = the_value; }
- var the_prototype = new Proto();
-
- function O() {}
- O.prototype = the_prototype;
- var the_object = new O();
-
- // Ensure O.prototype is in fast mode by loading from its field.
- function warmup() { return new O().v; }
- %EnsureFeedbackVectorForFunction(warmup);
- warmup(); warmup(); warmup();
- if (!%IsDictPropertyConstTrackingEnabled())
- assertTrue(%HasFastProperties(O.prototype));
-
- // The parameter object is not constant but all the values have the same
- // map and therefore the compiler knows the prototype object and can
- // optimize load of "v".
- var load = MakeFunctionWithUniqueSFI("o", "return o.v;");
- %PrepareFunctionForOptimization(load);
- load(new O());
- load(new O());
- %OptimizeFunctionOnNextCall(load);
- assertEquals(the_value, load(new O()));
- assertOptimized(load);
- // Invalidation of mutability should trigger deoptimization with a
- // "field-owner" reason.
- the_prototype.v = other_value;
- assertUnoptimized(load);
-}
-
-// Test constant tracking with Smi value.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with double value.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42;
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with function value.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with heap object value.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-
-//
-// Store to constant field of a constant object.
-//
-function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
- function A(v) { this.v = v; }
- function O() { this.a = new A(the_value); }
- var the_object = new O();
-
- // Ensure that {the_object.a}'s map is not stable to complicate compiler's
- // life.
- new A(the_value).blah = 0;
-
- // Ensure that constant tracking is enabled for {contant_object}.
- delete global.constant_object;
- global.constant_object = the_object;
- assertEquals(the_object, constant_object);
-
- assertTrue(%HasFastProperties(the_object));
-
- // {constant_object} is known to the compiler via global property cell
- // tracking.
- var store = MakeFunctionWithUniqueSFI("v", "constant_object.a.v = v;");
- %PrepareFunctionForOptimization(store);
- store(the_value);
- store(the_value);
- %OptimizeFunctionOnNextCall(store);
- store(the_value);
- assertEquals(the_value, constant_object.a.v);
- assertOptimized(store);
- // Storing of the same value does not deoptimize.
- store(the_value);
- assertEquals(the_value, constant_object.a.v);
- assertOptimized(store);
-
- var a = new A(other_value);
-
- assertOptimized(store);
- // Storing other value deoptimizes because of failed value check.
- store(other_value);
- assertUnoptimized(store);
- assertEquals(other_value, constant_object.a.v);
-}
-
-// Test constant tracking with Smi values.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with double values.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with function values.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with heap object values.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
diff --git a/deps/v8/test/mjsunit/d8/d8-multiple-module-exec.js b/deps/v8/test/mjsunit/d8/d8-multiple-module-exec.js
new file mode 100644
index 0000000000..6a6b6675e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-multiple-module-exec.js
@@ -0,0 +1,8 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: test/mjsunit/modules-skip-1.mjs test/mjsunit/modules-skip-1.mjs
+
+// Just test that d8 doesn't crash when running the same module on the
+// command line twice.
diff --git a/deps/v8/test/mjsunit/d8/d8-worker.js b/deps/v8/test/mjsunit/d8/d8-worker.js
index 752bb5ced2..39ff969bbe 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker.js
@@ -82,8 +82,16 @@ var workerScript =
if (t[i] !== i)
throw new Error('ArrayBuffer transfer value ' + i);
break;
+ case 10:
+ if (JSON.stringify(m) !== '{"foo":{},"err":{}}')
+ throw new Error('Object ' + JSON.stringify(m));
+ break;
+ case 11:
+ if (m.message != "message")
+ throw new Error('Error ' + JSON.stringify(m));
+ break;
}
- if (c == 10) {
+ if (c == 12) {
postMessage('DONE');
}
};`;
@@ -162,6 +170,13 @@ if (this.Worker) {
assertEquals("undefined", typeof foo);
+ // Transfer Error
+ const err = new Error();
+ w.postMessage({ foo: err, err })
+
+ // Transfer single Error
+ w.postMessage(new Error("message"))
+
// Read a message from the worker.
assertEquals("DONE", w.getMessage());
diff --git a/deps/v8/test/mjsunit/debugPrint.js b/deps/v8/test/mjsunit/debugPrint.js
index 682a41463c..e67d10f4db 100644
--- a/deps/v8/test/mjsunit/debugPrint.js
+++ b/deps/v8/test/mjsunit/debugPrint.js
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Test various debug flags
+
+// Flags: --allow-natives-syntax --trace-gc-object-stats --gc-global
+// Flags: --trace-zone-stats --expose-gc --trace-gc
var largeArray = [];
largeArray[0xFFFF00] = 123;
@@ -25,7 +28,22 @@ function slowSloppyArguments2(a, b) {
return arguments;
}
+let proto_obj = { fn1() { return 1 } }
+let obj_with_enum_cache = {
+ __proto__: proto_obj,
+ a: 1,
+ b: 2,
+ c: "c"
+};
+
+for (let k in obj_with_enum_cache) {
+ // do something
+ obj_with_enum_cache.a += obj_with_enum_cache.fn1();
+}
+
+let string_1 = "aasdfasdfasdfasdf asd fa sdf asdf as dfa sdf asd f"
+let string_2 = "aasdfasdfasdfasdf asd fa sdf UC16\u2028asdf as dfa sdf asd f"
var objects = [
this,
true, false, null, undefined,
@@ -33,8 +51,10 @@ var objects = [
9007199254740991.0, 9007199254740991.0 + 10,
-9007199254740992.0, -9007199254740992.0 - 10,
Infinity, -Infinity, NaN,
- "aasdfasdfasdfasdf", "a"+"b",
+ string_1, string_1+"b", string_1.slice(1),
+ string_2, string_2+"b", string_2.slice(1),
{}, {1:1}, {a:1}, {1:1, 2:2}, Object.create(null),
+ obj_with_enum_cache,
[], [{}, {}], [1, 1, 1], [1.1, 1.1, 1.1, 1.1, 2], largeArray,
new Proxy({},{}),
new Date(), new String(" a"),
@@ -53,3 +73,6 @@ var objects = [
];
for (var o of objects) %DebugPrint(o);
+
+// Trigger some gcs to trigger heap and zone stats
+for (let i = 0; i <= 4; i++) gc();
diff --git a/deps/v8/test/mjsunit/es6/classes-super-in-heritage.js b/deps/v8/test/mjsunit/es6/classes-super-in-heritage.js
new file mode 100644
index 0000000000..02a8647d7e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-super-in-heritage.js
@@ -0,0 +1,49 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+ getValue() {
+ return 'A';
+ }
+
+ static extend() {
+ return class extends this {
+ getValue() {
+ return 'A.extend:' + super.getValue();
+ }
+ }
+ }
+}
+
+class B extends A {
+ getValue() {
+ return 'B:' + super.getValue();
+ }
+
+ static extend() {
+ return class extends super.extend() {
+ getValue() {
+ return 'B.extend:' + super.getValue();
+ }
+ }
+ }
+
+ static extend2() {
+ // Have 2 uses of super to test the Scope's cache.
+ let x = super.extend();
+ return class extends super.extend() {
+ getValue() {
+ return 'B.extend:' + super.getValue();
+ }
+ }
+ }
+}
+
+const C = B.extend();
+const c = new C();
+assertEquals(c.getValue(), 'B.extend:A.extend:B:A');
+
+const C2 = B.extend2();
+const c2 = new C2();
+assertEquals(c2.getValue(), 'B.extend:A.extend:B:A');
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js b/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js
deleted file mode 100644
index 4e3d02e5e3..0000000000
--- a/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --super-ic --opt
-// Flags: --no-always-opt --no-stress-opt --turboprop
-// Flags: --turbo-dynamic-map-checks --deopt-every-n-times=0
-
-// This file contains tests which require --dynamic-map-chekcs.
-
-(function TestMinimorphicPropertyAccess() {
- class A {}
- A.prototype.bar = "wrong value: A.prototype.bar";
-
- class B extends A {};
- B.prototype.bar = "correct value";
-
- class C extends B {
- foo(should_bailout) {
- const r = super.bar;
- const did_bailout = (
- %GetOptimizationStatus(C.prototype.foo) &
- V8OptimizationStatus.kTopmostFrameIsTurboFanned) == 0;
- assertEquals(should_bailout, did_bailout);
- return r;
- }
- }
- C.prototype.bar = "wrong value: C.prototype.bar";
- %PrepareFunctionForOptimization(C.prototype.foo);
-
- let o = new C();
- o.bar = "wrong value: o.bar";
-
- // Fill in the feedback.
- let r = o.foo(true);
- assertEquals("correct value", r);
- %OptimizeFunctionOnNextCall(C.prototype.foo);
-
- // Test the optimized function.
- r = o.foo(false);
- assertEquals("correct value", r);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js b/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js
new file mode 100644
index 0000000000..f69f4293fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js
@@ -0,0 +1,183 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-grouping
+
+assertEquals(Array.prototype[Symbol.unscopables].groupBy, true);
+
+var array = [-0, 1, 0, 2];
+var groupBy = () => {
+ let result = array.groupBy(v => v > 0);
+ result = Array.from(Object.entries(result));
+ return result;
+}
+
+// entry order matters
+assertEquals(groupBy(), [
+ ['false', [-0, 0]],
+ ['true', [1, 2]],
+]);
+
+Object.defineProperty(array, 4, {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 3,
+});
+assertEquals(groupBy(), [
+ ['false', [-0, 0]],
+ ['true', [1, 2, 3]],
+]);
+
+Object.defineProperty(array, 5, {
+ enumerable: true,
+ configurable: true,
+ get: () => 4,
+});
+var result = groupBy();
+assertEquals(result, [
+ ['false', [-0, 0]],
+ ['true', [1, 2, 3, 4]],
+]);
+assertSame(result[0][1][0], -0);
+
+
+// fairly large result array
+var length = 20000;
+var array = new Array(length);
+for (var idx = 0; idx < length; idx++) {
+ array[idx] = idx;
+}
+var groupBy = () => {
+ let result = array.groupBy(v => v % 2);
+ result = Array.from(Object.entries(result));
+ return result;
+}
+var result = groupBy();
+assertEquals(result, [
+ ['0', array.filter(v => v % 2 === 0)],
+ ['1', array.filter(v => v % 2 === 1)],
+]);
+
+// check array changed by callbackfn
+var array = [-0, 0, 1, 2];
+groupBy = () => {
+ let result = array.groupBy((v, idx) => {
+ if (idx === 1) {
+ array[2] = {a: 'b'};
+ }
+ return v > 0;
+ });
+ result = Array.from(Object.entries(result));
+ return result;
+}
+
+assertEquals(groupBy(), [
+ ['false', [-0, 0, {a: 'b'}]],
+ ['true', [2]],
+]);
+
+
+// check array with holes
+var array = [1, , 2, , 3, , 4];
+var groupBy = () => {
+ let result = array.groupBy(v => v % 2 === 0 ? 'even' : 'not_even');
+ result = Array.from(Object.entries(result));
+ return result;
+};
+function checkNoHoles(arr) {
+ for (let idx = 0; idx < arr.length; idx++) {
+ assertTrue(Object.getOwnPropertyDescriptor(arr, idx) !== undefined);
+ }
+}
+var result = groupBy();
+assertEquals(result, [
+ ['not_even', [1, undefined, undefined, 3, undefined]],
+ ['even', [2, 4]],
+]);
+checkNoHoles(result[0][1]);
+checkNoHoles(result[1][1]);
+
+var array = [1, undefined, 2, undefined, 3, undefined, 4];
+result = groupBy();
+assertEquals(result, [
+ ['not_even', [1, undefined, undefined, 3, undefined]],
+ ['even', [2, 4]],
+]);
+checkNoHoles(result[0][1]);
+checkNoHoles(result[1][1]);
+
+// array like objects
+var arrayLikeObjects = [
+ {
+ '0': -1,
+ '1': 1,
+ '2': 2,
+ length: 3,
+ },
+ (function () { return arguments })(-1, 1, 2),
+ Int8Array.from([-1, 1, 2]),
+ Float32Array.from([-1, 1, 2]),
+];
+var groupBy = () => {
+ let result = Array.prototype.groupBy.call(array, v => v > 0);
+ result = Array.from(Object.entries(result));
+ return result;
+};
+for (var array of arrayLikeObjects) {
+ assertEquals(groupBy(), [
+ ['false', [-1]],
+ ['true', [1, 2]],
+ ]);
+}
+
+
+// check proto elements
+var array = [,];
+var groupBy = () => {
+ let result = array.groupBy(v => v);
+ result = Array.from(Object.entries(result));
+ return result;
+}
+
+assertEquals(groupBy(), [
+ ['undefined', [,]],
+]);
+
+array.__proto__.push(6);
+assertEquals(groupBy(), [
+ ['6', [6]],
+]);
+
+
+// callbackfn throws
+var array = [-0, 1, 0, 2];
+assertThrows(
+ () => array.groupBy(() => { throw new Error('foobar'); }),
+ Error,
+ 'foobar'
+);
+
+
+// ToPropertyKey throws
+var array = [-0, 1, 0, 2];
+assertThrows(
+ () => array.groupBy(() => {
+ return {
+ toString() {
+ throw new Error('foobar');
+ },
+ };
+ }),
+ Error,
+ 'foobar'
+);
+
+
+// callbackfn is not callable
+var array = [-0, 1, 0, 2];
+assertThrows(
+ () => array.groupBy('foobar'),
+ TypeError,
+);
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js b/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js
new file mode 100644
index 0000000000..18bd348750
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js
@@ -0,0 +1,172 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-grouping
+
+assertEquals(Array.prototype[Symbol.unscopables].groupByToMap, true);
+
+var array = [-0, 1, 0, 2];
+var groupByToMap = () => {
+ let result = array.groupByToMap(v => v > 0);
+ result = Array.from(result.entries());
+ return result;
+}
+
+// entry order matters
+assertEquals(groupByToMap(), [
+ [false, [-0, 0]],
+ [true, [1, 2]],
+]);
+
+Object.defineProperty(array, 4, {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 3,
+});
+assertEquals(groupByToMap(), [
+ [false, [-0, 0]],
+ [true, [1, 2, 3]],
+]);
+
+Object.defineProperty(array, 5, {
+ enumerable: true,
+ configurable: true,
+ get: () => 4,
+});
+var result = groupByToMap();
+assertEquals(result, [
+ [false, [-0, 0]],
+ [true, [1, 2, 3, 4]],
+]);
+assertSame(result[0][1][0], -0);
+
+
+// fairly large result array
+var length = 20000;
+var array = new Array(length);
+for (var idx = 0; idx < length; idx++) {
+ array[idx] = idx;
+}
+var groupByToMap = () => {
+ let result = array.groupByToMap(v => v % 2);
+ result = Array.from(result.entries());
+ return result;
+}
+var result = groupByToMap();
+assertEquals(result, [
+ [0, array.filter(v => v % 2 === 0)],
+ [1, array.filter(v => v % 2 === 1)],
+]);
+
+
+// check section groupByToMap 6.d
+var array = [-0, 0];
+var result = array.groupByToMap(v => v);
+assertEquals(result.get(0), [-0, 0]);
+
+
+// check array changed by callbackfn
+var array = [-0, 0, 1, 2];
+var groupByToMap = () => {
+ let result = array.groupByToMap((v, idx) => {
+ if (idx === 1) {
+ array[2] = {a: 'b'};
+ }
+ return v > 0;
+ });
+ result = Array.from(result.entries());
+ return result;
+}
+
+assertEquals(groupByToMap(), [
+ [false, [-0, 0, {a: 'b'}]],
+ [true, [2]],
+]);
+
+// check array with holes
+var array = [1, , 2, , 3, , 4];
+var groupByToMap = () => {
+ let result = array.groupByToMap(v => v % 2 === 0 ? 'even' : 'not_even');
+ result = Array.from(result.entries());
+ return result;
+};
+function checkNoHoles(arr) {
+ for (let idx = 0; idx < arr.length; idx++) {
+ assertTrue(Object.getOwnPropertyDescriptor(arr, idx) !== undefined);
+ }
+}
+var result = groupByToMap();
+assertEquals(result, [
+ ['not_even', [1, undefined, undefined, 3, undefined]],
+ ['even', [2, 4]],
+]);
+checkNoHoles(result[0][1]);
+checkNoHoles(result[1][1]);
+
+var array = [1, undefined, 2, undefined, 3, undefined, 4];
+result = groupByToMap();
+assertEquals(result, [
+ ['not_even', [1, undefined, undefined, 3, undefined]],
+ ['even', [2, 4]],
+]);
+checkNoHoles(result[0][1]);
+checkNoHoles(result[1][1]);
+
+// array like objects
+var arrayLikeObjects = [
+ {
+ '0': -1,
+ '1': 1,
+ '2': 2,
+ length: 3,
+ },
+ (function () { return arguments })(-1, 1, 2),
+ Int8Array.from([-1, 1, 2]),
+ Float32Array.from([-1, 1, 2]),
+];
+var groupByToMap = () => {
+ let result = Array.prototype.groupByToMap.call(array, v => v > 0);
+ result = Array.from(result.entries());
+ return result;
+};
+for (var array of arrayLikeObjects) {
+ assertEquals(groupByToMap(), [
+ [false, [-1]],
+ [true, [1, 2]],
+ ]);
+}
+
+// check proto elements
+var array = [,];
+var groupByToMap = () => {
+ let result = array.groupByToMap(v => v);
+ result = Array.from(result.entries());
+ return result;
+}
+
+assertEquals(groupByToMap(), [
+ [undefined, [,]],
+]);
+array.__proto__.push(6);
+assertEquals(groupByToMap(), [
+ [6, [6]],
+]);
+
+
+// callbackfn throws
+var array = [-0, 1, 0, 2];
+assertThrows(
+ () => array.groupByToMap(() => { throw new Error('foobar'); }),
+ Error,
+ 'foobar'
+);
+
+
+// callbackfn is not callable
+var array = [-0, 1, 0, 2];
+assertThrows(
+ () => array.groupByToMap('foobar'),
+ TypeError,
+);
diff --git a/deps/v8/test/mjsunit/harmony/optional-chaining-this-private.js b/deps/v8/test/mjsunit/harmony/optional-chaining-this-private.js
index 1dd27aa59f..2a708a6a94 100644
--- a/deps/v8/test/mjsunit/harmony/optional-chaining-this-private.js
+++ b/deps/v8/test/mjsunit/harmony/optional-chaining-this-private.js
@@ -10,6 +10,11 @@ class C {
assertEquals((C?.#m)(), C);
assertEquals(C?.#c?.#m(), C);
assertEquals((C?.#c?.#m)(), C);
+
+ assertEquals(C?.#m(42), C);
+ assertEquals((C?.#m)(42), C);
+ assertEquals(C?.#c?.#m(42), C);
+ assertEquals((C?.#c?.#m)(42), C);
}
}
diff --git a/deps/v8/test/mjsunit/harmony/private-brand-nested-super.js b/deps/v8/test/mjsunit/harmony/private-brand-nested-super.js
new file mode 100644
index 0000000000..ac59fd584f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-brand-nested-super.js
@@ -0,0 +1,131 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+// Tests that brand initialization works when super() is called in a nested
+// arrow function or in eval().
+
+// IIFE nested super().
+{
+ class A extends class {} {
+ #method() { }
+ constructor() {
+ (() => super())();
+ }
+ test() { this.#method(); }
+ check() { return #method in this; }
+ }
+ const a = new A();
+ a.test();
+ assertTrue(a.check());
+}
+
+// Non-IIFE nested super().
+{
+ class A extends class {} {
+ #method() { }
+ constructor() {
+ const callSuper = () => super();
+ callSuper();
+ }
+ test() { this.#method(); }
+ check() { return #method in this; }
+ }
+ const a = new A();
+ a.test();
+ assertTrue(a.check());
+}
+
+// Eval'ed nested super().
+{
+ class A extends class {} {
+ #method() { }
+ constructor(str) {
+ eval(str);
+ }
+
+ test() { this.#method(); }
+ check() { return #method in this; }
+ }
+ const a = new A("super()");
+ a.test();
+ assertTrue(a.check());
+}
+
+// Test that private brands don't leak into class in heritage
+// position with the class scope optimized away.
+{
+ class A extends class B extends class {} {
+ constructor() { (() => super())(); }
+ static get B() { return B; }
+ } {
+ #method() {}
+ static run(obj) { obj.#method(); }
+ static get B() { return super.B; }
+ }
+
+ const b = new (A.B)();
+ assertThrows(() => A.run(b));
+}
+
+{
+ class C {
+ #c() { }
+ #field = 1;
+ static A = class A extends class B extends Object {
+ constructor() {
+ (() => super())();
+ }
+ field(obj) { return obj.#field; }
+ } {};
+ static run(obj) { obj.#c(); }
+ }
+ const a = new (C.A);
+ assertThrows(() => C.run(a));
+ const c = new C;
+ assertEquals(a.field(c), 1);
+}
+
+{
+ class C {
+ #c() { }
+ #field = 1;
+ static A = class A extends class B extends Object {
+ constructor() {
+ (() => {
+ eval("super()");
+ })();
+ }
+ field(obj) { return obj.#field; }
+ } {};
+ static run(obj) { obj.#c(); }
+ }
+ const a = new (C.A);
+ assertThrows(() => C.run(a));
+ const c = new C;
+ assertEquals(a.field(c), 1);
+}
+
+{
+ class C {
+ #c() { }
+ #field = 1;
+ static A = class A extends class B extends Object {
+ constructor() {
+ (() => {
+ {
+ super();
+ }
+ })();
+ }
+ field(obj) { return obj.#field; }
+ } {};
+ static run(obj) { obj.#c(); }
+ }
+ const a = new (C.A);
+ assertThrows(() => C.run(a));
+ const c = new C;
+ assertEquals(a.field(c), 1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/private-name-surrogate-pair.js b/deps/v8/test/mjsunit/harmony/private-name-surrogate-pair.js
new file mode 100644
index 0000000000..0ec6526562
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-name-surrogate-pair.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class C1 {
+ #𖥸 = 42;
+ m() { return this.#𖥸; }
+}
+
+assertEquals((new C1).m(), 42);
+
+class C2 {
+ #𖥸() { return 42; }
+ m() { return this.#𖥸(); }
+}
+
+assertEquals((new C2).m(), 42);
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
new file mode 100644
index 0000000000..a6709f210e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
@@ -0,0 +1,64 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm
+
+var shadowRealm = new ShadowRealm();
+
+// re-throwing with SyntaxError
+assertThrows(() => shadowRealm.evaluate('...'), SyntaxError, 'Unexpected end of input')
+
+// builtin
+var wrapped = shadowRealm.evaluate('String.prototype.substring');
+assertEquals(wrapped.call('123', 1), '23');
+
+// bound function
+var wrapped = shadowRealm.evaluate('(function it() { return this.a }).bind({ a: 1 })');
+assertEquals(wrapped(), 1);
+
+// nested bound function
+var wrapped = shadowRealm.evaluate('(function it() { return this.a }).bind({ a: 1 }).bind().bind()');
+assertEquals(wrapped(), 1);
+
+// function with function context
+var wrapped = shadowRealm.evaluate(`
+(function () {
+ var a = 1;
+ function it() { return a++; };
+ return it;
+})()
+`);
+assertEquals(wrapped(), 1);
+assertEquals(wrapped(), 2);
+
+// callable proxy
+var wrapped = shadowRealm.evaluate('new Proxy(() => 1, {})');
+assertEquals(wrapped(), 1);
+
+// nested callable proxy
+var wrapped = shadowRealm.evaluate('new Proxy(new Proxy(new Proxy(() => 1, {}), {}), {})');
+assertEquals(wrapped(), 1);
+
+// revocable proxy
+var wrapped = shadowRealm.evaluate(`
+var revocable = Proxy.revocable(() => 1, {});
+globalThis.revoke = () => {
+ revocable.revoke();
+};
+
+revocable.proxy;
+`);
+var revoke = shadowRealm.evaluate('globalThis.revoke');
+assertEquals(wrapped(), 1);
+revoke();
+assertThrows(() => wrapped(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
+
+// revoked proxy
+var wrapped = shadowRealm.evaluate(`
+var revocable = Proxy.revocable(() => 1, {});
+revocable.revoke();
+revocable.proxy;
+`);
+var revoke = shadowRealm.evaluate('globalThis.revoke');
+assertThrows(() => wrapped(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js
new file mode 100644
index 0000000000..5c39a71f5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm
+
+var shadowRealm = new ShadowRealm();
+
+// create a proxy on the wrapped value
+var wrapped = shadowRealm.evaluate('() => 1');
+assertEquals(wrapped(), 1);
+var proxy = new Proxy(wrapped, {
+ call: function(target, thisArg, args) {
+ assertEquals(target, wrapped);
+ return target();
+ },
+});
+assertEquals(proxy(), 1);
+
+// create a revocable proxy on the wrapped value
+var revocable = Proxy.revocable(wrapped, {
+ call: function(target, thisArg, args) {
+ assertEquals(target, wrapped);
+ return target();
+ },
+});
+var proxy = revocable.proxy;
+assertEquals(proxy(), 1);
+revocable.revoke();
+assertThrows(() => proxy(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
index 7d7ec6f991..4496419966 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --no-concurrent-inlining
+// Flags: --expose-gc --noincremental-marking --no-concurrent-recompilation
let cleanup_called = false;
function cleanup(holdings) {
diff --git a/deps/v8/test/mjsunit/interrupt-budget-override.js b/deps/v8/test/mjsunit/interrupt-budget-override.js
index 37d6a13a95..20f6286446 100644
--- a/deps/v8/test/mjsunit/interrupt-budget-override.js
+++ b/deps/v8/test/mjsunit/interrupt-budget-override.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --opt --interrupt-budget=100 --budget-for-feedback-vector-allocation=10 --allow-natives-syntax
+// Flags: --opt --interrupt-budget=100 --interrupt-budget-for-feedback-allocation=10 --allow-natives-syntax
function f() {
let s = 0;
diff --git a/deps/v8/test/mjsunit/maglev/00.js b/deps/v8/test/mjsunit/maglev/00.js
new file mode 100644
index 0000000000..21bb8ca3f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/00.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(x) {
+ if (x) return 1;
+ return 2;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f(true));
+assertEquals(2, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f(true));
+assertEquals(2, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/01.js b/deps/v8/test/mjsunit/maglev/01.js
new file mode 100644
index 0000000000..d1f07ea614
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/01.js
@@ -0,0 +1,20 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+var xyz = 42;
+
+function f(x) {
+ if (x) return 1;
+ return xyz;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f(true));
+assertEquals(42, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f(true));
+assertEquals(42, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/02.js b/deps/v8/test/mjsunit/maglev/02.js
new file mode 100644
index 0000000000..fe381f80c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/02.js
@@ -0,0 +1,20 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(x) {
+ if (x < 0) return -1;
+ return 1;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(-1, f(-2));
+assertEquals(1, f(0));
+assertEquals(1, f(2));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(-1, f(-2));
+assertEquals(1, f(0));
+assertEquals(1, f(2));
diff --git a/deps/v8/test/mjsunit/maglev/03.js b/deps/v8/test/mjsunit/maglev/03.js
new file mode 100644
index 0000000000..119f6f2505
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/03.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(x) {
+ var y = 0;
+ for (var i = 0; i < x; i++) {
+ y = 1;
+ }
+ return y;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f(true));
+assertEquals(0, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f(true));
+assertEquals(0, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/04.js b/deps/v8/test/mjsunit/maglev/04.js
new file mode 100644
index 0000000000..3816ec5f7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/04.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+function f(x) {
+ while (true) {
+ if (x) return 10;
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(10, f(true));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(10, f(true));
diff --git a/deps/v8/test/mjsunit/maglev/05.js b/deps/v8/test/mjsunit/maglev/05.js
new file mode 100644
index 0000000000..7e4fb1b0ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/05.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i, end) {
+ do {
+ do {
+ i = end;
+ } while (end);
+ end = i;
+ } while (i);
+ return 10;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(10, f(false, false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(10, f(false, false));
diff --git a/deps/v8/test/mjsunit/maglev/06.js b/deps/v8/test/mjsunit/maglev/06.js
new file mode 100644
index 0000000000..f87680b9b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/06.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i, j) {
+ var x = 1;
+ var y = 2;
+ if (i) {
+ x = y;
+ if (j) {
+ x = 3
+ }
+ }
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f(false, true));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f(false, false));
+assertEquals(2, f(true, false));
+assertEquals(3, f(true, true));
diff --git a/deps/v8/test/mjsunit/maglev/07.js b/deps/v8/test/mjsunit/maglev/07.js
new file mode 100644
index 0000000000..eec87f7698
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/07.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i) {
+ var x = 1;
+ if (i) { x = 2 }
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(2, f(true));
+assertEquals(1, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(2, f(true));
+assertEquals(1, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/08.js b/deps/v8/test/mjsunit/maglev/08.js
new file mode 100644
index 0000000000..09fb938bba
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/08.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i) {
+ var x = 1;
+ if (i) {} else { x = 2 }
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f(true));
+assertEquals(2, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f(true));
+assertEquals(2, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/09.js b/deps/v8/test/mjsunit/maglev/09.js
new file mode 100644
index 0000000000..4120d3323f
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/09.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i) {
+ var x = 1;
+ if (i) {
+ if (i) { x = 3 } else {}
+ } else { x = 2 }
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(3, f(true));
+assertEquals(2, f(false));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(3, f(true));
+assertEquals(2, f(false));
diff --git a/deps/v8/test/mjsunit/maglev/10.js b/deps/v8/test/mjsunit/maglev/10.js
new file mode 100644
index 0000000000..6e0a8741ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/10.js
@@ -0,0 +1,26 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+const ys = [0,1,2];
+
+function g() {
+ %CollectGarbage(42);
+ return [0,1,2];
+}
+%NeverOptimizeFunction(g);
+
+const o = { g: g };
+
+function f(o) {
+ // Using CallProperty since plain calls are still unimplemented.
+ return o.g();
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(ys, f(o));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(ys, f(o));
diff --git a/deps/v8/test/mjsunit/maglev/11.js b/deps/v8/test/mjsunit/maglev/11.js
new file mode 100644
index 0000000000..1f880940c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/11.js
@@ -0,0 +1,39 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(x) {
+ return x.a
+}
+
+function Foo(a) {
+ this.a = a
+}
+%PrepareFunctionForOptimization(f);
+
+// Smi
+var o1_1 = new Foo(1);
+var o1_2 = new Foo(1);
+
+// Transition map to double, o1 is deprecated, o1's map is a deprecation target.
+var o2 = new Foo(1.2);
+
+// Transition map to tagged, o1 is still deprecated.
+var an_object = {};
+var o3 = new Foo(an_object);
+
+assertEquals(1, f(o1_1));
+assertEquals(1.2, f(o2));
+assertEquals(an_object, f(o3));
+
+// o1_1 got migrated, but o1_2 hasn't yet.
+assertTrue(%HaveSameMap(o1_1,o3));
+assertFalse(%HaveSameMap(o1_2,o3));
+%OptimizeMaglevOnNextCall(f);
+
+// Deprecated map works
+assertEquals(1, f(o1_2));
+// Non-deprecated map works
+assertEquals(an_object, f(o3));
diff --git a/deps/v8/test/mjsunit/maglev/12.js b/deps/v8/test/mjsunit/maglev/12.js
new file mode 100644
index 0000000000..44969408f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/12.js
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function g() {
+ %CollectGarbage(42);
+ return 43;
+}
+%NeverOptimizeFunction(g);
+
+const o = { g: g };
+
+function f(o, x) {
+ var y = 42;
+ if (x) y = 43;
+ // Using CallProperty since plain calls are still unimplemented.
+ o.g();
+ return y;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(43, f(o, true));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(43, f(o, true));
diff --git a/deps/v8/test/mjsunit/maglev/13.js b/deps/v8/test/mjsunit/maglev/13.js
new file mode 100644
index 0000000000..5f8569dde6
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/13.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(a) {
+ while(true) {
+ if(5 < ++a) return a;
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(f(0), 6);
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(f(0), 6);
diff --git a/deps/v8/test/mjsunit/maglev/14.js b/deps/v8/test/mjsunit/maglev/14.js
new file mode 100644
index 0000000000..01555244d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/14.js
@@ -0,0 +1,31 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(i) {
+ a:{
+ b: {
+ c: {
+ if (i < 100) {
+ break c;
+ } else {
+ break b;
+ }
+ i = 3;
+ }
+ i = 4;
+ break a;
+ }
+ i = 5;
+ }
+ return i;
+}
+
+
+%PrepareFunctionForOptimization(f);
+assertEquals(f(1), 4);
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(f(1), 4);
diff --git a/deps/v8/test/mjsunit/maglev/15.js b/deps/v8/test/mjsunit/maglev/15.js
new file mode 100644
index 0000000000..eaccb2fa3d
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/15.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+var xyz = 42;
+
+function f(x) {
+ return x < x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(f(1), false);
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(f(1), false);
diff --git a/deps/v8/test/mjsunit/maglev/16.js b/deps/v8/test/mjsunit/maglev/16.js
new file mode 100644
index 0000000000..093def86cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/16.js
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function f(x) {
+ return x.a;
+}
+
+function Foo(a) {
+ this.a = a;
+}
+
+function Goo(a) {
+ this.a = a;
+}
+
+%PrepareFunctionForOptimization(f);
+
+var o1 = new Foo(42);
+var o2 = new Goo(4.2);
+
+assertEquals(f(o1), 42);
+assertEquals(f(o2), 4.2);
+
+%OptimizeMaglevOnNextCall(f);
+
+assertEquals(f(o1), 42);
+assertEquals(f(o2), 4.2);
diff --git a/deps/v8/test/mjsunit/maglev/17.js b/deps/v8/test/mjsunit/maglev/17.js
new file mode 100644
index 0000000000..c712ae1d64
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/17.js
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function setA(o, val) {
+ o.a = val;
+}
+
+function Foo() {
+ this.a = 0;
+}
+
+var foo = new Foo();
+
+%PrepareFunctionForOptimization(setA);
+
+setA(foo, 1);
+assertEquals(foo.a, 1);
+setA(foo, 2);
+assertEquals(foo.a, 2);
+
+%OptimizeMaglevOnNextCall(setA);
+
+setA(foo, 42);
+assertEquals(foo.a, 42);
diff --git a/deps/v8/test/mjsunit/maglev/18.js b/deps/v8/test/mjsunit/maglev/18.js
new file mode 100644
index 0000000000..7243ece122
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/18.js
@@ -0,0 +1,26 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --no-stress-opt
+
+function f(x) {
+ var y = 0;
+ for (var i = 0; i < x; i++) {
+ y = 1;
+ }
+ return y;
+}
+
+function g() {
+ // Test that normal tiering (without OptimizeMaglevOnNextCall) works.
+ for (let i = 0; i < 1000; i++) {
+ if (%ActiveTierIsMaglev(f)) break;
+ f(10);
+ }
+}
+%NeverOptimizeFunction(g);
+
+g();
+
+assertTrue(%ActiveTierIsMaglev(f));
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 42455baa61..ae8816c6b9 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -25,18 +25,34 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-function MjsUnitAssertionError(message) {
- this.message = message;
- // Temporarily install a custom stack trace formatter and restore the
- // previous value.
- let prevPrepareStackTrace = Error.prepareStackTrace;
- try {
- Error.prepareStackTrace = MjsUnitAssertionError.prepareStackTrace;
- // This allows fetching the stack trace using TryCatch::StackTrace.
- this.stack = new Error("MjsUnitAssertionError").stack;
- } finally {
- Error.prepareStackTrace = prevPrepareStackTrace;
+var MjsUnitAssertionError = class MjsUnitAssertionError {
+ #cached_message = undefined;
+ #message_func = undefined;
+
+ constructor(message_func) {
+ this.#message_func = message_func;
+ // Temporarily install a custom stack trace formatter and restore the
+ // previous value.
+ let prevPrepareStackTrace = Error.prepareStackTrace;
+ try {
+ Error.prepareStackTrace = MjsUnitAssertionError.prepareStackTrace;
+ // This allows fetching the stack trace using TryCatch::StackTrace.
+ this.stack = new Error("MjsUnitAssertionError").stack;
+ } finally {
+ Error.prepareStackTrace = prevPrepareStackTrace;
+ }
+ }
+
+ get message() {
+ if (this.#cached_message === undefined) {
+ this.#cached_message = this.#message_func();
+ }
+ return this.#cached_message
}
+
+ toString() {
+ return this.message + "\n\nStack: " + this.stack;
+ };
}
/*
@@ -45,11 +61,6 @@ function MjsUnitAssertionError(message) {
* the f-word and ignore all other lines.
*/
-MjsUnitAssertionError.prototype.toString = function () {
- return this.message + "\n\nStack: " + this.stack;
-};
-
-
// Expected and found values the same objects, or the same primitive
// values.
// For known primitive values, please use assertEquals.
@@ -258,69 +269,82 @@ var prettyPrinted;
prettyPrinted = function prettyPrinted(value) {
- switch (typeof value) {
- case "string":
- return JSONStringify(value);
- case "bigint":
- return String(value) + "n";
- case "number":
- if (value === 0 && (1 / value) < 0) return "-0";
- // FALLTHROUGH.
- case "boolean":
- case "undefined":
- case "function":
- case "symbol":
- return String(value);
- case "object":
- if (value === null) return "null";
- var objectClass = classOf(value);
- switch (objectClass) {
- case "Number":
- case "BigInt":
- case "String":
- case "Boolean":
- case "Date":
- return objectClass + "(" + prettyPrinted(ValueOf(value)) + ")";
- case "RegExp":
- return RegExpPrototypeToString.call(value);
- case "Array":
- var mapped = ArrayPrototypeMap.call(
- value, prettyPrintedArrayElement);
- var joined = ArrayPrototypeJoin.call(mapped, ",");
- return "[" + joined + "]";
- case "Uint8Array":
- case "Int8Array":
- case "Int16Array":
- case "Uint16Array":
- case "Uint32Array":
- case "Int32Array":
- case "Float32Array":
- case "Float64Array":
- var joined = ArrayPrototypeJoin.call(value, ",");
- return objectClass + "([" + joined + "])";
- case "Object":
- break;
+ let visited = new Set();
+ function prettyPrint(value) {
+ try {
+ switch (typeof value) {
+ case "string":
+ return JSONStringify(value);
+ case "bigint":
+ return String(value) + "n";
+ case "number":
+ if (value === 0 && (1 / value) < 0) return "-0";
+ // FALLTHROUGH.
+ case "boolean":
+ case "undefined":
+ case "function":
+ case "symbol":
+ return String(value);
+ case "object":
+ if (value === null) return "null";
+ // Guard against re-visiting.
+ if (visited.has(value)) return "<...>";
+ visited.add(value);
+ var objectClass = classOf(value);
+ switch (objectClass) {
+ case "Number":
+ case "BigInt":
+ case "String":
+ case "Boolean":
+ case "Date":
+ return objectClass + "(" + prettyPrint(ValueOf(value)) + ")";
+ case "RegExp":
+ return RegExpPrototypeToString.call(value);
+ case "Array":
+ var mapped = ArrayPrototypeMap.call(
+ value, (v,i,array)=>{
+ if (v === undefined && !(i in array)) return "";
+ return prettyPrint(v, visited);
+ });
+ var joined = ArrayPrototypeJoin.call(mapped, ",");
+ return "[" + joined + "]";
+ case "Uint8Array":
+ case "Int8Array":
+ case "Int16Array":
+ case "Uint16Array":
+ case "Uint32Array":
+ case "Int32Array":
+ case "Float32Array":
+ case "Float64Array":
+ var joined = ArrayPrototypeJoin.call(value, ",");
+ return objectClass + "([" + joined + "])";
+ case "Object":
+ break;
+ default:
+ return objectClass + "(" + String(value) + ")";
+ }
+ // classOf() returned "Object".
+ var name = value.constructor?.name ?? "Object";
+ var pretty_properties = [];
+ for (let [k,v] of Object.entries(value)) {
+ ArrayPrototypePush.call(
+ pretty_properties, `${k}:${prettyPrint(v, visited)}`);
+ }
+ var joined = ArrayPrototypeJoin.call(pretty_properties, ",");
+ return `${name}({${joined}})`;
default:
- return objectClass + "(" + String(value) + ")";
+ return "-- unknown value --";
}
- // classOf() returned "Object".
- var name = value.constructor.name;
- if (name) return name + "()";
- return "Object()";
- default:
- return "-- unknown value --";
+ } catch (e) {
+ // Guard against general exceptions (especially stack overflows).
+ return "<error>"
+ }
}
+ return prettyPrint(value);
}
-
- function prettyPrintedArrayElement(value, index, array) {
- if (value === undefined && !(index in array)) return "";
- return prettyPrinted(value);
- }
-
-
failWithMessage = function failWithMessage(message) {
- throw new MjsUnitAssertionError(message);
+ throw new MjsUnitAssertionError(()=>message);
}
formatFailureText = function(expectedText, found, name_opt) {
@@ -340,7 +364,8 @@ var prettyPrinted;
}
function fail(expectedText, found, name_opt) {
- return failWithMessage(formatFailureText(expectedText, found, name_opt));
+ throw new MjsUnitAssertionError(
+ ()=>formatFailureText(expectedText, found, name_opt));
}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 41fe71f384..61e59c67e6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -36,6 +36,7 @@
'wasm/wasm-module-builder': [SKIP],
'compiler/fast-api-helpers': [SKIP],
'typedarray-helpers': [SKIP],
+ 'web-snapshot/web-snapshot-helpers': [SKIP],
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
@@ -43,11 +44,66 @@
##############################################################################
# Temporal tests to be implemented
# https://crbug.com/v8/11544
- 'temporal/plain*': [FAIL],
- 'temporal/duration*': [FAIL],
- 'temporal/calendar*': [FAIL],
- 'temporal/zoned*': [FAIL],
- 'temporal/instant*': [FAIL],
+ 'temporal/calendar-date-add': [FAIL],
+ 'temporal/calendar-date-from-fields': [FAIL],
+ 'temporal/calendar-date-until': [FAIL],
+ 'temporal/calendar-day': [FAIL],
+ 'temporal/calendar-day-of-week': [FAIL],
+ 'temporal/calendar-day-of-year': [FAIL],
+ 'temporal/calendar-days-in-month': [FAIL],
+ 'temporal/calendar-days-in-week': [FAIL],
+ 'temporal/calendar-days-in-year': [FAIL],
+ 'temporal/calendar-fields': [FAIL],
+ 'temporal/calendar-in-leap-year': [FAIL],
+ 'temporal/calendar-merge-fields': [FAIL],
+ 'temporal/calendar-month': [FAIL],
+ 'temporal/calendar-month-code': [FAIL],
+ 'temporal/calendar-month-day-from-fields': [FAIL],
+ 'temporal/calendar-months-in-year': [FAIL],
+ 'temporal/calendar-week-of-year': [FAIL],
+ 'temporal/calendar-year': [FAIL],
+ 'temporal/calendar-year-month-from-fields': [FAIL],
+ 'temporal/duration-abs': [FAIL],
+ 'temporal/duration-add': [FAIL],
+ 'temporal/duration-from': [FAIL],
+ 'temporal/duration-negated': [FAIL],
+ 'temporal/duration-to-json': [FAIL],
+ 'temporal/duration-with': [FAIL],
+ 'temporal/instant-add': [FAIL],
+ 'temporal/instant-compare': [FAIL],
+ 'temporal/instant-constructor': [FAIL],
+ 'temporal/instant-equals': [FAIL],
+ 'temporal/instant-from-epoch-microseconds': [FAIL],
+ 'temporal/instant-from-epoch-milliseconds': [FAIL],
+ 'temporal/instant-from-epoch-nanoseconds': [FAIL],
+ 'temporal/instant-from-epoch-seconds': [FAIL],
+ 'temporal/instant-subtract': [FAIL],
+ 'temporal/instant-to-json': [FAIL],
+ 'temporal/instant-toJSON': [FAIL],
+ 'temporal/plain-date-add': [FAIL],
+ 'temporal/plain-date-compare': [FAIL],
+ 'temporal/plain-date-equals': [FAIL],
+ 'temporal/plain-date-from': [FAIL],
+ 'temporal/plain-date-time-add': [FAIL],
+ 'temporal/plain-date-time-compare': [FAIL],
+ 'temporal/plain-date-time-equals': [FAIL],
+ 'temporal/plain-date-time-from': [FAIL],
+ 'temporal/plain-date-time-subtract': [FAIL],
+ 'temporal/plain-date-time-to-json': [FAIL],
+ 'temporal/plain-date-time-to-plain-date': [FAIL],
+ 'temporal/plain-date-time-to-plain-month-day': [FAIL],
+ 'temporal/plain-date-time-to-plain-time': [FAIL],
+ 'temporal/plain-date-time-to-plain-year-month': [FAIL],
+ 'temporal/plain-date-time-valueOf': [FAIL],
+ 'temporal/plain-date-time-with': [FAIL],
+ 'temporal/plain-date-time-with-plain-date': [FAIL],
+ 'temporal/plain-date-time-with-plain-time': [FAIL],
+ 'temporal/plain-date-to-json': [FAIL],
+ 'temporal/plain-date-to-plain-date-time': [FAIL],
+ 'temporal/plain-date-to-plain-month-day': [FAIL],
+ 'temporal/plain-date-to-plain-year-month': [FAIL],
+ 'temporal/plain-date-valueOf': [FAIL],
+ 'temporal/plain-date-with': [FAIL],
##############################################################################
# Open bugs.
@@ -87,6 +143,10 @@
# https://crbug.com/v8/10948
'wasm/atomics': [PASS, ['arch == arm and not simulator_run', SKIP]],
+ # crbug.com/v8/12472 Stack overflow during regexp node generation.
+ 'regress/regress-crbug-595657': [SKIP],
+ 'regress/regress-475705': [SKIP],
+
##############################################################################
# Tests where variants make no sense.
'd8/enable-tracing': [PASS, NO_VARIANTS],
@@ -202,7 +262,6 @@
'regress/regress-500980': [PASS, HEAVY],
'regress/regress-599414-array-concat-fast-path': [PASS, HEAVY],
'regress/regress-678917': [PASS, HEAVY],
- 'regress/regress-752764': [PASS, HEAVY],
'regress/regress-779407': [PASS, HEAVY],
'regress/regress-852258': [PASS, HEAVY],
'regress/regress-862433': [PASS, HEAVY],
@@ -214,6 +273,10 @@
# TODO(v8:10915): Fails with --future.
'harmony/weakrefs/stress-finalizationregistry-dirty-enqueue': [SKIP],
+
+ # BUG(v8:12645)
+ 'shared-memory/shared-struct-workers': [SKIP],
+ 'shared-memory/shared-struct-atomics-workers': [SKIP],
}], # ALWAYS
##############################################################################
@@ -247,8 +310,6 @@
# worker creation/shutdown is very slow in debug mode
'd8/d8-worker-shutdown*': [SLOW],
- # Allocates a huge string and then flattens it.
- 'regress/regress-752764': [SLOW],
# BUG(v8:11745) The test allocates too much memory, making it slow on debug.
'compiler/regress-crbug-11564': [SKIP],
@@ -307,8 +368,6 @@
'regress/regress-1122': [SKIP],
'regress/regress-331444': [SKIP],
'regress/regress-353551': [SKIP],
- 'regress/regress-1138075': [SKIP],
- 'regress/regress-1138611': [SKIP],
'regress/regress-crbug-119926': [SKIP],
'regress/short-circuit': [SKIP],
'stack-traces-overflow': [SKIP],
@@ -365,6 +424,9 @@
# BUG(v8:11240)
'regress/regress-v8-9267-1': [SKIP],
+
+ # BUG(v8:12561)
+ 'spread-large-map': [SKIP],
}], # 'gc_stress'
##############################################################################
@@ -379,6 +441,8 @@
# Tests tracing when generating wasm in TurboFan.
'tools/compiler-trace-flags-wasm': [SKIP],
+
+ 'compiler/fast-api-calls-wasm': [SKIP],
}], # not has_webassembly or variant == jitless
##############################################################################
@@ -449,6 +513,13 @@
# Temporal intl tests won't work in no_i18n
'temporal/function-exist': [FAIL],
+ 'temporal/plain-date-get-era': [FAIL],
+ 'temporal/plain-date-get-eraYear': [FAIL],
+ 'temporal/plain-date-time-get-era': [FAIL],
+ 'temporal/plain-date-time-get-eraYear': [FAIL],
+
+ # Non-BMP characters currently aren't considered identifiers in no_i18n
+ 'harmony/private-name-surrogate-pair': [PASS,FAIL],
}], # 'no_i18n'
##############################################################################
@@ -635,7 +706,6 @@
# BUG(v8:6924). The test uses a lot of memory.
'regress/wasm/regress-694433': [SKIP],
'es6/typedarray': [PASS, NO_VARIANTS],
- 'regress/regress-752764': [PASS, SLOW, NO_VARIANTS],
# BUG(v8:9242). Uses a lot of memory.
'regress/regress-599414-array-concat-fast-path': [PASS, SLOW],
@@ -761,7 +831,6 @@
'math-floor-of-div-minus-zero': [SKIP],
# Requires too much memory on MIPS.
- 'regress/regress-752764': [SKIP],
'regress/regress-779407': [SKIP],
'harmony/bigint/regressions': [SKIP],
@@ -818,7 +887,6 @@
'math-floor-of-div-minus-zero': [SKIP],
# Requires too much memory on MIPS.
- 'regress/regress-752764': [SKIP],
'regress/regress-779407': [SKIP],
}], # 'arch == mips64el or arch == mips64'
@@ -861,7 +929,6 @@
'math-floor-of-div-minus-zero': [SKIP],
# Requires too much memory on RISC-V.
- 'regress/regress-752764': [SKIP],
'regress/regress-779407': [SKIP],
'harmony/bigint/regressions': [SKIP],
@@ -874,10 +941,6 @@
# This often fails in debug mode because it is too slow
'd8/d8-performance-now': [PASS, ['mode == debug', SKIP]],
- # https://github.com/v8-riscv/v8/issues/418
- 'regress/regress-1138075': [SKIP],
- 'regress/regress-1138611': [SKIP],
-
# SIMD not be implemented
'regress/wasm/regress-1054466': [SKIP],
'regress/wasm/regress-1065599': [SKIP],
@@ -897,7 +960,6 @@
'regress/wasm/regress-1199662': [SKIP],
'regress/wasm/regress-1231950': [SKIP],
'regress/wasm/regress-1264462': [SKIP],
- 'regress/regress-1172797': [SKIP],
'regress/wasm/regress-1179025': [SKIP],
'wasm/multi-value-simd': [SKIP],
'wasm/liftoff-simd-params': [SKIP],
@@ -949,8 +1011,6 @@
'tzoffset-transition-new-york-noi18n': [SKIP],
'tzoffset-seoul': [SKIP],
'tzoffset-seoul-noi18n': [SKIP],
- # OOM:
- 'regress/regress-752764': [FAIL],
# Flaky OOM:
'regress/regress-748069': [SKIP],
'regress/regress-779407': [SKIP],
@@ -984,6 +1044,11 @@
# in the module, that can be modified by all instances.
'wasm/wasm-dynamic-tiering': [SKIP],
+ # The test relies on precise switching of code kinds of wasm functions. With
+ # multiple isolates that share the wasm functions, the precise switching is
+ # not possible.
+ 'wasm/serialization-with-compilation-hints': [SKIP],
+
# waitAsync tests modify the global state (across Isolates)
'harmony/atomics-waitasync': [SKIP],
'harmony/atomics-waitasync-1thread-2timeout': [SKIP],
@@ -1131,6 +1196,9 @@
# BUG(v8:11858): Deadlocks in predictable mode when waiting for the native
# module cache entry to be completed.
'regress/wasm/regress-709684': [SKIP],
+
+ # BUG(v8:12605): flaky test.
+ 'wasm/grow-shared-memory': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -1243,12 +1311,6 @@
}], # variant == nooptimization
##############################################################################
-['(arch == arm or arch == arm64)', {
- # Flaky tests: https://crbug.com/v8/8090
- 'regress/regress-752764': [SKIP],
-}], # (arch == arm or arch == arm64)
-
-##############################################################################
['gcov_coverage', {
# Tests taking too long.
'array-functions-prototype-misc': [SKIP],
@@ -1284,6 +1346,13 @@
}], # no_harness
##############################################################################
+['arch != x64 or not pointer_compression or variant in (nooptimization, jitless)', {
+ # Maglev is x64-only for now.
+ # TODO(v8:7700): Update as we extend support.
+ 'maglev/*': [SKIP],
+}], # arch != x64 or not pointer_compression or variant in (nooptimization, jitless)
+
+##############################################################################
['arch != x64 or deopt_fuzzer', {
# Skip stress-deopt-count tests since it's in x64 only
'compiler/stress-deopt-count-*': [SKIP],
@@ -1303,9 +1372,9 @@
}], # arch not in (x64, ia32, arm64, arm, s390x, ppc64, mipsel, mips64el, loong64)
##############################################################################
-['system != linux or virtual_memory_cage == True', {
+['system != linux or sandbox == True', {
# Multi-mapped mock allocator is only available on Linux, and only if the
- # virtual memory cage is not enabled.
+ # sandbox is not enabled.
'regress/regress-crbug-1041232': [SKIP],
'regress/regress-crbug-1104608': [SKIP],
}],
@@ -1402,13 +1471,13 @@
'regress/wasm/regress-9017': [SKIP],
}], # variant == slow_path
-['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64, s390x])', {
+['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64])', {
# Requires scalar lowering for 64x2 SIMD instructions, which are not
# implemented yet.
# Also skip tests on archs that don't support SIMD and lowering doesn't yet work correctly.
# Condition copied from cctest.status.
'regress/wasm/regress-10831': [SKIP],
-}], # ((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64, s390x])
+}], # ((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64])
##############################################################################
['variant == stress_sampling', {
@@ -1431,6 +1500,10 @@
# stress-concurrent-allocation reverts the pretenuring decision due to low
# survival rate in old generation.
'compiler/deopt-pretenure': [SKIP],
+ # BUG(v8:9506): slow tests.
+ 'wasm/shared-memory-worker-gc-stress': [SKIP],
+ # BUG(v8:12607): flaky test.
+ 'harmony/sharedarraybuffer-worker-gc-stress': [SKIP],
}], # variant == stress_concurrent_allocation
##############################################################################
@@ -1462,6 +1535,15 @@
'regress/wasm/regress-1242300': [SKIP],
'regress/wasm/regress-1242689': [SKIP],
'regress/wasm/regress-1264462': [SKIP],
+ 'regress/wasm/regress-1271244': [SKIP],
+ 'regress/wasm/regress-1271538': [SKIP],
+ 'regress/wasm/regress-1282224': [SKIP],
+ 'regress/wasm/regress-1283042': [SKIP],
+ 'regress/wasm/regress-1284980': [SKIP],
+ 'regress/wasm/regress-1286253': [SKIP],
+ 'regress/wasm/regress-1283395': [SKIP],
+ 'regress/wasm/regress-1289678': [SKIP],
+ 'regress/wasm/regress-1290079': [SKIP],
}], # no_simd_hardware == True
##############################################################################
@@ -1471,7 +1553,7 @@
##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
-['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64)', {
+['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64, s390x)', {
'baseline/*': [SKIP],
'regress/regress-1242306': [SKIP],
}],
@@ -1503,6 +1585,7 @@
'compiler/test-literal-map-migration': [SKIP],
'compiler/deopt-pretenure': [SKIP],
'compiler/fast-api-sequences-x64': [SKIP],
+ 'compiler/regress-store-store-elim': [SKIP],
# TODO(v8:12031): Reimplement elements kinds transitions when concurrent
# inlining.
@@ -1712,4 +1795,9 @@
'tzoffset-transition-moscow': [FAIL],
}], # variant == google3_nociu
+# Tests that cannot run without JS shared memory
+['no_js_shared_memory', {
+ 'shared-memory/*': [SKIP],
+}], # 'no_js_shared_memory'
+
]
diff --git a/deps/v8/test/mjsunit/optimized-array-includes.js b/deps/v8/test/mjsunit/optimized-array-includes.js
new file mode 100644
index 0000000000..a38b2e15af
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-includes.js
@@ -0,0 +1,358 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+// normal case
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, 0);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ testArrayIncludes();
+ assertOptimized(testArrayIncludes);
+})();
+
+// from_index is not smi will lead to bailout
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, {
+ valueOf: () => {
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Length change detected during get from_index, will bailout
+(() => {
+ let called_values;
+ function testArrayIncludes(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ return a.includes(9, {
+ valueOf: () => {
+ if (deopt) {
+ a.length = 3;
+ }
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ assertEquals(true, testArrayIncludes());
+ testArrayIncludes();
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ assertEquals(true, testArrayIncludes());
+ assertEquals(false, testArrayIncludes(true));
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Input array change during get from_index, will bailout
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function testArrayIncludes() {
+ return a.includes(9, {
+ valueOf: () => {
+ a[0] = 9;
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ assertEquals(true, testArrayIncludes());
+ testArrayIncludes();
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ assertEquals(true, testArrayIncludes());
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is undefined, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, undefined);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is null, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, undefined);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is float, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, 0.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is symbol, will throw
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, Symbol.for('123'));
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ assertThrows(() => testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ assertThrows(() => testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is string, will bailout
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, '0');
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes()
+ assertEquals(true, testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle from_index is object which cannot convert to smi, will throw
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20, {
+ valueOf: () => {
+ return Symbol.for('123')
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ assertThrows(() => testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ assertThrows(() => testArrayIncludes());
+ assertFalse(isOptimized(testArrayIncludes));
+})();
+
+// Handle input array is smi packed elements and search_element is number
+// , will be inlined
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIncludes() {
+ return a.includes(20);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+})();
+
+// Handle input array is double packed elements, will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
+ ];
+ function testArrayIncludes() {
+ return a.includes(20.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+})();
+
+// Handle input array is double packed elements and has NaN, will be inlined
+(() => {
+ const a = [
+ NaN, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
+ ];
+ function testArrayIncludes() {
+ return a.includes(NaN);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+
+})();
+
+// Handle input array is packed elements, will reach slow path
+(() => {
+ const a = [
+ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIncludes() {
+ return a.includes(20.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+
+})();
+
+
+// Handle input array is packed elements, will be inlined
+(() => {
+ const obj = {}
+ const a = [
+ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, obj
+ ];
+ function testArrayIncludes() {
+ return a.includes(obj);
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+
+})();
+
+
+// Handle input array is packed elements and search_element is symbol
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIncludes() {
+ return a.includes(Symbol.for("123"));
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+
+})();
+
+// Handle input array is packed elements and search_element is BigInt
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIncludes() {
+ return a.includes(BigInt(123));
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+
+})();
+
+// Handle input array is packed elements and search_element is string
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIncludes() {
+ return a.includes("4.5");
+ }
+ %PrepareFunctionForOptimization(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ %OptimizeFunctionOnNextCall(testArrayIncludes);
+ testArrayIncludes();
+ assertEquals(true, testArrayIncludes());
+ assertOptimized(testArrayIncludes);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-array-indexof.js b/deps/v8/test/mjsunit/optimized-array-indexof.js
new file mode 100644
index 0000000000..d0fe067a6a
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-indexof.js
@@ -0,0 +1,360 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+
+// normal case
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, 0);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ testArrayIndexOf();
+ assertOptimized(testArrayIndexOf);
+})();
+
+// from_index is not smi will lead to bailout
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, {
+ valueOf: () => {
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Length change detected during get from_index, will bailout
+(() => {
+ let called_values;
+ function testArrayIndexOf(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ return a.indexOf(9, {
+ valueOf: () => {
+ if (deopt) {
+ a.length = 3;
+ }
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ assertEquals(8, testArrayIndexOf());
+ testArrayIndexOf();
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ assertEquals(8, testArrayIndexOf());
+ assertEquals(-1, testArrayIndexOf(true));
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Input array change during get from_index, will bailout
+(() => {
+ function testArrayIndexOf(deopt) {
+ const a = [1, 2, 3, 4, 5];
+ return a.indexOf(9, {
+ valueOf: () => {
+ if (deopt) {
+ a[0] = 9;
+ }
+ return 0;
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ assertEquals(-1, testArrayIndexOf());
+ testArrayIndexOf();
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ assertEquals(0, testArrayIndexOf(true));
+ assertEquals(-1, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is undefined, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, undefined);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is null, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, undefined);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is float, will bail out
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, 0.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is symbol, will throw
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, Symbol.for('123'));
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ assertThrows(() => testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ assertThrows(() => testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is string, will bailout
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, '0');
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf()
+ assertEquals(19, testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle from_index is object which cannot convert to smi, will throw
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20, {
+ valueOf: () => {
+ return Symbol.for('123')
+ }
+ });
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ assertThrows(() => testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ assertThrows(() => testArrayIndexOf());
+ assertFalse(isOptimized(testArrayIndexOf));
+})();
+
+// Handle input array is smi packed elements and search_element is number
+// , will be inlined
+(() => {
+ const a = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is double packed elements, will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is double packed elements and has NaN, will be inlined
+(() => {
+ const a = [
+ NaN, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(NaN);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(-1, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(-1, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is packed elements and search_element is double,
+// will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(20.5);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(19, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+
+// Handle input array is packed elements and search_element is object,
+// will be inlined
+(() => {
+ const obj = {}
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, obj
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(obj);
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(24, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(24, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is packed elements and search_element is symbol,
+// will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(Symbol.for("123"));
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(2, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(2, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is packed elements and search_element is BigInt,
+// will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf(BigInt(123));
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(4, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(4, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
+
+// Handle input array is packed elements and search_element is string,
+// will be inlined
+(() => {
+ const a = [
+ 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
+ 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
+ 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
+ ];
+ function testArrayIndexOf() {
+ return a.indexOf("4.5");
+ }
+ %PrepareFunctionForOptimization(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(3, testArrayIndexOf());
+ %OptimizeFunctionOnNextCall(testArrayIndexOf);
+ testArrayIndexOf();
+ assertEquals(3, testArrayIndexOf());
+ assertOptimized(testArrayIndexOf);
+})();
diff --git a/deps/v8/test/mjsunit/promise-hooks.js b/deps/v8/test/mjsunit/promise-hooks.js
index c30a3f36da..60bab26b43 100644
--- a/deps/v8/test/mjsunit/promise-hooks.js
+++ b/deps/v8/test/mjsunit/promise-hooks.js
@@ -37,6 +37,14 @@ function printLog(message) {
}
}
+let has_promise_hooks = false;
+try {
+ d8.promise.setHooks();
+ has_promise_hooks = true;
+} catch {
+ has_promise_hooks = false;
+}
+
function assertNextEvent(type, args) {
const [ promiseOrId, parentOrId ] = args;
const nextEvent = log.shift();
@@ -212,72 +220,75 @@ function optimizerBailout(test, verify) {
d8.promise.setHooks();
}
-optimizerBailout(async () => {
- await Promise.resolve();
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-optimizerBailout(async () => {
- await { then (cb) { cb() } };
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2, 1 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('after', [ 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-basicTest();
-exceptions();
-
-(function regress1126309() {
- function __f_16(test) {
- test();
- d8.promise.setHooks(undefined, () => {});
+if (has_promise_hooks) {
+ optimizerBailout(async () => {
+ await Promise.resolve();
+ }, () => {
+ assertNextEvent('init', [ 1 ]);
+ assertNextEvent('init', [ 2 ]);
+ assertNextEvent('resolve', [ 2 ]);
+ assertNextEvent('init', [ 3, 2 ]);
+ assertNextEvent('before', [ 3 ]);
+ assertNextEvent('resolve', [ 1 ]);
+ assertNextEvent('resolve', [ 3 ]);
+ assertNextEvent('after', [ 3 ]);
+ assertEmptyLog();
+ });
+ optimizerBailout(async () => {
+ await { then (cb) { cb() } };
+ }, () => {
+ assertNextEvent('init', [ 1 ]);
+ assertNextEvent('init', [ 2, 1 ]);
+ assertNextEvent('init', [ 3, 2 ]);
+ assertNextEvent('before', [ 2 ]);
+ assertNextEvent('resolve', [ 2 ]);
+ assertNextEvent('after', [ 2 ]);
+ assertNextEvent('before', [ 3 ]);
+ assertNextEvent('resolve', [ 1 ]);
+ assertNextEvent('resolve', [ 3 ]);
+ assertNextEvent('after', [ 3 ]);
+ assertEmptyLog();
+ });
+ basicTest();
+ exceptions();
+
+ (function regress1126309() {
+ function __f_16(test) {
+ test();
+ d8.promise.setHooks(undefined, () => {});
+ %PerformMicrotaskCheckpoint();
+ d8.promise.setHooks();
+ }
+ __f_16(async () => { await Promise.resolve()});
+ })();
+
+ (function boundFunction() {
+ function hook() {};
+ const bound = hook.bind(this);
+ d8.promise.setHooks(bound, bound, bound, bound);
+ Promise.resolve();
+ Promise.reject();
%PerformMicrotaskCheckpoint();
d8.promise.setHooks();
- }
- __f_16(async () => { await Promise.resolve()});
-})();
-
-(function boundFunction() {
- function hook() {};
- const bound = hook.bind(this);
- d8.promise.setHooks(bound, bound, bound, bound);
- Promise.resolve();
- Promise.reject();
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-})();
+ })();
-(function promiseAll() {
- let initCount = 0;
- d8.promise.setHooks(() => { initCount++});
- Promise.all([Promise.resolve(1)]);
- %PerformMicrotaskCheckpoint();
- assertEquals(initCount, 3);
+ (function promiseAll() {
+ let initCount = 0;
+ d8.promise.setHooks(() => { initCount++});
+ Promise.all([Promise.resolve(1)]);
+ %PerformMicrotaskCheckpoint();
+ assertEquals(initCount, 3);
- d8.promise.setHooks();
-})();
+ d8.promise.setHooks();
+ })();
-(function overflow(){
- d8.promise.setHooks(() => { new Promise(()=>{}) });
- // Trigger overflow from JS code:
- Promise.all([Promise.resolve(1)]);
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-});
+ (function overflow(){
+ d8.promise.setHooks(() => { new Promise(()=>{}) });
+ // Trigger overflow from JS code:
+ Promise.all([Promise.resolve(1)]);
+ %PerformMicrotaskCheckpoint();
+ d8.promise.setHooks();
+ });
+
+}
diff --git a/deps/v8/test/mjsunit/regress-1146106.js b/deps/v8/test/mjsunit/regress-1146106.js
index 04205ff7bd..d51bd1560f 100644
--- a/deps/v8/test/mjsunit/regress-1146106.js
+++ b/deps/v8/test/mjsunit/regress-1146106.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --concurrent-inlining --no-use-ic --super-ic
+// Flags: --allow-natives-syntax --no-use-ic --super-ic
class A {
bar() { }
diff --git a/deps/v8/test/mjsunit/regress/regress-1000635.js b/deps/v8/test/mjsunit/regress/regress-1000635.js
index 2a02774f99..88bf9e2399 100644
--- a/deps/v8/test/mjsunit/regress/regress-1000635.js
+++ b/deps/v8/test/mjsunit/regress/regress-1000635.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --stress-compaction --detailed-error-stack-trace --gc-interval=1
+// Flags: --stress-compaction --detailed-error-stack-trace --gc-interval=6
function add(a, b) {
throw new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-1003730.js b/deps/v8/test/mjsunit/regress/regress-1003730.js
index e20a4e4a44..125cf59f61 100644
--- a/deps/v8/test/mjsunit/regress/regress-1003730.js
+++ b/deps/v8/test/mjsunit/regress/regress-1003730.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax --concurrent-inlining
+// Flags: --allow-natives-syntax
function bar(error) {
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-1076569.js b/deps/v8/test/mjsunit/regress/regress-1076569.js
deleted file mode 100644
index a223b600a6..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1076569.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop
-
-var array = new Int16Array();
-
-function foo() {
- array[0] = "123.12";
-}
-
-%PrepareFunctionForOptimization(foo);
-foo();
-foo();
-%OptimizeFunctionOnNextCall(foo);
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1079446.js b/deps/v8/test/mjsunit/regress/regress-1079446.js
deleted file mode 100644
index 2322843751..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1079446.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop
-
-arr = new Int16Array();
-function foo() {
- arr.__defineGetter__('a', function() { });
- arr[0] = "123.12";
-}
-
-%PrepareFunctionForOptimization(foo);
-foo();
-foo();
-%OptimizeFunctionOnNextCall(foo);
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1083272.js b/deps/v8/test/mjsunit/regress/regress-1083272.js
deleted file mode 100644
index 0f16db7040..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1083272.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop
-
-function foo(e, t) {
- for (var n = [e], s = e.length; s > 0; --s) {}
- for (var s = 0; s < n.length; s++) { t() }
-}
-
-var e = 'abc';
-function t() {};
-
-%PrepareFunctionForOptimization(foo);
-foo(e, t);
-foo(e, t);
-%OptimizeFunctionOnNextCall(foo);
-foo(e, t);
diff --git a/deps/v8/test/mjsunit/regress/regress-1083763.js b/deps/v8/test/mjsunit/regress/regress-1083763.js
deleted file mode 100644
index e0504c90e2..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1083763.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop
-
-function bar() {}
-
-function foo() {
- try {
- bar( "abc".charAt(4));
- } catch (e) {}
-}
-
-%PrepareFunctionForOptimization(foo);
-foo();
-foo();
-%OptimizeFunctionOnNextCall(foo);
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1084953.js b/deps/v8/test/mjsunit/regress/regress-1084953.js
deleted file mode 100644
index 57a27d741d..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1084953.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --allow-natives-syntax --turboprop
-
-function foo() {
- try {
- +Symbol();
- } catch {
- }
-}
-%PrepareFunctionForOptimization(foo);
-foo();
-%OptimizeFunctionOnNextCall(foo);
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1137979.js b/deps/v8/test/mjsunit/regress/regress-1137979.js
deleted file mode 100644
index 2e06a9c3c3..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1137979.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --no-lazy-feedback-allocation
-// Flags: --noanalyze-environment-liveness
-
-function foo() {
- try {
- bar();
- } catch (e) {}
- for (var i = 0; i < 3; i++) {
- try {
- %PrepareFunctionForOptimization(foo);
- %OptimizeOsr();
- } catch (e) {}
- }
-}
-
-foo();
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1138075.js b/deps/v8/test/mjsunit/regress/regress-1138075.js
deleted file mode 100644
index e68e1b5471..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1138075.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --max-semi-space-size=1
-
-function runNearStackLimit(f) {
- function t() {
- try {
- return t();
- } catch (e) {
- return f();
- }
- }
- %PrepareFunctionForOptimization(t);
- %OptimizeFunctionOnNextCall(t);
- return t();
-}
-
-function foo(a) {}
-function bar(a, b) {}
-
-for (let i = 0; i < 150; i++) {
- runNearStackLimit(() => {
- return foo(bar(3, 4) === false);
- });
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-1138611.js b/deps/v8/test/mjsunit/regress/regress-1138611.js
deleted file mode 100644
index bca6a4bd09..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1138611.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --gc-interval=1000
-
-function runNearStackLimit(f) {
- function t() {
- try {
- return t();
- } catch (e) {
- return f();
- }
- }
- %PrepareFunctionForOptimization(t);
- %OptimizeFunctionOnNextCall(t);
- return t();
-}
-
-function foo() {
- runNearStackLimit(() => {});
-}
-
-(function () {
- var a = 42;
- var b = 153;
- try {
- Object.defineProperty({});
- } catch (e) {}
- foo();
- foo();
-})();
-
-runNearStackLimit(() => {});
diff --git a/deps/v8/test/mjsunit/regress/regress-1154961.js b/deps/v8/test/mjsunit/regress/regress-1154961.js
deleted file mode 100644
index 5bc1a8e8b8..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1154961.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --stack-size=100
-
-function runNearStackLimit(f) {
- function t() {
- try {
- return t();
- } catch (e) {
- return f();
- }
- }
- return t();
-}
-
-function baz(f) {
- return [[f(1)], 1, 2, 3, 4, 5, 6, 7, 8];
-}
-
-function foo(__v_3) {
- try {
- var arr = baz(__v_3);
- } catch (e) {}
- try {
- for (var i = 0; i < arr.length; i++) {
- function bar() {
- return arr[i];
- }
- try {
- throw e;
- } catch (e) {}
- }
- } catch (e) {}
-}
-
-%PrepareFunctionForOptimization(foo);
-foo(a => a);
-foo(a => a);
-%OptimizeFunctionOnNextCall(foo);
-runNearStackLimit(() => { foo(a => a); });
diff --git a/deps/v8/test/mjsunit/regress/regress-1163715.js b/deps/v8/test/mjsunit/regress/regress-1163715.js
deleted file mode 100644
index c0838c213a..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1163715.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --turboprop --allow-natives-syntax
-
-let last_value;
-let throwFunc;
-
-function foo(count) {
- let val = 1;
- for (let i = 16; i < count; ++i) {
- try {
- throwFunc();
- } catch (e) {
- }
- val *= 2;
- last_value = val;
- }
-}
-
-%PrepareFunctionForOptimization(foo);
-foo(20);
-foo(21);
-%OptimizeFunctionOnNextCall(foo);
-foo(47);
-assertEquals(2147483648, last_value);
diff --git a/deps/v8/test/mjsunit/regress/regress-1168435.js b/deps/v8/test/mjsunit/regress/regress-1168435.js
index 9667f4d7fe..165f5a77ea 100644
--- a/deps/v8/test/mjsunit/regress/regress-1168435.js
+++ b/deps/v8/test/mjsunit/regress/regress-1168435.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --concurrent-inlining
+// Flags: --allow-natives-syntax
function bar() {
arr = new Array(4);
diff --git a/deps/v8/test/mjsunit/regress/regress-1172797.js b/deps/v8/test/mjsunit/regress/regress-1172797.js
deleted file mode 100644
index 05d39a1b86..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1172797.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --opt --no-always-opt
-
-
-var v_0 = {};
-function f_0(o, v) {
- o.f = v;
-}
-
-function f_1() {
- return v_0.f;
-}
-
-%PrepareFunctionForOptimization(f_0);
-f_0(v_0, 42);
-f_0(v_0, 42);
-%OptimizeFunctionOnNextCall(f_0);
-f_0(v_0, 42);
-
-// TP tier up
-%PrepareFunctionForOptimization(f_1);
-f_1();
-f_1();
-%OptimizeFunctionOnNextCall(f_1);
-f_1();
-// Now TF tier up
-%PrepareFunctionForOptimization(f_1);
-f_1();
-%TierupFunctionOnNextCall(f_1);
-f_1();
-
-assertOptimized(f_0);
-// TODO(mythria): Add an option to assert on the optimization tier and assert
-// f_1 is optimized with TurboFan.
-assertOptimized(f_1);
-// Store in f_0 should trigger a change to the constness of the field.
-f_0(v_0, 53);
-// f_0 does a eager deopt and lets the interpreter update the field constness.
-assertUnoptimized(f_0);
-if (!%IsTopTierTurboprop()) {
- // f_1 has TurboFan code and should deopt because of dependency change.
- assertUnoptimized(f_1);
-}
-assertEquals(v_0.f, 53);
-assertEquals(f_1(), 53);
diff --git a/deps/v8/test/mjsunit/regress/regress-1201114.js b/deps/v8/test/mjsunit/regress/regress-1201114.js
deleted file mode 100644
index 7f81d63c95..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1201114.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --turboprop --allow-natives-syntax --print-code
-
-var a = {b: 1};
-function nop() { return false; }
-function __f_4(a) { return a; }
-function __f_5(__v_2) {
- __f_4(__v_2.a);
- nop(__f_5)&a.b;
-}
-%PrepareFunctionForOptimization(__f_5);
-__f_5(true);
-%OptimizeFunctionOnNextCall(__f_5);
-try {
- __f_5();
-} catch {}
diff --git a/deps/v8/test/mjsunit/regress/regress-1208805.js b/deps/v8/test/mjsunit/regress/regress-1208805.js
new file mode 100644
index 0000000000..4b6490664e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1208805.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+'use strict'
+
+function outer_func() {
+ function inner_func() {
+ let step = 1;
+
+ for (let i = 0; i < 10; i = i + step) {
+ const v14 = step++;
+ const v15 = v14 * i;
+ function dumb_func() {
+ }
+ }
+ }
+ for (let v24 = 0; v24 < 10; v24++) {
+ inner_func();
+ }
+}
+for (let j = 0; j < 10000; j++) {
+ outer_func();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1223733.js b/deps/v8/test/mjsunit/regress/regress-1223733.js
deleted file mode 100644
index be5fd915f4..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1223733.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks
-
-function main() {
- // Store something onto a function prototype so we will bailout of the
- // function.prototype load optimization in NativeContextSpecialization.
- isNaN.prototype = 14;
- const v14 = isNaN.prototype;
-}
-%PrepareFunctionForOptimization(main);
-main();
-%OptimizeFunctionOnNextCall(main);
-main();
diff --git a/deps/v8/test/mjsunit/regress/regress-1225561.js b/deps/v8/test/mjsunit/regress/regress-1225561.js
deleted file mode 100644
index 5ec2d665ad..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1225561.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turboprop --turbo-dynamic-map-checks
-
-function bar(obj) {
- return Object.getPrototypeOf(obj);
-}
-
-function foo(a, b) {
- try {
- a.a;
- } catch (e) {}
- try {
- b[bar()] = 1;
- } catch (e) {}
-}
-
-var arg = {
- a: 10,
-};
-
-%PrepareFunctionForOptimization(foo);
-foo(arg);
-%OptimizeFunctionOnNextCall(foo);
-foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-12495.js b/deps/v8/test/mjsunit/regress/regress-12495.js
new file mode 100644
index 0000000000..282ef2dd00
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-12495.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt() {
+ try {
+ Reflect.apply("".localeCompare, undefined, [undefined]);
+ return false;
+ } catch(e) {
+ return true;
+ }
+}
+
+%PrepareFunctionForOptimization(opt);
+assertTrue(opt());
+assertTrue(opt());
+%OptimizeFunctionOnNextCall(opt);
+assertTrue(opt());
+assertTrue(opt());
diff --git a/deps/v8/test/mjsunit/regress/regress-12580.js b/deps/v8/test/mjsunit/regress/regress-12580.js
new file mode 100644
index 0000000000..3b43a95a84
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-12580.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Foo extends function () {
+ return new Proxy(Object.create(new.target.prototype), {}); } {
+ #bar = 7;
+ has() { return #bar in this; }
+};
+
+assertTrue((new Foo()).has());
diff --git a/deps/v8/test/mjsunit/regress/regress-12657.js b/deps/v8/test/mjsunit/regress/regress-12657.js
new file mode 100644
index 0000000000..e21b7174f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-12657.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --gc-global --expose-statistics --max-semi-space-size=1
+
+const a = new Array();
+for (var i = 0; i < 50000; i++) {
+ a[i] = new Object();
+}
+assertTrue(getV8Statistics().new_space_commited_bytes <= 2 * 1024 * 1024);
diff --git a/deps/v8/test/mjsunit/regress/regress-385565.js b/deps/v8/test/mjsunit/regress/regress-385565.js
index 96dded2a00..1403119681 100644
--- a/deps/v8/test/mjsunit/regress/regress-385565.js
+++ b/deps/v8/test/mjsunit/regress/regress-385565.js
@@ -50,10 +50,6 @@ callsFReceiver(o1);
var r2 = callsFReceiver(o1);
assertOptimized(callsFReceiver);
callsFReceiver(o2);
-if (%DynamicCheckMapsEnabled()) {
- // Call it again to ensure a deopt when dynamic map checks is enabled.
- callsFReceiver(o2);
-}
assertUnoptimized(callsFReceiver);
%PrepareFunctionForOptimization(callsFReceiver);
@@ -76,9 +72,4 @@ assertEquals(1, r1);
assertTrue(r1 === r2);
assertTrue(r2 === r3);
-
-if (%DynamicCheckMapsEnabled()) {
- assertEquals(11, calls);
-} else {
- assertEquals(10, calls);
-}
+assertEquals(10, calls);
diff --git a/deps/v8/test/mjsunit/regress/regress-3969.js b/deps/v8/test/mjsunit/regress/regress-3969.js
index 31aeae31ee..6a92d59ac2 100644
--- a/deps/v8/test/mjsunit/regress/regress-3969.js
+++ b/deps/v8/test/mjsunit/regress/regress-3969.js
@@ -27,7 +27,7 @@ assertEquals("OK", outer.boom());
assertEquals("OK", outer.boom());
inner = undefined;
-%SetAllocationTimeout(0 /*interval*/, 2 /*timeout*/);
+%SetAllocationTimeout(6 /*interval*/, 2 /*timeout*/);
// Call something that will do GC while holding a handle to outer's map.
// The key is that this lets inner's map die while keeping outer's map alive.
delete outer.inner;
diff --git a/deps/v8/test/mjsunit/regress/regress-4578.js b/deps/v8/test/mjsunit/regress/regress-4578.js
index d8dbd1c6bd..a22357beb4 100644
--- a/deps/v8/test/mjsunit/regress/regress-4578.js
+++ b/deps/v8/test/mjsunit/regress/regress-4578.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-gc --opt --no-concurrent-inlining
+// Flags: --allow-natives-syntax --expose-gc --opt --no-concurrent-recompilation
// Flags: --no-stress-opt --no-always-opt --no-assert-types
// This weak ref is for checking whether the closure-allocated object o got
diff --git a/deps/v8/test/mjsunit/regress/regress-752764.js b/deps/v8/test/mjsunit/regress/regress-752764.js
deleted file mode 100644
index 106d9edd87..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-752764.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --nostress-incremental-marking
-// Stress-testing this test is very slow and provides no useful coverage.
-// Flags: --nostress-opt --noalways-opt
-
-// This test uses a lot of memory and fails with flaky OOM when run
-// with --stress-incremental-marking on TSAN.
-
-a = "a".repeat(%StringMaxLength() - 3);
-assertThrows(() => new RegExp("a" + a), SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-794822.js b/deps/v8/test/mjsunit/regress/regress-794822.js
index bc769e703d..5b4861406f 100644
--- a/deps/v8/test/mjsunit/regress/regress-794822.js
+++ b/deps/v8/test/mjsunit/regress/regress-794822.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax
function* opt(arg = () => arg) {
- let tmp = opt.x; // LdaNamedProperty
+ let tmp = opt.x; // GetNamedProperty
for (;;) {
arg;
yield;
diff --git a/deps/v8/test/mjsunit/regress/regress-936077.js b/deps/v8/test/mjsunit/regress/regress-936077.js
index 2bc6dc52c7..13eb2e4d57 100644
--- a/deps/v8/test/mjsunit/regress/regress-936077.js
+++ b/deps/v8/test/mjsunit/regress/regress-936077.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-inlining --function-context-specialization
+// Flags: --expose-gc --allow-natives-syntax --function-context-specialization
function main() {
var obj = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-binary-bitwise-bigint-smi-mix-opt-depot.js b/deps/v8/test/mjsunit/regress/regress-binary-bitwise-bigint-smi-mix-opt-depot.js
new file mode 100644
index 0000000000..cc751fcde2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-binary-bitwise-bigint-smi-mix-opt-depot.js
@@ -0,0 +1,56 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --allow-natives-syntax --no-always-opt
+
+function foo(n) {
+ let v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v | 1;
+ v = i;
+ }
+
+ v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v ^ 1;
+ v = i;
+ }
+
+ v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v & 1;
+ v = i;
+ }
+
+ v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v << 1;
+ v = i;
+ }
+
+ v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v >> 1;
+ v = i;
+ }
+
+ v = 0;
+ for (let i = 0n; i < n; ++i) {
+ v = v >>> 1;
+ v = i;
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+assertDoesNotThrow(() => foo(1n));
+%OptimizeFunctionOnNextCall(foo);
+assertDoesNotThrow(() => foo(1n));
+assertOptimized(foo);
+%PrepareFunctionForOptimization(foo);
+assertThrows(() => foo(2n), TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertDoesNotThrow(() => foo(1n));
+assertOptimized(foo);
+assertThrows(() => foo(2n), TypeError);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1017159.js b/deps/v8/test/mjsunit/regress/regress-crbug-1017159.js
index 27d6ce97d9..e30955dc55 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1017159.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1017159.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax
function* foo() {
- __v_1 = foo.x; // LdaNamedProperty
+ __v_1 = foo.x; // GetNamedProperty
for (;;) {
try {
yield;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
index 2b26148cd0..44142462d4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --interrupt-budget=200 --stack-size=200
-// Flags: --budget-for-feedback-vector-allocation=100 --expose-gc
+// Flags: --interrupt-budget-for-feedback-allocation=100 --expose-gc
// Flags: --stress-flush-code --flush-bytecode
var i = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1206289.js b/deps/v8/test/mjsunit/regress/regress-crbug-1206289.js
new file mode 100644
index 0000000000..fe06bb18b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1206289.js
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-flush-bytecode
+
+function outer() {
+ function asm_broken(a, import_obj) {
+ "use asm";
+ // import_obj is expected to be an object and this causes
+ // asm_wasm_broken to be set to true when instantiating at runtime.
+ var v = import_obj.x;
+ function inner() {
+ }
+ return inner;
+ }
+ var m = asm_broken();
+}
+
+assertThrows(outer);
+gc();
+assertThrows(outer);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js b/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js
index 1b0eac4903..5356a6723c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --budget-for-feedback-vector-allocation=0 --interrupt-budget=1000
+// Flags: --interrupt-budget-for-feedback-allocation=0 --interrupt-budget=1000
(function() {
Empty = function() {};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1262750.js b/deps/v8/test/mjsunit/regress/regress-crbug-1262750.js
new file mode 100644
index 0000000000..c661ff7951
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1262750.js
@@ -0,0 +1,36 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test calling a class constructor on a polymorphic object throws a TypeError.
+function f(o) {
+ o.get();
+}
+
+let obj = new Map();
+%PrepareFunctionForOptimization(f);
+f(obj);
+f(obj);
+
+obj.get = class C {};
+assertThrows(() => f(obj), TypeError);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(() => f(obj), TypeError);
+
+// Test calling a closure of a class constructor throws a TypeError.
+function g(a) {
+ var f;
+ f = class {};
+ if (a == 1) {
+ f = function() {};
+ }
+ f();
+}
+
+%PrepareFunctionForOptimization(g);
+assertThrows(g, TypeError);
+assertThrows(g, TypeError);
+%OptimizeFunctionOnNextCall(g);
+assertThrows(g, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1276923.js b/deps/v8/test/mjsunit/regress/regress-crbug-1276923.js
new file mode 100644
index 0000000000..c3dd1ce075
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1276923.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=10
+
+// Base case where a GC observable store might be temporarily shadowed.
+function foo() {
+ let i = 0.1;
+ eval();
+ if (i) {
+ const c = {};
+ eval();
+ }
+}
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+// Stress execution with GCs.
+function bar() {
+ for (let cnt = 0, i = 655; cnt < 10000 && i !== 1; cnt++, i = i / 3) {
+ i %= 2;
+ const c = { "b": 1, "a":1, "c": 1, "d": 1 };
+ eval();
+ }
+}
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1277863.js b/deps/v8/test/mjsunit/regress/regress-crbug-1277863.js
new file mode 100644
index 0000000000..22475306d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1277863.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+class A extends Array {
+ [3] = [];
+}
+
+new A();
+new A();
+new A();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1278086.js b/deps/v8/test/mjsunit/regress/regress-crbug-1278086.js
new file mode 100644
index 0000000000..422649853c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1278086.js
@@ -0,0 +1,79 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+{
+ class C {
+ field = c.concat();
+ }
+
+ var c;
+ assertThrows(() => {
+ c = new C();
+ }, TypeError);
+}
+
+// Anonymous class
+{
+ const C = class {
+ field = c.concat();
+ }
+
+ var c;
+ assertThrows(() => {
+ c = new C();
+ }, TypeError);
+}
+
+class D {
+ field = ({ d } = undefined);
+}
+
+var d;
+assertThrows(
+ () => {
+ d = new D();
+ },
+ TypeError,
+ /Cannot destructure property 'd' of 'undefined' as it is undefined/);
+
+class B {
+ static B = class B {
+ field = b.concat();
+ }
+ static func() {
+ return B; // keep the context for class B
+ }
+}
+var b;
+assertThrows(() => {
+ b = new B.B();
+}, TypeError);
+
+class A {
+ static B = class B {
+ field = a.concat();
+ }
+ static func() {
+ return A; // keep the context for class A
+ }
+}
+var a;
+assertThrows(() => {
+ a = new A.B();
+}, TypeError);
+
+class E {
+ #x = 1;
+ static B = class B {
+ field = this.#x;
+ }
+}
+
+var e;
+assertThrows(
+ () => { e = new E.B(); },
+ TypeError,
+ /Cannot read private member #x from an object whose class did not declare it/);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1290587.js b/deps/v8/test/mjsunit/regress/regress-crbug-1290587.js
new file mode 100644
index 0000000000..c1934e38ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1290587.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = undefined;
+{
+ class C {
+ field = a.instantiate();
+ }
+
+ assertThrows(() => {
+ let c = new C;
+ }, TypeError);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-977089.js b/deps/v8/test/mjsunit/regress/regress-crbug-977089.js
index d7db660bfe..bde596a834 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-977089.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-977089.js
@@ -28,7 +28,7 @@ var foo = function() {
// transition with a different accessor.
//
// Note that the parent function `foo` can't be an IIFE, as then this callsite
- // would use the NoFeedback version of the LdaNamedProperty bytecode, and this
+ // would use the NoFeedback version of the GetNamedProperty bytecode, and this
// doesn't trigger the map update.
v1.x;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-990582.js b/deps/v8/test/mjsunit/regress/regress-crbug-990582.js
index e78775fdbb..fe4c196c4e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-990582.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-990582.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --invoke-weak-callbacks --budget-for-feedback-vector-allocation=0
+// Flags: --invoke-weak-callbacks --interrupt-budget-for-feedback-allocation=0
__v_0 = 0;
function __f_0() {
diff --git a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
index fceac248e9..c2e6212eb1 100644
--- a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
@@ -27,10 +27,6 @@ g1({ f : 1});
%OptimizeFunctionOnNextCall(g2);
g2({ f : 2});
g1({});
-if (%DynamicCheckMapsEnabled()) {
- // One more call to ensure a deopt even if dynamic map checks is enabled.
- g1({});
-}
assertUnoptimized(g1);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12122.js b/deps/v8/test/mjsunit/regress/regress-v8-12122.js
new file mode 100644
index 0000000000..bfd13d9ef4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12122.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const promiseAllCallOnNonObjectErrorMessage =
+ 'Promise.all called on non-object';
+const promiseAllSettledCallOnNonObjectErrorMessage =
+ 'Promise.allSettled called on non-object';
+
+assertThrows(
+ () => Promise.all.call(), TypeError, promiseAllCallOnNonObjectErrorMessage);
+assertThrows(
+ () => Promise.allSettled.call(), TypeError,
+ promiseAllSettledCallOnNonObjectErrorMessage);
+assertThrows(
+ () => Promise.all.apply(), TypeError,
+ promiseAllCallOnNonObjectErrorMessage);
+assertThrows(
+ () => Promise.allSettled.apply(), TypeError,
+ promiseAllSettledCallOnNonObjectErrorMessage);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12472.js b/deps/v8/test/mjsunit/regress/regress-v8-12472.js
new file mode 100644
index 0000000000..12ce4eee08
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12472.js
@@ -0,0 +1,10 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const manyAs = 'A'.repeat(0x10000);
+const manyas = manyAs.toLowerCase();
+const re = RegExp('^(?:' + manyas + '|' + manyAs + '|' + manyAs + ')$', 'i');
+
+// Shouldn't crash with a stack overflow.
+assertThrows(() => manyas.replace(re, manyAs));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12595.js b/deps/v8/test/mjsunit/regress/regress-v8-12595.js
new file mode 100644
index 0000000000..b11e190ab3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12595.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const iterable = {
+ [Symbol.iterator]: () => ({
+ next: () => ({
+ done: false,
+ get value() {
+ assertUnreachable()
+ print('"value" getter is called');
+ return 42;
+ }
+ })
+ })
+};
+
+[,] = iterable;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12671.js b/deps/v8/test/mjsunit/regress/regress-v8-12671.js
new file mode 100644
index 0000000000..5471f3ac4a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12671.js
@@ -0,0 +1,74 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// spec:
+// https://tc39.es/ecma262/multipage/ordinary-and-exotic-objects-behaviours.html
+// #sec-functiondeclarationinstantiation
+
+// This regession focus on checking [var declared "arguments" should bind to
+// "arguments exotic object" when has_simple_parameters_ is false and
+// inner_scope has var declared "arguments"]
+
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is false, and according to step 15-18,
+// argumentsObjectNeeded is true, according to step 27,
+// var declared arguments should bind to arguments exotic object
+function no_parameters_and_non_lexical_arguments() {
+ assertEquals(typeof arguments, 'object');
+ var arguments;
+}
+no_parameters_and_non_lexical_arguments()
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is false, and according to step 15-18,
+// argumentsObjectNeeded is true, according to step 28,
+// var declared arguments should bind to arguments exotic object
+function destructuring_parameters_and_non_lexical_arguments([_]) {
+ assertEquals(typeof arguments, 'object');
+ var arguments;
+}
+destructuring_parameters_and_non_lexical_arguments([])
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is false, and according to step 15-18,
+// argumentsObjectNeeded is true, according to step 28,
+// var declared arguments should bind to arguments exotic object
+function rest_parameters_and_non_lexical_arguments(..._) {
+ assertEquals(typeof arguments, 'object');
+ var arguments;
+}
+rest_parameters_and_non_lexical_arguments()
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is true, and according to step 15-18,
+// argumentsObjectNeeded is true, according to step 28,
+// var declared arguments should bind to arguments exotic object
+function initializer_parameters_and_non_lexical_arguments(_ = 0) {
+ assertEquals(typeof arguments, 'object');
+ var arguments;
+}
+initializer_parameters_and_non_lexical_arguments()
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is true, and according to step 15-18,
+// and argumentsObjectNeeded is true, according to step 34, should
+// throw because access to let declared arguments
+function initializer_parameters_and_lexical_arguments(_ = 0) {
+ return typeof arguments;
+ let arguments;
+}
+
+assertThrows(initializer_parameters_and_lexical_arguments);
+
+// according to ES#sec-functiondeclarationinstantiation step 8,
+// hasParameterExpressions is false, and according to step 15-18,
+// argumentsObjectNeeded is false, according to step 34,
+// should throw because access to let declared arguments
+function simple_parameters_and_lexical_arguments(_) {
+ return typeof arguments;
+ let arguments;
+}
+
+assertThrows(simple_parameters_and_lexical_arguments);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12688.js b/deps/v8/test/mjsunit/regress/regress-v8-12688.js
new file mode 100644
index 0000000000..8d2189d5c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12688.js
@@ -0,0 +1,33 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var yield;
+({p: yield} = class {
+ q = () => 42;
+});
+
+var yield;
+({p: yield} = class {
+ q = (a) => 42;
+});
+
+var yield;
+({p: yield} = class {
+ q = a => 42;
+});
+
+var yield;
+({p: yield} = class {
+ q = async a => 42;
+});
+
+var yield;
+({p: yield} = class {
+ q = async (a) => 42;
+});
+
+var yield;
+({p: yield} = class {
+ q = async () => 42;
+});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
index d57df6b3e3..65976751a1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --liftoff --no-wasm-tier-up --wasm-tier-mask-for-testing=2
-// Flags: --experimental-wasm-reftypes
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1200231.js b/deps/v8/test/mjsunit/regress/wasm/regress-1200231.js
index f836ba95cc..88a7d44d4c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1200231.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1200231.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --trace-turbo-graph
+// Flags: --trace-turbo-graph
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-12624.js b/deps/v8/test/mjsunit/regress/wasm/regress-12624.js
new file mode 100644
index 0000000000..ddb9da17d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-12624.js
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-liftoff
+
+// Under certain conditions, the following subgraph was not optimized correctly:
+// cond
+// | \
+// | Branch
+// | / \
+// | IfTrue IfFalse
+// | |
+// TrapUnless
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+
+builder.addGlobal(kWasmI32, true, WasmInitExpr.I32Const(1));
+
+builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprIf, kWasmI32,
+ kExprUnreachable,
+ kExprElse,
+ kExprLoop, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprBrIf, 1,
+ kExprI32Const, 7,
+ kExprLocalSet, 0,
+ kExprEnd,
+ kExprGlobalGet, 0,
+ kExprIf, kWasmVoid,
+ kExprI32Const, 0, kExprReturn,
+ kExprEnd,
+ kExprBr, 1,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 0,
+ kExprEnd,
+ kExprLocalGet, 0,
+ kExprI32DivU])
+ .exportFunc();
+
+let instance = builder.instantiate();
+assertEquals(0, instance.exports.main(0));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1271244.js b/deps/v8/test/mjsunit/regress/wasm/regress-1271244.js
new file mode 100644
index 0000000000..b3aff6ea6d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1271244.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('main', makeSig([], [kWasmI32, kWasmF64, kWasmF64]))
+ .addBody([
+ kExprI32Const, 1, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprF64x2PromoteLowF32x4, // f64x2.promote_low_f32x4
+ kSimdPrefix, kExprI8x16ExtractLaneS, 0, // i8x16.extract_lane_s
+ ...wasmF64Const(2), // f64.const
+ ...wasmF64Const(1), // f64.const
+ ]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1271538.js b/deps/v8/test/mjsunit/regress/wasm/regress-1271538.js
new file mode 100644
index 0000000000..d132914f20
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1271538.js
@@ -0,0 +1,44 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+builder.addFunction('main', makeSig([], [kWasmS128]))
+ .addBody([
+kExprI32Const, 0, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprI32Const, 2, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprI32Const, 3, // i32.const
+kSimdPrefix, kExprI16x8ShrS, 0x01, // i16x8.shr_s
+kExprI32Const, 0xc4, 0x88, 0x91, 0xa2, 0x04, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kSimdPrefix, kExprI16x8ExtAddPairwiseI8x16S, // i16x8.extadd_pairwise_i8x6_s
+kSimdPrefix, kExprI16x8AddSatU, 0x01, // i16x8.add_sat_u
+kExprI32Const, 0xac, 0x92, 0x01, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprF32Const, 0x2b, 0x2b, 0x2b, 0x49, // f32.const
+kSimdPrefix, kExprF32x4ReplaceLane, 0x00, // f32x4.replace_lane
+kSimdPrefix, kExprI16x8ExtAddPairwiseI8x16S, // i16x8.extadd_pairwise_i8x6_s
+kSimdPrefix, kExprI16x8RoundingAverageU, 0x01, // i16x8.avgr_u
+kExprI32Const, 0, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kSimdPrefix, kExprI64x2UConvertI32x4High, 0x01, // i64x2.convert_i32x4_high_u
+kSimdPrefix, kExprI64x2SConvertI32x4High, 0x01, // i64x2.convert_i32x4_high_s
+kExprI32Const, 0, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprF32Const, 0, 0, 0, 0, // f32.const
+kSimdPrefix, kExprF32x4ReplaceLane, 0x00, // f32x4.replace_lane
+kExprI32Const, 0, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kSimdPrefix, kExprI16x8ExtMulLowI8x16U, 0x01, // i16x8.extmul_low_i8x16_u
+kSimdPrefix, kExprI16x8LeU, // i16x8.le_u
+kSimdPrefix, kExprI8x16GtS, // i8x16.gt_s
+kSimdPrefix, kExprI32x4Ne, // i32x4.ne
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1272204.js b/deps/v8/test/mjsunit/regress/wasm/regress-1272204.js
new file mode 100644
index 0000000000..58975c90e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1272204.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addFunction(undefined, kSig_i_iii).addBody([
+ kExprI64Const, 0x7a, // i64.const
+ kExprI64Const, 0x7f, // i64.const
+ kExprI64Const, 0x7e, // i64.const
+ kExprI64Add, // i64.add
+ kExprI64DivS, // i64.div_s
+ kExprUnreachable, // unreachable
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1279151.js b/deps/v8/test/mjsunit/regress/wasm/regress-1279151.js
new file mode 100644
index 0000000000..066894ad71
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1279151.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+// --experimental-wasm-gc should not affect asm-js modules.
+
+function NewModule() {
+ "use asm";
+ function foo() {}
+ return {foo:foo};
+};
+
+var v = NewModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1282224.js b/deps/v8/test/mjsunit/regress/wasm/regress-1282224.js
new file mode 100644
index 0000000000..68d4dbfc5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1282224.js
@@ -0,0 +1,31 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals(kWasmS128, 2)
+ .addBody([
+ ...wasmF32Const(0), // f32.const
+ ...wasmI32Const(0), // f32.const
+ kExprF32SConvertI32, // f32.convert_i32_s
+ kExprLocalGet, 3, // local.get
+ kSimdPrefix, kExprI64x2AllTrue, 0x01, // i64x2.all_true
+ kExprSelect, // select
+ kExprLocalGet, 4, // local.get
+ ...wasmS128Const(new Array(16).fill(0)), // s128.const
+ kSimdPrefix, kExprI8x16Eq, // i8x16.eq
+ kSimdPrefix, kExprI64x2AllTrue, 0x01, // i64x2.all_true
+ kExprF32SConvertI32, // f32.convert_i32_s
+ ...wasmS128Const(new Array(16).fill(0)), // s128.const
+ kSimdPrefix, kExprI64x2AllTrue, 0x01, // i64x2.all_true
+ kExprSelect, // select
+ kExprF32Const, 0x00, 0x00, 0x80, 0x3f, // f32.const
+ kExprF32Ge, // f32.ge
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1283042.js b/deps/v8/test/mjsunit/regress/wasm/regress-1283042.js
new file mode 100644
index 0000000000..9455a1b297
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1283042.js
@@ -0,0 +1,29 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addFunction(undefined, kSig_i_iii)
+ .addBody([
+ ...wasmS128Const(new Array(16).fill(0)), // s128.const
+ kSimdPrefix, kExprF64x2ConvertLowI32x4U, 0x01, // f64x2.convert_low_i32x4_u
+ kSimdPrefix, kExprI64x2UConvertI32x4Low, 0x01, // i64x2.convert_i32x4_low_u
+ kSimdPrefix, kExprI64x2BitMask, 0x01, // i64x2.bitmask
+ ...wasmF64Const(0), // f64.const
+ kNumericPrefix, kExprI32SConvertSatF64, // i32.trunc_sat_f64_s
+ ...wasmI32Const(0), // i32.const
+ kExprCallFunction, 0, // call
+ kExprDrop, // drop
+ ...wasmI32Const(0), // i32.const
+ ...wasmI64Const(0), // i64.const
+ kExprI64StoreMem16, 0x00, 0x00, // i64.store16
+ ...wasmF32Const(0), // f32.const
+ kExprF32Sqrt, // f32.sqrt
+ kExprI32UConvertF32, // i32.trunc_f32_u
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1283395.js b/deps/v8/test/mjsunit/regress/wasm/regress-1283395.js
new file mode 100644
index 0000000000..a8055f6df8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1283395.js
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+// Generate function 1 (out of 3).
+builder.addFunction(undefined, makeSig([kWasmI32, kWasmI32, kWasmI32], []))
+ .addLocals(kWasmS128, 1)
+ .addBody([
+ kExprTry, kWasmVoid, // try i32
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprLocalTee, 0x03, // local.tee
+ kExprCallFunction, 2, // call function #2
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprLocalGet, 0x03, // local.get
+ kExprCallFunction, 2, // call function #2
+ kExprTry, kWasmVoid, // try
+ kExprLocalGet, 0x03, // local.get
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprCallFunction, 1, // call function #1
+ kExprCatchAll, // catch-all
+ kExprEnd, // end
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI32AtomicCompareExchange16U, 0x01, 0x80, 0x80, 0xc0, 0x9b, 0x07, // i32.atomic.cmpxchng16_u
+ kExprDrop, // drop
+ kExprCatchAll, // catch-all
+ kExprEnd, // end
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprCallFunction, 1, // call function #1
+]);
+// Generate function 2 (out of 3).
+builder.addFunction(undefined, makeSig([kWasmS128, kWasmF64, kWasmF32], []))
+ .addBody([kExprUnreachable]);
+// Generate function 3 (out of 3).
+builder.addFunction(undefined, makeSig([kWasmF64, kWasmS128], [])).addBody([
+ kExprUnreachable
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1284980.js b/deps/v8/test/mjsunit/regress/wasm/regress-1284980.js
new file mode 100644
index 0000000000..a173673aa0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1284980.js
@@ -0,0 +1,38 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], []));
+builder.addType(makeSig([kWasmS128], []));
+builder.addType(makeSig([], [kWasmF64, kWasmF64]));
+builder.addTag(makeSig([], []));
+// Generate function 1 (out of 3).
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals(kWasmI32, 2).addLocals(kWasmF32, 1).addLocals(kWasmI32, 1).addLocals(kWasmF64, 1)
+ .addBody([
+ kExprTry, kWasmVoid, // try i32
+ kExprCallFunction, 2, // call function #2
+ kExprI32Const, 0, // i32.const
+ kExprSelect, // select
+ kExprI32SConvertF64, // i32.trunc_f64_s
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprS128Store8Lane, 0x00, 0x00, 0x00, // s128.store8_lane
+ kExprCatch, 0, // catch
+ kExprCatchAll, // catch-all
+ kExprEnd, // end
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprCallFunction, 1, // call function #1
+]);
+// Generate function 2 (out of 3).
+builder.addFunction(undefined, 1 /* sig */).addBody([kExprUnreachable]);
+// Generate function 3 (out of 3).
+builder.addFunction(undefined, 2 /* sig */).addBody([kExprUnreachable]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1286253.js b/deps/v8/test/mjsunit/regress/wasm/regress-1286253.js
new file mode 100644
index 0000000000..e6f25d03c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1286253.js
@@ -0,0 +1,26 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_iii)
+ .addBody([
+ ...wasmS128Const(new Array(16).fill(0)), // s128.const
+ kSimdPrefix, kExprI8x16ExtractLaneU, 0x00, // i8x16.extract_lane_u
+ ...wasmS128Const(new Array(16).fill(0)), // s128.const
+ kSimdPrefix, kExprF32x4ExtractLane, 0x00, // f32x4.extract_lane
+ kNumericPrefix, kExprI64SConvertSatF32, // i64.trunc_sat_f32_s
+ kExprF32Const, 0x13, 0x00, 0x00, 0x00, // f32.const
+ kNumericPrefix, kExprI64SConvertSatF32, // i64.trunc_sat_f32_s
+ kExprI64Ior, // i64.or
+ kExprI32ConvertI64, // i32.wrap_i64
+ ...wasmF32Const(0), // f32.const
+ kNumericPrefix, kExprI64SConvertSatF32, // i64.trunc_sat_f32_s
+ kExprI32ConvertI64, // i32.wrap_i64
+ kExprSelect, // select
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1289678.js b/deps/v8/test/mjsunit/regress/wasm/regress-1289678.js
new file mode 100644
index 0000000000..1d6a6b6e82
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1289678.js
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([], [kWasmS128, kWasmF64, kWasmS128, kWasmF64, kWasmF64, kWasmF32, kWasmF64, kWasmS128, kWasmF32]));
+builder.addFunction('foo', kSig_v_v)
+ .addBody([
+kExprBlock, /* sig */ 0, // block
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprBr, 0, // br depth=0
+ kExprUnreachable, // unreachable
+ kExprEnd, // end
+kExprUnreachable, // unreachable
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1290079.js b/deps/v8/test/mjsunit/regress/wasm/regress-1290079.js
new file mode 100644
index 0000000000..7f5678a5ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1290079.js
@@ -0,0 +1,47 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff --turbo-force-mid-tier-regalloc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, makeSig([], [kWasmS128]))
+ .addBody([
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+kSimdPrefix, kExprI8x16GtS, // i8x16.gt_s
+kSimdPrefix, kExprI16x8Ne, // i16x8.ne
+...wasmS128Const(new Array(16).fill(1)), // s128.const
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+...wasmS128Const(new Array(16).fill(2)), // s128.const
+kSimdPrefix, kExprI16x8Eq, // i16x8.eq
+kSimdPrefix, kExprI16x8Ne, // i16x8.ne
+...wasmS128Const(new Array(16).fill(1)), // s128.const
+...wasmS128Const(new Array(16).fill(1)), // s128.const
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+kSimdPrefix, kExprI16x8AddSatU, 0x01, // i16x8.add_sat_u
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+kSimdPrefix, kExprI16x8Sub, 0x01, // i16x8.sub
+kSimdPrefix, kExprI64x2ExtMulHighI32x4U, 0x01, // i64x2.extmul_high_i32x4_u
+kSimdPrefix, kExprI64x2ExtMulLowI32x4S, 0x01, // i64x2.extmul_low_i32x4_s
+kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+kExprF32Mul, // f32.mul
+kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+...wasmS128Const(new Array(16).fill(0)), // s128.const
+kSimdPrefix, kExprI16x8ExtractLaneS, 0x00, // i16x8.extract_lane_s
+kExprSelect, // select
+kNumericPrefix, kExprI32SConvertSatF32, // i32.trunc_sat_f32_s
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+kSimdPrefix, kExprI16x8Ne, // i16x8.ne
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1294384.js b/deps/v8/test/mjsunit/regress/wasm/regress-1294384.js
new file mode 100644
index 0000000000..1bbd19e06c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1294384.js
@@ -0,0 +1,91 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([], [kWasmI32]));
+builder.addType(makeSig([kWasmF64, kWasmF32, kWasmF32, kWasmF32,
+ kWasmF32, kWasmF64, kWasmF64],
+ [kWasmF32, kWasmF32, kWasmF32, kWasmF32, kWasmF32,
+ kWasmF32, kWasmF32, kWasmF32, kWasmF32, kWasmF32]));
+
+// Generate function 1 (out of 2).
+// signature: i_iii
+builder.addFunction(undefined, 0 /* sig */)
+ .addBody([
+ // body:
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+ kExprCallFunction, 0x01,
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprI32SConvertF32]); // i32.trunc_f32_s
+
+// Generate function 2 (out of 2).
+builder.addFunction(undefined, 1 /* sig */)
+ .addBody([
+ // body:
+ kExprF32Const, 0x04, 0x04, 0x05, 0x04, // f32.const
+ kExprLoop, 0x40, // loop @24
+ kExprEnd, // end @26
+ kExprF32Ceil, // f32.ceil
+ kExprF32Const, 0x08, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprI32Const, 0x00, // i32.const
+ kExprBrIf, 0x00, // br_if depth=0
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprF32Ceil, // f32.ceil
+ kExprF32Ceil, // f32.ceil
+ kExprF32Const, 0xed, 0xed, 0xed, 0xed, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x65, 0x73, 0x61, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprI64Const, 0x00, // i64.const
+ kExprF32SConvertI64]); // f32.convert_i64_s
+
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(1, instance.exports.main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1296876.js b/deps/v8/test/mjsunit/regress/wasm/regress-1296876.js
new file mode 100644
index 0000000000..96ce17b56d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1296876.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addFunction('main', kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 1, // local.get
+ kExprLocalGet, 1, // local.get
+ kExprLocalGet, 0, // local.get
+ kExprLocalSet, 1, // local.set
+ kAtomicPrefix, kExprI32AtomicSub, 0x02, 0x26, // i32.atomic.sub32
+ ])
+ .exportFunc();
+const instance = builder.instantiate();
+assertEquals(0, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
index 92b14bbbd6..6e78b3bfa2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// The test needs --wasm-tier-up because we can't serialize and deserialize
+// The test needs --no-liftoff because we can't serialize and deserialize
// Liftoff code.
-// Flags: --allow-natives-syntax --experimental-wasm-reftypes --wasm-tier-up
+// Flags: --allow-natives-syntax --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
index 0aee72ea8c..1e62d6025e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// The test needs --wasm-tier-up because we can't serialize and deserialize
+// The test needs --no-liftoff because we can't serialize and deserialize
// Liftoff code.
-// Flags: --allow-natives-syntax --wasm-tier-up
+// Flags: --allow-natives-syntax --no-liftoff
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
index 4da9ebc963..6173b66390 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// The test needs --wasm-tier-up because we can't serialize and deserialize
+// The test needs --no-liftoff because we can't serialize and deserialize
// Liftoff code.
-// Flags: --allow-natives-syntax --throws --wasm-tier-up
+// Flags: --allow-natives-syntax --throws --no-liftoff
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let kTableSize = 3;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-964607.js b/deps/v8/test/mjsunit/regress/wasm/regress-964607.js
index 768d8c8df0..701ad819c6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-964607.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-964607.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js
index 22243e49bb..d9ccf9ce79 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --experimental-wasm-typed-funcref
+// Flags: --experimental-wasm-typed-funcref
let raw = new Uint8Array([
0x00, 0x61, 0x73, 0x6d, // wasm magic
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-inlining-throw.js b/deps/v8/test/mjsunit/regress/wasm/regress-inlining-throw.js
new file mode 100644
index 0000000000..54762590e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-inlining-throw.js
@@ -0,0 +1,71 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tier up quickly to save time:
+// Flags: --wasm-tiering-budget=100 --experimental-wasm-gc
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+builder.setNominal();
+let supertype = builder.addStruct([makeField(kWasmI32, true)]);
+let subtype = builder.addStruct(
+ [makeField(kWasmI32, true), makeField(kWasmI32, true)], supertype);
+let unused_type = builder.addStruct(
+ [makeField(kWasmI32, true), makeField(kWasmF64, true)], supertype);
+
+let sig = makeSig([wasmOptRefType(supertype)], [kWasmI32]);
+
+let callee1 = builder.addFunction('callee1', sig).addBody([
+ kExprBlock, kWasmRef, subtype,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastStatic, 0, subtype,
+ kGCPrefix, kExprRefCastStatic, unused_type,
+ kGCPrefix, kExprStructGet, unused_type, 0,
+ kExprReturn,
+ kExprEnd,
+ kGCPrefix, kExprStructGet, subtype, 1
+]);
+
+let callee2 = builder.addFunction('callee2', sig).addBody([
+ kExprBlock, kWasmRef, subtype,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastStatic, 0, subtype,
+ kExprUnreachable,
+ kExprReturn,
+ kExprEnd,
+ kGCPrefix, kExprStructGet, subtype, 1
+]);
+
+let callee3 = builder.addFunction('callee3', sig).addBody([
+ kExprBlock, kWasmRef, supertype,
+ kExprLocalGet, 0,
+ kExprBrOnNonNull, 0,
+ kExprUnreachable,
+ kExprReturn,
+ kExprEnd,
+ kGCPrefix, kExprRefCastStatic, subtype,
+ kGCPrefix, kExprStructGet, subtype, 1
+]);
+
+function MakeCaller(name, callee) {
+ builder.addFunction(name, kSig_i_v)
+ .addBody([
+ kExprI32Const, 10, kExprI32Const, 42,
+ kGCPrefix, kExprStructNew, subtype,
+ kExprCallFunction, callee.index
+ ])
+ .exportFunc();
+}
+MakeCaller("main1", callee1);
+MakeCaller("main2", callee2);
+MakeCaller("main3", callee3);
+
+var module = builder.instantiate();
+
+for (let i = 0; i < 100; i++) {
+ assertEquals(42, module.exports.main1());
+ assertEquals(42, module.exports.main2());
+ assertEquals(42, module.exports.main3());
+}
diff --git a/deps/v8/test/mjsunit/shared-memory/client-gc.js b/deps/v8/test/mjsunit/shared-memory/client-gc.js
new file mode 100644
index 0000000000..e545de9cbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/client-gc.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --allow-natives-syntax --expose-gc
+
+gc();
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string-in-code-object.js b/deps/v8/test/mjsunit/shared-memory/shared-string-in-code-object.js
new file mode 100644
index 0000000000..18b3051f55
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string-in-code-object.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --allow-natives-syntax --stress-compaction
+
+function foo() { return "foo"; }
+
+%PrepareFunctionForOptimization(foo);
+let value = foo();
+assertTrue(%IsSharedString(value));
+%OptimizeFunctionOnNextCall(foo);
+value = foo();
+assertTrue(%IsSharedString(value));
+%SharedGC();
+value = foo();
+assertTrue(%IsSharedString(value));
+assertEquals("foo", value);
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string-in-weak-map.js b/deps/v8/test/mjsunit/shared-memory/shared-string-in-weak-map.js
new file mode 100644
index 0000000000..cf4c95e8a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string-in-weak-map.js
@@ -0,0 +1,23 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --allow-natives-syntax --stress-compaction --expose-gc
+
+const val1 = "some value";
+assertTrue(%IsSharedString(val1));
+
+const wm = new WeakMap();
+const key1 = {};
+
+wm.set(key1, val1);
+assertTrue(wm.get(key1) == val1);
+assertTrue(%IsSharedString(wm.get(key1)));
+
+gc();
+assertTrue(wm.get(key1) == val1);
+assertTrue(%IsSharedString(wm.get(key1)));
+
+%SharedGC();
+assertTrue(wm.get(key1) == val1);
+assertTrue(%IsSharedString(wm.get(key1)));
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string.js b/deps/v8/test/mjsunit/shared-memory/shared-string.js
new file mode 100644
index 0000000000..da6fb39f43
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string.js
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --allow-natives-syntax
+
+if (this.Worker) {
+
+(function TestSharedStringPostMessage() {
+ let workerScript =
+ `postMessage("started");
+ onmessage = function(str) {
+ if (!%IsSharedString(str)) {
+ throw new Error("str isn't shared");
+ }
+ postMessage(str);
+ };`;
+
+ let worker = new Worker(workerScript, { type: 'string' });
+ let started = worker.getMessage();
+ assertTrue(%IsSharedString(started));
+ assertEquals("started", started);
+
+ // The string literal appears in source and is internalized, so should
+ // already be shared.
+ let str_to_send = 'foo';
+ assertTrue(%IsSharedString(str_to_send));
+ worker.postMessage(str_to_send);
+ let str_received = worker.getMessage();
+ assertTrue(%IsSharedString(str_received));
+ // Object.is and === won't check pointer equality of Strings.
+ assertTrue(%IsSameHeapObject(str_to_send, str_received));
+
+ worker.terminate();
+})();
+
+}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics-workers.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics-workers.js
new file mode 100644
index 0000000000..453af1478e
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics-workers.js
@@ -0,0 +1,41 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --harmony-struct --allow-natives-syntax
+
+"use strict";
+
+if (this.Worker) {
+
+(function TestSharedStructPostMessage() {
+ let workerScript =
+ `onmessage = function(struct) {
+ // Non-atomic write that will be made visible once main thread
+ // observes the atomic write below.
+ struct.struct_field.payload = 42;
+ Atomics.store(struct, "string_field", "worker");
+ };
+ postMessage("started");`;
+
+ let worker = new Worker(workerScript, { type: 'string' });
+ let started = worker.getMessage();
+ assertEquals("started", started);
+
+ let OuterStruct = new SharedStructType(['string_field', 'struct_field']);
+ let InnerStruct = new SharedStructType(['payload']);
+ let struct = new OuterStruct();
+ struct.struct_field = new InnerStruct();
+ struct.string_field = "main";
+ assertEquals("main", struct.string_field);
+ assertEquals(undefined, struct.struct_field.payload);
+ worker.postMessage(struct);
+ // Spin until we observe the worker's write of string_field.
+ while (Atomics.load(struct, "string_field") !== "worker") {}
+ // The non-atomic store write must also be visible.
+ assertEquals(42, struct.struct_field.payload);
+
+ worker.terminate();
+})();
+
+}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js
new file mode 100644
index 0000000000..12b7d57f8b
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js
@@ -0,0 +1,35 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --harmony-struct
+
+"use strict";
+
+let S = new SharedStructType(['field']);
+
+(function TestPrimitivesUsingAtomics() {
+ // All primitives can be stored in fields.
+ let s = new S();
+ for (let prim of [42, -0, undefined, null, true, false, "foo"]) {
+ Atomics.store(s, 'field', prim);
+ assertEquals(Atomics.load(s, 'field'), prim);
+ }
+})();
+
+(function TestObjectsUsingAtomics() {
+ let s = new S();
+ // Shared objects cannot point to non-shared objects.
+ assertThrows(() => { Atomics.store(s, 'field', []); });
+ assertThrows(() => { Atomics.store(s, 'field', {}); });
+ // Shared objects can point to other shared objects.
+ let shared_rhs = new S();
+ Atomics.store(s, 'field', shared_rhs);
+ assertEquals(Atomics.load(s, 'field'), shared_rhs);
+})();
+
+(function TestNotExtensibleUsingAtomics() {
+ let s = new S();
+ // Shared structs are non-extensible.
+ assertThrows(() => { Atomics.store(s, 'nonExistent', 42); });
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js
new file mode 100644
index 0000000000..97a47d0807
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js
@@ -0,0 +1,54 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --harmony-struct
+
+"use strict";
+
+let S = new SharedStructType(['field']);
+
+(function TestNoPrototype() {
+ // For now the experimental shared structs don't have a prototype, unlike the
+ // proposal explainer which says accessing the prototype throws.
+ assertNull(S.prototype);
+ assertNull(Object.getPrototypeOf(new S()));
+})();
+
+(function TestPrimitives() {
+ // All primitives can be stored in fields.
+ let s = new S();
+ for (let prim of [42, -0, Math.random(),
+ undefined, null, true, false,
+ "foo"]) {
+ s.field = prim;
+ assertEquals(s.field, prim);
+ }
+})();
+
+(function TestObjects() {
+ let s = new S();
+ // Shared objects cannot point to non-shared objects.
+ assertThrows(() => { s.field = []; });
+ assertThrows(() => { s.field = {}; });
+ // Shared objects can point to other shared objects.
+ let shared_rhs = new S();
+ s.field = shared_rhs;
+ assertEquals(s.field, shared_rhs);
+})();
+
+(function TestNotExtensible() {
+ let s = new S();
+ // Shared structs are non-extensible.
+ assertThrows(() => { s.nonExistent = 42; });
+ assertThrows(() => { Object.setPrototypeOf(s, {}); });
+ assertThrows(() => { Object.defineProperty(s, 'nonExistent', { value: 42 }); });
+})();
+
+(function TestTooManyFields() {
+ let field_names = [];
+ for (let i = 0; i < 1000; i++) {
+ field_names.push('field' + i);
+ }
+ assertThrows(() => { new SharedStructType(field_names); });
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js
new file mode 100644
index 0000000000..ddc78e5bbb
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js
@@ -0,0 +1,39 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --harmony-struct --allow-natives-syntax
+
+"use strict";
+
+if (this.Worker) {
+
+(function TestSharedStructPostMessage() {
+ let workerScript =
+ `onmessage = function(struct) {
+ struct.struct_field.payload = 42;
+ struct.string_field = "worker";
+ postMessage("done");
+ };
+ postMessage("started");`;
+
+ let worker = new Worker(workerScript, { type: 'string' });
+ let started = worker.getMessage();
+ assertEquals("started", started);
+
+ let OuterStruct = new SharedStructType(['string_field', 'struct_field']);
+ let InnerStruct = new SharedStructType(['payload']);
+ let struct = new OuterStruct();
+ struct.struct_field = new InnerStruct();
+ struct.string_field = "main";
+ assertEquals("main", struct.string_field);
+ assertEquals(undefined, struct.struct_field.payload);
+ worker.postMessage(struct);
+ assertEquals("done", worker.getMessage());
+ assertEquals("worker", struct.string_field);
+ assertEquals(42, struct.struct_field.payload);
+
+ worker.terminate();
+})();
+
+}
diff --git a/deps/v8/test/mjsunit/statistics-extension.js b/deps/v8/test/mjsunit/statistics-extension.js
new file mode 100644
index 0000000000..b6573736bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/statistics-extension.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-statistics
+
+assertEquals(typeof getV8Statistics, 'function');
+var result = getV8Statistics();
+assertEquals(typeof result, 'object');
+for (let key of Object.keys(result)) {
+ assertEquals(typeof result[key], 'number');
+}
diff --git a/deps/v8/test/mjsunit/temporal/calendar-constructor.js b/deps/v8/test/mjsunit/temporal/calendar-constructor.js
index fcdcbe4f2c..d0bf81f56c 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-constructor.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-constructor.js
@@ -6,14 +6,11 @@
// https://tc39.es/proposal-temporal/#sec-temporal.calendar
// 1. If NewTarget is undefined, then
// a. Throw a TypeError exception.
-assertThrows(() => Temporal.Calendar("iso8601"), TypeError,
- "Constructor Temporal.Calendar requires 'new'");
+assertThrows(() => Temporal.Calendar("iso8601"), TypeError);
-assertThrows(() => new Temporal.Calendar(), RangeError,
- "Invalid calendar specified: undefined");
+assertThrows(() => new Temporal.Calendar(), RangeError);
// Wrong case
-assertThrows(() => new Temporal.Calendar("ISO8601"), RangeError,
- "Invalid calendar specified: ISO8601");
+assertThrows(() => new Temporal.Calendar("ISO8601"), RangeError);
assertEquals("iso8601", (new Temporal.Calendar("iso8601")).id)
diff --git a/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js
index f272932b43..e8cdf4ecb1 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js
@@ -8,110 +8,100 @@ let cal = new Temporal.Calendar("iso8601")
// Check throw for first arg
assertThrows(() => cal.dateFromFields(),
- TypeError,
- "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ TypeError);
[undefined, true, false, 123, 456n, Symbol(), "string",
123.456, NaN, null].forEach(
function(fields) {
- assertThrows(() => cal.dateFromFields(fields), TypeError,
- "Temporal.Calendar.prototype.dateFromFields called on non-object");
- assertThrows(() => cal.dateFromFields(fields, undefined), TypeError,
- "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ assertThrows(() => cal.dateFromFields(fields), TypeError);
+ assertThrows(() => cal.dateFromFields(fields, undefined), TypeError);
assertThrows(() => cal.dateFromFields(fields, {overflow: "constrain"}),
- TypeError,
- "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ TypeError);
assertThrows(() => cal.dateFromFields(fields, {overflow: "reject"}),
- TypeError,
- "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ TypeError);
});
-assertThrows(() => cal.dateFromFields({month: 1, day: 17}),
- TypeError, "invalid_argument");
-assertThrows(() => cal.dateFromFields({year: 2021, day: 17}),
- TypeError, "invalid_argument");
-assertThrows(() => cal.dateFromFields({year: 2021, month: 12}),
- TypeError, "invalid_argument");
+assertThrows(() => cal.dateFromFields({month: 1, day: 17}), TypeError);
+assertThrows(() => cal.dateFromFields({year: 2021, day: 17}), TypeError);
+assertThrows(() => cal.dateFromFields({year: 2021, month: 12}), TypeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "m1", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M1", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "m01", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 12, monthCode: "M11",
- day: 17}), RangeError, "monthCode value is out of range.");
+ day: 17}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M00", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M19", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M99", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M13", day: 17}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: -1, day: 17}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: -Infinity, day: 17}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 7, day: -17}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 7, day: -Infinity}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 12, day: 0},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 12, day: 32},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 1, day: 32},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 2, day: 29},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 6, day: 31},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 9, day: 31},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 0, day: 5},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields({year: 2021, month: 13, day: 5},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M12", day: 0}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M12", day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M01", day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M02", day: 29}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M06", day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M09", day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M00", day: 5}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields(
{year: 2021, monthCode: "M13", day: 5}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 12, day: 0}), RangeError, "Invalid time value");
+ {year: 2021, month: 12, day: 0}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 0, day: 3}), RangeError, "Invalid time value");
+ {year: 2021, month: 0, day: 3}), RangeError);
// Check throw for the second arg
assertThrows(() => cal.dateFromFields(
{year: 2021, month: 7, day: 13}, {overflow: "invalid"}),
- RangeError,
- "Value invalid out of range for Temporal.Calendar.prototype.dateFromFields"
- + " options property overflow");
+ RangeError);
assertEquals("2021-07-15",
cal.dateFromFields({year: 2021, month: 7, day: 15}).toJSON());
@@ -180,41 +170,28 @@ assertEquals("2021-12-31",
cal.dateFromFields({year: 2021, monthCode: "M12", day: 500}).toJSON());
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 1, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 2, day: 29}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 3, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 3, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 4, day: 31}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 4, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 5, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 5, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 6, day: 31}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 7, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 7, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 8, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 8, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 9, day: 31}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 10, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 10, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 11, day: 31}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 11, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 12, day: 32}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.dateFromFields(
- {year: 2021, month: 13, day: 5}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}), RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/calendar-date-until.js b/deps/v8/test/mjsunit/temporal/calendar-date-until.js
index a403646118..8968dbe5a1 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-date-until.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-date-until.js
@@ -10,9 +10,7 @@ let cal = new Temporal.Calendar("iso8601");
[ "hour", "minute", "second", "millisecond", "microsecond", "nanosecond" ]
.forEach(function(largestUnit) {
assertThrows(() => cal.dateUntil("2021-07-16", "2021-07-17",
- {largestUnit}), RangeError,
- "Invalid unit argument for Temporal.Calendar.prototype.dateUntil() "+
- "'largestUnit'");
+ {largestUnit}), RangeError);
});
assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16").toJSON());
diff --git a/deps/v8/test/mjsunit/temporal/calendar-fields.js b/deps/v8/test/mjsunit/temporal/calendar-fields.js
index cf10d9537e..e7c8db32c4 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-fields.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-fields.js
@@ -8,16 +8,51 @@ let cal = new Temporal.Calendar("iso8601")
assertEquals("iso8601", cal.id)
-const fields = {
+let i = 1;
+const repeated = {
*[Symbol.iterator]() {
- let i = 0;
- while (i++ < 1000) {
- yield "year";
- }
+ yield "year";
+ i++;
+ yield "year";
+ i++;
}
}
-let expected = Array.from(fields);
-// For now, we only input it as array
-let inpiut = expected;
-assertArrayEquals(expected, cal.fields(expected));
+assertThrows(() => cal.fields(repeated), RangeError);
+assertEquals(2, i);
+let repeatedArray = Array.from(repeated);
+assertThrows(() => cal.fields(repeatedArray), RangeError);
+
+const week = {
+ *[Symbol.iterator]() {
+ yield "week";
+ }
+}
+
+assertThrows(() => cal.fields(week), RangeError);
+assertThrows(() => cal.fields(['week']), RangeError);
+assertThrows(() => cal.fields(new Set(['week'])), RangeError);
+
+const allValid = {
+ *[Symbol.iterator]() {
+ yield "nanosecond";
+ yield "microsecond";
+ yield "millisecond";
+ yield "second";
+ yield "minute";
+ yield "hour";
+ yield "day";
+ yield "monthCode";
+ yield "month";
+ yield "year";
+ }
+}
+
+let allValidArray = Array.from(allValid);
+let allValidSet = new Set(allValid);
+assertArrayEquals(allValidArray, cal.fields(allValid));
+assertArrayEquals(allValidArray, cal.fields(allValidArray));
+assertArrayEquals(allValidArray, cal.fields(allValidSet));
+
+// cannot just return the same array
+assertTrue(allValidArray != cal.fields(allValidArray));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-from.js b/deps/v8/test/mjsunit/temporal/calendar-from.js
index ab63c84ac2..2c8b83021c 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-from.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-from.js
@@ -6,8 +6,6 @@
// https://tc39.es/proposal-temporal/#sec-temporal.calendar.from
// 1. If NewTarget is undefined, then
// a. Throw a TypeError exception.
-//assertThrows(() => Temporal.Calendar.from("invalid"), TypeError,
-// "Constructor Temporal.Calendar requires 'new'");
assertEquals("iso8601",
(Temporal.Calendar.from("iso8601")).id);
diff --git a/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js b/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js
index 82a846772f..7e7a96a2de 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js
@@ -7,16 +7,11 @@
let cal = new Temporal.Calendar("iso8601")
// Test throwing
-assertThrows(() => cal.mergeFields(), TypeError,
- "Cannot convert undefined or null to object");
-assertThrows(() => cal.mergeFields(undefined, {}), TypeError,
- "Cannot convert undefined or null to object");
-assertThrows(() => cal.mergeFields(null, {}), TypeError,
- "Cannot convert undefined or null to object");
-assertThrows(() => cal.mergeFields({}, undefined), TypeError,
- "Cannot convert undefined or null to object");
-assertThrows(() => cal.mergeFields({}, null), TypeError,
- "Cannot convert undefined or null to object");
+assertThrows(() => cal.mergeFields(), TypeError);
+assertThrows(() => cal.mergeFields(undefined, {}), TypeError);
+assertThrows(() => cal.mergeFields(null, {}), TypeError);
+assertThrows(() => cal.mergeFields({}, undefined), TypeError);
+assertThrows(() => cal.mergeFields({}, null), TypeError);
// Test String, number, true, false, NaN, BigInt, Symbol types
// pending on https://github.com/tc39/proposal-temporal/issues/1647
diff --git a/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js
index 1ff45c6117..41332fa1b1 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js
@@ -7,127 +7,89 @@
let cal = new Temporal.Calendar("iso8601")
// Check throw for first arg
-let nonObjMsg =
- "Temporal.Calendar.prototype.monthDayFromFields called on non-object";
-assertThrows(() => cal.monthDayFromFields(), TypeError,
- "Temporal.Calendar.prototype.monthDayFromFields called on non-object");
+assertThrows(() => cal.monthDayFromFields(), TypeError);
[undefined, true, false, 123, 456n, Symbol(), "string"].forEach(
function(fields) {
- assertThrows(() => cal.monthDayFromFields(fields), TypeError,
- nonObjMsg);
- assertThrows(() => cal.monthDayFromFields(fields, undefined), TypeError,
- nonObjMsg);
+ assertThrows(() => cal.monthDayFromFields(fields), TypeError);
+ assertThrows(() => cal.monthDayFromFields(fields, undefined), TypeError);
assertThrows(() => cal.monthDayFromFields(fields,
- {overflow: "constrain"}), TypeError, nonObjMsg);
+ {overflow: "constrain"}), TypeError);
assertThrows(() => cal.monthDayFromFields(fields, {overflow: "reject"}),
- TypeError, nonObjMsg);
+ TypeError);
});
-assertThrows(() => cal.monthDayFromFields({month: 1, day: 17}),
- TypeError, "invalid_argument");
-assertThrows(() => cal.monthDayFromFields({year: 2021, day: 17}),
- TypeError, "invalid_argument");
-assertThrows(() => cal.monthDayFromFields({year: 2021, month: 12}),
- TypeError, "invalid_argument");
+assertThrows(() => cal.monthDayFromFields({month: 1, day: 17}), TypeError);
+assertThrows(() => cal.monthDayFromFields({year: 2021, day: 17}), TypeError);
+assertThrows(() => cal.monthDayFromFields({year: 2021, month: 12}), TypeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "m1", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "m1", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M1", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M1", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "m01", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "m01", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 12, monthCode: "M11", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, month: 12, monthCode: "M11", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M00", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M00", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M19", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M19", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M99", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M99", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M13", day: 17}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M13", day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: -1, day: 17}),
- RangeError, "Invalid time value");
+ {year: 2021, month: -1, day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: -Infinity, day: 17}),
- RangeError, "Invalid time value");
+ {year: 2021, month: -Infinity, day: 17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 7, day: -17}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 7, day: -17}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 7, day: -Infinity}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 7, day: -Infinity}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 12, day: 0}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 12, day: 0}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 12, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 1, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 2, day: 29}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 6, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 9, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 0, day: 5}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 0, day: 5}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 13, day: 5}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M12", day: 0}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, monthCode: "M12", day: 0}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M12", day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, monthCode: "M12", day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M01", day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, monthCode: "M01", day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M06", day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, monthCode: "M06", day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M09", day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, monthCode: "M09", day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M00", day: 5}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M00", day: 5}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, monthCode: "M13", day: 5}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M13", day: 5}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 12, day: 0}), RangeError, "Invalid time value");
+ {year: 2021, month: 12, day: 0}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 0, day: 3}), RangeError, "Invalid time value");
+ {year: 2021, month: 0, day: 3}), RangeError);
// Check throw for the second arg
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 7, day: 13}, {overflow: "invalid"}),
- RangeError,
- "Value invalid out of range for Temporal.Calendar.prototype." +
- "monthDayFromFields options property overflow");
+ {year: 2021, month: 7, day: 13}, {overflow: "invalid"}), RangeError);
assertEquals("07-15", cal.monthDayFromFields(
{year: 2021, month: 7, day: 15}).toJSON());
@@ -198,41 +160,28 @@ assertEquals("12-31", cal.monthDayFromFields(
{year: 2021, monthCode: "M12", day: 500}).toJSON());
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 1, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 2, day: 29}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 3, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 3, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 4, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 4, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 5, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 5, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 6, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 7, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 7, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 8, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 8, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 9, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 10, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 10, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 11, day: 31}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 11, day: 31}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 12, day: 32}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.monthDayFromFields(
- {year: 2021, month: 13, day: 5}, {overflow: "reject"}),
- RangeError, "Invalid time value");
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}), RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/calendar-month.js b/deps/v8/test/mjsunit/temporal/calendar-month.js
index 02b0fe644d..53bfb5c0c9 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-month.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-month.js
@@ -10,8 +10,7 @@ assertEquals(7, cal.month(new Temporal.PlainDate(2021, 7, 15)));
assertEquals(8, cal.month(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
assertEquals(6, cal.month(new Temporal.PlainYearMonth(1999, 6)));
assertEquals(3, cal.month("2019-03-15"));
-assertThrows(() => cal.month(new Temporal.PlainMonthDay(3, 16)), TypeError,
- "invalid_argument");
+assertThrows(() => cal.month(new Temporal.PlainMonthDay(3, 16)), TypeError);
// TODO Test the following later.
//assertEquals(1, cal.month(new Temporal.ZonedDateTime(86400n * 366n * 50n,
diff --git a/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js
index 8124546339..b5e87826ae 100644
--- a/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js
+++ b/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js
@@ -6,73 +6,61 @@
// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.yearmonthfromfields
let cal = new Temporal.Calendar("iso8601")
-let nonObjMsg =
- "Temporal.Calendar.prototype.yearMonthFromFields called on non-object");
// Check throw for first arg
assertThrows(() => cal.yearMonthFromFields(),
- TypeError, nonObjMsg);
+ TypeError);
[undefined, true, false, 123, 456n, Symbol(), "string"].forEach(
function(fields) {
- assertThrows(() => cal.yearMonthFromFields(fields), TypeError, nonObjMsg);
- assertThrows(() => cal.yearMonthFromFields(fields, undefined),
- TypeError, nonObjMsg);
+ assertThrows(() => cal.yearMonthFromFields(fields), TypeError);
+ assertThrows(() => cal.yearMonthFromFields(fields, undefined), TypeError);
assertThrows(() => cal.yearMonthFromFields(fields,
- {overflow: "constrain"}), TypeError, nonObjMsg);
+ {overflow: "constrain"}), TypeError);
assertThrows(() => cal.yearMonthFromFields(fields,
- {overflow: "reject"}), TypeError, nonObjMsg);
+ {overflow: "reject"}), TypeError);
});
-assertThrows(() => cal.yearMonthFromFields({month: 1}),
- TypeError, "invalid_argument");
-assertThrows(() => cal.yearMonthFromFields({year: 2021}),
- TypeError, "invalid_argument");
+assertThrows(() => cal.yearMonthFromFields({month: 1}), TypeError);
+assertThrows(() => cal.yearMonthFromFields({year: 2021}), TypeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "m1"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M1"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "m01"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 12,
- monthCode: "M11"}),
- RangeError, "monthCode value is out of range.");
+ monthCode: "M11"}), RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M00"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M19"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M99"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M13"}),
- RangeError, "monthCode value is out of range.");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, month: -1}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, month: -Infinity}),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 0, day: 5},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 13, day: 5},
- {overflow: "reject"}), RangeError, "Invalid time value");
+ {overflow: "reject"}), RangeError);
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, monthCode: "M00"}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M00"}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, monthCode: "M13"}, {overflow: "reject"}),
- RangeError, "monthCode value is out of range.");
+ {year: 2021, monthCode: "M13"}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, month: 0}), RangeError, "Invalid time value");
+ {year: 2021, month: 0}), RangeError);
// Check throw for the second arg
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, month: 7}, {overflow: "invalid"}),
- RangeError,
- "Value invalid out of range for " +
- "Temporal.Calendar.prototype.yearMonthFromFields options property " +
- "overflow");
+ {year: 2021, month: 7}, {overflow: "invalid"}), RangeError);
assertEquals("2021-07",
cal.yearMonthFromFields({year: 2021, month: 7}).toJSON());
@@ -137,8 +125,6 @@ assertEquals("2021-12",
cal.yearMonthFromFields({year: 2021, monthCode: "M12"}).toJSON());
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, month: 13}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 13}, {overflow: "reject"}), RangeError);
assertThrows(() => cal.yearMonthFromFields(
- {year: 2021, month: 9995}, {overflow: "reject"}), RangeError,
- "Invalid time value");
+ {year: 2021, month: 9995}, {overflow: "reject"}), RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/duration-abs.js b/deps/v8/test/mjsunit/temporal/duration-abs.js
index 181122058f..2a883899ce 100644
--- a/deps/v8/test/mjsunit/temporal/duration-abs.js
+++ b/deps/v8/test/mjsunit/temporal/duration-abs.js
@@ -12,8 +12,10 @@ let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
assertDuration(d2.abs(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
// Test large number
-let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
-assertDuration(d3.abs(), 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
+let d3 = new Temporal.Duration(
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(d3.abs(),
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
// Test negative values
let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
diff --git a/deps/v8/test/mjsunit/temporal/duration-add.js b/deps/v8/test/mjsunit/temporal/duration-add.js
index 38d63cfd42..6a02afe929 100644
--- a/deps/v8/test/mjsunit/temporal/duration-add.js
+++ b/deps/v8/test/mjsunit/temporal/duration-add.js
@@ -7,34 +7,44 @@ d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
let d1 = new Temporal.Duration();
let badDur = {add: d1.add};
-assertThrows(() => badDur.add(d1), TypeError,
- "Method Temporal.Duration.prototype.add called on incompatible receiver #<Object>");
+assertThrows(() => badDur.add(d1), TypeError);
let relativeToOptions = {relativeTo: "2021-08-01"};
let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
-assertThrows(() => d2.add(d1), RangeError, "Invalid time value");
-assertThrows(() => d1.add(d2), RangeError, "Invalid time value");
-assertThrows(() => d2.add(d2), RangeError, "Invalid time value");
-assertDuration(d2.add(d1, relativeToOptions), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
-assertDuration(d1.add(d2, relativeToOptions), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
-assertDuration(d1.add(d1, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
-assertDuration(d2.add(d2, relativeToOptions), 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, false);
+assertThrows(() => d2.add(d1), RangeError);
+assertThrows(() => d1.add(d2), RangeError);
+assertThrows(() => d2.add(d2), RangeError);
+assertDuration(d2.add(d1, relativeToOptions),
+ 1, 2, 0, 25, 5, 6, 7, 8, 9, 10, 1, false);
+assertDuration(d1.add(d2, relativeToOptions),
+ 1, 2, 0, 25, 5, 6, 7, 8, 9, 10, 1, false);
+assertDuration(d1.add(d1, relativeToOptions),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(d2.add(d2, relativeToOptions),
+ 2, 5, 0, 19, 10, 12, 14, 16, 18, 20, 1, false);
// Test large number
-let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
-assertThrows(() => d3.add(d3), RangeError, "Invalid time value");
-assertDuration(d3.add(d3, relativeToOptions), 2e5, 4e5, 6e5, 8e5, 1e6, 12e5, 14e5, 16e5, 18e5, 2e6, 1, false);
+let d3 = new Temporal.Duration(
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertThrows(() => d3.add(d3), RangeError);
+//assertDuration(d3.add(d3, relativeToOptions),
+// 2e5, 4e5, 6e5, 8e5, 1e6, 12e5, 14e5, 16e5, 18e5, 2e6, 1, false);
// Test negative values
let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
-assertThrows(() => d4.add(d0), RangeError, "Invalid time value");
-assertThrows(() => d0.add(d4), RangeError, "Invalid time value");
-assertThrows(() => d4.add(d4), RangeError, "Invalid time value");
-assertThrows(() => d2.add(d4), RangeError, "Invalid time value");
-assertThrows(() => d4.add(d2), RangeError, "Invalid time value");
-assertDuration(d4.add(d0, relativeToOptions), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
-assertDuration(d0.add(d4, relativeToOptions), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
-assertDuration(d4.add(d4, relativeToOptions), -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -1, false);
-assertDuration(d2.add(d4, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
-assertDuration(d4.add(d2, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertThrows(() => d4.add(d1), RangeError);
+assertThrows(() => d1.add(d4), RangeError);
+assertThrows(() => d4.add(d4), RangeError);
+assertThrows(() => d2.add(d4), RangeError);
+assertThrows(() => d4.add(d2), RangeError);
+assertDuration(d4.add(d1, relativeToOptions),
+ -1, -2, 0, -25, -5, -6, -7, -8, -9, -10, -1, false);
+assertDuration(d1.add(d4, relativeToOptions),
+ -1, -2, 0, -25, -5, -6, -7, -8, -9, -10, -1, false);
+assertDuration(d4.add(d4, relativeToOptions),
+ -2, -5, 0, -19, -10, -12, -14, -16, -18, -20, -1, false);
+assertDuration(d2.add(d4, relativeToOptions),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(d4.add(d2, relativeToOptions),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
diff --git a/deps/v8/test/mjsunit/temporal/duration-constructor.js b/deps/v8/test/mjsunit/temporal/duration-constructor.js
index 3f02034c38..a9cb2cb318 100644
--- a/deps/v8/test/mjsunit/temporal/duration-constructor.js
+++ b/deps/v8/test/mjsunit/temporal/duration-constructor.js
@@ -12,82 +12,70 @@ let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
assertDuration(d2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
// Test large number
-let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
-assertDuration(d3, 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
+let d3 = new Temporal.Duration(
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(
+ d3, 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
// Test negative values
-let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
-assertDuration(d4, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+let d4 = new Temporal.Duration(
+ -1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertDuration(
+ d4, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
// Test NaN
let d5 = new Temporal.Duration(NaN, NaN, NaN);
assertDuration(d5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
// 1. If NewTarget is undefined, then
// a. Throw a TypeError exception.
-assertThrows(() => Temporal.Duration(), TypeError,
- "Method invoked on an object that is not Temporal.Duration.");
+assertThrows(() => Temporal.Duration(), TypeError);
// 1. Let number be ? ToNumber(argument).
assertDuration(new Temporal.Duration(undefined, 234, true, false, "567"),
0, 234, 1, 0, 567, 0, 0, 0, 0, 0, 1, false);
-assertThrows(() => new Temporal.Duration(Symbol(123)), TypeError,
- "Cannot convert a Symbol value to a number");
-assertThrows(() => new Temporal.Duration(123n), TypeError,
- "Cannot convert a BigInt value to a number");
+assertThrows(() => new Temporal.Duration(Symbol(123)), TypeError);
+assertThrows(() => new Temporal.Duration(123n), TypeError);
// Test Infinity
// 7.5.4 IsValidDuration ( years, months, weeks, days, hours, minutes, seconds,
// milliseconds, microseconds, nanoseconds )
// a. If v is not finite, return false.
assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, Infinity),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, Infinity),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, Infinity),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, 2, 3, 4, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, 2, 3, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, 2, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(Infinity),
- RangeError, "Invalid time value");
+ RangeError);
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(1, 2, 3, Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(1, 2, Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(1, Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(Infinity), RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9,
- -Infinity), RangeError, "Invalid time value");
+ -Infinity), RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8,
- -Infinity), RangeError, "Invalid time value");
+ -Infinity), RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7,
- -Infinity), RangeError, "Invalid time value");
+ -Infinity), RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -Infinity),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -Infinity),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(-1, -2, -3, -Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(-1, -2, -Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(-1, -Infinity),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(-Infinity),
- RangeError, "Invalid time value");
+ RangeError);
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(-1, -2, -Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(-1, -Infinity), RangeError);
+assertThrows(() => new Temporal.Duration(-Infinity), RangeError);
// Sign different
-assertThrows(() => new Temporal.Duration(1, -2),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(1, 0, -2),
- RangeError, "Invalid time value");
-assertThrows(() => new Temporal.Duration(-1, 0, 0, 3),
- RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, -2), RangeError);
+assertThrows(() => new Temporal.Duration(1, 0, -2), RangeError);
+assertThrows(() => new Temporal.Duration(-1, 0, 0, 3), RangeError);
assertThrows(() => new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 1, -1),
- RangeError, "Invalid time value");
+ RangeError);
assertThrows(() => new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -1, 1),
- RangeError, "Invalid time value");
+ RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/duration-from.js b/deps/v8/test/mjsunit/temporal/duration-from.js
index 8e19063c8a..107ad32d37 100644
--- a/deps/v8/test/mjsunit/temporal/duration-from.js
+++ b/deps/v8/test/mjsunit/temporal/duration-from.js
@@ -163,50 +163,28 @@ assertDuration(Temporal.Duration.from("PT3,001M"),
assertDuration(Temporal.Duration.from("PT3,006M"),
0, 0, 0, 0, 0, 3, 0, 360, 0, 0, 1, false);
-assertThrows(() => Temporal.Duration.from("P2H"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("P2.5M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("P2,5M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("P2S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2.H3M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2,H3M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2.H3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2,H3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2.H0.5M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2,H0,5M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2.H0.5S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2,H0,5S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2H3.2M3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2H3,2M3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2H3.2M0.3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT2H3,2M0,3S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT.1H"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT,1H"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT.1M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT,1M"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT.1S"), RangeError,
- "Invalid time value");
-assertThrows(() => Temporal.Duration.from("PT,1S"), RangeError,
- "Invalid time value");
+assertThrows(() => Temporal.Duration.from("P2H"), RangeError);
+assertThrows(() => Temporal.Duration.from("P2.5M"), RangeError);
+assertThrows(() => Temporal.Duration.from("P2,5M"), RangeError);
+assertThrows(() => Temporal.Duration.from("P2S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2.H3M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2,H3M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2.H3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2,H3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2.H0.5M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2,H0,5M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2.H0.5S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2,H0,5S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2H3.2M3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2H3,2M3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2H3.2M0.3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT2H3,2M0,3S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT.1H"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT,1H"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT.1M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT,1M"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT.1S"), RangeError);
+assertThrows(() => Temporal.Duration.from("PT,1S"), RangeError);
assertDuration(Temporal.Duration.from(
{years: 0, months: 0, weeks: 0, days: 0,
diff --git a/deps/v8/test/mjsunit/temporal/duration-negated.js b/deps/v8/test/mjsunit/temporal/duration-negated.js
index a161a819d3..033f24d5ee 100644
--- a/deps/v8/test/mjsunit/temporal/duration-negated.js
+++ b/deps/v8/test/mjsunit/temporal/duration-negated.js
@@ -12,13 +12,17 @@ let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
assertDuration(d2.negated(), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
// Test large number
-let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
-assertDuration(d3.negated(), -1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5, -1, false);
+let d3 = new Temporal.Duration(
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(d3.negated(),
+ -1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5, -1, false);
// Test negative values
let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
assertDuration(d4.negated(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
-let d5 = new Temporal.Duration(-1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5);
-assertDuration(d5.negated(), 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
+let d5 = new Temporal.Duration(
+ -1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5);
+assertDuration(d5.negated(),
+ 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
diff --git a/deps/v8/test/mjsunit/temporal/duration-valueOf.js b/deps/v8/test/mjsunit/temporal/duration-valueOf.js
index 1a948868f3..6b1e08bf27 100644
--- a/deps/v8/test/mjsunit/temporal/duration-valueOf.js
+++ b/deps/v8/test/mjsunit/temporal/duration-valueOf.js
@@ -4,5 +4,4 @@
// Flags: --harmony-temporal
let d1 = new Temporal.Duration();
-assertThrows(() => d1.valueOf(), TypeError,
- "Method Temporal.Duration called on a non-object or on a wrong type of object.");
+assertThrows(() => d1.valueOf(), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/duration-with.js b/deps/v8/test/mjsunit/temporal/duration-with.js
index ec9d775057..e3d71d2550 100644
--- a/deps/v8/test/mjsunit/temporal/duration-with.js
+++ b/deps/v8/test/mjsunit/temporal/duration-with.js
@@ -32,26 +32,16 @@ assertDuration(d2.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
assertDuration(d2.with(like7), -9, -8, -7, -6, -5, -4, -3, -2, -1, -10, -1,
false);
// Different sign
-assertThrows(() => d2.with({years: -1}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({months: -2}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({weeks: -3}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({days: -4}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({hours: -5}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({minutes: -6}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({seconds: -7}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({milliseconds: -8}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({microseconds: -9}), RangeError,
- "Invalid time value");
-assertThrows(() => d2.with({nanoseconds: -10}), RangeError,
- "Invalid time value");
+assertThrows(() => d2.with({years: -1}), RangeError);
+assertThrows(() => d2.with({months: -2}), RangeError);
+assertThrows(() => d2.with({weeks: -3}), RangeError);
+assertThrows(() => d2.with({days: -4}), RangeError);
+assertThrows(() => d2.with({hours: -5}), RangeError);
+assertThrows(() => d2.with({minutes: -6}), RangeError);
+assertThrows(() => d2.with({seconds: -7}), RangeError);
+assertThrows(() => d2.with({milliseconds: -8}), RangeError);
+assertThrows(() => d2.with({microseconds: -9}), RangeError);
+assertThrows(() => d2.with({nanoseconds: -10}), RangeError);
// Test large number
@@ -68,45 +58,25 @@ let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
assertDuration(d4, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
assertDuration(d4.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
// Throw when sign flip
-assertThrows(() => d4.with({years: 1}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({months: 2}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({weeks: 3}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({days: 4}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({hours: 5}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({minutes: 6}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({seconds: 7}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({milliseconds: 8}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({microseconds: 9}), RangeError,
- "Invalid time value");
-assertThrows(() => d4.with({nanoseconds: 10}), RangeError,
- "Invalid time value");
+assertThrows(() => d4.with({years: 1}), RangeError);
+assertThrows(() => d4.with({months: 2}), RangeError);
+assertThrows(() => d4.with({weeks: 3}), RangeError);
+assertThrows(() => d4.with({days: 4}), RangeError);
+assertThrows(() => d4.with({hours: 5}), RangeError);
+assertThrows(() => d4.with({minutes: 6}), RangeError);
+assertThrows(() => d4.with({seconds: 7}), RangeError);
+assertThrows(() => d4.with({milliseconds: 8}), RangeError);
+assertThrows(() => d4.with({microseconds: 9}), RangeError);
+assertThrows(() => d4.with({nanoseconds: 10}), RangeError);
// singular throw
-assertThrows(() => d1.with({year:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({month:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({week:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({day:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({hour:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({minute:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({second:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({millisecond:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({microsecond:1}), TypeError,
- "invalid_argument");
-assertThrows(() => d1.with({nanosecond:1}), TypeError,
- "invalid_argument");
+assertThrows(() => d1.with({year:1}), TypeError);
+assertThrows(() => d1.with({month:1}), TypeError);
+assertThrows(() => d1.with({week:1}), TypeError);
+assertThrows(() => d1.with({day:1}), TypeError);
+assertThrows(() => d1.with({hour:1}), TypeError);
+assertThrows(() => d1.with({minute:1}), TypeError);
+assertThrows(() => d1.with({second:1}), TypeError);
+assertThrows(() => d1.with({millisecond:1}), TypeError);
+assertThrows(() => d1.with({microsecond:1}), TypeError);
+assertThrows(() => d1.with({nanosecond:1}), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-add.js b/deps/v8/test/mjsunit/temporal/plain-date-add.js
index 4734dede1d..fc5cccf8f8 100644
--- a/deps/v8/test/mjsunit/temporal/plain-date-add.js
+++ b/deps/v8/test/mjsunit/temporal/plain-date-add.js
@@ -18,7 +18,7 @@ assertPlainDate(d.subtract("-P12D"), 2021, 8, 1);
let goodDate = new Temporal.PlainDate(2021, 7, 20);
let badDate = {add: goodDate.add};
-assertThrows(() => badDateTime.add("P1D"), TypeError);
+assertThrows(() => badDate.add("P1D"), TypeError);
// Throw in ToLimitedTemporalDuration
assertThrows(() => (new Temporal.PlainDate(2021, 7, 20)).add("bad duration"),
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-time-from.js b/deps/v8/test/mjsunit/temporal/plain-date-time-from.js
index c308e57e1b..380f1e6a37 100644
--- a/deps/v8/test/mjsunit/temporal/plain-date-time-from.js
+++ b/deps/v8/test/mjsunit/temporal/plain-date-time-from.js
@@ -98,4 +98,4 @@ assertThrows(() => Temporal.PlainDateTime.from(
assertThrows(() => Temporal.PlainDateTime.from(
{year:9, month: 12, day:31, nanosecond: 1000}, {overflow: "reject"}),
- RangeError
+ RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/temporal-helpers.js b/deps/v8/test/mjsunit/temporal/temporal-helpers.js
index 129f918608..2fcf71d468 100644
--- a/deps/v8/test/mjsunit/temporal/temporal-helpers.js
+++ b/deps/v8/test/mjsunit/temporal/temporal-helpers.js
@@ -6,18 +6,18 @@
function assertDuration(duration, years, months, weeks, days, hours,
minutes, seconds, milliseconds, microseconds, nanoseconds, sign, blank) {
- assertEquals(years, duration.years, duration);
- assertEquals(months, duration.months, duration);
- assertEquals(weeks, duration.weeks, duration);
- assertEquals(days, duration.days, duration);
- assertEquals(hours, duration.hours, duration);
- assertEquals(minutes, duration.minutes, duration);
- assertEquals(seconds, duration.seconds, duration);
- assertEquals(milliseconds, duration.milliseconds, duration);
- assertEquals(microseconds, duration.microseconds, duration);
- assertEquals(nanoseconds, duration.nanoseconds, duration);
- assertEquals(sign, duration.sign, duration);
- assertEquals(blank, duration.blank, duration);
+ assertEquals(years, duration.years, "years");
+ assertEquals(months, duration.months, "months");
+ assertEquals(weeks, duration.weeks, "weeks");
+ assertEquals(days, duration.days, "days");
+ assertEquals(hours, duration.hours, "hours");
+ assertEquals(minutes, duration.minutes, "minutes");
+ assertEquals(seconds, duration.seconds, "seconds");
+ assertEquals(milliseconds, duration.milliseconds, "milliseconds");
+ assertEquals(microseconds, duration.microseconds, "microseconds");
+ assertEquals(nanoseconds, duration.nanoseconds, "nanoseconds");
+ assertEquals(sign, duration.sign, "sign");
+ assertEquals(blank, duration.blank, "blank");
}
function assertPlainDate(time, year, month, day) {
diff --git a/deps/v8/test/mjsunit/temporal/time-zone-constructor.js b/deps/v8/test/mjsunit/temporal/time-zone-constructor.js
new file mode 100644
index 0000000000..5048e9fd00
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/time-zone-constructor.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// 1. If NewTarget is undefined, then
+// a. Throw a TypeError exception.
+assertThrows(() => Temporal.TimeZone("UTC"), TypeError);
+
+assertThrows(() => new Temporal.TimeZone(), RangeError);
+
+assertEquals("UTC", (new Temporal.TimeZone("utc")).id)
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index e82038041f..faac39847d 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -206,18 +206,18 @@ class CombinedTest(testcase.D8TestCase):
"""In addition to standard set of shell flags it appends:
--disable-abortjs: %AbortJS can abort the test even inside
trycatch-wrapper, so we disable it.
- --es-staging: We skip all harmony flags due to false positives,
+ --harmony: We skip all harmony flags due to false positives,
but always pass the staging flag to cover the mature features.
--omit-quit: Calling quit() in JS would otherwise early terminate.
--quiet-load: suppress any stdout from load() function used by
trycatch-wrapper.
"""
return [
- '--test',
- '--disable-abortjs',
- '--es-staging',
- '--omit-quit',
- '--quiet-load',
+ '--test',
+ '--disable-abortjs',
+ '--harmony',
+ '--omit-quit',
+ '--quiet-load',
]
def _get_cmd_params(self):
diff --git a/deps/v8/test/mjsunit/tools/foozzie.js b/deps/v8/test/mjsunit/tools/foozzie.js
index 759df0e983..fa3cc5a84a 100644
--- a/deps/v8/test/mjsunit/tools/foozzie.js
+++ b/deps/v8/test/mjsunit/tools/foozzie.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --correctness-fuzzer-suppressions
-// Files: tools/clusterfuzz/v8_mock.js
+// Files: tools/clusterfuzz/foozzie/v8_mock.js
// Test foozzie mocks for differential fuzzing.
diff --git a/deps/v8/test/mjsunit/tools/foozzie_archs.js b/deps/v8/test/mjsunit/tools/foozzie_archs.js
index 9023428324..bdaec97e01 100644
--- a/deps/v8/test/mjsunit/tools/foozzie_archs.js
+++ b/deps/v8/test/mjsunit/tools/foozzie_archs.js
@@ -3,8 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Files: tools/clusterfuzz/v8_mock.js
-// Files: tools/clusterfuzz/v8_mock_archs.js
+// Files: tools/clusterfuzz/foozzie/v8_mock.js
+// Files: tools/clusterfuzz/foozzie/v8_mock_archs.js
// Test foozzie architecture-specific mocks for differential fuzzing.
diff --git a/deps/v8/test/mjsunit/tools/foozzie_webassembly.js b/deps/v8/test/mjsunit/tools/foozzie_webassembly.js
index d5130a393b..0bb2172e87 100644
--- a/deps/v8/test/mjsunit/tools/foozzie_webassembly.js
+++ b/deps/v8/test/mjsunit/tools/foozzie_webassembly.js
@@ -3,8 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Files: tools/clusterfuzz/v8_mock.js
-// Files: tools/clusterfuzz/v8_mock_webassembly.js
+// Files: tools/clusterfuzz/foozzie/v8_mock.js
+// Files: tools/clusterfuzz/foozzie/v8_mock_webassembly.js
// Test foozzie webassembly-specfific mocks for differential fuzzing.
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log b/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
index 5c966d1c32..af26edf5ab 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
@@ -1272,18 +1272,18 @@ code-creation,BytecodeHandler,0,17736,0x5574264141e0,204,StaLookupSlot
code-creation,BytecodeHandler,0,17745,0x5574264142c0,32,Ldar
code-creation,BytecodeHandler,0,17753,0x557426414300,32,Star
code-creation,BytecodeHandler,0,17762,0x557426414340,40,Mov
-code-creation,BytecodeHandler,0,17771,0x557426414380,4524,LdaNamedProperty
-code-creation,BytecodeHandler,0,17780,0x557426415540,124,LdaNamedPropertyNoFeedback
-code-creation,BytecodeHandler,0,17790,0x5574264155c0,196,LdaNamedPropertyFromSuper
-code-creation,BytecodeHandler,0,17800,0x5574264156a0,216,LdaKeyedProperty
+code-creation,BytecodeHandler,0,17771,0x557426414380,4524,GetNamedProperty
+code-creation,BytecodeHandler,0,17780,0x557426415540,124,GetNamedPropertyNoFeedback
+code-creation,BytecodeHandler,0,17790,0x5574264155c0,196,GetNamedPropertyFromSuper
+code-creation,BytecodeHandler,0,17800,0x5574264156a0,216,GetKeyedProperty
code-creation,BytecodeHandler,0,17809,0x557426415780,188,LdaModuleVariable
code-creation,BytecodeHandler,0,17818,0x557426415840,320,StaModuleVariable
-code-creation,BytecodeHandler,0,17827,0x5574264159a0,180,StaNamedProperty
-code-creation,BytecodeHandler,0,17836,0x557426415a60,140,StaNamedPropertyNoFeedback
-code-creation,BytecodeHandler,0,17846,0x557426415b00,180,StaNamedOwnProperty
-code-creation,BytecodeHandler,0,17855,0x557426415bc0,168,StaKeyedProperty
+code-creation,BytecodeHandler,0,17827,0x5574264159a0,180,SetNamedProperty
+code-creation,BytecodeHandler,0,17836,0x557426415a60,140,SetNamedPropertyNoFeedback
+code-creation,BytecodeHandler,0,17846,0x557426415b00,180,DefineNamedOwnProperty
+code-creation,BytecodeHandler,0,17855,0x557426415bc0,168,SetKeyedProperty
code-creation,BytecodeHandler,0,17864,0x557426415c80,168,StaInArrayLiteral
-code-creation,BytecodeHandler,0,17874,0x557426415d40,196,StaDataPropertyInLiteral
+code-creation,BytecodeHandler,0,17874,0x557426415d40,196,DefineKeyedOwnPropertyInLiteral
code-creation,BytecodeHandler,0,17884,0x557426415e20,164,CollectTypeProfile
code-creation,BytecodeHandler,0,17893,0x557426415ee0,1000,Add
code-creation,BytecodeHandler,0,17901,0x5574264162e0,1060,Sub
@@ -1446,18 +1446,18 @@ code-creation,BytecodeHandler,0,19362,0x557426429ae0,216,StaLookupSlot.Wide
code-creation,BytecodeHandler,0,19371,0x557426429bc0,32,Ldar.Wide
code-creation,BytecodeHandler,0,19380,0x557426429c00,32,Star.Wide
code-creation,BytecodeHandler,0,19389,0x557426429c40,40,Mov.Wide
-code-creation,BytecodeHandler,0,19398,0x557426429c80,4508,LdaNamedProperty.Wide
-code-creation,BytecodeHandler,0,19407,0x55742642ae20,128,LdaNamedPropertyNoFeedback.Wide
-code-creation,BytecodeHandler,0,19419,0x55742642aec0,196,LdaNamedPropertyFromSuper.Wide
-code-creation,BytecodeHandler,0,19429,0x55742642afa0,164,LdaKeyedProperty.Wide
+code-creation,BytecodeHandler,0,19398,0x557426429c80,4508,GetNamedProperty.Wide
+code-creation,BytecodeHandler,0,19407,0x55742642ae20,128,GetNamedPropertyNoFeedback.Wide
+code-creation,BytecodeHandler,0,19419,0x55742642aec0,196,GetNamedPropertyFromSuper.Wide
+code-creation,BytecodeHandler,0,19429,0x55742642afa0,164,GetKeyedProperty.Wide
code-creation,BytecodeHandler,0,19438,0x55742642b060,188,LdaModuleVariable.Wide
code-creation,BytecodeHandler,0,19448,0x55742642b120,320,StaModuleVariable.Wide
-code-creation,BytecodeHandler,0,19457,0x55742642b280,184,StaNamedProperty.Wide
-code-creation,BytecodeHandler,0,19467,0x55742642b340,140,StaNamedPropertyNoFeedback.Wide
-code-creation,BytecodeHandler,0,19476,0x55742642b3e0,184,StaNamedOwnProperty.Wide
-code-creation,BytecodeHandler,0,19486,0x55742642b4a0,176,StaKeyedProperty.Wide
+code-creation,BytecodeHandler,0,19457,0x55742642b280,184,SetNamedProperty.Wide
+code-creation,BytecodeHandler,0,19467,0x55742642b340,140,SetNamedPropertyNoFeedback.Wide
+code-creation,BytecodeHandler,0,19476,0x55742642b3e0,184,DefineNamedOwnProperty.Wide
+code-creation,BytecodeHandler,0,19486,0x55742642b4a0,176,SetKeyedProperty.Wide
code-creation,BytecodeHandler,0,19495,0x55742642b560,176,StaInArrayLiteral.Wide
-code-creation,BytecodeHandler,0,19505,0x55742642b620,200,StaDataPropertyInLiteral.Wide
+code-creation,BytecodeHandler,0,19505,0x55742642b620,200,DefineKeyedOwnPropertyInLiteral.Wide
code-creation,BytecodeHandler,0,19515,0x55742642b700,168,CollectTypeProfile.Wide
code-creation,BytecodeHandler,0,19524,0x55742642b7c0,948,Add.Wide
code-creation,BytecodeHandler,0,19533,0x55742642bb80,1008,Sub.Wide
@@ -1598,18 +1598,18 @@ code-creation,BytecodeHandler,0,20817,0x55742643dd00,212,StaLookupSlot.ExtraWide
code-creation,BytecodeHandler,0,20826,0x55742643dde0,32,Ldar.ExtraWide
code-creation,BytecodeHandler,0,20835,0x55742643de20,28,Star.ExtraWide
code-creation,BytecodeHandler,0,20845,0x55742643de40,40,Mov.ExtraWide
-code-creation,BytecodeHandler,0,20854,0x55742643de80,4492,LdaNamedProperty.ExtraWide
-code-creation,BytecodeHandler,0,20863,0x55742643f020,128,LdaNamedPropertyNoFeedback.ExtraWide
-code-creation,BytecodeHandler,0,20874,0x55742643f0c0,192,LdaNamedPropertyFromSuper.ExtraWide
-code-creation,BytecodeHandler,0,20884,0x55742643f1a0,164,LdaKeyedProperty.ExtraWide
+code-creation,BytecodeHandler,0,20854,0x55742643de80,4492,GetNamedProperty.ExtraWide
+code-creation,BytecodeHandler,0,20863,0x55742643f020,128,GetNamedPropertyNoFeedback.ExtraWide
+code-creation,BytecodeHandler,0,20874,0x55742643f0c0,192,GetNamedPropertyFromSuper.ExtraWide
+code-creation,BytecodeHandler,0,20884,0x55742643f1a0,164,GetKeyedProperty.ExtraWide
code-creation,BytecodeHandler,0,20893,0x55742643f260,188,LdaModuleVariable.ExtraWide
code-creation,BytecodeHandler,0,20903,0x55742643f320,320,StaModuleVariable.ExtraWide
-code-creation,BytecodeHandler,0,20912,0x55742643f480,184,StaNamedProperty.ExtraWide
-code-creation,BytecodeHandler,0,20922,0x55742643f540,140,StaNamedPropertyNoFeedback.ExtraWide
-code-creation,BytecodeHandler,0,20932,0x55742643f5e0,184,StaNamedOwnProperty.ExtraWide
-code-creation,BytecodeHandler,0,20942,0x55742643f6a0,172,StaKeyedProperty.ExtraWide
+code-creation,BytecodeHandler,0,20912,0x55742643f480,184,SetNamedProperty.ExtraWide
+code-creation,BytecodeHandler,0,20922,0x55742643f540,140,SetNamedPropertyNoFeedback.ExtraWide
+code-creation,BytecodeHandler,0,20932,0x55742643f5e0,184,DefineNamedOwnProperty.ExtraWide
+code-creation,BytecodeHandler,0,20942,0x55742643f6a0,172,SetKeyedProperty.ExtraWide
code-creation,BytecodeHandler,0,20953,0x55742643f760,172,StaInArrayLiteral.ExtraWide
-code-creation,BytecodeHandler,0,20963,0x55742643f820,196,StaDataPropertyInLiteral.ExtraWide
+code-creation,BytecodeHandler,0,20963,0x55742643f820,196,DefineKeyedOwnPropertyInLiteral.ExtraWide
code-creation,BytecodeHandler,0,20973,0x55742643f900,168,CollectTypeProfile.ExtraWide
code-creation,BytecodeHandler,0,20982,0x55742643f9c0,948,Add.ExtraWide
code-creation,BytecodeHandler,0,20991,0x55742643fd80,1008,Sub.ExtraWide
diff --git a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
index b4703dd841..28a9d42bb0 100644
--- a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
@@ -93,6 +93,93 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
/Invalid typed array length: 2/);
})();
+(function ConstructFromTypedArray() {
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ const gsab = CreateGrowableSharedArrayBuffer(
+ 4 * sourceCtor.BYTES_PER_ELEMENT,
+ 8 * sourceCtor.BYTES_PER_ELEMENT);
+ const fixedLength = new sourceCtor(gsab, 0, 4);
+ const fixedLengthWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new sourceCtor(gsab, 0);
+ const lengthTrackingWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taFull = new sourceCtor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, ...] << lengthTracking
+ // [3, 4, ...] << lengthTrackingWithOffset
+
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(fixedLength)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(lengthTracking)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+
+ // Grow.
+ gsab.grow(6 * sourceCtor.BYTES_PER_ELEMENT);
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4, 5, 6]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, 5, 6, ...] << lengthTracking
+ // [3, 4, 5, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(fixedLength)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+ assertEquals([1, 2, 3, 4, 5, 6],
+ ToNumbers(new targetCtor(lengthTracking)));
+ assertEquals([3, 4, 5, 6],
+ ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+ });
+})();
+
+(function ConstructFromTypedArraySpeciesConstructorNotCalled() {
+ class MySharedArrayBuffer extends SharedArrayBuffer {
+ constructor(...params) {
+ super(...params);
+ }
+ static get [Symbol.species]() {
+ throw new Error('This should not be called!');
+ }
+ };
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ const gsab = new MySharedArrayBuffer(
+ 4 * sourceCtor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * sourceCtor.BYTES_PER_ELEMENT});
+ // Write some data into the array.
+ const taWrite = new sourceCtor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ const fixedLength = new sourceCtor(gsab, 0, 4);
+ assertEquals([0, 2, 4, 6], ToNumbers(new targetCtor(fixedLength)));
+
+ const fixedLengthWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ assertEquals([4, 6], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+
+ const lengthTracking = new sourceCtor(gsab, 0);
+ assertEquals([0, 2, 4, 6], ToNumbers(new targetCtor(lengthTracking)));
+
+ const lengthTrackingWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+ assertEquals([4, 6], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+ });
+})();
+
(function TypedArrayLengthWhenGrown1() {
const gsab = CreateGrowableSharedArrayBuffer(16, 40);
@@ -2213,11 +2300,6 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
})();
(function IncludesSpecialValues() {
- const floatCtors = [
- Float32Array,
- Float64Array,
- MyFloat32Array
- ];
for (let ctor of floatCtors) {
const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -2230,3 +2312,1256 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
assertTrue(lengthTracking.includes(NaN));
}
})();
+
+(function IndexOfLastIndexOf() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, Math.floor(i / 2));
+ }
+
+ // Orig. array: [0, 0, 1, 1]
+ // [0, 0, 1, 1] << fixedLength
+ // [1, 1] << fixedLengthWithOffset
+ // [0, 0, 1, 1, ...] << lengthTracking
+ // [1, 1, ...] << lengthTrackingWithOffset
+
+ assertEquals(0, IndexOfHelper(fixedLength, 0));
+ assertEquals(1, IndexOfHelper(fixedLength, 0, 1));
+ assertEquals(-1, IndexOfHelper(fixedLength, 0, 2));
+ assertEquals(-1, IndexOfHelper(fixedLength, 0, -2));
+ assertEquals(1, IndexOfHelper(fixedLength, 0, -3));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, 1));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, -3));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, -2));
+ assertEquals(-1, IndexOfHelper(fixedLength, undefined));
+
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, 1));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, 2));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, -2));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, -3));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 1, 1));
+ assertEquals(2, LastIndexOfHelper(fixedLength, 1, -2));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 1, -3));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, undefined));
+
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1, -2));
+ assertEquals(1, IndexOfHelper(fixedLengthWithOffset, 1, -1));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(0, LastIndexOfHelper(fixedLengthWithOffset, 1, -2));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1, -1));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(0, IndexOfHelper(lengthTracking, 0));
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0, 2));
+ assertEquals(2, IndexOfHelper(lengthTracking, 1, -3));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0, 2));
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0, -3));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 1, 1));
+ assertEquals(2, LastIndexOfHelper(lengthTracking, 1, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 1, -3));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(1, IndexOfHelper(lengthTrackingWithOffset, 1, 1));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1, -2));
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1, 1));
+ assertEquals(0, LastIndexOfHelper(lengthTrackingWithOffset, 1, -2));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1, -1));
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, Math.floor(i / 2));
+ }
+
+ // Orig. array: [0, 0, 1, 1, 2, 2]
+ // [0, 0, 1, 1] << fixedLength
+ // [1, 1] << fixedLengthWithOffset
+ // [0, 0, 1, 1, 2, 2, ...] << lengthTracking
+ // [1, 1, 2, 2, ...] << lengthTrackingWithOffset
+
+ assertEquals(2, IndexOfHelper(fixedLength, 1));
+ assertEquals(-1, IndexOfHelper(fixedLength, 2));
+ assertEquals(-1, IndexOfHelper(fixedLength, undefined));
+
+ assertEquals(3, LastIndexOfHelper(fixedLength, 1));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 2));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, undefined));
+
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 2));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 2));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(2, IndexOfHelper(lengthTracking, 1));
+ assertEquals(4, IndexOfHelper(lengthTracking, 2));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(3, LastIndexOfHelper(lengthTracking, 1));
+ assertEquals(5, LastIndexOfHelper(lengthTracking, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(2, IndexOfHelper(lengthTrackingWithOffset, 2));
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(3, LastIndexOfHelper(lengthTrackingWithOffset, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, undefined));
+ }
+})();
+
+(function IndexOfParameterConversionGrows() {
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, 1);
+ }
+
+ let evil = { valueOf: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0));
+ // The TA grew but we only look at the data until the original length.
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ // Growing + length-tracking TA, index conversion.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+ WriteToTypedArray(lengthTracking, 0, 1);
+
+ let evil = { valueOf: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return -4;
+ }};
+ assertEquals(0, IndexOfHelper(lengthTracking, 1, -4));
+ // The TA grew but the start index conversion is done based on the original
+ // length.
+ assertEquals(0, IndexOfHelper(lengthTracking, 1, evil));
+ }
+})();
+
+(function LastIndexOfParameterConversionGrows() {
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, 1);
+ }
+
+ let evil = { valueOf: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return -1;
+ }};
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0));
+ // Because lastIndexOf iterates from the given index downwards, it's not
+ // possible to test that "we only look at the data until the original
+ // length" without also testing that the index conversion happening with the
+ // original length.
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ // Growing + length-tracking TA, index conversion.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+
+ let evil = { valueOf: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return -4;
+ }};
+ assertEquals(0, LastIndexOfHelper(lengthTracking, 0, -4));
+ // The TA grew but the start index conversion is done based on the original
+ // length.
+ assertEquals(0, LastIndexOfHelper(lengthTracking, 0, evil));
+ }
+})();
+
+(function IndexOfLastIndexOfSpecialValues() {
+ for (let ctor of floatCtors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+ lengthTracking[0] = -Infinity;
+ lengthTracking[1] = -Infinity;
+ lengthTracking[2] = Infinity;
+ lengthTracking[3] = Infinity;
+ lengthTracking[4] = NaN;
+ lengthTracking[5] = NaN;
+ assertEquals(0, lengthTracking.indexOf(-Infinity));
+ assertEquals(1, lengthTracking.lastIndexOf(-Infinity));
+ assertEquals(2, lengthTracking.indexOf(Infinity));
+ assertEquals(3, lengthTracking.lastIndexOf(Infinity));
+ // NaN is never found.
+ assertEquals(-1, lengthTracking.indexOf(NaN));
+ assertEquals(-1, lengthTracking.lastIndexOf(NaN));
+ }
+})();
+
+(function JoinToLocaleString() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals('0,2,4,6', fixedLength.join());
+ assertEquals('0,2,4,6', fixedLength.toLocaleString());
+ assertEquals('4,6', fixedLengthWithOffset.join());
+ assertEquals('4,6', fixedLengthWithOffset.toLocaleString());
+ assertEquals('0,2,4,6', lengthTracking.join());
+ assertEquals('0,2,4,6', lengthTracking.toLocaleString());
+ assertEquals('4,6', lengthTrackingWithOffset.join());
+ assertEquals('4,6', lengthTrackingWithOffset.toLocaleString());
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals('0,2,4,6', fixedLength.join());
+ assertEquals('0,2,4,6', fixedLength.toLocaleString());
+ assertEquals('4,6', fixedLengthWithOffset.join());
+ assertEquals('4,6', fixedLengthWithOffset.toLocaleString());
+ assertEquals('0,2,4,6,8,10', lengthTracking.join());
+ assertEquals('0,2,4,6,8,10', lengthTracking.toLocaleString());
+ assertEquals('4,6,8,10', lengthTrackingWithOffset.join());
+ assertEquals('4,6,8,10', lengthTrackingWithOffset.toLocaleString());
+ }
+})();
+
+(function JoinParameterConversionGrows() {
+ // Growing + fixed-length TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ let evil = { toString: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ assertEquals('0.0.0.0', fixedLength.join(evil));
+ }
+
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+
+ let evil = { toString: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length.
+ assertEquals('0.0.0.0', lengthTracking.join(evil));
+ }
+})();
+
+(function ToLocaleStringNumberPrototypeToLocaleStringGrows() {
+ const oldNumberPrototypeToLocaleString = Number.prototype.toLocaleString;
+ const oldBigIntPrototypeToLocaleString = BigInt.prototype.toLocaleString;
+
+ // Growing + fixed-length TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ let growAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --growAfter;
+ if (growAfter == 0) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --growAfter;
+ if (growAfter == 0) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements since it was the starting length. Resizing doesn't
+ // affect the TA.
+ assertEquals('0,0,0,0', fixedLength.toLocaleString());
+ }
+
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+
+ let growAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --growAfter;
+ if (growAfter == 0) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --growAfter;
+ if (growAfter == 0) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements since it was the starting length.
+ assertEquals('0,0,0,0', lengthTracking.toLocaleString());
+ }
+
+ Number.prototype.toLocaleString = oldNumberPrototypeToLocaleString;
+ BigInt.prototype.toLocaleString = oldBigIntPrototypeToLocaleString;
+})();
+
+(function TestMap() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ function Helper(array) {
+ const values = [];
+ function GatherValues(n, ix) {
+ assertEquals(values.length, ix);
+ values.push(n);
+ if (typeof n == 'bigint') {
+ return n + 1n;
+ }
+ return n + 1;
+ }
+ const newValues = array.map(GatherValues);
+ for (let i = 0; i < values.length; ++i) {
+ if (typeof values[i] == 'bigint') {
+ assertEquals(newValues[i], values[i] + 1n);
+ } else {
+ assertEquals(newValues[i], values[i] + 1);
+ }
+ }
+ return ToNumbers(values);
+ }
+
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ assertEquals([0, 2, 4, 6], Helper(lengthTracking));
+ assertEquals([4, 6], Helper(lengthTrackingWithOffset));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ assertEquals([0, 2, 4, 6, 8, 10], Helper(lengthTracking));
+ assertEquals([4, 6, 8, 10], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ let values;
+ let gsab;
+ let growAfter;
+ let growTo;
+ function CollectValuesAndResize(n) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ if (values.length == growAfter) {
+ gsab.grow(growTo);
+ }
+ return n;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValuesAndResize);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, 6], Helper(lengthTracking));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, 6], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapSpeciesCreateGrows() {
+ let values;
+ let gsab;
+ function CollectValues(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ }
+ return 0;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValues);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(gsab, 0, 4);
+ resizeWhenConstructorCalled = true;
+ assertEquals([0, 1, 2, 3], Helper(fixedLength));
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, gsab.byteLength);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(gsab);
+ resizeWhenConstructorCalled = true;
+ assertEquals([0, 1, 2, 3], Helper(lengthTracking));
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, gsab.byteLength);
+ }
+})();
+
+(function Reverse() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const wholeArrayView = new ctor(gsab);
+ function WriteData() {
+ // Write some data into the array.
+ for (let i = 0; i < wholeArrayView.length; ++i) {
+ WriteToTypedArray(wholeArrayView, i, 2 * i);
+ }
+ }
+ WriteData();
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ fixedLength.reverse();
+ assertEquals([6, 4, 2, 0], ToNumbers(wholeArrayView));
+ fixedLengthWithOffset.reverse();
+ assertEquals([6, 4, 0, 2], ToNumbers(wholeArrayView));
+ lengthTracking.reverse();
+ assertEquals([2, 0, 4, 6], ToNumbers(wholeArrayView));
+ lengthTrackingWithOffset.reverse();
+ assertEquals([2, 0, 6, 4], ToNumbers(wholeArrayView));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ WriteData();
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ fixedLength.reverse();
+ assertEquals([6, 4, 2, 0, 8, 10], ToNumbers(wholeArrayView));
+ fixedLengthWithOffset.reverse();
+ assertEquals([6, 4, 0, 2, 8, 10], ToNumbers(wholeArrayView));
+ lengthTracking.reverse();
+ assertEquals([10, 8, 2, 0, 4, 6], ToNumbers(wholeArrayView));
+ lengthTrackingWithOffset.reverse();
+ assertEquals([10, 8, 6, 4, 0, 2], ToNumbers(wholeArrayView));
+ }
+})();
+
+(function SetWithGrowableTarget() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taFull = new ctor(gsab);
+
+ // Orig. array: [0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, ...] << lengthTrackingWithOffset
+
+ SetHelper(fixedLength, [1, 2]);
+ assertEquals([1, 2, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLength, [3, 4], 1);
+ assertEquals([1, 3, 4, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0, 0])}, RangeError);
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0], 1)}, RangeError);
+ assertEquals([1, 3, 4, 0], ToNumbers(taFull));
+
+ SetHelper(fixedLengthWithOffset, [5, 6]);
+ assertEquals([1, 3, 5, 6], ToNumbers(taFull));
+ SetHelper(fixedLengthWithOffset, [7], 1);
+ assertEquals([1, 3, 5, 7], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0], 1)},
+ RangeError);
+ assertEquals([1, 3, 5, 7], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [8, 9]);
+ assertEquals([8, 9, 5, 7], ToNumbers(taFull));
+ SetHelper(lengthTracking, [10, 11], 1);
+ assertEquals([8, 10, 11, 7], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([8, 10, 11, 7], ToNumbers(taFull));
+
+ SetHelper(lengthTrackingWithOffset, [12, 13]);
+ assertEquals([8, 10, 12, 13], ToNumbers(taFull));
+ SetHelper(lengthTrackingWithOffset, [14], 1);
+ assertEquals([8, 10, 12, 14], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0], 1)},
+ RangeError);
+ assertEquals([8, 10, 12, 14], ToNumbers(taFull));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [8, 10, 12, 14, 0, 0]
+ // [8, 10, 12, 14] << fixedLength
+ // [12, 14] << fixedLengthWithOffset
+ // [8, 10, 12, 14, 0, 0, ...] << lengthTracking
+ // [12, 14, 0, 0, ...] << lengthTrackingWithOffset
+ SetHelper(fixedLength, [21, 22]);
+ assertEquals([21, 22, 12, 14, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLength, [23, 24], 1);
+ assertEquals([21, 23, 24, 14, 0, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0, 0])}, RangeError);
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0], 1)}, RangeError);
+ assertEquals([21, 23, 24, 14, 0, 0], ToNumbers(taFull));
+
+ SetHelper(fixedLengthWithOffset, [25, 26]);
+ assertEquals([21, 23, 25, 26, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLengthWithOffset, [27], 1);
+ assertEquals([21, 23, 25, 27, 0, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0], 1)},
+ RangeError);
+ assertEquals([21, 23, 25, 27, 0, 0], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [28, 29, 30, 31, 32, 33]);
+ assertEquals([28, 29, 30, 31, 32, 33], ToNumbers(taFull));
+ SetHelper(lengthTracking, [34, 35, 36, 37, 38], 1);
+ assertEquals([28, 34, 35, 36, 37, 38], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([28, 34, 35, 36, 37, 38], ToNumbers(taFull));
+
+ SetHelper(lengthTrackingWithOffset, [39, 40, 41, 42]);
+ assertEquals([28, 34, 39, 40, 41, 42], ToNumbers(taFull));
+ SetHelper(lengthTrackingWithOffset, [43, 44, 45], 1);
+ assertEquals([28, 34, 39, 43, 44, 45], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([28, 34, 39, 43, 44, 45], ToNumbers(taFull));
+ }
+})();
+
+(function SetSourceLengthGetterGrowsTarget() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ let gsab;
+ let growTo;
+ function CreateSourceProxy(length) {
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ gsab.grow(growTo);
+ return length;
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ // Test that we still throw for lengthTracking TAs if the source length is
+ // too large, even though we resized in the length getter (we check against
+ // the original length).
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTracking.set(CreateSourceProxy(6)); },
+ RangeError);
+ assertEquals([0, 2, 4, 6, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(6)); },
+ RangeError);
+ assertEquals([0, 2, 4, 6, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+})();
+
+(function SetGrowTargetMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ let gsab;
+ // Growing will happen when we're calling Get for the `growAt`:th data
+ // element, but we haven't yet written it to the target.
+ let growAt;
+ let growTo;
+ function CreateSourceProxy(length) {
+ let requestedIndices = [];
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ return length;
+ }
+ requestedIndices.push(prop);
+ if (requestedIndices.length == growAt) {
+ gsab.grow(growTo);
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+ growAt = 2;
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ fixedLength.set(CreateSourceProxy(4));
+ assertEquals([1, 1, 1, 1], ToNumbers(fixedLength));
+ assertEquals([1, 1, 1, 1, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ growAt = 1;
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ fixedLengthWithOffset.set(CreateSourceProxy(2));
+ assertEquals([1, 1], ToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 2, 1, 1, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+ growAt = 2;
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ lengthTracking.set(CreateSourceProxy(2));
+ assertEquals([1, 1, 4, 6, 0, 0], ToNumbers(lengthTracking));
+ assertEquals([1, 1, 4, 6, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ growAt = 1;
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(2));
+ assertEquals([1, 1, 0, 0], ToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 2, 1, 1, 0, 0], ToNumbers(new ctor(gsab)));
+ }
+})();
+
+(function SetWithGrowableSource() {
+ for (let targetIsGrowable of [false, true]) {
+ for (let targetCtor of ctors) {
+ for (let sourceCtor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(
+ 4 * sourceCtor.BYTES_PER_ELEMENT,
+ 8 * sourceCtor.BYTES_PER_ELEMENT);
+ const fixedLength = new sourceCtor(gsab, 0, 4);
+ const fixedLengthWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new sourceCtor(gsab, 0);
+ const lengthTrackingWithOffset = new sourceCtor(
+ gsab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taFull = new sourceCtor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, ...] << lengthTracking
+ // [3, 4, ...] << lengthTrackingWithOffset
+
+ const targetAb = targetIsGrowable ?
+ new ArrayBuffer(6 * targetCtor.BYTES_PER_ELEMENT) :
+ new ArrayBuffer(6 * targetCtor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * targetCtor.BYTES_PER_ELEMENT});
+ const target = new targetCtor(targetAb);
+
+ if (IsBigIntTypedArray(target) != IsBigIntTypedArray(taFull)) {
+ // Can't mix BigInt and non-BigInt types.
+ continue;
+ }
+
+ SetHelper(target, fixedLength);
+ assertEquals([1, 2, 3, 4, 0, 0], ToNumbers(target));
+
+ SetHelper(target, fixedLengthWithOffset);
+ assertEquals([3, 4, 3, 4, 0, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTracking, 1);
+ assertEquals([3, 1, 2, 3, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTrackingWithOffset, 1);
+ assertEquals([3, 3, 4, 3, 4, 0], ToNumbers(target));
+
+ // Grow.
+ gsab.grow(6 * sourceCtor.BYTES_PER_ELEMENT);
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4, 5, 6]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, 5, 6, ...] << lengthTracking
+ // [3, 4, 5, 6, ...] << lengthTrackingWithOffset
+
+ SetHelper(target, fixedLength);
+ assertEquals([1, 2, 3, 4, 4, 0], ToNumbers(target));
+
+ SetHelper(target, fixedLengthWithOffset);
+ assertEquals([3, 4, 3, 4, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTracking, 0);
+ assertEquals([1, 2, 3, 4, 5, 6], ToNumbers(target));
+
+ SetHelper(target, lengthTrackingWithOffset, 1);
+ assertEquals([1, 3, 4, 5, 6, 6], ToNumbers(target));
+ }
+ }
+ }
+})();
+
+(function Subarray() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ const fixedLengthSubFull = fixedLength.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLengthSubFull));
+ const fixedLengthWithOffsetSubFull = fixedLengthWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(fixedLengthWithOffsetSubFull));
+ const lengthTrackingSubFull = lengthTracking.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(lengthTrackingSubFull));
+ const lengthTrackingWithOffsetSubFull =
+ lengthTrackingWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(lengthTrackingWithOffsetSubFull));
+
+ // Relative offsets
+ assertEquals([4, 6], ToNumbers(fixedLength.subarray(-2)));
+ assertEquals([6], ToNumbers(fixedLengthWithOffset.subarray(-1)));
+ assertEquals([4, 6], ToNumbers(lengthTracking.subarray(-2)));
+ assertEquals([6], ToNumbers(lengthTrackingWithOffset.subarray(-1)));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLength.subarray(0)));
+ assertEquals([4, 6], ToNumbers(fixedLengthWithOffset.subarray(0)));
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbers(lengthTracking.subarray(0)));
+ assertEquals([4, 6, 8, 10],
+ ToNumbers(lengthTrackingWithOffset.subarray(0)));
+
+ assertEquals(4, fixedLengthSubFull.length);
+ assertEquals(2, fixedLengthWithOffsetSubFull.length);
+
+ // TODO(v8:11111): Are subarrays of length-tracking TAs also
+ // length-tracking? See
+ // https://github.com/tc39/proposal-resizablearraybuffer/issues/91
+ assertEquals(4, lengthTrackingSubFull.length);
+ assertEquals(2, lengthTrackingWithOffsetSubFull.length);
+ }
+})();
+
+(function SubarrayParameterConversionGrows() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [0, 2, 4, 6, ...] << lengthTracking
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ // Growing + fixed-length TA. Growing won't affect anything.
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ const evil = { valueOf: () => { gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLength.subarray(evil)));
+ }
+
+ // Growing + length-tracking TA. The length computation is done with the
+ // original length.
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+
+ const evil = { valueOf: () => { gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+
+ assertEquals([0, 2, 4, 6], ToNumbers(lengthTracking.subarray(evil)));
+ }
+})();
+
+(function SortWithDefaultComparison() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const taFull = new ctor(gsab, 0);
+ function WriteUnsortedData() {
+ // Write some data into the array.
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - 2 * i);
+ }
+ }
+ // Orig. array: [10, 8, 6, 4]
+ // [10, 8, 6, 4] << fixedLength
+ // [6, 4] << fixedLengthWithOffset
+ // [10, 8, 6, 4, ...] << lengthTracking
+ // [6, 4, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort();
+ assertEquals([4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort();
+ assertEquals([10, 8, 4, 6], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 8, 4, 6], ToNumbers(taFull));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 8, 6, 4, 2, 0]
+ // [10, 8, 6, 4] << fixedLength
+ // [6, 4] << fixedLengthWithOffset
+ // [10, 8, 6, 4, 2, 0, ...] << lengthTracking
+ // [6, 4, 2, 0, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort();
+ assertEquals([4, 6, 8, 10, 2, 0], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort();
+ assertEquals([10, 8, 4, 6, 2, 0], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 8, 0, 2, 4, 6], ToNumbers(taFull));
+ }
+})();
+
+(function SortWithCustomComparison() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const taFull = new ctor(gsab, 0);
+ function WriteUnsortedData() {
+ // Write some data into the array.
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+ function CustomComparison(a, b) {
+ // Sort all odd numbers before even numbers.
+ a = Number(a);
+ b = Number(b);
+ if (a % 2 == 1 && b % 2 == 0) {
+ return -1;
+ }
+ if (a % 2 == 0 && b % 2 == 1) {
+ return 1;
+ }
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ }
+
+ // Orig. array: [10, 9, 8, 7]
+ // [10, 9, 8, 7] << fixedLength
+ // [8, 7] << fixedLengthWithOffset
+ // [10, 9, 8, 7, ...] << lengthTracking
+ // [8, 7, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 7, 8], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 7, 8], ToNumbers(taFull));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 9, 8, 7, 6, 5]
+ // [10, 9, 8, 7] << fixedLength
+ // [8, 7] << fixedLengthWithOffset
+ // [10, 9, 8, 7, 6, 5, ...] << lengthTracking
+ // [8, 7, 6, 5, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10, 6, 5], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 7, 8, 6, 5], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort(CustomComparison);
+ assertEquals([5, 7, 9, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 5, 7, 6, 8], ToNumbers(taFull));
+ }
+})();
+
+(function SortCallbackGrows() {
+ function WriteUnsortedData(taFull) {
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+
+ let gsab;
+ let growTo;
+ function CustomComparison(a, b) {
+ gsab.grow(growTo);
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ }
+
+ // Fixed length TA.
+ for (let ctor of ctors) {
+ gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ const fixedLength = new ctor(gsab, 0, 4);
+ const taFull = new ctor(gsab, 0);
+ WriteUnsortedData(taFull);
+
+ fixedLength.sort(CustomComparison);
+
+ // Growing doesn't affect the sorting.
+ assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
+ }
+
+ // Length-tracking TA.
+ for (let ctor of ctors) {
+ gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ growTo = 6 * ctor.BYTES_PER_ELEMENT;
+ const lengthTracking = new ctor(gsab, 0);
+ const taFull = new ctor(gsab, 0);
+ WriteUnsortedData(taFull);
+
+ lengthTracking.sort(CustomComparison);
+
+ // Growing doesn't affect the sorting. Only the elements that were part of
+ // the original TA are sorted.
+ assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-helpers.js b/deps/v8/test/mjsunit/typedarray-helpers.js
index 266c264e60..366e094fe7 100644
--- a/deps/v8/test/mjsunit/typedarray-helpers.js
+++ b/deps/v8/test/mjsunit/typedarray-helpers.js
@@ -22,6 +22,12 @@ const ctors = [
MyBigInt64Array,
];
+const floatCtors = [
+ Float32Array,
+ Float64Array,
+ MyFloat32Array
+];
+
// Each element of the following array is [getter, setter, size, isBigInt].
const dataViewAccessorsAndSizes = [[DataView.prototype.getUint8,
DataView.prototype.setUint8, 1, false],
@@ -50,6 +56,23 @@ function CreateGrowableSharedArrayBuffer(byteLength, maxByteLength) {
return new SharedArrayBuffer(byteLength, {maxByteLength: maxByteLength});
}
+function IsBigIntTypedArray(ta) {
+ return (ta instanceof BigInt64Array) || (ta instanceof BigUint64Array);
+}
+
+function AllBigIntMatchedCtorCombinations(test) {
+ for (let targetCtor of ctors) {
+ for (let sourceCtor of ctors) {
+ if (IsBigIntTypedArray(new targetCtor()) !=
+ IsBigIntTypedArray(new sourceCtor())) {
+ // Can't mix BigInt and non-BigInt types.
+ continue;
+ }
+ test(targetCtor, sourceCtor);
+ }
+ }
+}
+
function ReadDataFromBuffer(ab, ctor) {
let result = [];
const ta = new ctor(ab, 0, ab.byteLength / ctor.BYTES_PER_ELEMENT);
@@ -131,6 +154,55 @@ function IncludesHelper(array, n, fromIndex) {
return array.includes(n, fromIndex);
}
+function IndexOfHelper(array, n, fromIndex) {
+ if (typeof n == 'number' &&
+ (array instanceof BigInt64Array || array instanceof BigUint64Array)) {
+ if (fromIndex == undefined) {
+ // Technically, passing fromIndex here would still result in the correct
+ // behavior, since "undefined" gets converted to 0 which is a good
+ // "default" index.
+ return array.indexOf(BigInt(n));
+ }
+ return array.indexOf(BigInt(n), fromIndex);
+ }
+ if (fromIndex == undefined) {
+ return array.indexOf(n);
+ }
+ return array.indexOf(n, fromIndex);
+}
+
+function LastIndexOfHelper(array, n, fromIndex) {
+ if (typeof n == 'number' &&
+ (array instanceof BigInt64Array || array instanceof BigUint64Array)) {
+ if (fromIndex == undefined) {
+ // Shouldn't pass fromIndex here, since passing "undefined" is not the
+ // same as not passing the parameter at all. "Undefined" will get
+ // converted to 0 which is not a good "default" index, since lastIndexOf
+ // iterates from the index downwards.
+ return array.lastIndexOf(BigInt(n));
+ }
+ return array.lastIndexOf(BigInt(n), fromIndex);
+ }
+ if (fromIndex == undefined) {
+ return array.lastIndexOf(n);
+ }
+ return array.lastIndexOf(n, fromIndex);
+}
+
+function SetHelper(target, source, offset) {
+ if (target instanceof BigInt64Array || target instanceof BigUint64Array) {
+ const bigIntSource = [];
+ for (s of source) {
+ bigIntSource.push(BigInt(s));
+ }
+ source = bigIntSource;
+ }
+ if (offset == undefined) {
+ return target.set(source);
+ }
+ return target.set(source, offset);
+}
+
function testDataViewMethodsUpToSize(view, bufferSize) {
for (const [getter, setter, size, isBigInt] of dataViewAccessorsAndSizes) {
for (let i = 0; i <= bufferSize - size; ++i) {
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
index c0f65727ee..9ece49cbe5 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
@@ -38,6 +38,57 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
}
})();
+(function ConstructFromTypedArraySpeciesConstructorDetaches() {
+ let rab;
+ class MyArrayBuffer extends ArrayBuffer {
+ constructor(...params) {
+ super(...params);
+ }
+ static get [Symbol.species]() {
+ %ArrayBufferDetach(rab);
+ }
+ };
+
+ function CreateRabForTest(ctor) {
+ const rab = new MyArrayBuffer(
+ 4 * ctor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLength = new sourceCtor(rab, 0, 4);
+ assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLengthWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTracking = new sourceCtor(rab, 0);
+ assertThrows(() => { new targetCtor(lengthTracking); }, TypeError);
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+ assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
+ TypeError);
+ });
+})();
+
(function AccessDetachedTypedArray() {
const rab = CreateResizableArrayBuffer(16, 40);
@@ -654,7 +705,7 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
let values;
let rab;
let detachAfter;
- function CollectValuesAndResize(n) {
+ function CollectValuesAndDetach(n) {
if (typeof n == 'bigint') {
values.push(Number(n));
} else {
@@ -668,19 +719,19 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
function ForEachHelper(array) {
values = [];
- array.forEach(CollectValuesAndResize);
+ array.forEach(CollectValuesAndDetach);
return values;
}
function ReduceHelper(array) {
values = [];
- array.reduce((acc, n) => { CollectValuesAndResize(n); }, "initial value");
+ array.reduce((acc, n) => { CollectValuesAndDetach(n); }, "initial value");
return values;
}
function ReduceRightHelper(array) {
values = [];
- array.reduceRight((acc, n) => { CollectValuesAndResize(n); },
+ array.reduceRight((acc, n) => { CollectValuesAndDetach(n); },
"initial value");
return values;
}
@@ -805,3 +856,627 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
assertFalse(IncludesHelper(fixedLength, 0, evil));
}
})();
+
+(function IndexOfParameterConversionDetaches() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertEquals(0, IndexOfHelper(lengthTracking, 0));
+ // The buffer is detached so indexOf returns -1.
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertEquals(0, IndexOfHelper(lengthTracking, 0));
+ // The buffer is detached so indexOf returns -1, also for undefined).
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined, evil));
+ }
+})();
+
+(function LastIndexOfParameterConversionDetaches() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 2;
+ }};
+ assertEquals(3, LastIndexOfHelper(lengthTracking, 0));
+ // The buffer is detached so lastIndexOf returns -1.
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 2;
+ }};
+ assertEquals(3, LastIndexOfHelper(lengthTracking, 0));
+ // The buffer is detached so lastIndexOf returns -1, also for undefined).
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined, evil));
+ }
+})();
+
+(function JoinToLocaleString() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ %ArrayBufferDetach(rab);
+
+ assertThrows(() => { fixedLength.join(); });
+ assertThrows(() => { fixedLength.toLocaleString(); });
+ assertThrows(() => { fixedLengthWithOffset.join(); });
+ assertThrows(() => { fixedLengthWithOffset.toLocaleString(); });
+ assertThrows(() => { lengthTracking.join(); });
+ assertThrows(() => { lengthTracking.toLocaleString(); });
+ assertThrows(() => { lengthTrackingWithOffset.join(); });
+ assertThrows(() => { lengthTrackingWithOffset.toLocaleString(); });
+ }
+})();
+
+(function JoinParameterConversionDetaches() {
+ // Detaching + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { toString: () => {
+ %ArrayBufferDetach(rab);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length, but the TA is
+ // OOB right after parameter conversion, so all elements are converted to
+ // the empty string.
+ assertEquals('...', fixedLength.join(evil));
+ }
+
+ // Detaching + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { toString: () => {
+ %ArrayBufferDetach(rab);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length, but the TA is
+ // OOB right after parameter conversion, so all elements are converted to
+ // the empty string.
+ assertEquals('...', lengthTracking.join(evil));
+ }
+})();
+
+(function ToLocaleStringNumberPrototypeToLocaleStringDetaches() {
+ const oldNumberPrototypeToLocaleString = Number.prototype.toLocaleString;
+ const oldBigIntPrototypeToLocaleString = BigInt.prototype.toLocaleString;
+
+ // Detaching + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let detachAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --detachAfter;
+ if (detachAfter == 0) {
+ %ArrayBufferDetach(rab);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --detachAfter;
+ if (detachAfter == 0) {
+ %ArrayBufferDetach(rab);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements, since it was the starting length. The TA goes
+ // OOB after 2 elements.
+ assertEquals('0,0,,', fixedLength.toLocaleString());
+ }
+
+ // Detaching + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let detachAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --detachAfter;
+ if (detachAfter == 0) {
+ %ArrayBufferDetach(rab);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --detachAfter;
+ if (detachAfter == 0) {
+ %ArrayBufferDetach(rab);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements, since it was the starting length. The TA goes
+ // OOB after 2 elements.
+ assertEquals('0,0,,', lengthTracking.toLocaleString());
+ }
+
+ Number.prototype.toLocaleString = oldNumberPrototypeToLocaleString;
+ BigInt.prototype.toLocaleString = oldBigIntPrototypeToLocaleString;
+})();
+
+(function MapDetachMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let detachAfter;
+ function CollectValuesAndDetach(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ if (values.length == detachAfter) {
+ %ArrayBufferDetach(rab);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ } else {
+ return 0;
+ }
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValuesAndDetach);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ detachAfter = 2;
+ assertEquals([0, 2, undefined, undefined], Helper(fixedLength));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ detachAfter = 1;
+ assertEquals([4, undefined], Helper(fixedLengthWithOffset));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ detachAfter = 2;
+ assertEquals([0, 2, undefined, undefined], Helper(lengthTracking));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ detachAfter = 1;
+ assertEquals([4, undefined], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapSpeciesCreateDetaches() {
+ let values;
+ let rab;
+ function CollectValues(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ }
+ return 0;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValues);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ let detachWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (detachWhenConstructorCalled) {
+ %ArrayBufferDetach(rab);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(rab, 0, 4);
+ detachWhenConstructorCalled = true;
+ assertEquals([undefined, undefined, undefined, undefined],
+ Helper(fixedLength));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let detachWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (detachWhenConstructorCalled) {
+ %ArrayBufferDetach(rab);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ detachWhenConstructorCalled = true;
+ assertEquals([undefined, undefined, undefined, undefined],
+ Helper(lengthTracking));
+ }
+})();
+
+(function SetSourceLengthGetterDetachesTarget() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ function CreateSourceProxy(length) {
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ %ArrayBufferDetach(rab);
+ return length;
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ // Tests where the length getter returns a non-zero value -> these throw.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ assertThrows(() => { fixedLength.set(CreateSourceProxy(1)); }, TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(1)); },
+ TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ assertThrows(() => { lengthTracking.set(CreateSourceProxy(1)); },
+ TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(1)); },
+ TypeError);
+ }
+
+ // Tests where the length getter returns a zero -> these don't throw.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ fixedLength.set(CreateSourceProxy(0));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ fixedLengthWithOffset.set(CreateSourceProxy(0));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ lengthTracking.set(CreateSourceProxy(0));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ lengthTrackingWithOffset.set(CreateSourceProxy(0));
+ }
+})();
+
+(function SetDetachTargetMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ // Detaching will happen when we're calling Get for the `detachAt`:th data
+ // element, but we haven't yet written it to the target.
+ let detachAt;
+ function CreateSourceProxy(length) {
+ let requestedIndices = [];
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ return length;
+ }
+ requestedIndices.push(prop);
+ if (requestedIndices.length == detachAt) {
+ %ArrayBufferDetach(rab);
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ detachAt = 2;
+ assertThrows(() => { fixedLength.set(CreateSourceProxy(4)); }, TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ detachAt = 2;
+ assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(2)); },
+ TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ detachAt = 2;
+ assertThrows(() => { lengthTracking.set(CreateSourceProxy(2)); },
+ TypeError);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ detachAt = 2;
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(2)); },
+ TypeError);
+ }
+})();
+
+(function Subarray() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ const fixedLengthSubFull = fixedLength.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLengthSubFull));
+ const fixedLengthWithOffsetSubFull = fixedLengthWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(fixedLengthWithOffsetSubFull));
+ const lengthTrackingSubFull = lengthTracking.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(lengthTrackingSubFull));
+ const lengthTrackingWithOffsetSubFull =
+ lengthTrackingWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(lengthTrackingWithOffsetSubFull));
+
+ %ArrayBufferDetach(rab);
+
+ // The previously created subarrays are OOB.
+ assertEquals(0, fixedLengthSubFull.length);
+ assertEquals(0, fixedLengthWithOffsetSubFull.length);
+ assertEquals(0, lengthTrackingSubFull.length);
+ assertEquals(0, lengthTrackingWithOffsetSubFull.length);
+
+ // Trying to create new subarrays fails.
+ assertThrows(() => { fixedLength.subarray(0); }, TypeError);
+ assertThrows(() => { fixedLengthWithOffset.subarray(0); }, TypeError);
+ assertThrows(() => { lengthTracking.subarray(0); }, TypeError);
+ assertThrows(() => { lengthTrackingWithOffset.subarray(0); }, TypeError);
+ }
+})();
+
+(function SubarrayParameterConversionDetaches() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [0, 2, 4, 6, ...] << lengthTracking
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ // Fixed-length TA + first parameter conversion detaches. Can't construct
+ // even zero-length TAs with a detached buffer.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { fixedLength.subarray(evil, 0); }, TypeError);
+ }
+
+ // Length-tracking TA + first parameter conversion detaches. Can't construct
+ // even zero-length TAs with a detached buffer.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { fixedLength.subarray(evil, 0); }, TypeError);
+ }
+
+ // Fixed-length TA + second parameter conversion detaches. Can't construct
+ // even zero-length TAs with a detached buffer.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { fixedLength.subarray(0, evil); }, TypeError);
+ }
+
+ // Length-tracking TA + second parameter conversion detaches. Can't construct
+ // even zero-length TAs with a detached buffer.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { fixedLength.subarray(0, evil); }, TypeError);
+ }
+})();
+
+(function SortCallbackDetaches() {
+ function WriteUnsortedData(taFull) {
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+
+ let rab;
+ function CustomComparison(a, b) {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }
+
+ // Fixed length TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ assertThrows(() => { fixedLength.sort(CustomComparison); });
+ }
+
+ // Length-tracking TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ assertThrows(() => { lengthTracking.sort(CustomComparison); });
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
index 039980c70d..acd636cae3 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
@@ -93,6 +93,209 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
/Invalid typed array length: 2/);
})();
+(function ConstructFromTypedArray() {
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ const rab = CreateResizableArrayBuffer(
+ 4 * sourceCtor.BYTES_PER_ELEMENT,
+ 8 * sourceCtor.BYTES_PER_ELEMENT);
+ const fixedLength = new sourceCtor(rab, 0, 4);
+ const fixedLengthWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new sourceCtor(rab, 0);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taFull = new sourceCtor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, ...] << lengthTracking
+ // [3, 4, ...] << lengthTrackingWithOffset
+
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(fixedLength)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(lengthTracking)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [1, 2, 3]
+ // [1, 2, 3, ...] << lengthTracking
+ // [3, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
+ assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
+ assertEquals([1, 2, 3], ToNumbers(new targetCtor(lengthTracking)));
+ assertEquals([3], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * sourceCtor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
+ assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
+ assertEquals([1], ToNumbers(new targetCtor(lengthTracking)));
+ assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
+ TypeError);
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
+ assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
+ assertEquals([], ToNumbers(new targetCtor(lengthTracking)));
+ assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
+ TypeError);
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * sourceCtor.BYTES_PER_ELEMENT);
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4, 5, 6]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, 5, 6, ...] << lengthTracking
+ // [3, 4, 5, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals([1, 2, 3, 4], ToNumbers(new targetCtor(fixedLength)));
+ assertEquals([3, 4], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+ assertEquals([1, 2, 3, 4, 5, 6],
+ ToNumbers(new targetCtor(lengthTracking)));
+ assertEquals([3, 4, 5, 6],
+ ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+ });
+})();
+
+(function ConstructFromTypedArraySpeciesConstructorShrinks() {
+ let rab;
+ let resizeTo;
+ class MyArrayBuffer extends ArrayBuffer {
+ constructor(...params) {
+ super(...params);
+ }
+ static get [Symbol.species]() {
+ rab.resize(resizeTo);
+ }
+ };
+
+ function CreateRabForTest(ctor) {
+ const rab = new MyArrayBuffer(
+ 4 * ctor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLength = new sourceCtor(rab, 0, 4);
+ resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
+ assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLengthWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
+ assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTracking = new sourceCtor(rab, 0);
+ resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2], ToNumbers(new targetCtor(lengthTracking)));
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+ resizeTo = 3 * sourceCtor.BYTES_PER_ELEMENT;
+ assertEquals([4], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+ resizeTo = 1 * sourceCtor.BYTES_PER_ELEMENT;
+ assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
+ TypeError);
+ });
+})();
+
+(function ConstructFromTypedArraySpeciesConstructorGrows() {
+ let rab;
+ let resizeTo;
+ class MyArrayBuffer extends ArrayBuffer {
+ constructor(...params) {
+ super(...params);
+ }
+ static get [Symbol.species]() {
+ rab.resize(resizeTo);
+ }
+ };
+ function CreateRabForTest(ctor) {
+ const rab = new MyArrayBuffer(
+ 4 * ctor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLength = new sourceCtor(rab, 0, 4);
+ resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
+ // Fixed-length TA unaffected by growing.
+ assertEquals([0, 2, 4, 6], ToNumbers(new targetCtor(fixedLength)));
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const fixedLengthWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
+ // Fixed-length TA unaffected by growing.
+ assertEquals([4, 6], ToNumbers(new targetCtor(fixedLengthWithOffset)));
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTracking = new sourceCtor(rab, 0);
+ resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, 6, 0, 0],
+ ToNumbers(new targetCtor(lengthTracking)));
+ });
+
+ AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
+ rab = CreateRabForTest(sourceCtor);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+ resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
+ assertEquals([4, 6, 0, 0],
+ ToNumbers(new targetCtor(lengthTrackingWithOffset)));
+ });
+})();
+
(function TypedArrayLengthWhenResizedOutOfBounds1() {
const rab = CreateResizableArrayBuffer(16, 40);
@@ -4017,11 +4220,6 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
})();
(function IncludesSpecialValues() {
- const floatCtors = [
- Float32Array,
- Float64Array,
- MyFloat32Array
- ];
for (let ctor of floatCtors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -4034,3 +4232,2445 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
assertTrue(lengthTracking.includes(NaN));
}
})();
+
+(function IndexOfLastIndexOf() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, Math.floor(i / 2));
+ }
+
+ // Orig. array: [0, 0, 1, 1]
+ // [0, 0, 1, 1] << fixedLength
+ // [1, 1] << fixedLengthWithOffset
+ // [0, 0, 1, 1, ...] << lengthTracking
+ // [1, 1, ...] << lengthTrackingWithOffset
+
+ assertEquals(0, IndexOfHelper(fixedLength, 0));
+ assertEquals(1, IndexOfHelper(fixedLength, 0, 1));
+ assertEquals(-1, IndexOfHelper(fixedLength, 0, 2));
+ assertEquals(-1, IndexOfHelper(fixedLength, 0, -2));
+ assertEquals(1, IndexOfHelper(fixedLength, 0, -3));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, 1));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, -3));
+ assertEquals(2, IndexOfHelper(fixedLength, 1, -2));
+ assertEquals(-1, IndexOfHelper(fixedLength, undefined));
+
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, 1));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, 2));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, -2));
+ assertEquals(1, LastIndexOfHelper(fixedLength, 0, -3));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 1, 1));
+ assertEquals(2, LastIndexOfHelper(fixedLength, 1, -2));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 1, -3));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, undefined));
+
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1, -2));
+ assertEquals(1, IndexOfHelper(fixedLengthWithOffset, 1, -1));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(0, LastIndexOfHelper(fixedLengthWithOffset, 1, -2));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1, -1));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(0, IndexOfHelper(lengthTracking, 0));
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0, 2));
+ assertEquals(2, IndexOfHelper(lengthTracking, 1, -3));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0, 2));
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0, -3));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 1, 1));
+ assertEquals(2, LastIndexOfHelper(lengthTracking, 1, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 1, -3));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(1, IndexOfHelper(lengthTrackingWithOffset, 1, 1));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1, -2));
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1, 1));
+ assertEquals(0, LastIndexOfHelper(lengthTrackingWithOffset, 1, -2));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1, -1));
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 0, 1]
+ // [0, 0, 1, ...] << lengthTracking
+ // [1, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { IndexOfHelper(fixedLength, 1); });
+ assertThrows(() => { IndexOfHelper(fixedLengthWithOffset, 1); });
+
+ assertThrows(() => { LastIndexOfHelper(fixedLength, 1); });
+ assertThrows(() => { LastIndexOfHelper(fixedLengthWithOffset, 1); });
+
+ assertEquals(2, IndexOfHelper(lengthTracking, 1));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(1, LastIndexOfHelper(lengthTracking, 0));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, LastIndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { IndexOfHelper(fixedLength, 0); });
+ assertThrows(() => { IndexOfHelper(fixedLengthWithOffset, 0); });
+ assertThrows(() => { IndexOfHelper(lengthTrackingWithOffset, 0); });
+
+ assertThrows(() => { LastIndexOfHelper(fixedLength, 0); });
+ assertThrows(() => { LastIndexOfHelper(fixedLengthWithOffset, 0); });
+ assertThrows(() => { LastIndexOfHelper(lengthTrackingWithOffset, 0); });
+
+ assertEquals(0, IndexOfHelper(lengthTracking, 0));
+
+ assertEquals(0, LastIndexOfHelper(lengthTracking, 0));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { IndexOfHelper(fixedLength, 0); });
+ assertThrows(() => { IndexOfHelper(fixedLengthWithOffset, 0); });
+ assertThrows(() => { IndexOfHelper(lengthTrackingWithOffset, 0); });
+
+ assertThrows(() => { LastIndexOfHelper(fixedLength, 0); });
+ assertThrows(() => { LastIndexOfHelper(fixedLengthWithOffset, 0); });
+ assertThrows(() => { LastIndexOfHelper(lengthTrackingWithOffset, 0); });
+
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, Math.floor(i / 2));
+ }
+
+ // Orig. array: [0, 0, 1, 1, 2, 2]
+ // [0, 0, 1, 1] << fixedLength
+ // [1, 1] << fixedLengthWithOffset
+ // [0, 0, 1, 1, 2, 2, ...] << lengthTracking
+ // [1, 1, 2, 2, ...] << lengthTrackingWithOffset
+
+ assertEquals(2, IndexOfHelper(fixedLength, 1));
+ assertEquals(-1, IndexOfHelper(fixedLength, 2));
+ assertEquals(-1, IndexOfHelper(fixedLength, undefined));
+
+ assertEquals(3, LastIndexOfHelper(fixedLength, 1));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 2));
+ assertEquals(-1, LastIndexOfHelper(fixedLength, undefined));
+
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(0, IndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, 2));
+ assertEquals(-1, IndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(fixedLengthWithOffset, 1));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, 2));
+ assertEquals(-1, LastIndexOfHelper(fixedLengthWithOffset, undefined));
+
+ assertEquals(2, IndexOfHelper(lengthTracking, 1));
+ assertEquals(4, IndexOfHelper(lengthTracking, 2));
+ assertEquals(-1, IndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(3, LastIndexOfHelper(lengthTracking, 1));
+ assertEquals(5, LastIndexOfHelper(lengthTracking, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, undefined));
+
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(0, IndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(2, IndexOfHelper(lengthTrackingWithOffset, 2));
+ assertEquals(-1, IndexOfHelper(lengthTrackingWithOffset, undefined));
+
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, 0));
+ assertEquals(1, LastIndexOfHelper(lengthTrackingWithOffset, 1));
+ assertEquals(3, LastIndexOfHelper(lengthTrackingWithOffset, 2));
+ assertEquals(-1, LastIndexOfHelper(lengthTrackingWithOffset, undefined));
+ }
+})();
+
+(function IndexOfParameterConversionShrinks() {
+ // Shrinking + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals(0, IndexOfHelper(fixedLength, 0));
+ // The TA is OOB so indexOf returns -1.
+ assertEquals(-1, IndexOfHelper(fixedLength, 0, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals(0, IndexOfHelper(fixedLength, 0));
+ // The TA is OOB so indexOf returns -1, also for undefined).
+ assertEquals(-1, IndexOfHelper(fixedLength, undefined, evil));
+ }
+
+ // Shrinking + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals(2, IndexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, IndexOfHelper(lengthTracking, 2, evil));
+ }
+})();
+
+(function LastIndexOfParameterConversionShrinks() {
+ // Shrinking + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 2;
+ }};
+ assertEquals(3, LastIndexOfHelper(fixedLength, 0));
+ // The TA is OOB so lastIndexOf returns -1.
+ assertEquals(-1, LastIndexOfHelper(fixedLength, 0, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 2;
+ }};
+ assertEquals(3, LastIndexOfHelper(fixedLength, 0));
+ // The TA is OOB so lastIndexOf returns -1, also for undefined).
+ assertEquals(-1, LastIndexOfHelper(fixedLength, undefined, evil));
+ }
+
+ // Shrinking + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 2;
+ }};
+ assertEquals(2, LastIndexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 2, evil));
+ }
+})();
+
+(function IndexOfParameterConversionGrows() {
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, 1);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0));
+ // The TA grew but we only look at the data until the original length.
+ assertEquals(-1, IndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ // Growing + length-tracking TA, index conversion.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ WriteToTypedArray(lengthTracking, 0, 1);
+
+ let evil = { valueOf: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return -4;
+ }};
+ assertEquals(0, IndexOfHelper(lengthTracking, 1, -4));
+ // The TA grew but the start index conversion is done based on the original
+ // length.
+ assertEquals(0, IndexOfHelper(lengthTracking, 1, evil));
+ }
+})();
+
+(function LastIndexOfParameterConversionGrows() {
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, 1);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return -1;
+ }};
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0));
+ // Because lastIndexOf iterates from the given index downwards, it's not
+ // possible to test that "we only look at the data until the original
+ // length" without also testing that the index conversion happening with the
+ // original length.
+ assertEquals(-1, LastIndexOfHelper(lengthTracking, 0, evil));
+ }
+
+ // Growing + length-tracking TA, index conversion.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return -4;
+ }};
+ assertEquals(0, LastIndexOfHelper(lengthTracking, 0, -4));
+ // The TA grew but the start index conversion is done based on the original
+ // length.
+ assertEquals(0, LastIndexOfHelper(lengthTracking, 0, evil));
+ }
+})();
+
+(function IndexOfLastIndexOfSpecialValues() {
+ for (let ctor of floatCtors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ lengthTracking[0] = -Infinity;
+ lengthTracking[1] = -Infinity;
+ lengthTracking[2] = Infinity;
+ lengthTracking[3] = Infinity;
+ lengthTracking[4] = NaN;
+ lengthTracking[5] = NaN;
+ assertEquals(0, lengthTracking.indexOf(-Infinity));
+ assertEquals(1, lengthTracking.lastIndexOf(-Infinity));
+ assertEquals(2, lengthTracking.indexOf(Infinity));
+ assertEquals(3, lengthTracking.lastIndexOf(Infinity));
+ // NaN is never found.
+ assertEquals(-1, lengthTracking.indexOf(NaN));
+ assertEquals(-1, lengthTracking.lastIndexOf(NaN));
+ }
+})();
+
+(function JoinToLocaleString() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals('0,2,4,6', fixedLength.join());
+ assertEquals('0,2,4,6', fixedLength.toLocaleString());
+ assertEquals('4,6', fixedLengthWithOffset.join());
+ assertEquals('4,6', fixedLengthWithOffset.toLocaleString());
+ assertEquals('0,2,4,6', lengthTracking.join());
+ assertEquals('0,2,4,6', lengthTracking.toLocaleString());
+ assertEquals('4,6', lengthTrackingWithOffset.join());
+ assertEquals('4,6', lengthTrackingWithOffset.toLocaleString());
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { fixedLength.join(); });
+ assertThrows(() => { fixedLength.toLocaleString(); });
+ assertThrows(() => { fixedLengthWithOffset.join(); });
+ assertThrows(() => { fixedLengthWithOffset.toLocaleString(); });
+
+ assertEquals('0,2,4', lengthTracking.join());
+ assertEquals('0,2,4', lengthTracking.toLocaleString());
+ assertEquals('4', lengthTrackingWithOffset.join());
+ assertEquals('4', lengthTrackingWithOffset.toLocaleString());
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { fixedLength.join(); });
+ assertThrows(() => { fixedLength.toLocaleString(); });
+ assertThrows(() => { fixedLengthWithOffset.join(); });
+ assertThrows(() => { fixedLengthWithOffset.toLocaleString(); });
+ assertThrows(() => { lengthTrackingWithOffset.join(); });
+ assertThrows(() => { lengthTrackingWithOffset.toLocaleString(); });
+
+ assertEquals('0', lengthTracking.join());
+ assertEquals('0', lengthTracking.toLocaleString());
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.join(); });
+ assertThrows(() => { fixedLength.toLocaleString(); });
+ assertThrows(() => { fixedLengthWithOffset.join(); });
+ assertThrows(() => { fixedLengthWithOffset.toLocaleString(); });
+ assertThrows(() => { lengthTrackingWithOffset.join(); });
+ assertThrows(() => { lengthTrackingWithOffset.toLocaleString(); });
+
+ assertEquals('', lengthTracking.join());
+ assertEquals('', lengthTracking.toLocaleString());
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals('0,2,4,6', fixedLength.join());
+ assertEquals('0,2,4,6', fixedLength.toLocaleString());
+ assertEquals('4,6', fixedLengthWithOffset.join());
+ assertEquals('4,6', fixedLengthWithOffset.toLocaleString());
+ assertEquals('0,2,4,6,8,10', lengthTracking.join());
+ assertEquals('0,2,4,6,8,10', lengthTracking.toLocaleString());
+ assertEquals('4,6,8,10', lengthTrackingWithOffset.join());
+ assertEquals('4,6,8,10', lengthTrackingWithOffset.toLocaleString());
+ }
+})();
+
+(function JoinParameterConversionShrinks() {
+ // Shrinking + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { toString: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length, but the TA is
+ // OOB right after parameter conversion, so all elements are converted to
+ // the empty string.
+ assertEquals('...', fixedLength.join(evil));
+ }
+
+ // Shrinking + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { toString: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length. Elements beyond
+ // the new length are converted to the empty string.
+ assertEquals('0.0..', lengthTracking.join(evil));
+ }
+})();
+
+(function JoinParameterConversionGrows() {
+ // Growing + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { toString: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ assertEquals('0.0.0.0', fixedLength.join(evil));
+ }
+
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { toString: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return '.';
+ }};
+ // We iterate 4 elements, since it was the starting length.
+ assertEquals('0.0.0.0', lengthTracking.join(evil));
+ }
+})();
+
+(function ToLocaleStringNumberPrototypeToLocaleStringShrinks() {
+ const oldNumberPrototypeToLocaleString = Number.prototype.toLocaleString;
+ const oldBigIntPrototypeToLocaleString = BigInt.prototype.toLocaleString;
+
+ // Shrinking + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let resizeAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements, since it was the starting length. The TA goes
+ // OOB after 2 elements.
+ assertEquals('0,0,,', fixedLength.toLocaleString());
+ }
+
+ // Shrinking + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let resizeAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements, since it was the starting length. Elements beyond
+ // the new length are converted to the empty string.
+ assertEquals('0,0,,', lengthTracking.toLocaleString());
+ }
+
+ Number.prototype.toLocaleString = oldNumberPrototypeToLocaleString;
+ BigInt.prototype.toLocaleString = oldBigIntPrototypeToLocaleString;
+})();
+
+(function ToLocaleStringNumberPrototypeToLocaleStringGrows() {
+ const oldNumberPrototypeToLocaleString = Number.prototype.toLocaleString;
+ const oldBigIntPrototypeToLocaleString = BigInt.prototype.toLocaleString;
+
+ // Growing + fixed-length TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let resizeAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements since it was the starting length. Resizing doesn't
+ // affect the TA.
+ assertEquals('0,0,0,0', fixedLength.toLocaleString());
+ }
+
+ // Growing + length-tracking TA.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let resizeAfter = 2;
+ Number.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements since it was the starting length.
+ assertEquals('0,0,0,0', lengthTracking.toLocaleString());
+ }
+
+ Number.prototype.toLocaleString = oldNumberPrototypeToLocaleString;
+ BigInt.prototype.toLocaleString = oldBigIntPrototypeToLocaleString;
+})();
+
+(function TestMap() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < taWrite.length; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ function Helper(array) {
+ const values = [];
+ function GatherValues(n, ix) {
+ assertEquals(values.length, ix);
+ values.push(n);
+ if (typeof n == 'bigint') {
+ return n + 1n;
+ }
+ return n + 1;
+ }
+ const newValues = array.map(GatherValues);
+ for (let i = 0; i < values.length; ++i) {
+ if (typeof values[i] == 'bigint') {
+ assertEquals(newValues[i], values[i] + 1n);
+ } else {
+ assertEquals(newValues[i], values[i] + 1);
+ }
+ }
+ return ToNumbers(values);
+ }
+
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ assertEquals([0, 2, 4, 6], Helper(lengthTracking));
+ assertEquals([4, 6], Helper(lengthTrackingWithOffset));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { Helper(fixedLength); });
+ assertThrows(() => { Helper(fixedLengthWithOffset); });
+
+ assertEquals([0, 2, 4], Helper(lengthTracking));
+ assertEquals([4], Helper(lengthTrackingWithOffset));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { Helper(fixedLength); });
+ assertThrows(() => { Helper(fixedLengthWithOffset); });
+ assertThrows(() => { Helper(lengthTrackingWithOffset); });
+
+ assertEquals([0], Helper(lengthTracking));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { Helper(fixedLength); });
+ assertThrows(() => { Helper(fixedLengthWithOffset); });
+ assertThrows(() => { Helper(lengthTrackingWithOffset); });
+
+ assertEquals([], Helper(lengthTracking));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ assertEquals([0, 2, 4, 6, 8, 10], Helper(lengthTracking));
+ assertEquals([4, 6, 8, 10], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapShrinkMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function CollectValuesAndResize(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ }
+ return 0;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValuesAndResize);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, undefined, undefined], Helper(fixedLength));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, undefined], Helper(fixedLengthWithOffset));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, undefined], Helper(lengthTracking));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, undefined], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function CollectValuesAndResize(n) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ return n;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValuesAndResize);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, 6], Helper(fixedLength));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, 6], Helper(fixedLengthWithOffset));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([0, 2, 4, 6], Helper(lengthTracking));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertEquals([4, 6], Helper(lengthTrackingWithOffset));
+ }
+})();
+
+(function MapSpeciesCreateShrinks() {
+ let values;
+ let rab;
+ function CollectValues(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ }
+ return 0;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValues);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(rab, 0, 4);
+ resizeWhenConstructorCalled = true;
+ assertEquals([undefined, undefined, undefined, undefined],
+ Helper(fixedLength));
+ assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ resizeWhenConstructorCalled = true;
+ assertEquals([0, 1, undefined, undefined], Helper(lengthTracking));
+ assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ }
+})();
+
+(function MapSpeciesCreateGrows() {
+ let values;
+ let rab;
+ function CollectValues(n, ix, ta) {
+ if (typeof n == 'bigint') {
+ values.push(Number(n));
+ } else {
+ values.push(n);
+ }
+ // We still need to return a valid BigInt / non-BigInt, even if
+ // n is `undefined`.
+ if (IsBigIntTypedArray(ta)) {
+ return 0n;
+ }
+ return 0;
+ }
+
+ function Helper(array) {
+ values = [];
+ array.map(CollectValues);
+ return values;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(rab, 0, 4);
+ resizeWhenConstructorCalled = true;
+ assertEquals([0, 1, 2, 3], Helper(fixedLength));
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ resizeWhenConstructorCalled = true;
+ assertEquals([0, 1, 2, 3], Helper(lengthTracking));
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ }
+})();
+
+(function Reverse() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const wholeArrayView = new ctor(rab);
+ function WriteData() {
+ // Write some data into the array.
+ for (let i = 0; i < wholeArrayView.length; ++i) {
+ WriteToTypedArray(wholeArrayView, i, 2 * i);
+ }
+ }
+ WriteData();
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ fixedLength.reverse();
+ assertEquals([6, 4, 2, 0], ToNumbers(wholeArrayView));
+ fixedLengthWithOffset.reverse();
+ assertEquals([6, 4, 0, 2], ToNumbers(wholeArrayView));
+ lengthTracking.reverse();
+ assertEquals([2, 0, 4, 6], ToNumbers(wholeArrayView));
+ lengthTrackingWithOffset.reverse();
+ assertEquals([2, 0, 6, 4], ToNumbers(wholeArrayView));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+ WriteData();
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { fixedLength.reverse(); });
+ assertThrows(() => { fixedLengthWithOffset.reverse(); });
+
+ lengthTracking.reverse();
+ assertEquals([4, 2, 0], ToNumbers(wholeArrayView));
+ lengthTrackingWithOffset.reverse();
+ assertEquals([4, 2, 0], ToNumbers(wholeArrayView));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+ WriteData();
+
+ assertThrows(() => { fixedLength.reverse(); });
+ assertThrows(() => { fixedLengthWithOffset.reverse(); });
+ assertThrows(() => { lengthTrackingWithOffset.reverse(); });
+
+ lengthTracking.reverse();
+ assertEquals([0], ToNumbers(wholeArrayView));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.reverse(); });
+ assertThrows(() => { fixedLengthWithOffset.reverse(); });
+ assertThrows(() => { lengthTrackingWithOffset.reverse(); });
+
+ lengthTracking.reverse();
+ assertEquals([], ToNumbers(wholeArrayView));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ WriteData();
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ fixedLength.reverse();
+ assertEquals([6, 4, 2, 0, 8, 10], ToNumbers(wholeArrayView));
+ fixedLengthWithOffset.reverse();
+ assertEquals([6, 4, 0, 2, 8, 10], ToNumbers(wholeArrayView));
+ lengthTracking.reverse();
+ assertEquals([10, 8, 2, 0, 4, 6], ToNumbers(wholeArrayView));
+ lengthTrackingWithOffset.reverse();
+ assertEquals([10, 8, 6, 4, 0, 2], ToNumbers(wholeArrayView));
+ }
+})();
+
+(function SetWithResizableTarget() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const taFull = new ctor(rab);
+
+ // Orig. array: [0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, ...] << lengthTrackingWithOffset
+
+ // For making sure we're not calling the source length or element getters
+ // if the target is OOB.
+ const throwingProxy = new Proxy({}, {
+ get(target, prop, receiver) {
+ throw new Error('Called getter for ' + prop);
+ }});
+
+ SetHelper(fixedLength, [1, 2]);
+ assertEquals([1, 2, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLength, [3, 4], 1);
+ assertEquals([1, 3, 4, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([1, 3, 4, 0], ToNumbers(taFull));
+
+ SetHelper(fixedLengthWithOffset, [5, 6]);
+ assertEquals([1, 3, 5, 6], ToNumbers(taFull));
+ SetHelper(fixedLengthWithOffset, [7], 1);
+ assertEquals([1, 3, 5, 7], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0], 1)},
+ RangeError);
+ assertEquals([1, 3, 5, 7], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [8, 9]);
+ assertEquals([8, 9, 5, 7], ToNumbers(taFull));
+ SetHelper(lengthTracking, [10, 11], 1);
+ assertEquals([8, 10, 11, 7], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([8, 10, 11, 7], ToNumbers(taFull));
+
+ SetHelper(lengthTrackingWithOffset, [12, 13]);
+ assertEquals([8, 10, 12, 13], ToNumbers(taFull));
+ SetHelper(lengthTrackingWithOffset, [14], 1);
+ assertEquals([8, 10, 12, 14], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0])});
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0], 1)});
+ assertEquals([8, 10, 12, 14], ToNumbers(taFull));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [8, 10, 12]
+ // [8, 10, 12, ...] << lengthTracking
+ // [12, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { SetHelper(fixedLength, throwingProxy)}, TypeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, throwingProxy)},
+ TypeError);
+ assertEquals([8, 10, 12], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [15, 16]);
+ assertEquals([15, 16, 12], ToNumbers(taFull));
+ SetHelper(lengthTracking, [17, 18], 1);
+ assertEquals([15, 17, 18], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0])}, RangeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0], 1)}, RangeError);
+ assertEquals([15, 17, 18], ToNumbers(taFull));
+
+ SetHelper(lengthTrackingWithOffset, [19]);
+ assertEquals([15, 17, 19], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0], 1)},
+ RangeError);
+ assertEquals([15, 17, 19], ToNumbers(taFull));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { SetHelper(fixedLength, throwingProxy)}, TypeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, throwingProxy)},
+ TypeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, throwingProxy)},
+ TypeError);
+ assertEquals([15], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [20]);
+ assertEquals([20], ToNumbers(taFull));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { SetHelper(fixedLength, throwingProxy)}, TypeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, throwingProxy)},
+ TypeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, throwingProxy)},
+ TypeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0])}, RangeError);
+ assertEquals([], ToNumbers(taFull));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 0, 0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, 0, 0, ...] << lengthTrackingWithOffset
+ SetHelper(fixedLength, [21, 22]);
+ assertEquals([21, 22, 0, 0, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLength, [23, 24], 1);
+ assertEquals([21, 23, 24, 0, 0, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0, 0])}, RangeError);
+ assertThrows(() => { SetHelper(fixedLength, [0, 0, 0, 0], 1)}, RangeError);
+ assertEquals([21, 23, 24, 0, 0, 0], ToNumbers(taFull));
+
+ SetHelper(fixedLengthWithOffset, [25, 26]);
+ assertEquals([21, 23, 25, 26, 0, 0], ToNumbers(taFull));
+ SetHelper(fixedLengthWithOffset, [27], 1);
+ assertEquals([21, 23, 25, 27, 0, 0], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(fixedLengthWithOffset, [0, 0], 1)},
+ RangeError);
+ assertEquals([21, 23, 25, 27, 0, 0], ToNumbers(taFull));
+
+ SetHelper(lengthTracking, [28, 29, 30, 31, 32, 33]);
+ assertEquals([28, 29, 30, 31, 32, 33], ToNumbers(taFull));
+ SetHelper(lengthTracking, [34, 35, 36, 37, 38], 1);
+ assertEquals([28, 34, 35, 36, 37, 38], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTracking, [0, 0, 0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([28, 34, 35, 36, 37, 38], ToNumbers(taFull));
+
+ SetHelper(lengthTrackingWithOffset, [39, 40, 41, 42]);
+ assertEquals([28, 34, 39, 40, 41, 42], ToNumbers(taFull));
+ SetHelper(lengthTrackingWithOffset, [43, 44, 45], 1);
+ assertEquals([28, 34, 39, 43, 44, 45], ToNumbers(taFull));
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0, 0, 0])},
+ RangeError);
+ assertThrows(() => { SetHelper(lengthTrackingWithOffset, [0, 0, 0, 0], 1)},
+ RangeError);
+ assertEquals([28, 34, 39, 43, 44, 45], ToNumbers(taFull));
+ }
+})();
+
+(function SetSourceLengthGetterShrinksTarget() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ let resizeTo;
+ function CreateSourceProxy(length) {
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ rab.resize(resizeTo);
+ return length;
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ // Tests where the length getter returns a non-zero value -> these throw if
+ // the TA went OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { fixedLength.set(CreateSourceProxy(1)); }, TypeError);
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(1)); },
+ TypeError);
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTracking.set(CreateSourceProxy(1));
+ assertEquals([1, 2, 4], ToNumbers(lengthTracking));
+ assertEquals([1, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(1));
+ assertEquals([1], ToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 2, 1], ToNumbers(new ctor(rab)));
+ }
+
+ // Length-tracking TA goes OOB because of the offset.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 1 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(1)); },
+ TypeError);
+ assertEquals([0], ToNumbers(new ctor(rab)));
+ }
+
+ // Tests where the length getter returns a zero -> these don't throw even if
+ // the TA went OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ fixedLength.set(CreateSourceProxy(0));
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ fixedLengthWithOffset.set(CreateSourceProxy(0));
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTracking.set(CreateSourceProxy(0));
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(0));
+ assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ // Length-tracking TA goes OOB because of the offset.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 1 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(0));
+ assertEquals([0], ToNumbers(new ctor(rab)));
+ }
+})();
+
+(function SetSourceLengthGetterGrowsTarget() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ let resizeTo;
+ function CreateSourceProxy(length) {
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ rab.resize(resizeTo);
+ return length;
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ // Test that we still throw for lengthTracking TAs if the source length is
+ // too large, even though we resized in the length getter (we check against
+ // the original length).
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTracking.set(CreateSourceProxy(6)); },
+ RangeError);
+ assertEquals([0, 2, 4, 6, 0, 0], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(4)); },
+ RangeError);
+ assertEquals([0, 2, 4, 6, 0, 0], ToNumbers(new ctor(rab)));
+ }
+})();
+
+(function SetShrinkTargetMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ // Resizing will happen when we're calling Get for the `resizeAt`:th data
+ // element, but we haven't yet written it to the target.
+ let resizeAt;
+ let resizeTo;
+ function CreateSourceProxy(length) {
+ let requestedIndices = [];
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ return length;
+ }
+ requestedIndices.push(prop);
+ if (requestedIndices.length == resizeAt) {
+ rab.resize(resizeTo);
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeAt = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { fixedLength.set(CreateSourceProxy(4)); }, TypeError);
+ assertEquals([1, 2, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeAt = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(2)); },
+ TypeError);
+ assertEquals([0, 2, 1], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAt = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTracking.set(CreateSourceProxy(2));
+ assertEquals([1, 1, 4], ToNumbers(lengthTracking));
+ assertEquals([1, 1, 4], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeAt = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(2));
+ assertEquals([1], ToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 2, 1], ToNumbers(new ctor(rab)));
+ }
+
+ // Length-tracking TA goes OOB because of the offset.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeAt = 1;
+ resizeTo = 1 * ctor.BYTES_PER_ELEMENT;
+ assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(2)); },
+ TypeError);
+ assertEquals([0], ToNumbers(new ctor(rab)));
+ }
+})();
+
+(function SetGrowTargetMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let rab;
+ // Resizing will happen when we're calling Get for the `resizeAt`:th data
+ // element, but we haven't yet written it to the target.
+ let resizeAt;
+ let resizeTo;
+ function CreateSourceProxy(length) {
+ let requestedIndices = [];
+ return new Proxy({}, {
+ get(target, prop, receiver) {
+ if (prop == 'length') {
+ return length;
+ }
+ requestedIndices.push(prop);
+ if (requestedIndices.length == resizeAt) {
+ rab.resize(resizeTo);
+ }
+ return true; // Can be converted to both BigInt and Number.
+ }
+ });
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ resizeAt = 2;
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ fixedLength.set(CreateSourceProxy(4));
+ assertEquals([1, 1, 1, 1], ToNumbers(fixedLength));
+ assertEquals([1, 1, 1, 1, 0, 0], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ resizeAt = 1;
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ fixedLengthWithOffset.set(CreateSourceProxy(2));
+ assertEquals([1, 1], ToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 2, 1, 1, 0, 0], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAt = 2;
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ lengthTracking.set(CreateSourceProxy(2));
+ assertEquals([1, 1, 4, 6, 0, 0], ToNumbers(lengthTracking));
+ assertEquals([1, 1, 4, 6, 0, 0], ToNumbers(new ctor(rab)));
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ resizeAt = 1;
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ lengthTrackingWithOffset.set(CreateSourceProxy(2));
+ assertEquals([1, 1, 0, 0], ToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 2, 1, 1, 0, 0], ToNumbers(new ctor(rab)));
+ }
+})();
+
+(function SetWithResizableSource() {
+ for (let targetIsResizable of [false, true]) {
+ for (let targetCtor of ctors) {
+ for (let sourceCtor of ctors) {
+ const rab = CreateResizableArrayBuffer(
+ 4 * sourceCtor.BYTES_PER_ELEMENT,
+ 8 * sourceCtor.BYTES_PER_ELEMENT);
+ const fixedLength = new sourceCtor(rab, 0, 4);
+ const fixedLengthWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new sourceCtor(rab, 0);
+ const lengthTrackingWithOffset = new sourceCtor(
+ rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taFull = new sourceCtor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, ...] << lengthTracking
+ // [3, 4, ...] << lengthTrackingWithOffset
+
+ const targetAb = targetIsResizable ?
+ new ArrayBuffer(6 * targetCtor.BYTES_PER_ELEMENT) :
+ new ArrayBuffer(6 * targetCtor.BYTES_PER_ELEMENT,
+ {maxByteLength: 8 * targetCtor.BYTES_PER_ELEMENT});
+ const target = new targetCtor(targetAb);
+
+ if (IsBigIntTypedArray(target) != IsBigIntTypedArray(taFull)) {
+ // Can't mix BigInt and non-BigInt types.
+ continue;
+ }
+
+ SetHelper(target, fixedLength);
+ assertEquals([1, 2, 3, 4, 0, 0], ToNumbers(target));
+
+ SetHelper(target, fixedLengthWithOffset);
+ assertEquals([3, 4, 3, 4, 0, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTracking, 1);
+ assertEquals([3, 1, 2, 3, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTrackingWithOffset, 1);
+ assertEquals([3, 3, 4, 3, 4, 0], ToNumbers(target));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * sourceCtor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [1, 2, 3]
+ // [1, 2, 3, ...] << lengthTracking
+ // [3, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { SetHelper(target, fixedLength)}, TypeError);
+ assertThrows(() => { SetHelper(target, fixedLengthWithOffset)},
+ TypeError);
+ assertEquals([3, 3, 4, 3, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTracking);
+ assertEquals([1, 2, 3, 3, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTrackingWithOffset);
+ assertEquals([3, 2, 3, 3, 4, 0], ToNumbers(target));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * sourceCtor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { SetHelper(target, fixedLength)}, TypeError);
+ assertThrows(() => { SetHelper(target, fixedLengthWithOffset)},
+ TypeError);
+ assertThrows(() => { SetHelper(target, lengthTrackingWithOffset)},
+ TypeError);
+
+ SetHelper(target, lengthTracking, 3);
+ assertEquals([3, 2, 3, 1, 4, 0], ToNumbers(target));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { SetHelper(target, fixedLength)}, TypeError);
+ assertThrows(() => { SetHelper(target, fixedLengthWithOffset)},
+ TypeError);
+ assertThrows(() => { SetHelper(target, lengthTrackingWithOffset)},
+ TypeError);
+
+ SetHelper(target, lengthTracking, 4);
+ assertEquals([3, 2, 3, 1, 4, 0], ToNumbers(target));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * sourceCtor.BYTES_PER_ELEMENT);
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taFull, i, i + 1);
+ }
+
+ // Orig. array: [1, 2, 3, 4, 5, 6]
+ // [1, 2, 3, 4] << fixedLength
+ // [3, 4] << fixedLengthWithOffset
+ // [1, 2, 3, 4, 5, 6, ...] << lengthTracking
+ // [3, 4, 5, 6, ...] << lengthTrackingWithOffset
+
+ SetHelper(target, fixedLength);
+ assertEquals([1, 2, 3, 4, 4, 0], ToNumbers(target));
+
+ SetHelper(target, fixedLengthWithOffset);
+ assertEquals([3, 4, 3, 4, 4, 0], ToNumbers(target));
+
+ SetHelper(target, lengthTracking, 0);
+ assertEquals([1, 2, 3, 4, 5, 6], ToNumbers(target));
+
+ SetHelper(target, lengthTrackingWithOffset, 1);
+ assertEquals([1, 3, 4, 5, 6, 6], ToNumbers(target));
+ }
+ }
+ }
+})();
+
+(function Subarray() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ const fixedLengthSubFull = fixedLength.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLengthSubFull));
+ const fixedLengthWithOffsetSubFull = fixedLengthWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(fixedLengthWithOffsetSubFull));
+ const lengthTrackingSubFull = lengthTracking.subarray(0);
+ assertEquals([0, 2, 4, 6], ToNumbers(lengthTrackingSubFull));
+ const lengthTrackingWithOffsetSubFull =
+ lengthTrackingWithOffset.subarray(0);
+ assertEquals([4, 6], ToNumbers(lengthTrackingWithOffsetSubFull));
+
+ // Relative offsets
+ assertEquals([4, 6], ToNumbers(fixedLength.subarray(-2)));
+ assertEquals([6], ToNumbers(fixedLengthWithOffset.subarray(-1)));
+ assertEquals([4, 6], ToNumbers(lengthTracking.subarray(-2)));
+ assertEquals([6], ToNumbers(lengthTrackingWithOffset.subarray(-1)));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ // We can create subarrays of OOB arrays (which have length 0), as long as
+ // the new arrays are not OOB.
+ assertEquals([], ToNumbers(fixedLength.subarray(0)));
+ assertEquals([], ToNumbers(fixedLengthWithOffset.subarray(0)));
+
+ assertEquals([0, 2, 4], ToNumbers(lengthTracking.subarray(0)));
+ assertEquals([4], ToNumbers(lengthTrackingWithOffset.subarray(0)));
+
+ // Also the previously created subarrays are OOB.
+ assertEquals(0, fixedLengthSubFull.length);
+ assertEquals(0, fixedLengthWithOffsetSubFull.length);
+
+ // Relative offsets
+ assertEquals([2, 4], ToNumbers(lengthTracking.subarray(-2)));
+ assertEquals([4], ToNumbers(lengthTrackingWithOffset.subarray(-1)));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertEquals([], ToNumbers(fixedLength.subarray(0)));
+ assertEquals([0], ToNumbers(lengthTracking.subarray(0)));
+
+ // Even the 0-length subarray of fixedLengthWithOffset would be OOB ->
+ // this throws.
+ assertThrows(() => { fixedLengthWithOffset.subarray(0); }, RangeError);
+
+ // Also the previously created subarrays are OOB.
+ assertEquals(0, fixedLengthSubFull.length);
+ assertEquals(0, fixedLengthWithOffsetSubFull.length);
+ assertEquals(0, lengthTrackingWithOffsetSubFull.length);
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertEquals([], ToNumbers(fixedLength.subarray(0)));
+ assertEquals([], ToNumbers(lengthTracking.subarray(0)));
+
+ assertThrows(() => { fixedLengthWithOffset.subarray(0); }, RangeError);
+ assertThrows(() => { lengthTrackingWithOffset.subarray(0); }, RangeError);
+
+ // Also the previously created subarrays are OOB.
+ assertEquals(0, fixedLengthSubFull.length);
+ assertEquals(0, fixedLengthWithOffsetSubFull.length);
+ assertEquals(0, lengthTrackingWithOffsetSubFull.length);
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLength.subarray(0)));
+ assertEquals([4, 6], ToNumbers(fixedLengthWithOffset.subarray(0)));
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbers(lengthTracking.subarray(0)));
+ assertEquals([4, 6, 8, 10],
+ ToNumbers(lengthTrackingWithOffset.subarray(0)));
+
+ // Also the previously created subarrays are no longer OOB.
+ assertEquals(4, fixedLengthSubFull.length);
+ assertEquals(2, fixedLengthWithOffsetSubFull.length);
+
+ // TODO(v8:11111): Are subarrays of length-tracking TAs also
+ // length-tracking? See
+ // https://github.com/tc39/proposal-resizablearraybuffer/issues/91
+ assertEquals(4, lengthTrackingSubFull.length);
+ assertEquals(2, lengthTrackingWithOffsetSubFull.length);
+ }
+})();
+
+(function SubarrayParameterConversionShrinks() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [0, 2, 4, 6, ...] << lengthTracking
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ // Fixed-length TA + first parameter conversion shrinks. The old length is
+ // used in the length computation, and the subarray construction fails.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertThrows(() => { fixedLength.subarray(evil); }, RangeError);
+ }
+
+ // Like the previous test, but now we construct a smaller subarray and it
+ // succeeds.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals([0], ToNumbers(fixedLength.subarray(evil, 1)));
+ }
+
+ // Fixed-length TA + second parameter conversion shrinks. The old length is
+ // used in the length computation, and the subarray construction fails.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 3;
+ }};
+ assertThrows(() => { fixedLength.subarray(0, evil); }, RangeError);
+ }
+
+ // Like the previous test, but now we construct a smaller subarray and it
+ // succeeds.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 1;
+ }};
+ assertEquals([0], ToNumbers(fixedLength.subarray(0, evil)));
+ }
+
+ // Shrinking + fixed-length TA, subarray construction succeeds even though the
+ // TA goes OOB.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ const evil = { valueOf: () => { rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+
+ assertEquals([0], ToNumbers(fixedLength.subarray(evil, 1)));
+ }
+
+ // Length-tracking TA + first parameter conversion shrinks. The old length is
+ // used in the length computation, and the subarray construction fails.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertThrows(() => { lengthTracking.subarray(evil); });
+ }
+
+ // Like the previous test, but now we construct a smaller subarray and it
+ // succeeds.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertEquals([0], ToNumbers(lengthTracking.subarray(evil, 1)));
+ }
+
+ // Length-tracking TA + first parameter conversion shrinks. The second
+ // parameter is negative -> the relative index is not recomputed, and the
+ // subarray construction fails.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertThrows(() => { lengthTracking.subarray(evil, -1); });
+ }
+
+ // Length-tracking TA + second parameter conversion shrinks. The second
+ // parameter is too large -> the subarray construction fails.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 3;
+ }};
+ assertThrows(() => { lengthTracking.subarray(0, evil); });
+ }
+})();
+
+(function SubarrayParameterConversionGrows() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [0, 2, 4, 6, ...] << lengthTracking
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ // Growing a fixed length TA back in bounds.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // Make `fixedLength` OOB.
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+
+ const evil = { valueOf: () => { rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+
+ // The length computation is done before parameter conversion. At that
+ // point, the length is 0, since the TA is OOB.
+ assertEquals([], ToNumbers(fixedLength.subarray(evil, 0, 1)));
+ }
+
+ // Growing + fixed-length TA. Growing won't affect anything.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ const evil = { valueOf: () => { rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+
+ assertEquals([0, 2, 4, 6], ToNumbers(fixedLength.subarray(evil)));
+ }
+
+ // Growing + length-tracking TA. The length computation is done with the
+ // original length.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ const evil = { valueOf: () => { rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;}};
+
+ assertEquals([0, 2, 4, 6], ToNumbers(lengthTracking.subarray(evil)));
+ }
+})();
+
+(function SortWithDefaultComparison() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const taFull = new ctor(rab, 0);
+ function WriteUnsortedData() {
+ // Write some data into the array.
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - 2 * i);
+ }
+ }
+ // Orig. array: [10, 8, 6, 4]
+ // [10, 8, 6, 4] << fixedLength
+ // [6, 4] << fixedLengthWithOffset
+ // [10, 8, 6, 4, ...] << lengthTracking
+ // [6, 4, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort();
+ assertEquals([4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort();
+ assertEquals([10, 8, 4, 6], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 8, 4, 6], ToNumbers(taFull));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 8, 6]
+ // [10, 8, 6, ...] << lengthTracking
+ // [6, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 8, 6], ToNumbers(taFull));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ assertThrows(() => { lengthTrackingWithOffset.sort(); }, TypeError);
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(); }, TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ assertThrows(() => { lengthTrackingWithOffset.sort(); }, TypeError);
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 8, 6, 4, 2, 0]
+ // [10, 8, 6, 4] << fixedLength
+ // [6, 4] << fixedLengthWithOffset
+ // [10, 8, 6, 4, 2, 0, ...] << lengthTracking
+ // [6, 4, 2, 0, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort();
+ assertEquals([4, 6, 8, 10, 2, 0], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort();
+ assertEquals([10, 8, 4, 6, 2, 0], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 8, 0, 2, 4, 6], ToNumbers(taFull));
+ }
+})();
+
+(function SortWithCustomComparison() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ const taFull = new ctor(rab, 0);
+ function WriteUnsortedData() {
+ // Write some data into the array.
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+ function CustomComparison(a, b) {
+ // Sort all odd numbers before even numbers.
+ a = Number(a);
+ b = Number(b);
+ if (a % 2 == 1 && b % 2 == 0) {
+ return -1;
+ }
+ if (a % 2 == 0 && b % 2 == 1) {
+ return 1;
+ }
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ }
+ // Orig. array: [10, 9, 8, 7]
+ // [10, 9, 8, 7] << fixedLength
+ // [8, 7] << fixedLengthWithOffset
+ // [10, 9, 8, 7, ...] << lengthTracking
+ // [8, 7, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 7, 8], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData(CustomComparison);
+ lengthTrackingWithOffset.sort();
+ assertEquals([10, 9, 7, 8], ToNumbers(taFull));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 9, 8]
+ // [10, 9, 8, ...] << lengthTracking
+ // [8, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(CustomComparison); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(CustomComparison); },
+ TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort(CustomComparison);
+ assertEquals([9, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 8], ToNumbers(taFull));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(CustomComparison); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(CustomComparison); },
+ TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ assertThrows(() => { lengthTrackingWithOffset.sort(CustomComparison); },
+ TypeError);
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLength.sort(CustomComparison); }, TypeError);
+
+ WriteUnsortedData();
+ assertThrows(() => { fixedLengthWithOffset.sort(CustomComparison); },
+ TypeError);
+
+ WriteUnsortedData();
+ lengthTracking.sort();
+ assertEquals([], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ assertThrows(() => { lengthTrackingWithOffset.sort(CustomComparison); },
+ TypeError);
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [10, 9, 8, 7, 6, 5]
+ // [10, 9, 8, 7] << fixedLength
+ // [8, 7] << fixedLengthWithOffset
+ // [10, 9, 8, 7, 6, 5, ...] << lengthTracking
+ // [8, 7, 6, 5, ...] << lengthTrackingWithOffset
+
+ WriteUnsortedData();
+ fixedLength.sort(CustomComparison);
+ assertEquals([7, 9, 8, 10, 6, 5], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ fixedLengthWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 7, 8, 6, 5], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTracking.sort(CustomComparison);
+ assertEquals([5, 7, 9, 6, 8, 10], ToNumbers(taFull));
+
+ WriteUnsortedData();
+ lengthTrackingWithOffset.sort(CustomComparison);
+ assertEquals([10, 9, 5, 7, 6, 8], ToNumbers(taFull));
+ }
+})();
+
+(function SortCallbackShrinks() {
+ function WriteUnsortedData(taFull) {
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+
+ let rab;
+ let resizeTo;
+ function CustomComparison(a, b) {
+ rab.resize(resizeTo);
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ }
+
+ // Fixed length TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 2 * ctor.BYTES_PER_ELEMENT;
+ const fixedLength = new ctor(rab, 0, 4);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ assertThrows(() => { fixedLength.sort(CustomComparison); });
+
+ // The data is unchanged.
+ assertEquals([10, 9], ToNumbers(taFull));
+ }
+
+ // Length-tracking TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 2 * ctor.BYTES_PER_ELEMENT;
+ const lengthTracking = new ctor(rab, 0);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ lengthTracking.sort(CustomComparison);
+
+ // The sort result is implementation defined, but it contains 2 elements out
+ // of the 4 original ones.
+ const newData = ToNumbers(taFull);
+ assertEquals(2, newData.length);
+ assertTrue([10, 9, 8, 7].includes(newData[0]));
+ assertTrue([10, 9, 8, 7].includes(newData[1]));
+ }
+})();
+
+(function SortCallbackGrows() {
+ function WriteUnsortedData(taFull) {
+ for (let i = 0; i < taFull.length; ++i) {
+ WriteToTypedArray(taFull, i, 10 - i);
+ }
+ }
+
+ let rab;
+ let resizeTo;
+ function CustomComparison(a, b) {
+ rab.resize(resizeTo);
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ }
+
+ // Fixed length TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ const fixedLength = new ctor(rab, 0, 4);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ fixedLength.sort(CustomComparison);
+
+ // Growing doesn't affect the sorting.
+ assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
+ }
+
+ // Length-tracking TA.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 6 * ctor.BYTES_PER_ELEMENT;
+ const lengthTracking = new ctor(rab, 0);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ lengthTracking.sort(CustomComparison);
+
+ // Growing doesn't affect the sorting. Only the elements that were part of
+ // the original TA are sorted.
+ assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index eeab4983f5..8630a7bd85 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc
+// Flags: --expose-wasm --expose-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/call-ref.js b/deps/v8/test/mjsunit/wasm/call-ref.js
index 812a764f7f..2a38bba178 100644
--- a/deps/v8/test/mjsunit/wasm/call-ref.js
+++ b/deps/v8/test/mjsunit/wasm/call-ref.js
@@ -96,10 +96,11 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print("--imported function from another module--");
assertEquals(57, instance.exports.test_wasm_import());
+ /* TODO(7748): Implement cross-module type canonicalization.
print("--not imported function defined in another module--");
assertEquals(19, instance.exports.main(
exporting_instance.exports.addition, 12, 7));
-
+*/
print("--imported WebAssembly.Function--")
assertEquals(21, instance.exports.test_js_api_import());
print("--not imported WebAssembly.Function--")
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 74c48525e0..da990385f3 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -4,7 +4,7 @@
// The test needs --wasm-tier-up because we can't serialize and deserialize
// Liftoff code.
-// Flags: --expose-wasm --allow-natives-syntax --expose-gc --wasm-tier-up
+// Flags: --expose-wasm --allow-natives-syntax --expose-gc --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/element-segments-with-reftypes.js b/deps/v8/test/mjsunit/wasm/element-segments-with-reftypes.js
index b30900cf41..a17e5438b8 100644
--- a/deps/v8/test/mjsunit/wasm/element-segments-with-reftypes.js
+++ b/deps/v8/test/mjsunit/wasm/element-segments-with-reftypes.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-typed-funcref
+// Flags: --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -97,3 +97,22 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(instance.exports.table.get(2)(10), 20);
assertEquals(instance.exports.table.get(3)(10), 11);
})();
+
+// Test that mutable globals cannot be used in element segments, even under
+// --experimental-wasm-gc.
+(function TestMutableGlobalInElementSegment() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let global = builder.addImportedGlobal("m", "g", kWasmFuncRef, true);
+ let table = builder.addTable(kWasmFuncRef, 10, 10);
+ builder.addActiveElementSegment(
+ table.index, WasmInitExpr.I32Const(0),
+ [WasmInitExpr.GlobalGet(global.index)], kWasmFuncRef);
+ builder.addExportOfKind("table", kExternalTable, table.index);
+
+ assertThrows(
+ () => builder.instantiate({m : {g :
+ new WebAssembly.Global({value: "anyfunc", mutable: true}, null)}}),
+ WebAssembly.CompileError,
+ /mutable globals cannot be used in initializer expressions/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-api.js b/deps/v8/test/mjsunit/wasm/exceptions-api.js
index 29d3de0602..41e81b6a9f 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-api.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-api.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-eh --experimental-wasm-reftypes
+// Flags: --experimental-wasm-eh
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-externref.js b/deps/v8/test/mjsunit/wasm/exceptions-externref.js
index c0505599b9..329e7d9618 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-externref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-externref.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-eh --experimental-wasm-reftypes --allow-natives-syntax
+// Flags: --experimental-wasm-eh --allow-natives-syntax
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
diff --git a/deps/v8/test/mjsunit/wasm/extended-constants.js b/deps/v8/test/mjsunit/wasm/extended-constants.js
new file mode 100644
index 0000000000..688f175ccd
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/extended-constants.js
@@ -0,0 +1,74 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-extended-const
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function ExtendedConstantsTestI32() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+
+ let imported_global_0 = builder.addImportedGlobal("m", "g0", kWasmI32, false);
+ let imported_global_1 = builder.addImportedGlobal("m", "g1", kWasmI32, false);
+
+ let defined_global = builder.addGlobal(
+ kWasmI32, false,
+ WasmInitExpr.I32Add(
+ WasmInitExpr.GlobalGet(imported_global_0),
+ WasmInitExpr.I32Mul(
+ WasmInitExpr.GlobalGet(imported_global_1),
+ WasmInitExpr.I32Sub(
+ WasmInitExpr.GlobalGet(imported_global_0),
+ WasmInitExpr.I32Const(1)))));
+
+ builder.addExportOfKind("global", kExternalGlobal, defined_global.index);
+
+ let value0 = 123;
+ let value1 = -450;
+
+ let global_obj0 = new WebAssembly.Global({value: "i32", mutable: false},
+ value0);
+ let global_obj1 = new WebAssembly.Global({value: "i32", mutable: false},
+ value1);
+
+ let instance = builder.instantiate({m : {g0: global_obj0, g1: global_obj1}});
+
+ assertEquals(value0 + (value1 * (value0 - 1)), instance.exports.global.value);
+})();
+
+(function ExtendedConstantsTestI64() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+
+ let imported_global_0 = builder.addImportedGlobal("m", "g0", kWasmI64, false);
+ let imported_global_1 = builder.addImportedGlobal("m", "g1", kWasmI64, false);
+
+ let defined_global = builder.addGlobal(
+ kWasmI64, false,
+ WasmInitExpr.I64Add(
+ WasmInitExpr.GlobalGet(imported_global_0),
+ WasmInitExpr.I64Mul(
+ WasmInitExpr.GlobalGet(imported_global_1),
+ WasmInitExpr.I64Sub(
+ WasmInitExpr.GlobalGet(imported_global_0),
+ WasmInitExpr.I64Const(1)))));
+
+ builder.addExportOfKind("global", kExternalGlobal, defined_global.index);
+
+ let value0 = 123n;
+ let value1 = -450n;
+
+ let global_obj0 = new WebAssembly.Global({value: "i64", mutable: false},
+ value0);
+ let global_obj1 = new WebAssembly.Global({value: "i64", mutable: false},
+ value1);
+
+ let instance = builder.instantiate({m : {g0: global_obj0, g1: global_obj1}});
+
+ assertEquals(value0 + (value1 * (value0 - 1n)),
+ instance.exports.global.value);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
index 33fade59a1..e456c5b020 100644
--- a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --expose-gc --liftoff
-// Flags: --no-wasm-tier-up --experimental-liftoff-extern-ref
+// Flags: --expose-gc --liftoff --no-wasm-tier-up
d8.file.execute("test/mjsunit/wasm/externref-globals.js");
diff --git a/deps/v8/test/mjsunit/wasm/externref-globals.js b/deps/v8/test/mjsunit/wasm/externref-globals.js
index d3b3a9403f..c071b77edf 100644
--- a/deps/v8/test/mjsunit/wasm/externref-globals.js
+++ b/deps/v8/test/mjsunit/wasm/externref-globals.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --expose-gc
+// Flags: --expose-gc
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/externref-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-liftoff.js
index 0debadac33..a27058939f 100644
--- a/deps/v8/test/mjsunit/wasm/externref-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/externref-liftoff.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc --liftoff
-// Flags: --no-wasm-tier-up --experimental-liftoff-extern-ref
+// Flags: --expose-wasm --expose-gc --liftoff --no-wasm-tier-up
// Flags: --allow-natives-syntax
d8.file.execute("test/mjsunit/wasm/externref.js");
diff --git a/deps/v8/test/mjsunit/wasm/externref.js b/deps/v8/test/mjsunit/wasm/externref.js
index 43192a7ef7..cf052cdd75 100644
--- a/deps/v8/test/mjsunit/wasm/externref.js
+++ b/deps/v8/test/mjsunit/wasm/externref.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc
-// Flags: --allow-natives-syntax
+// Flags: --expose-wasm --expose-gc --allow-natives-syntax
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/gc-nominal.js b/deps/v8/test/mjsunit/wasm/gc-nominal.js
index 8b371fc84f..30f5ab684e 100644
--- a/deps/v8/test/mjsunit/wasm/gc-nominal.js
+++ b/deps/v8/test/mjsunit/wasm/gc-nominal.js
@@ -6,14 +6,16 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-(function() {
+(function TestNominalTypesBasic() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
- let struct1 = builder.addStructSubtype([makeField(kWasmI32, true)]);
- let struct2 = builder.addStructSubtype(
+ builder.setNominal();
+ let struct1 = builder.addStruct([makeField(kWasmI32, true)]);
+ let struct2 = builder.addStruct(
[makeField(kWasmI32, true), makeField(kWasmI32, true)], struct1);
- let array1 = builder.addArraySubtype(kWasmI32, true);
- let array2 = builder.addArraySubtype(kWasmI32, true, array1);
+ let array1 = builder.addArray(kWasmI32, true);
+ let array2 = builder.addArray(kWasmI32, true, array1);
builder.addFunction("main", kSig_v_v)
.addLocals(wasmOptRefType(struct1), 1)
@@ -28,27 +30,146 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
kGCPrefix, kExprStructNewDefault, struct2, kExprLocalSet, 0,
// Check that we can create an array with explicit RTT...
kExprI32Const, 10, // length
- kGCPrefix, kExprRttCanon, array2, kGCPrefix,
- kExprArrayNewDefaultWithRtt, array2,
+ kGCPrefix, kExprRttCanon, array2,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array2,
// ...and upcast it.
kExprLocalSet, 1,
// Check that we can create an array with implicit RTT.
kExprI32Const, 10, // length
- kGCPrefix, kExprArrayNewDefault, array2, kExprLocalSet, 1
- ])
+ kGCPrefix, kExprArrayNewDefault, array2, kExprLocalSet, 1])
.exportFunc();
// This test is only interested in type checking.
builder.instantiate();
})();
-(function () {
- let builder = new WasmModuleBuilder();
- let t0 = builder.addStructSubtype([]);
- for (let i = 0; i < 32; i++) {
- builder.addStructSubtype([], i);
- }
- assertThrows(
- () => builder.instantiate(), WebAssembly.CompileError,
- /subtyping depth is greater than allowed/);
+(function TestSubtypingDepthTooLarge() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.setNominal();
+ builder.addStruct([]);
+ for (let i = 0; i < 32; i++) builder.addStruct([], i);
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ /subtyping depth is greater than allowed/);
+})();
+
+(function TestArrayInitFromDataStatic() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.setNominal();
+ let array_type_index = builder.addArray(kWasmI16, true);
+
+ let dummy_byte = 0xff;
+ let element_0 = 1000;
+ let element_1 = -2222;
+
+ let data_segment = builder.addPassiveDataSegment(
+ [dummy_byte, element_0 & 0xff, (element_0 >> 8) & 0xff,
+ element_1 & 0xff, (element_1 >> 8) & 0xff]);
+
+ let global = builder.addGlobal(
+ wasmRefType(array_type_index), true,
+ WasmInitExpr.ArrayInitFromDataStatic(
+ array_type_index, data_segment,
+ [WasmInitExpr.I32Const(1), WasmInitExpr.I32Const(2)], builder));
+
+ builder.addFunction("global_get", kSig_i_i)
+ .addBody([
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ // parameters: (segment offset, array length, array index)
+ builder.addFunction("init_from_data", kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayInitFromDataStatic,
+ array_type_index, data_segment,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ builder.addFunction("drop_segment", kSig_v_v)
+ .addBody([kNumericPrefix, kExprDataDrop, data_segment])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(element_0, instance.exports.global_get(0));
+ assertEquals(element_1, instance.exports.global_get(1));
+
+ let init = instance.exports.init_from_data;
+
+ assertEquals(element_0, init(1, 2, 0));
+ assertEquals(element_1, init(1, 2, 1));
+
+ assertTraps(kTrapArrayTooLarge, () => init(1, 1000000000, 0));
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(2, 2, 0));
+
+ instance.exports.drop_segment();
+
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(1, 2, 0));
+})();
+
+(function TestArrayInitFromData() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.setNominal();
+ let array_type_index = builder.addArray(kWasmI16, true);
+
+ let dummy_byte = 0xff;
+ let element_0 = 1000;
+ let element_1 = -2222;
+
+ let data_segment = builder.addPassiveDataSegment(
+ [dummy_byte, element_0 & 0xff, (element_0 >> 8) & 0xff,
+ element_1 & 0xff, (element_1 >> 8) & 0xff]);
+
+ let global = builder.addGlobal(
+ wasmRefType(array_type_index), true,
+ WasmInitExpr.ArrayInitFromData(
+ array_type_index, data_segment,
+ [WasmInitExpr.I32Const(1), WasmInitExpr.I32Const(2),
+ WasmInitExpr.RttCanon(array_type_index)],
+ builder));
+
+ builder.addFunction("global_get", kSig_i_i)
+ .addBody([
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ // parameters: (segment offset, array length, array index)
+ builder.addFunction("init_from_data", kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprRttCanon, array_type_index,
+ kGCPrefix, kExprArrayInitFromData, array_type_index, data_segment,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ builder.addFunction("drop_segment", kSig_v_v)
+ .addBody([kNumericPrefix, kExprDataDrop, data_segment])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(element_0, instance.exports.global_get(0));
+ assertEquals(element_1, instance.exports.global_get(1));
+
+ let init = instance.exports.init_from_data;
+
+ assertEquals(element_0, init(1, 2, 0));
+ assertEquals(element_1, init(1, 2, 1));
+
+ assertTraps(kTrapArrayTooLarge, () => init(1, 1000000000, 0));
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(2, 2, 0));
+
+ instance.exports.drop_segment();
+
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(1, 2, 0));
})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-optimizations.js b/deps/v8/test/mjsunit/wasm/gc-optimizations.js
index 9fcf9cfe85..145977cf02 100644
--- a/deps/v8/test/mjsunit/wasm/gc-optimizations.js
+++ b/deps/v8/test/mjsunit/wasm/gc-optimizations.js
@@ -376,9 +376,10 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function AllocationFolding() {
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
+ builder.setNominal();
- let struct_index = builder.addStructSubtype([makeField(kWasmI32, true)]);
- let struct_2 = builder.addStructSubtype([
+ let struct_index = builder.addStruct([makeField(kWasmI32, true)]);
+ let struct_2 = builder.addStruct([
makeField(wasmRefType(struct_index), false),
makeField(wasmRefType(struct_index), false)
]);
diff --git a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
index bcb67de750..3fe78293cf 100644
--- a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
@@ -2,18 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-grow-shared-memory --experimental-wasm-threads
-
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-(function TestGrowSharedMemoryWithoutPostMessage() {
- print(arguments.callee.name);
- let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
- assertEquals(memory.buffer.byteLength, kPageSize);
- assertEquals(1, memory.grow(1));
- assertEquals(memory.buffer.byteLength, 2 * kPageSize);
-})();
-
function assertIsWasmSharedMemory(memory) {
assertTrue(memory instanceof Object,
"Memory is not an object");
@@ -34,6 +24,14 @@ function assertTrue(value, msg) {
let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
+(function TestGrowSharedMemoryWithoutPostMessage() {
+ print(arguments.callee.name);
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
+ assertEquals(memory.buffer.byteLength, kPageSize);
+ assertEquals(1, memory.grow(1));
+ assertEquals(memory.buffer.byteLength, 2 * kPageSize);
+})();
+
(function TestPostMessageWithGrow() {
print(arguments.callee.name);
function workerCode(workerHelpers) {
@@ -381,3 +379,75 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
"initial": 1, "maximum": 2, "shared": true });
assertEquals(memory.grow(0), 1);
})();
+
+// Tests that a function receives the update of a shared memory's size if a
+// loop's stack guard gets invoked. This is not strictly required by spec, but
+// we implement it as an optimization.
+(function TestStackGuardUpdatesMemorySize() {
+ print(arguments.callee.name);
+
+ let initial_size = 1;
+ let final_size = 2;
+
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
+
+ let sync_index = 64;
+ let sync_value = 42;
+
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("mod", "mem", 1, 5, true);
+ // int x;
+ // while (true) {
+ // memory[sync_index] = sync_value;
+ // x = memory_size();
+ // if (x != 1) break;
+ // }
+ // return x;
+ builder.addFunction("main", kSig_i_v)
+ .addLocals(kWasmI32, 1)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ ...wasmI32Const(sync_index),
+ ...wasmI32Const(sync_value),
+ kAtomicPrefix, kExprI32AtomicStore, 0, 0,
+ kExprMemorySize, 0, kExprLocalTee, 0,
+ kExprI32Const, initial_size,
+ kExprI32Eq,
+ kExprBrIf, 0,
+ kExprEnd,
+ kExprLocalGet, 0])
+ .exportFunc();
+
+ builder.addFunction("setter", kSig_v_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kAtomicPrefix, kExprI32AtomicStore, 0, 0])
+ .exportFunc();
+
+ builder.addFunction("getter", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kAtomicPrefix, kExprI32AtomicLoad, 0, 0])
+ .exportFunc();
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ function workerCode() {
+ onmessage = function(obj) {
+ let instance = new WebAssembly.Instance(
+ obj.module, {mod: {mem: obj.memory}});
+ let res = instance.exports.main();
+ postMessage(res);
+ }
+ }
+
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: []});
+ worker.postMessage({module: module, memory: memory});
+
+ let instance = new WebAssembly.Instance(module, {mod: {mem: memory}});
+
+ // Make sure the worker thread has entered the loop.
+ while (instance.exports.getter(sync_index) != sync_value) {}
+
+ memory.grow(final_size - initial_size);
+
+ assertEquals(final_size, worker.getMessage());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/imported-function-types.js b/deps/v8/test/mjsunit/wasm/imported-function-types.js
index 895644017e..756296cbc1 100644
--- a/deps/v8/test/mjsunit/wasm/imported-function-types.js
+++ b/deps/v8/test/mjsunit/wasm/imported-function-types.js
@@ -35,8 +35,9 @@ var importing_module = function(imported_function) {
return builder.instantiate({other: {func: imported_function}});
};
+// TODO(7748): Implement cross-module subtyping.
// Same form/different index should be fine.
-importing_module(exporting_module.exports.func2);
+// importing_module(exporting_module.exports.func2);
// Same index/different form should throw.
assertThrows(
() => importing_module(exporting_module.exports.func1),
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
index 8954f11558..0295e8c451 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes --experimental-wasm-return-call
+// Flags: --expose-wasm --experimental-wasm-return-call
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 7f1041973b..5f80d7fdf4 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --expose-gc
+// Flags: --expose-gc
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
@@ -900,3 +900,19 @@ function js_div(a, b) { return (a / b) | 0; }
assertEquals(300, main(2));
assertEquals(400, main(3));
})();
+
+(function TestNonImportedGlobalInElementSegment() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let global = builder.addGlobal(kWasmFuncRef, false,
+ WasmInitExpr.RefNull(kWasmFuncRef));
+ let table = builder.addTable(kWasmFuncRef, 10, 10);
+ builder.addActiveElementSegment(
+ table.index, WasmInitExpr.I32Const(0),
+ [WasmInitExpr.GlobalGet(global.index)], kWasmFuncRef);
+ builder.addExportOfKind("table", kExternalTable, table.index);
+
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ /non-imported globals cannot be used in initializer expressions/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/inlining.js b/deps/v8/test/mjsunit/wasm/inlining.js
index 52fc2a3a6e..eb6fd39227 100644
--- a/deps/v8/test/mjsunit/wasm/inlining.js
+++ b/deps/v8/test/mjsunit/wasm/inlining.js
@@ -62,6 +62,49 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(10, instance.exports.main(10));
})();
+(function LoopInLoopTest() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let fact = builder.addFunction("fact", kSig_i_i)
+ .addLocals(kWasmI32, 1)
+ .addBody([// result = 1;
+ kExprI32Const, 1, kExprLocalSet, 1,
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 1,
+ // if input == 1 return result;
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Eq, kExprBrIf, 1,
+ // result *= input;
+ kExprLocalGet, 0, kExprI32Mul, kExprLocalSet, 1,
+ // input -= 1;
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprLocalSet, 0,
+ kExprBr, 0,
+ kExprEnd,
+ kExprUnreachable]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addLocals(kWasmI32, 1)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 1,
+ // if input == 0 return sum;
+ kExprLocalGet, 0, kExprI32Const, 0, kExprI32Eq, kExprBrIf, 1,
+ // sum += fact(input);
+ kExprLocalGet, 0, kExprCallFunction, fact.index,
+ kExprI32Add, kExprLocalSet, 1,
+ // input -= 1;
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprLocalSet, 0,
+ kExprBr, 0,
+ kExprEnd,
+ kExprUnreachable])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(33, instance.exports.main(4));
+})();
+
(function InfiniteLoopTest() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -252,7 +295,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(20, instance.exports.main(10, 20));
})();
-// Tests that no LoopExits are emitted in the inlined function.
(function LoopUnrollingTest() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -278,6 +320,46 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(25, instance.exports.main(10));
})();
+(function ThrowInLoopTest() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ // f(x, y) {
+ // do {
+ // if (x < 0) throw x;
+ // y++; x--;
+ // } while (x > 0);
+ // return y;
+ // }
+ let callee = builder.addFunction("callee", kSig_i_ii)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0, kExprI32Const, 0, kExprI32LtS,
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0, kExprThrow, tag,
+ kExprEnd,
+ kExprLocalGet, 1, kExprI32Const, 1, kExprI32Add, kExprLocalSet, 1,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub, kExprLocalSet, 0,
+ kExprLocalGet, 0, kExprI32Const, 0, kExprI32GtS, kExprBrIf, 0,
+ kExprEnd,
+ kExprLocalGet, 1
+ ]);
+ // g(x) = (try { f(x, 5) } catch(x) { x }) + x
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprTry, kWasmI32,
+ kExprLocalGet, 0, kExprI32Const, 5,
+ kExprCallFunction, callee.index,
+ kExprCatch, tag,
+ kExprEnd,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(25, instance.exports.main(10));
+ assertEquals(-20, instance.exports.main(-10));
+})();
+
(function InlineSubtypeSignatureTest() {
print(arguments.callee.name);
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index f9b231242c..30ddf4c1ee 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -715,18 +715,18 @@ assertThrows(
/must be convertible to a valid number/);
assertThrows(
() => set.call(tbl1, 0, undefined), TypeError,
- /must be null or a WebAssembly function/);
+ /Argument 1 is invalid for table of type funcref/);
assertThrows(
() => set.call(tbl1, undefined, undefined), TypeError,
/must be convertible to a valid number/);
assertThrows(
() => set.call(tbl1, 0, {}), TypeError,
- /must be null or a WebAssembly function/);
-assertThrows(() => set.call(tbl1, 0, function() {
-}), TypeError, /must be null or a WebAssembly function/);
+ /Argument 1 is invalid for table of type funcref/);
+assertThrows(() => set.call(tbl1, 0, function() {}),
+ TypeError, /Argument 1 is invalid for table of type funcref/);
assertThrows(
() => set.call(tbl1, 0, Math.sin), TypeError,
- /must be null or a WebAssembly function/);
+ /Argument 1 is invalid for table of type funcref/);
assertThrows(
() => set.call(tbl1, {valueOf() { throw Error('hai') }}, null), Error,
'hai');
diff --git a/deps/v8/test/mjsunit/wasm/load-immutable.js b/deps/v8/test/mjsunit/wasm/load-immutable.js
new file mode 100644
index 0000000000..567175d7fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/load-immutable.js
@@ -0,0 +1,137 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-liftoff --experimental-wasm-nn-locals
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Test that comparisons with array length in a loop get optimized away.
+(function ArrayLoopOptimizationTest() {
+ var builder = new WasmModuleBuilder();
+ var array_index = builder.addArray(kWasmI32, true);
+
+ // Increase these parameters to measure performance.
+ let array_size = 10; // 100000000;
+ let iterations = 1; // 50;
+
+ builder.addFunction("array_inc", kSig_v_v)
+ .addLocals(wasmRefType(array_index), 1)
+ .addLocals(kWasmI32, 2)
+ // Locals: 0 -> array, 1 -> length, 2 -> index
+ .addBody([
+ ...wasmI32Const(array_size),
+ kExprCallFunction, 1,
+ kExprLocalSet, 0,
+
+ // length = array.length
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayLen, array_index,
+ kExprLocalSet, 1,
+
+ // while (true) {
+ kExprLoop, kWasmVoid,
+ // if (index < length) {
+ kExprLocalGet, 2,
+ kExprLocalGet, 1,
+ kExprI32LtU,
+ kExprIf, kWasmVoid,
+ // array[index] = array[index] + 5;
+ kExprLocalGet, 0,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayGet, array_index,
+ kExprI32Const, 5,
+ kExprI32Add,
+ kGCPrefix, kExprArraySet, array_index,
+ // index = index + 1;
+ kExprLocalGet, 2,
+ kExprI32Const, 1,
+ kExprI32Add,
+ kExprLocalSet, 2,
+ // continue;
+ kExprBr, 1,
+ // }
+ // break;
+ kExprEnd,
+ // }
+ kExprEnd])
+ .exportFunc();
+
+ builder.addFunction("make_array",
+ makeSig([kWasmI32], [wasmRefType(array_index)]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayNewDefault, array_index])
+
+ var instance = builder.instantiate({});
+
+ let before = Date.now();
+ for (let i = 0; i < iterations; i++) {
+ instance.exports.array_inc();
+ }
+ let after = Date.now();
+ print(
+ "Average of " + iterations + " runs: " +
+ (after - before)/iterations + "ms");
+})();
+
+(function ImmutableLoadThroughEffect() {
+ var builder = new WasmModuleBuilder();
+ builder.setNominal();
+ var struct = builder.addStruct([
+ makeField(kWasmI32, false), makeField(kWasmI32, true)]);
+
+ let effect = builder.addImport('m', 'f', kSig_v_v);
+
+ builder.addFunction("main", kSig_i_i)
+ .addLocals(wasmRefType(struct), 1)
+ .addBody([
+ // Initialize an object
+ kExprLocalGet, 0,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
+ kGCPrefix, kExprStructNew, struct,
+ kExprLocalSet, 1,
+ // Introduce unknown effect
+ kExprCallFunction, effect,
+ // TF should be able to eliminate this load...
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructGet, struct, 0,
+ // ... but not this one.
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructGet, struct, 1,
+ kExprI32Add
+ ])
+ .exportFunc();
+
+ var instance = builder.instantiate({m : { f: function () {} }});
+
+ assertEquals(85, instance.exports.main(42));
+})();
+
+(function FunctionTypeCheckThroughEffect() {
+ var builder = new WasmModuleBuilder();
+ var sig = builder.addType(kSig_i_i);
+
+ let effect = builder.addImport('m', 'f', kSig_v_v);
+
+ builder.addFunction("input", sig)
+ .addBody([kExprLocalGet, 0])
+ .exportFunc();
+
+ builder.addFunction("main", makeSig([wasmRefType(kWasmFuncRef)], [kWasmI32]))
+ .addBody([
+ // Type check the function
+ kExprLocalGet, 0, kGCPrefix, kExprRttCanon, sig, kGCPrefix, kExprRefCast,
+ kExprDrop,
+ // Introduce unknown effect
+ kExprCallFunction, effect,
+ // TF should be able to eliminate the second type check, and return the
+ // constant 1.
+ kExprLocalGet, 0, kGCPrefix, kExprRttCanon, sig,
+ kGCPrefix, kExprRefTest])
+ .exportFunc();
+
+ var instance = builder.instantiate({m : { f: function () {} }});
+
+ assertEquals(1, instance.exports.main(instance.exports.input));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/memory64.js b/deps/v8/test/mjsunit/wasm/memory64.js
index dc5b3ecf65..a1ba05b3aa 100644
--- a/deps/v8/test/mjsunit/wasm/memory64.js
+++ b/deps/v8/test/mjsunit/wasm/memory64.js
@@ -111,3 +111,112 @@ function BasicMemory64Tests(num_pages) {
assertEquals(-1n, instance.exports.grow(7n)); // Above the of 10.
assertEquals(4n, instance.exports.grow(6n)); // Just at the maximum of 10.
})();
+
+(function TestBulkMemoryOperations() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const kMemSizeInPages = 10;
+ const kMemSize = kMemSizeInPages * kPageSize;
+ builder.addMemory64(kMemSizeInPages, kMemSizeInPages);
+ const kSegmentSize = 1024;
+ // Build a data segment with values [0, kSegmentSize-1].
+ const segment = Array.from({length: kSegmentSize}, (_, idx) => idx)
+ builder.addPassiveDataSegment(segment);
+ builder.exportMemoryAs('memory');
+
+ builder.addFunction('fill', makeSig([kWasmI64, kWasmI32, kWasmI64], []))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0 (dst)
+ kExprLocalGet, 1, // local.get 1 (value)
+ kExprLocalGet, 2, // local.get 2 (size)
+ kNumericPrefix, kExprMemoryFill, 0 // memory.fill mem=0
+ ])
+ .exportFunc();
+
+ builder.addFunction('copy', makeSig([kWasmI64, kWasmI64, kWasmI64], []))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0 (dst)
+ kExprLocalGet, 1, // local.get 1 (src)
+ kExprLocalGet, 2, // local.get 2 (size)
+ kNumericPrefix, kExprMemoryCopy, 0, 0 // memory.copy srcmem=0 dstmem=0
+ ])
+ .exportFunc();
+
+ builder.addFunction('init', makeSig([kWasmI64, kWasmI32, kWasmI32], []))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0 (dst)
+ kExprLocalGet, 1, // local.get 1 (offset)
+ kExprLocalGet, 2, // local.get 2 (size)
+ kNumericPrefix, kExprMemoryInit, 0, 0 // memory.init seg=0 mem=0
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+ let fill = instance.exports.fill;
+ let copy = instance.exports.copy;
+ let init = instance.exports.init;
+ // {memory(offset,size)} extracts the memory at [offset, offset+size)] into an
+ // Array.
+ let memory = (offset, size) => Array.from(new Uint8Array(
+ instance.exports.memory.buffer.slice(offset, offset + size)));
+
+ // Empty init (size=0).
+ init(0n, 0, 0);
+ assertEquals([0, 0], memory(0, 2));
+ // Init memory[5..7] with [10..12].
+ init(5n, 10, 3);
+ assertEquals([0, 0, 10, 11, 12, 0, 0], memory(3, 7));
+ // Init the end of memory ([kMemSize-2, kMemSize-1]) with [20, 21].
+ init(BigInt(kMemSize-2), 20, 2);
+ assertEquals([0, 0, 20, 21], memory(kMemSize - 4, 4));
+ // Writing slightly OOB.
+ assertTraps(kTrapMemOutOfBounds, () => init(BigInt(kMemSize-2), 20, 3));
+ // Writing OOB, but the low 32-bit are in-bound.
+ assertTraps(kTrapMemOutOfBounds, () => init(1n << 32n, 0, 0));
+ // OOB even though size == 0.
+ assertTraps(kTrapMemOutOfBounds, () => init(-1n, 0, 0));
+ // More OOB.
+ assertTraps(kTrapMemOutOfBounds, () => init(-1n, 0, 1));
+ assertTraps(kTrapMemOutOfBounds, () => init(1n << 62n, 0, 1));
+ assertTraps(kTrapMemOutOfBounds, () => init(1n << 63n, 0, 1));
+
+ // Empty copy (size=0).
+ copy(0n, 0n, 0n);
+ // Copy memory[5..7] (containing [10..12]) to [3..5].
+ copy(3n, 5n, 3n);
+ assertEquals([0, 0, 0, 10, 11, 12, 11, 12, 0], memory(0, 9));
+ // Copy to the end of memory ([kMemSize-2, kMemSize-1]).
+ copy(BigInt(kMemSize-2), 3n, 2n);
+ assertEquals([0, 0, 10, 11], memory(kMemSize - 4, 4));
+ // Writing slightly OOB.
+ assertTraps(kTrapMemOutOfBounds, () => copy(BigInt(kMemSize-2), 0n, 3n));
+ // Writing OOB, but the low 32-bit are in-bound.
+ assertTraps(kTrapMemOutOfBounds, () => copy(1n << 32n, 0n, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => copy(0n, 0n, 1n << 32n));
+ // OOB even though size == 0.
+ assertTraps(kTrapMemOutOfBounds, () => copy(-1n, 0n, 0n));
+ // More OOB.
+ assertTraps(kTrapMemOutOfBounds, () => copy(-1n, 0n, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => copy(1n << 62n, 0n, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => copy(1n << 63n, 0n, 1n));
+
+ // Empty fill (size=0).
+ fill(0n, 0, 0n);
+ // Fill memory[15..17] with 3s.
+ fill(15n, 3, 3n);
+ assertEquals([0, 3, 3, 3, 0], memory(14, 5));
+ // Fill the end of memory ([kMemSize-2, kMemSize-1]) with 7s.
+ fill(BigInt(kMemSize-2), 7, 2n);
+ assertEquals([0, 0, 7, 7], memory(kMemSize - 4, 4));
+ // Writing slightly OOB.
+ assertTraps(kTrapMemOutOfBounds, () => fill(BigInt(kMemSize-2), 0, 3n));
+ // Writing OOB, but the low 32-bit are in-bound.
+ assertTraps(kTrapMemOutOfBounds, () => fill(1n << 32n, 0, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => fill(0n, 0, 1n << 32n));
+ // OOB even though size == 0.
+ assertTraps(kTrapMemOutOfBounds, () => fill(-1n, 0, 0n));
+ // More OOB.
+ assertTraps(kTrapMemOutOfBounds, () => fill(-1n, 0, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => fill(1n << 62n, 0, 1n));
+ assertTraps(kTrapMemOutOfBounds, () => fill(1n << 63n, 0, 1n));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/multi-table-element-section.js b/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
index f466f4fe39..7feece6aed 100644
--- a/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
+++ b/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes
+// Flags: --expose-wasm
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/print-code.js b/deps/v8/test/mjsunit/wasm/print-code.js
index 633706bd77..6b338157d3 100644
--- a/deps/v8/test/mjsunit/wasm/print-code.js
+++ b/deps/v8/test/mjsunit/wasm/print-code.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// The test needs --wasm-tier-up because we can't serialize and deserialize
+// The test needs --no-liftoff because we can't serialize and deserialize
// Liftoff code.
-// Flags: --allow-natives-syntax --print-wasm-code --wasm-tier-up
+// Flags: --allow-natives-syntax --print-wasm-code --no-liftoff
// Just test that printing the code of the following wasm modules does not
// crash.
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals.js b/deps/v8/test/mjsunit/wasm/reference-globals.js
index 6ab071f9fa..361708d6fe 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals.js
@@ -6,6 +6,7 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+/* TODO(7748): Implement cross-module subtyping.
(function TestReferenceGlobals() {
print(arguments.callee.name);
@@ -105,6 +106,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// The correct function reference has been passed.
assertEquals(66, instance.exports.test_import(42, 24));
})();
+*/
(function TestStructInitExpr() {
print(arguments.callee.name);
diff --git a/deps/v8/test/mjsunit/wasm/reference-tables.js b/deps/v8/test/mjsunit/wasm/reference-tables.js
index d73a2415c0..3bbf0ffdac 100644
--- a/deps/v8/test/mjsunit/wasm/reference-tables.js
+++ b/deps/v8/test/mjsunit/wasm/reference-tables.js
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-typed-funcref
+// Flags: --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
-
+/* TODO(7748): Implement cross-module subtyping.
(function TestTables() {
print(arguments.callee.name);
var exporting_instance = (function() {
@@ -102,6 +102,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
TypeError,
/Argument 1 must be null or a WebAssembly function of type compatible to/);
})();
+*/
(function TestNonNullableTables() {
print(arguments.callee.name);
@@ -109,11 +110,11 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
var binary_type = builder.addType(kSig_i_ii);
- var addition = builder.addFunction('addition', kSig_i_ii).addBody([
+ var addition = builder.addFunction('addition', binary_type).addBody([
kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
]);
var subtraction =
- builder.addFunction('subtraction', kSig_i_ii)
+ builder.addFunction('subtraction', binary_type)
.addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub])
.exportFunc();
@@ -143,3 +144,67 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(44, instance.exports.table_test(0, 33, 11));
assertEquals(22, instance.exports.table_test(1, 33, 11));
})();
+
+(function TestAnyRefTable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let unary_type = builder.addType(kSig_i_i);
+ let binary_type = builder.addType(kSig_i_ii);
+ let struct_type = builder.addStruct([makeField(kWasmI32, false)]);
+
+ let successor = builder.addFunction('addition', unary_type)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add]);
+
+ let subtraction = builder.addFunction('subtraction', binary_type)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub])
+
+ let table = builder.addTable(kWasmAnyRef, 4, 4);
+ builder.addActiveElementSegment(
+ table, WasmInitExpr.I32Const(0),
+ [WasmInitExpr.RefFunc(successor.index),
+ WasmInitExpr.RefFunc(subtraction.index),
+ WasmInitExpr.StructNew(struct_type, [WasmInitExpr.I32Const(10)]),
+ WasmInitExpr.RefNull(kWasmEqRef)],
+ kWasmAnyRef);
+
+ // return static_cast<i->i>(table[0])(local_0)
+ builder.addFunction("f0_getter", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprI32Const, 0, kExprTableGet, 0,
+ kGCPrefix, kExprRefAsFunc, kGCPrefix, kExprRefCastStatic, unary_type,
+ kExprCallRef])
+ .exportFunc();
+
+ // return static_cast<(i,i)->i>(table[1])(local_0, local_1)
+ builder.addFunction("f1_getter", kSig_i_ii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ kExprI32Const, 1, kExprTableGet, 0,
+ kGCPrefix, kExprRefAsFunc, kGCPrefix, kExprRefCastStatic, binary_type,
+ kExprCallRef])
+ .exportFunc();
+
+ // return static_cast<struct_type>(table[2]).field_0
+ builder.addFunction("struct_getter", kSig_i_v)
+ .addBody([
+ kExprI32Const, 2, kExprTableGet, 0,
+ kGCPrefix, kExprRefAsData, kGCPrefix, kExprRefCastStatic, struct_type,
+ kGCPrefix, kExprStructGet, struct_type, 0])
+ .exportFunc();
+
+ // return table[3] == null
+ builder.addFunction("null_getter", kSig_i_v)
+ .addBody([kExprI32Const, 3, kExprTableGet, 0, kExprRefIsNull])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+
+ assertTrue(!!instance);
+
+ assertEquals(43, instance.exports.f0_getter(42));
+ assertEquals(-7, instance.exports.f1_getter(12, 19));
+ assertEquals(10, instance.exports.struct_getter());
+ assertEquals(1, instance.exports.null_getter());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js b/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js
new file mode 100644
index 0000000000..be0ff18fc3
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --wasm-dynamic-tiering --liftoff
+// Make the test faster:
+// Flags: --wasm-tiering-budget=1000
+
+// This test busy-waits for tier-up to be complete, hence it does not work in
+// predictable mode where we only have a single thread.
+// Flags: --no-predictable
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const num_functions = 3;
+
+function create_builder() {
+ const builder = new WasmModuleBuilder();
+ builder.addImport("foo", "bar", kSig_i_v);
+ for (let i = 0; i < num_functions; ++i) {
+ builder.addFunction('f' + i, kSig_i_v)
+ .addBody(wasmI32Const(i))
+ .exportFunc();
+ }
+ return builder;
+}
+
+const wire_bytes = create_builder().toBuffer();
+
+function serializeModule() {
+ const module = new WebAssembly.Module(wire_bytes);
+ let instance = new WebAssembly.Instance(module, {foo: {bar: () => 1}});
+ // Execute {f1} until it gets tiered up.
+ while (%IsLiftoffFunction(instance.exports.f1)) {
+ instance.exports.f1();
+ }
+ // Execute {f2} once, so that the module knows that this is a used function.
+ instance.exports.f2();
+ const buff = %SerializeWasmModule(module);
+ return buff;
+};
+
+const serialized_module = serializeModule();
+// Do some GCs to make sure the first module got collected and removed from the
+// module cache.
+gc();
+gc();
+gc();
+
+(function testSerializedModule() {
+ print(arguments.callee.name);
+ const module = %DeserializeWasmModule(serialized_module, wire_bytes);
+
+ const instance = new WebAssembly.Instance(module, {foo: {bar: () => 1}});
+
+ assertTrue(%IsTurboFanFunction(instance.exports.f1));
+ assertTrue(%IsLiftoffFunction(instance.exports.f2));
+ assertTrue(
+ !%IsLiftoffFunction(instance.exports.f0) &&
+ !%IsTurboFanFunction(instance.exports.f0));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/speculative-inlining.js b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
index e783be53e1..0572e7449f 100644
--- a/deps/v8/test/mjsunit/wasm/speculative-inlining.js
+++ b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
@@ -44,17 +44,19 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_i_i);
+
// h(x) = x - 1
- let callee0 = builder.addFunction("callee0", kSig_i_i)
+ let callee0 = builder.addFunction("callee0", sig_index)
.addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
// f(x) = x - 2
- let callee1 = builder.addFunction("callee1", kSig_i_i)
+ let callee1 = builder.addFunction("callee1", sig_index)
.addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub]);
- let global0 = builder.addGlobal(wasmRefType(1), false,
+ let global0 = builder.addGlobal(wasmRefType(sig_index), false,
WasmInitExpr.RefFunc(callee0.index));
- let global1 = builder.addGlobal(wasmRefType(1), false,
+ let global1 = builder.addGlobal(wasmRefType(sig_index), false,
WasmInitExpr.RefFunc(callee1.index));
// g(x, y) = if (y) { h(5) + x } else { f(7) + x }
@@ -114,17 +116,19 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_i_i);
+
// h(x) = x - 1
- let callee0 = builder.addFunction("callee0", kSig_i_i)
+ let callee0 = builder.addFunction("callee0", sig_index)
.addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
// f(x) = x - 2
- let callee1 = builder.addFunction("callee1", kSig_i_i)
+ let callee1 = builder.addFunction("callee1", sig_index)
.addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub]);
- let global0 = builder.addGlobal(wasmRefType(1), false,
+ let global0 = builder.addGlobal(wasmRefType(sig_index), false,
WasmInitExpr.RefFunc(callee0.index));
- let global1 = builder.addGlobal(wasmRefType(1), false,
+ let global1 = builder.addGlobal(wasmRefType(sig_index), false,
WasmInitExpr.RefFunc(callee1.index));
// g(x, y) = if (y) { h(x) } else { f(x) }
@@ -151,6 +155,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(8, instance.exports.main(10, 0));
})();
+/* TODO(7748): Implement cross-module subtyping.
(function CallRefImportedFunction() {
print(arguments.callee.name);
@@ -191,6 +196,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// The function f1 defined in another module should not be inlined.
assertEquals(1, instance2.exports.main(0, instance1.exports.f1));
})();
+*/
// Check that we handle WasmJSFunctions properly and do not inline them, both
// in the monomorphic and polymorphic case.
diff --git a/deps/v8/test/mjsunit/wasm/stack-switching.js b/deps/v8/test/mjsunit/wasm/stack-switching.js
index 045c016bae..bfca73fe63 100644
--- a/deps/v8/test/mjsunit/wasm/stack-switching.js
+++ b/deps/v8/test/mjsunit/wasm/stack-switching.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --experimental-wasm-stack-switching --expose-gc
+// Flags: --allow-natives-syntax --experimental-wasm-stack-switching
+// Flags: --experimental-wasm-type-reflection --expose-gc
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -14,12 +15,68 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
/WebAssembly.Suspender must be invoked with 'new'/);
})();
+(function TestSuspenderTypes() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addImport('m', 'import', kSig_v_i);
+ builder.addFunction("export", kSig_i_i)
+ .addBody([kExprLocalGet, 0]).exportFunc();
+ builder.addFunction("wrong1", kSig_ii_v)
+ .addBody([kExprI32Const, 0, kExprI32Const, 0]).exportFunc();
+ builder.addFunction("wrong2", kSig_v_v)
+ .addBody([]).exportFunc();
+ let suspender = new WebAssembly.Suspender();
+ function js_import(i) {
+ return Promise.resolve(42);
+ }
+
+ // Wrap the import, instantiate the module, and wrap the export.
+ let wasm_js_import = new WebAssembly.Function(
+ {parameters: ['i32'], results: ['externref']}, js_import);
+ let import_wrapper = suspender.suspendOnReturnedPromise(wasm_js_import);
+ let instance = builder.instantiate({'m': {'import': import_wrapper}});
+ let export_wrapper =
+ suspender.returnPromiseOnSuspend(instance.exports.export);
+
+ // Check type errors.
+ wasm_js_import = new WebAssembly.Function(
+ {parameters: [], results: ['i32']}, js_import);
+ assertThrows(() => suspender.suspendOnReturnedPromise(wasm_js_import),
+ TypeError, /Expected a WebAssembly.Function with return type externref/);
+ assertThrows(() => suspender.returnPromiseOnSuspend(instance.exports.wrong1),
+ TypeError,
+ /Expected a WebAssembly.Function with exactly one return type/);
+ assertThrows(() => suspender.returnPromiseOnSuspend(instance.exports.wrong2),
+ TypeError,
+ /Expected a WebAssembly.Function with exactly one return type/);
+ // Signature mismatch (link error).
+ let wrong_import = new WebAssembly.Function(
+ {parameters: ['f32'], results: ['externref']}, () => {});
+ wrong_import = suspender.suspendOnReturnedPromise(wrong_import);
+ assertThrows(() => builder.instantiate({'m': {'import': wrong_import}}),
+ WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+
+ // Check the wrapped export's signature.
+ let export_sig = WebAssembly.Function.type(export_wrapper);
+ assertEquals(['i32'], export_sig.parameters);
+ assertEquals(['externref'], export_sig.results);
+
+ // Check the wrapped import's signature.
+ let import_sig = WebAssembly.Function.type(import_wrapper);
+ assertEquals(['i32'], import_sig.parameters);
+ assertEquals(['externref'], import_sig.results);
+})();
+
(function TestStackSwitchNoSuspend() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addGlobal(kWasmI32, true).exportAs('g');
- builder.addFunction("test", kSig_v_v)
- .addBody([kExprI32Const, 42, kExprGlobalSet, 0]).exportFunc();
+ builder.addFunction("test", kSig_i_v)
+ .addBody([
+ kExprI32Const, 42,
+ kExprGlobalSet, 0,
+ kExprI32Const, 0]).exportFunc();
let instance = builder.instantiate();
let suspender = new WebAssembly.Suspender();
let wrapper = suspender.returnPromiseOnSuspend(instance.exports.test);
@@ -27,14 +84,145 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(42, instance.exports.g.value);
})();
+(function TestStackSwitchSuspend() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ import_index = builder.addImport('m', 'import', kSig_i_v);
+ builder.addFunction("test", kSig_i_v)
+ .addBody([
+ kExprCallFunction, import_index, // suspend
+ ]).exportFunc();
+ let suspender = new WebAssembly.Suspender();
+ function js_import() {
+ return Promise.resolve(42);
+ };
+ let wasm_js_import = new WebAssembly.Function(
+ {parameters: [], results: ['externref']}, js_import);
+ let suspending_wasm_js_import =
+ suspender.suspendOnReturnedPromise(wasm_js_import);
+
+ let instance = builder.instantiate({m: {import: suspending_wasm_js_import}});
+ let wrapped_export = suspender.returnPromiseOnSuspend(instance.exports.test);
+ let combined_promise = wrapped_export();
+ combined_promise.then(v => assertEquals(42, v));
+})();
+
+// Check that we can suspend back out of a resumed computation.
+(function TestStackSwitchSuspendLoop() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addGlobal(kWasmI32, true).exportAs('g');
+ import_index = builder.addImport('m', 'import', kSig_i_v);
+ // Pseudo-code for the wasm function:
+ // for (i = 0; i < 5; ++i) {
+ // g = g + import();
+ // }
+ builder.addFunction("test", kSig_i_v)
+ .addLocals(kWasmI32, 1)
+ .addBody([
+ kExprI32Const, 5,
+ kExprLocalSet, 0,
+ kExprLoop, kWasmVoid,
+ kExprCallFunction, import_index, // suspend
+ kExprGlobalGet, 0, // resume
+ kExprI32Add,
+ kExprGlobalSet, 0,
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprLocalTee, 0,
+ kExprBrIf, 0,
+ kExprEnd,
+ kExprI32Const, 0,
+ ]).exportFunc();
+ let suspender = new WebAssembly.Suspender();
+ let i = 0;
+ // The n-th call to the import returns a promise that resolves to n.
+ function js_import() {
+ return Promise.resolve(++i);
+ };
+ let wasm_js_import = new WebAssembly.Function(
+ {parameters: [], results: ['externref']}, js_import);
+ let suspending_wasm_js_import =
+ suspender.suspendOnReturnedPromise(wasm_js_import);
+ let instance = builder.instantiate({m: {import: suspending_wasm_js_import}});
+ let wrapped_export = suspender.returnPromiseOnSuspend(instance.exports.test);
+ let chained_promise = wrapped_export();
+ assertEquals(0, instance.exports.g.value);
+ chained_promise.then(_ => assertEquals(15, instance.exports.g.value));
+})();
+
(function TestStackSwitchGC() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let gc_index = builder.addImport('m', 'gc', kSig_v_v);
- builder.addFunction("test", kSig_v_v)
- .addBody([kExprCallFunction, gc_index]).exportFunc();
+ builder.addFunction("test", kSig_i_v)
+ .addBody([kExprCallFunction, gc_index, kExprI32Const, 0]).exportFunc();
let instance = builder.instantiate({'m': {'gc': gc}});
let suspender = new WebAssembly.Suspender();
let wrapper = suspender.returnPromiseOnSuspend(instance.exports.test);
wrapper();
})();
+
+// Check that the suspender does not suspend if the import's
+// return value is not a promise.
+(function TestStackSwitchNoPromise() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addGlobal(kWasmI32, true).exportAs('g');
+ import_index = builder.addImport('m', 'import', kSig_i_v);
+ builder.addFunction("test", kSig_i_v)
+ .addBody([
+ kExprCallFunction, import_index, // suspend
+ kExprGlobalSet, 0, // resume
+ kExprGlobalGet, 0,
+ ]).exportFunc();
+ let suspender = new WebAssembly.Suspender();
+ function js_import() {
+ return 42
+ };
+ let wasm_js_import = new WebAssembly.Function({parameters: [], results: ['externref']}, js_import);
+ let suspending_wasm_js_import = suspender.suspendOnReturnedPromise(wasm_js_import);
+ let instance = builder.instantiate({m: {import: suspending_wasm_js_import}});
+ let wrapped_export = suspender.returnPromiseOnSuspend(instance.exports.test);
+ let result = wrapped_export();
+ // TODO(thibaudm): Check the result's value once this is supported.
+ assertEquals(42, instance.exports.g.value);
+})();
+
+(function TestStackSwitchSuspendArgs() {
+ print(arguments.callee.name);
+ function reduce(array) {
+ // a[0] + a[1] * 2 + a[2] * 3 + ...
+ return array.reduce((prev, cur, i) => prev + cur * (i + 1));
+ }
+ let builder = new WasmModuleBuilder();
+ // Number of param registers + 1 for both types.
+ let sig = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmF32, kWasmF32, kWasmF32, kWasmF32, kWasmF32, kWasmF32, kWasmF32], [kWasmI32]);
+ import_index = builder.addImport('m', 'import', sig);
+ builder.addFunction("test", sig)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 3,
+ kExprLocalGet, 4, kExprLocalGet, 5, kExprLocalGet, 6, kExprLocalGet, 7,
+ kExprLocalGet, 8, kExprLocalGet, 9, kExprLocalGet, 10, kExprLocalGet, 11,
+ kExprLocalGet, 12,
+ kExprCallFunction, import_index, // suspend
+ ]).exportFunc();
+ let suspender = new WebAssembly.Suspender();
+ function js_import(i1, i2, i3, i4, i5, i6, f1, f2, f3, f4, f5, f6, f7) {
+ return Promise.resolve(reduce(Array.from(arguments)));
+ };
+ let wasm_js_import = new WebAssembly.Function(
+ {parameters: ['i32', 'i32', 'i32', 'i32', 'i32', 'i32', 'f32', 'f32',
+ 'f32', 'f32', 'f32', 'f32', 'f32'], results: ['externref']}, js_import);
+ let suspending_wasm_js_import =
+ suspender.suspendOnReturnedPromise(wasm_js_import);
+
+ let instance = builder.instantiate({m: {import: suspending_wasm_js_import}});
+ let wrapped_export = suspender.returnPromiseOnSuspend(instance.exports.test);
+ let args = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13];
+ let combined_promise =
+ wrapped_export.apply(null, args);
+ combined_promise.then(v => assertEquals(reduce(args), v));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access-liftoff.js b/deps/v8/test/mjsunit/wasm/table-access-liftoff.js
index a4cb4f6a1c..8f7a93b593 100644
--- a/deps/v8/test/mjsunit/wasm/table-access-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/table-access-liftoff.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes --liftoff
-// Flags: --no-wasm-tier-up --liftoff-extern-ref
+// Flags: --liftoff --no-wasm-tier-up
d8.file.execute("test/mjsunit/wasm/table-access.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index b693cbb500..4af26ef10d 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes
+// Flags: --expose-wasm
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-copy-externref.js b/deps/v8/test/mjsunit/wasm/table-copy-externref.js
index 93f8195d55..3891d4760e 100644
--- a/deps/v8/test/mjsunit/wasm/table-copy-externref.js
+++ b/deps/v8/test/mjsunit/wasm/table-copy-externref.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes
-
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let kTableSize = 5;
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
index 97e874189b..10e118d8c8 100644
--- a/deps/v8/test/mjsunit/wasm/table-fill.js
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes
+// Flags: --expose-wasm
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index 25ed6eb1c4..49fced9588 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-reftypes
+// Flags: --expose-wasm
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/test-partial-serialization.js b/deps/v8/test/mjsunit/wasm/test-partial-serialization.js
index 150c5c8e69..339110a447 100644
--- a/deps/v8/test/mjsunit/wasm/test-partial-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/test-partial-serialization.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --liftoff --no-wasm-tier-up --expose-gc
+// Flags: --no-wasm-dynamic-tiering
// Compile functions 0 and 2 with Turbofan, the rest with Liftoff:
// Flags: --wasm-tier-mask-for-testing=5
@@ -22,9 +23,9 @@ function create_builder() {
function check(instance) {
for (let i = 0; i < num_functions; ++i) {
- const expect_liftoff = i != 0 && i != 2;
+ const expect_turbofan = i == 0 || i == 2;
assertEquals(
- expect_liftoff, %IsLiftoffFunction(instance.exports['f' + i]),
+ expect_turbofan, %IsTurboFanFunction(instance.exports['f' + i]),
'function ' + i);
}
}
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
index 8f1a5a3f7c..40da63a57d 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-type-reflection --experimental-wasm-reftypes
+// Flags: --experimental-wasm-type-reflection
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -34,9 +34,15 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
+ global = new WebAssembly.Global({value: "funcref"});
+ type = global.type();
+ assertEquals("funcref", type.value);
+ assertEquals(false, type.mutable);
+ assertEquals(2, Object.getOwnPropertyNames(type).length);
+
global = new WebAssembly.Global({value: "anyfunc"});
type = global.type();
- assertEquals("anyfunc", type.value);
+ assertEquals("funcref", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
})();
@@ -73,9 +79,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(fun2, instance.exports.get_global());
})();
-// This is an extension of "type-reflection.js/TestFunctionTableSetAndCall" to
-// multiple table indexes. If --experimental-wasm-reftypes is enabled by default
-// this test case can supersede the other one.
(function TestFunctionMultiTableSetAndCall() {
let builder = new WasmModuleBuilder();
let v1 = 7; let v2 = 9; let v3 = 0.0;
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index f88cf15136..1c88ed62b2 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -68,10 +68,24 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
})();
(function TestTableType() {
- let table = new WebAssembly.Table({initial: 1, element: "anyfunc"});
+ let table = new WebAssembly.Table({initial: 1, element: "funcref"});
let type = table.type();
assertEquals(1, type.minimum);
- assertEquals("anyfunc", type.element);
+ assertEquals("funcref", type.element);
+ assertEquals(undefined, type.maximum);
+ assertEquals(2, Object.getOwnPropertyNames(type).length);
+
+ table = new WebAssembly.Table({initial: 2, maximum: 15, element: "funcref"});
+ type = table.type();
+ assertEquals(2, type.minimum);
+ assertEquals(15, type.maximum);
+ assertEquals("funcref", type.element);
+ assertEquals(3, Object.getOwnPropertyNames(type).length);
+
+ table = new WebAssembly.Table({initial: 1, element: "anyfunc"});
+ type = table.type();
+ assertEquals(1, type.minimum);
+ assertEquals("funcref", type.element);
assertEquals(undefined, type.maximum);
assertEquals(2, Object.getOwnPropertyNames(type).length);
@@ -79,7 +93,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
type = table.type();
assertEquals(2, type.minimum);
assertEquals(15, type.maximum);
- assertEquals("anyfunc", type.element);
+ assertEquals("funcref", type.element);
assertEquals(3, Object.getOwnPropertyNames(type).length);
})();
@@ -91,7 +105,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals("a", exports[0].name);
assertTrue("type" in exports[0]);
- assertEquals("anyfunc", exports[0].type.element);
+ assertEquals("funcref", exports[0].type.element);
assertEquals(20, exports[0].type.minimum);
assertFalse("maximum" in exports[0].type);
@@ -102,7 +116,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals("b", exports[0].name);
assertTrue("type" in exports[0]);
- assertEquals("anyfunc", exports[0].type.element);
+ assertEquals("funcref", exports[0].type.element);
assertEquals(15, exports[0].type.minimum);
assertEquals(25, exports[0].type.maximum);
})();
@@ -116,7 +130,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals("a", imports[0].name);
assertEquals("m", imports[0].module);
assertTrue("type" in imports[0]);
- assertEquals("anyfunc", imports[0].type.element);
+ assertEquals("funcref", imports[0].type.element);
assertEquals(20, imports[0].type.minimum);
assertFalse("maximum" in imports[0].type);
@@ -128,7 +142,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals("b", imports[0].name);
assertEquals("m", imports[0].module);
assertTrue("type" in imports[0]);
- assertEquals("anyfunc", imports[0].type.element);
+ assertEquals("funcref", imports[0].type.element);
assertEquals(15, imports[0].type.minimum);
assertEquals(25, imports[0].type.maximum);
})();
@@ -238,28 +252,28 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
})();
(function TestTableConstructorWithMinimum() {
- let table = new WebAssembly.Table({minimum: 1, element: 'anyfunc'});
+ let table = new WebAssembly.Table({minimum: 1, element: 'funcref'});
assertTrue(table instanceof WebAssembly.Table);
let type = table.type();
assertEquals(1, type.minimum);
- assertEquals('anyfunc', type.element);
+ assertEquals('funcref', type.element);
assertEquals(2, Object.getOwnPropertyNames(type).length);
- table = new WebAssembly.Table({minimum: 1, element: 'anyfunc', maximum: 5});
+ table = new WebAssembly.Table({minimum: 1, element: 'funcref', maximum: 5});
assertTrue(table instanceof WebAssembly.Table);
type = table.type();
assertEquals(1, type.minimum);
assertEquals(5, type.maximum);
- assertEquals('anyfunc', type.element);
+ assertEquals('funcref', type.element);
assertEquals(3, Object.getOwnPropertyNames(type).length);
assertThrows(
- () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc'}),
+ () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'funcref'}),
TypeError,
/The properties 'initial' and 'minimum' are not allowed at the same time/);
assertThrows(
- () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc',
+ () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'funcref',
maximum: 5}),
TypeError,
/The properties 'initial' and 'minimum' are not allowed at the same time/);
@@ -514,29 +528,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
});
})();
-(function TestFunctionTableSetAndCall() {
- let builder = new WasmModuleBuilder();
- let fun1 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 7);
- let fun2 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 9);
- let fun3 = new WebAssembly.Function({parameters:[], results:["f64"]}, _ => 0);
- let table = new WebAssembly.Table({element: "anyfunc", initial: 2});
- let table_index = builder.addImportedTable("m", "table", 2);
- let sig_index = builder.addType(kSig_i_v);
- table.set(0, fun1);
- builder.addFunction('main', kSig_i_i)
- .addBody([
- kExprLocalGet, 0,
- kExprCallIndirect, sig_index, table_index
- ])
- .exportFunc();
- let instance = builder.instantiate({ m: { table: table }});
- assertEquals(7, instance.exports.main(0));
- table.set(1, fun2);
- assertEquals(9, instance.exports.main(1));
- table.set(1, fun3);
- assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(1));
-})();
-
(function TestFunctionTableSetI64() {
let builder = new WasmModuleBuilder();
let fun = new WebAssembly.Function({parameters:[], results:["i64"]}, _ => 0n);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index d2b2b83bea..940d793ae8 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -77,9 +77,13 @@ let kLocalNamesCode = 2;
let kWasmFunctionTypeForm = 0x60;
let kWasmStructTypeForm = 0x5f;
let kWasmArrayTypeForm = 0x5e;
-let kWasmFunctionSubtypeForm = 0x5d;
-let kWasmStructSubtypeForm = 0x5c;
-let kWasmArraySubtypeForm = 0x5b;
+let kWasmFunctionNominalForm = 0x5d;
+let kWasmStructNominalForm = 0x5c;
+let kWasmArrayNominalForm = 0x5b;
+let kWasmSubtypeForm = 0x50;
+let kWasmRecursiveTypeGroupForm = 0x4f;
+
+let kNoSuperType = 0xFFFFFFFF;
let kLimitsNoMaximum = 0x00;
let kLimitsWithMaximum = 0x01;
@@ -116,11 +120,12 @@ let kWasmI16 = 0x79;
// indices.
let kWasmFuncRef = -0x10;
let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
-let kWasmExternRef = -0x11;
-let kWasmAnyRef = -0x12;
+let kWasmAnyRef = -0x11;
+let kWasmExternRef = kWasmAnyRef; // Alias for test backwards compatibility.
let kWasmEqRef = -0x13;
let kWasmI31Ref = -0x16;
let kWasmDataRef = -0x19;
+let kWasmArrayRef = -0x20;
// Use the positive-byte versions inside function bodies.
let kLeb128Mask = 0x7f;
@@ -129,8 +134,9 @@ let kAnyFuncCode = kFuncRefCode; // Alias named as in the JS API spec
let kExternRefCode = kWasmExternRef & kLeb128Mask;
let kAnyRefCode = kWasmAnyRef & kLeb128Mask;
let kEqRefCode = kWasmEqRef & kLeb128Mask;
-let kI31RefCode = kWasmI31Ref & kLeb128Mask;
-let kDataRefCode = kWasmDataRef & kLeb128Mask;
+let kI31RefCode = kWasmI31Ref & kLeb128Mask;
+let kDataRefCode = kWasmDataRef & kLeb128Mask;
+let kArrayRefCode = kWasmArrayRef & kLeb128Mask;
let kWasmOptRef = 0x6c;
let kWasmRef = 0x6b;
@@ -141,13 +147,8 @@ function wasmRefType(heap_type) {
return {opcode: kWasmRef, heap_type: heap_type};
}
-let kWasmRttWithDepth = 0x69;
-function wasmRtt(index, depth) {
- if (index < 0) throw new Error("Expecting non-negative type index");
- return {opcode: kWasmRttWithDepth, index: index, depth: depth};
-}
let kWasmRtt = 0x68;
-function wasmRttNoDepth(index) {
+function wasmRtt(index) {
if (index < 0) throw new Error("Expecting non-negative type index");
return {opcode: kWasmRtt, index: index};
}
@@ -488,6 +489,8 @@ let kExprArrayInit = 0x19;
let kExprArrayInitStatic = 0x1a;
let kExprArrayNew = 0x1b;
let kExprArrayNewDefault = 0x1c;
+let kExprArrayInitFromData = 0x1e;
+let kExprArrayInitFromDataStatic = 0x1d;
let kExprI31New = 0x20;
let kExprI31GetS = 0x21;
let kExprI31GetU = 0x22;
@@ -505,12 +508,19 @@ let kExprBrOnCastStaticFail = 0x47;
let kExprRefIsFunc = 0x50;
let kExprRefIsData = 0x51;
let kExprRefIsI31 = 0x52;
+let kExprRefIsArray = 0x53;
let kExprRefAsFunc = 0x58;
let kExprRefAsData = 0x59;
let kExprRefAsI31 = 0x5a;
+let kExprRefAsArray = 0x5b;
let kExprBrOnFunc = 0x60;
let kExprBrOnData = 0x61;
let kExprBrOnI31 = 0x62;
+let kExprBrOnArray = 0x66;
+let kExprBrOnNonFunc = 0x63;
+let kExprBrOnNonData = 0x64;
+let kExprBrOnNonI31 = 0x65;
+let kExprBrOnNonArray = 0x67;
// Numeric opcodes.
let kExprI32SConvertSatF32 = 0x00;
@@ -858,9 +868,10 @@ let kTrapFloatUnrepresentable = 5;
let kTrapTableOutOfBounds = 6;
let kTrapFuncSigMismatch = 7;
let kTrapUnalignedAccess = 8;
-let kTrapDataSegmentDropped = 9;
+let kTrapDataSegmentOutOfBounds = 9;
let kTrapElemSegmentDropped = 10;
let kTrapRethrowNull = 11;
+let kTrapArrayTooLarge = 12;
let kTrapMsgs = [
'unreachable', // --
@@ -872,9 +883,10 @@ let kTrapMsgs = [
'table index is out of bounds', // --
'null function or function signature mismatch', // --
'operation does not support unaligned accesses', // --
- 'data segment has been dropped', // --
+ 'data segment out of bounds', // --
'element segment has been dropped', // --
- 'rethrowing null value' // --
+ 'rethrowing null value', // --
+ 'requested new array is too large' // --
];
// This requires test/mjsunit/mjsunit.js.
@@ -1004,6 +1016,16 @@ class Binary {
case kSimdPrefix:
this.emit_bytes(wasmS128Const(expr.value));
break;
+ case kExprI32Add:
+ case kExprI32Sub:
+ case kExprI32Mul:
+ case kExprI64Add:
+ case kExprI64Sub:
+ case kExprI64Mul:
+ this.emit_init_expr_recursive(expr.operands[0]);
+ this.emit_init_expr_recursive(expr.operands[1]);
+ this.emit_u8(expr.kind);
+ break;
case kExprRefFunc:
this.emit_u8(kExprRefFunc);
this.emit_u32v(expr.value);
@@ -1033,6 +1055,16 @@ class Binary {
this.emit_u32v(expr.value);
this.emit_u32v(expr.operands.length - 1);
break;
+ case kExprArrayInitFromData:
+ case kExprArrayInitFromDataStatic:
+ for (let operand of expr.operands) {
+ this.emit_init_expr_recursive(operand);
+ }
+ this.emit_u8(kGCPrefix);
+ this.emit_u8(expr.kind);
+ this.emit_u32v(expr.array_index);
+ this.emit_u32v(expr.data_segment);
+ break;
case kExprRttCanon:
this.emit_u8(kGCPrefix);
this.emit_u8(kExprRttCanon);
@@ -1170,6 +1202,24 @@ class WasmInitExpr {
static S128Const(value) {
return {kind: kSimdPrefix, value: value};
}
+ static I32Add(lhs, rhs) {
+ return {kind: kExprI32Add, operands: [lhs, rhs]};
+ }
+ static I32Sub(lhs, rhs) {
+ return {kind: kExprI32Sub, operands: [lhs, rhs]};
+ }
+ static I32Mul(lhs, rhs) {
+ return {kind: kExprI32Mul, operands: [lhs, rhs]};
+ }
+ static I64Add(lhs, rhs) {
+ return {kind: kExprI64Add, operands: [lhs, rhs]};
+ }
+ static I64Sub(lhs, rhs) {
+ return {kind: kExprI64Sub, operands: [lhs, rhs]};
+ }
+ static I64Mul(lhs, rhs) {
+ return {kind: kExprI64Mul, operands: [lhs, rhs]};
+ }
static GlobalGet(index) {
return {kind: kExprGlobalGet, value: index};
}
@@ -1197,6 +1247,20 @@ class WasmInitExpr {
static ArrayInitStatic(type, args) {
return {kind: kExprArrayInitStatic, value: type, operands: args};
}
+ static ArrayInitFromData(array_index, data_segment, args, builder) {
+ // array.init_from_data means we need to pull the data count section before
+ // any section that may include init. expressions.
+ builder.early_data_count_section = true;
+ return {kind: kExprArrayInitFromData, array_index: array_index,
+ data_segment: data_segment, operands: args};
+ }
+ static ArrayInitFromDataStatic(array_index, data_segment, args, builder) {
+ // array.init_from_data means we need to pull the data count section before
+ // any section that may include init. expressions.
+ builder.early_data_count_section = true;
+ return {kind: kExprArrayInitFromDataStatic, array_index: array_index,
+ data_segment: data_segment, operands: args};
+ }
static RttCanon(type) {
return {kind: kExprRttCanon, value: type};
}
@@ -1271,38 +1335,25 @@ function makeField(type, mutability) {
}
class WasmStruct {
- constructor(fields) {
+ constructor(fields, supertype_idx) {
if (!Array.isArray(fields)) {
throw new Error('struct fields must be an array');
}
this.fields = fields;
this.type_form = kWasmStructTypeForm;
- }
-}
-
-class WasmStructSubtype extends WasmStruct {
- constructor(fields, supertype_idx) {
- super(fields);
this.supertype = supertype_idx;
- this.type_form = kWasmStructSubtypeForm;
}
}
class WasmArray {
- constructor(type, mutability) {
+ constructor(type, mutability, supertype_idx) {
this.type = type;
this.mutability = mutability;
this.type_form = kWasmArrayTypeForm;
- }
-}
-
-class WasmArraySubtype extends WasmArray {
- constructor(type, mutability, supertype_idx) {
- super(type, mutability);
this.supertype = supertype_idx;
- this.type_form = kWasmArraySubtypeForm;
}
}
+
class WasmElemSegment {
constructor(table, offset, type, elements, is_decl) {
this.table = table;
@@ -1355,6 +1406,8 @@ class WasmModuleBuilder {
this.num_imported_globals = 0;
this.num_imported_tables = 0;
this.num_imported_tags = 0;
+ this.nominal = false; // Controls only how gc-modules are printed.
+ this.early_data_count_section = false;
return this;
}
@@ -1413,6 +1466,9 @@ class WasmModuleBuilder {
this.explicit.push(this.createCustomSection(name, bytes));
}
+ // TODO(7748): Support recursive groups.
+
+ // TODO(7748): Support function supertypes.
addType(type) {
this.types.push(type);
var pl = type.params.length; // should have params
@@ -1420,24 +1476,13 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
- addStruct(fields) {
- this.types.push(new WasmStruct(fields));
+ addStruct(fields, supertype_idx = kNoSuperType) {
+ this.types.push(new WasmStruct(fields, supertype_idx));
return this.types.length - 1;
}
- kGenericSuperType = 0xFFFFFFFE;
- addStructSubtype(fields, supertype_idx = this.kGenericSuperType) {
- this.types.push(new WasmStructSubtype(fields, supertype_idx));
- return this.types.length - 1;
- }
-
- addArray(type, mutability) {
- this.types.push(new WasmArray(type, mutability));
- return this.types.length - 1;
- }
-
- addArraySubtype(type, mutability, supertype_idx = this.kGenericSuperType) {
- this.types.push(new WasmArraySubtype(type, mutability, supertype_idx));
+ addArray(type, mutability, supertype_idx = kNoSuperType) {
+ this.types.push(new WasmArray(type, mutability, supertype_idx));
return this.types.length - 1;
}
@@ -1650,6 +1695,10 @@ class WasmModuleBuilder {
return this;
}
+ setNominal() {
+ this.nominal = true;
+ }
+
setName(name) {
this.name = name;
return this;
@@ -1669,32 +1718,51 @@ class WasmModuleBuilder {
section.emit_u32v(wasm.types.length);
for (let type of wasm.types) {
if (type instanceof WasmStruct) {
- section.emit_u8(type.type_form);
+ if (!this.nominal && type.supertype != kNoSuperType) {
+ section.emit_u8(kWasmSubtypeForm);
+ section.emit_u8(1); // supertype count
+ section.emit_u32v(type.supertype);
+ }
+ section.emit_u8(this.nominal ? kWasmStructNominalForm
+ : kWasmStructTypeForm);
section.emit_u32v(type.fields.length);
for (let field of type.fields) {
section.emit_type(field.type);
section.emit_u8(field.mutability ? 1 : 0);
}
- if (type instanceof WasmStructSubtype) {
- if (type.supertype === this.kGenericSuperType) {
+ if (this.nominal) {
+ if (type.supertype === kNoSuperType) {
section.emit_u8(kDataRefCode);
} else {
section.emit_heap_type(type.supertype);
}
}
} else if (type instanceof WasmArray) {
- section.emit_u8(type.type_form);
+ if (!this.nominal && type.supertype != kNoSuperType) {
+ section.emit_u8(kWasmSubtypeForm);
+ section.emit_u8(1); // supertype count
+ section.emit_u32v(type.supertype);
+ }
+ section.emit_u8(this.nominal ? kWasmArrayNominalForm
+ : kWasmArrayTypeForm);
section.emit_type(type.type);
section.emit_u8(type.mutability ? 1 : 0);
- if (type instanceof WasmArraySubtype) {
- if (type.supertype === this.kGenericSuperType) {
+ if (this.nominal) {
+ if (type.supertype === kNoSuperType) {
section.emit_u8(kDataRefCode);
} else {
section.emit_heap_type(type.supertype);
}
}
} else {
- section.emit_u8(kWasmFunctionTypeForm);
+ /* TODO(7748): Support function supertypes.
+ if (!this.nominal && type.supertype != kNoSuperType) {
+ section.emit_u8(kWasmSubtypeForm);
+ section.emit_u8(1); // supertype count
+ section.emit_u32v(type.supertype);
+ } */
+ section.emit_u8(this.nominal ? kWasmFunctionNominalForm
+ : kWasmFunctionTypeForm);
section.emit_u32v(type.params.length);
for (let param of type.params) {
section.emit_type(param);
@@ -1703,6 +1771,15 @@ class WasmModuleBuilder {
for (let result of type.results) {
section.emit_type(result);
}
+ if (this.nominal) {
+ /* TODO(7748): Support function supertypes.
+ if (type.supertype === kNoSuperType) {
+ section.emit_u8(kFuncRefCode);
+ } else {
+ section.emit_heap_type(type.supertype);
+ }*/
+ section.emit_u8(kFuncRefCode);
+ }
}
}
});
@@ -1759,6 +1836,14 @@ class WasmModuleBuilder {
});
}
+ // If there are any passive data segments, add the DataCount section.
+ if (this.early_data_count_section &&
+ wasm.data_segments.some(seg => !seg.is_active)) {
+ binary.emit_section(kDataCountSectionCode, section => {
+ section.emit_u32v(wasm.data_segments.length);
+ });
+ }
+
// Add table section
if (wasm.tables.length > 0) {
if (debug) print('emitting tables @ ' + binary.length);
@@ -1921,7 +2006,8 @@ class WasmModuleBuilder {
}
// If there are any passive data segments, add the DataCount section.
- if (wasm.data_segments.some(seg => !seg.is_active)) {
+ if (!this.early_data_count_section &&
+ wasm.data_segments.some(seg => !seg.is_active)) {
binary.emit_section(kDataCountSectionCode, section => {
section.emit_u32v(wasm.data_segments.length);
});
@@ -2000,13 +2086,12 @@ class WasmModuleBuilder {
if (seg.is_active) {
section.emit_u8(0); // linear memory index 0 / flags
if (seg.is_global) {
- // initializer is a global variable
+ // Initializer is a global variable.
section.emit_u8(kExprGlobalGet);
section.emit_u32v(seg.addr);
} else {
- // initializer is a constant
- section.emit_u8(kExprI32Const);
- section.emit_u32v(seg.addr);
+ // Initializer is a constant.
+ section.emit_bytes(wasmI32Const(seg.addr));
}
section.emit_u8(kExprEnd);
} else {
@@ -2018,7 +2103,7 @@ class WasmModuleBuilder {
});
}
- // Add any explicitly added sections
+ // Add any explicitly added sections.
for (let exp of wasm.explicit) {
if (debug) print('emitting explicit @ ' + binary.length);
binary.emit_bytes(exp);
diff --git a/deps/v8/test/mjsunit/web-snapshot-helpers.js b/deps/v8/test/mjsunit/web-snapshot-helpers.js
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/mjsunit/web-snapshot-helpers.js
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-1.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-1.js
new file mode 100644
index 0000000000..ba5d435029
--- /dev/null
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-1.js
@@ -0,0 +1,246 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax
+
+'use strict';
+
+d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
+
+(function TestMinimal() {
+ function createObjects() {
+ globalThis.foo = {
+ str: 'hello',
+ n: 42,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('hello', foo.str);
+ assertEquals(42, foo.n);
+})();
+
+(function TestDefaultObjectProto() {
+ function createObjects() {
+ globalThis.foo = {
+ str: 'hello',
+ n: 42,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals(Object.prototype, Object.getPrototypeOf(foo));
+})();
+
+(function TestEmptyObject() {
+ function createObjects() {
+ globalThis.foo = {};
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals([], Object.keys(foo));
+})();
+
+(function TestEmptyObjectProto() {
+ function createObjects() {
+ globalThis.foo = {};
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals(Object.prototype, Object.getPrototypeOf(foo));
+})();
+
+(function TestObjectProto() {
+ function createObjects() {
+ globalThis.foo = {
+ __proto__ : {x : 10},
+ y: 11
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals(10, Object.getPrototypeOf(foo).x);
+})();
+
+(function TestObjectProtoInSnapshot() {
+ function createObjects() {
+ globalThis.o1 = { x: 10};
+ globalThis.o2 = {
+ __proto__ : o1,
+ y: 11
+ };
+ }
+ const { o1, o2 } = takeAndUseWebSnapshot(createObjects, ['o1', 'o2']);
+ assertEquals(o1, Object.getPrototypeOf(o2));
+ assertEquals(Object.prototype, Object.getPrototypeOf(o1));
+})();
+
+(function TestNumbers() {
+ function createObjects() {
+ globalThis.foo = {
+ a: 6,
+ b: -7,
+ c: 7.3,
+ d: NaN,
+ e: Number.POSITIVE_INFINITY,
+ f: Number.NEGATIVE_INFINITY,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals(6, foo.a);
+ assertEquals(-7, foo.b);
+ assertEquals(7.3, foo.c);
+ assertEquals(NaN, foo.d);
+ assertEquals(Number.POSITIVE_INFINITY, foo.e);
+ assertEquals(Number.NEGATIVE_INFINITY, foo.f);
+})();
+
+(function TestTopLevelNumbers() {
+ function createObjects() {
+ globalThis.a = 6;
+ globalThis.b = -7;
+ }
+ const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
+ assertEquals(6, a);
+ assertEquals(-7, b);
+})();
+
+(function TestOddballs() {
+ function createObjects() {
+ globalThis.foo = {
+ a: true,
+ b: false,
+ c: null,
+ d: undefined,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertTrue(foo.a);
+ assertFalse(foo.b);
+ assertEquals(null, foo.c);
+ assertEquals(undefined, foo.d);
+})();
+
+(function TestTopLevelOddballs() {
+ function createObjects() {
+ globalThis.a = true;
+ globalThis.b = false;
+ }
+ const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
+ assertTrue(a);
+ assertFalse(b);
+})();
+
+(function TestStringWithNull() {
+ function createObjects() {
+ globalThis.s = 'l\0l';
+ }
+ const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
+ assertEquals(108, s.charCodeAt(0));
+ assertEquals(0, s.charCodeAt(1));
+ assertEquals(108, s.charCodeAt(2));
+})();
+
+(function TestTwoByteString() {
+ function createObjects() {
+ globalThis.s = '\u{1F600}';
+ }
+ const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
+ assertEquals('\u{1F600}', s);
+})();
+
+(function TestTwoByteStringWithNull() {
+ function createObjects() {
+ globalThis.s = 'l\0l\u{1F600}';
+ }
+ const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
+ assertEquals(108, s.charCodeAt(0));
+ assertEquals(0, s.charCodeAt(1));
+ assertEquals(108, s.charCodeAt(2));
+})();
+
+(function TestFunction() {
+ function createObjects() {
+ globalThis.foo = {
+ key: function () { return 'bar'; },
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('bar', foo.key());
+})();
+
+(function TestFunctionWithContext() {
+ function createObjects() {
+ globalThis.foo = {
+ key: (function () {
+ let result = 'bar';
+ function inner() { return result; }
+ return inner;
+ })(),
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('bar', foo.key());
+})();
+
+(function TestInnerFunctionWithContextAndParentContext() {
+ function createObjects() {
+ globalThis.foo = {
+ key: (function () {
+ let part1 = 'snap';
+ function inner() {
+ let part2 = 'shot';
+ function innerinner() {
+ return part1 + part2;
+ }
+ return innerinner;
+ }
+ return inner();
+ })()
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('snapshot', foo.key());
+})();
+
+(function TestTopLevelFunctionWithContext() {
+ function createObjects() {
+ globalThis.foo = (function () {
+ let result = 'bar';
+ function inner() { return result; }
+ return inner;
+ })();
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('bar', foo());
+})();
+
+(function TestRegExp() {
+ function createObjects() {
+ globalThis.foo = {
+ re: /ab+c/gi,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('/ab+c/gi', foo.re.toString());
+ assertTrue(foo.re.test('aBc'));
+ assertFalse(foo.re.test('ac'));
+})();
+
+(function TestRegExpNoFlags() {
+ function createObjects() {
+ globalThis.foo = {
+ re: /ab+c/,
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals('/ab+c/', foo.re.toString());
+ assertTrue(foo.re.test('abc'));
+ assertFalse(foo.re.test('ac'));
+})();
+
+(function TestTopLevelRegExp() {
+ function createObjects() {
+ globalThis.re = /ab+c/gi;
+ }
+ const { re } = takeAndUseWebSnapshot(createObjects, ['re']);
+ assertEquals('/ab+c/gi', re.toString());
+ assertTrue(re.test('aBc'));
+ assertFalse(re.test('ac'));
+})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-2.js
index d202258c11..8c87dda9b1 100644
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-2.js
@@ -1,221 +1,12 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
+// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax
+'use strict';
-function use(exports) {
- const result = Object.create(null);
- exports.forEach(x => result[x] = globalThis[x]);
- return result;
-}
-
-function takeAndUseWebSnapshot(createObjects, exports) {
- // Take a snapshot in Realm r1.
- const r1 = Realm.create();
- Realm.eval(r1, createObjects, { type: 'function' });
- const snapshot = Realm.takeWebSnapshot(r1, exports);
- // Use the snapshot in Realm r2.
- const r2 = Realm.create();
- const success = Realm.useWebSnapshot(r2, snapshot);
- assertTrue(success);
- return Realm.eval(r2, use, { type: 'function', arguments: [exports] });
-}
-
-(function TestMinimal() {
- function createObjects() {
- globalThis.foo = {
- str: 'hello',
- n: 42,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('hello', foo.str);
- assertEquals(42, foo.n);
-})();
-
-(function TestEmptyObject() {
- function createObjects() {
- globalThis.foo = {};
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([], Object.keys(foo));
-})();
-
-(function TestNumbers() {
- function createObjects() {
- globalThis.foo = {
- a: 6,
- b: -7,
- c: 7.3,
- d: NaN,
- e: Number.POSITIVE_INFINITY,
- f: Number.NEGATIVE_INFINITY,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(6, foo.a);
- assertEquals(-7, foo.b);
- assertEquals(7.3, foo.c);
- assertEquals(NaN, foo.d);
- assertEquals(Number.POSITIVE_INFINITY, foo.e);
- assertEquals(Number.NEGATIVE_INFINITY, foo.f);
-})();
-
-(function TestTopLevelNumbers() {
- function createObjects() {
- globalThis.a = 6;
- globalThis.b = -7;
- }
- const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
- assertEquals(6, a);
- assertEquals(-7, b);
-})();
-
-(function TestOddballs() {
- function createObjects() {
- globalThis.foo = {
- a: true,
- b: false,
- c: null,
- d: undefined,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(foo.a);
- assertFalse(foo.b);
- assertEquals(null, foo.c);
- assertEquals(undefined, foo.d);
-})();
-
-(function TestTopLevelOddballs() {
- function createObjects() {
- globalThis.a = true;
- globalThis.b = false;
- }
- const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
- assertTrue(a);
- assertFalse(b);
-})();
-
-(function TestStringWithNull() {
- function createObjects() {
- globalThis.s = 'l\0l';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals(108, s.charCodeAt(0));
- assertEquals(0, s.charCodeAt(1));
- assertEquals(108, s.charCodeAt(2));
-})();
-
-(function TestTwoByteString() {
- function createObjects() {
- globalThis.s = '\u{1F600}';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals('\u{1F600}', s);
-})();
-
-(function TestTwoByteStringWithNull() {
- function createObjects() {
- globalThis.s = 'l\0l\u{1F600}';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals(108, s.charCodeAt(0));
- assertEquals(0, s.charCodeAt(1));
- assertEquals(108, s.charCodeAt(2));
-})();
-
-(function TestFunction() {
- function createObjects() {
- globalThis.foo = {
- key: function () { return 'bar'; },
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo.key());
-})();
-
-(function TestFunctionWithContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- let result = 'bar';
- function inner() { return result; }
- return inner;
- })(),
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo.key());
-})();
-
-(function TestInnerFunctionWithContextAndParentContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- let part1 = 'snap';
- function inner() {
- let part2 = 'shot';
- function innerinner() {
- return part1 + part2;
- }
- return innerinner;
- }
- return inner();
- })()
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('snapshot', foo.key());
-})();
-
-(function TestTopLevelFunctionWithContext() {
- function createObjects() {
- globalThis.foo = (function () {
- let result = 'bar';
- function inner() { return result; }
- return inner;
- })();
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo());
-})();
-
-(function TestRegExp() {
- function createObjects() {
- globalThis.foo = {
- re: /ab+c/gi,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('/ab+c/gi', foo.re.toString());
- assertTrue(foo.re.test('aBc'));
- assertFalse(foo.re.test('ac'));
-})();
-
-(function TestRegExpNoFlags() {
- function createObjects() {
- globalThis.foo = {
- re: /ab+c/,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('/ab+c/', foo.re.toString());
- assertTrue(foo.re.test('abc'));
- assertFalse(foo.re.test('ac'));
-})();
-
-(function TestTopLevelRegExp() {
- function createObjects() {
- globalThis.re = /ab+c/gi;
- }
- const { re } = takeAndUseWebSnapshot(createObjects, ['re']);
- assertEquals('/ab+c/gi', re.toString());
- assertTrue(re.test('aBc'));
- assertFalse(re.test('ac'));
-})();
+d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
(function TestObjectReferencingObject() {
function createObjects() {
@@ -305,6 +96,50 @@ function takeAndUseWebSnapshot(createObjects, exports) {
assertEquals(5, foo.array[0]());
})();
+(function TestInPlaceStringsInArray() {
+ function createObjects() {
+ globalThis.foo = {
+ array: ['foo', 'bar', 'baz']
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ // We cannot test that the strings are really in-place; that's covered by
+ // cctests.
+ assertEquals('foobarbaz', foo.array.join(''));
+})();
+
+(function TestRepeatedInPlaceStringsInArray() {
+ function createObjects() {
+ globalThis.foo = {
+ array: ['foo', 'bar', 'foo']
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ // We cannot test that the strings are really in-place; that's covered by
+ // cctests.
+ assertEquals('foobarfoo', foo.array.join(''));
+})();
+
+(function TestInPlaceStringsInObject() {
+ function createObjects() {
+ globalThis.foo = {a: 'foo', b: 'bar', c: 'baz'};
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ // We cannot test that the strings are really in-place; that's covered by
+ // cctests.
+ assertEquals('foobarbaz', foo.a + foo.b + foo.c);
+})();
+
+(function TestRepeatedInPlaceStringsInObject() {
+ function createObjects() {
+ globalThis.foo = {a: 'foo', b: 'bar', c: 'foo'};
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ // We cannot test that the strings are really in-place; that's covered by
+ // cctests.
+ assertEquals('foobarfoo', foo.a + foo.b + foo.c);
+})();
+
(function TestContextReferencingArray() {
function createObjects() {
function outer() {
@@ -413,3 +248,50 @@ function takeAndUseWebSnapshot(createObjects, exports) {
assertEquals(3, o.x);
assertEquals(10, o.m(3, 4));
})();
+
+(function TestFunctionPrototypeBecomesProto() {
+ function createObjects() {
+ globalThis.F = function() {}
+ globalThis.F.prototype.x = 100;
+ }
+ const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
+ const o = new F();
+ assertEquals(100, Object.getPrototypeOf(o).x);
+})();
+
+(function TestFunctionCtorCallsFunctionInPrototype() {
+ function createObjects() {
+ globalThis.F = function() {
+ this.fooCalled = false;
+ this.foo();
+ }
+ globalThis.F.prototype.foo = function() { this.fooCalled = true; };
+ }
+ const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
+ const o = new F();
+ assertTrue(o.fooCalled);
+})();
+
+(function TestFunctionPrototypeConnectedToObjectPrototype() {
+ function createObjects() {
+ globalThis.F = function() {}
+ }
+ const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
+ const o = new F();
+ assertEquals(Object.prototype,
+ Object.getPrototypeOf(Object.getPrototypeOf(o)));
+})();
+
+(function TestFunctionInheritance() {
+ function createObjects() {
+ globalThis.Super = function() {}
+ globalThis.Super.prototype.superfunc = function() { return 'superfunc'; };
+ globalThis.Sub = function() {}
+ globalThis.Sub.prototype = Object.create(Super.prototype);
+ globalThis.Sub.prototype.subfunc = function() { return 'subfunc'; };
+ }
+ const { Sub } = takeAndUseWebSnapshot(createObjects, ['Sub']);
+ const o = new Sub();
+ assertEquals('superfunc', o.superfunc());
+ assertEquals('subfunc', o.subfunc());
+})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-3.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-3.js
new file mode 100644
index 0000000000..d0d0ec81a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-3.js
@@ -0,0 +1,104 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax
+
+'use strict';
+
+d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
+
+(function TestObjectWithPackedElements() {
+ function createObjects() {
+ globalThis.foo = {
+ '0': 'zero', '1': 'one', '2': 'two', '3': 'three'
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ // Objects always get HOLEY_ELEMENTS; no PACKED or SMI_ELEMENTS.
+ const elementsKindTest = {0: 0, 1: 1, 2: 2};
+ assertFalse(%HasPackedElements(elementsKindTest));
+ assertFalse(%HasSmiElements(elementsKindTest));
+
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('zeroonetwothree', foo[0] + foo[1] + foo[2] + foo[3]);
+})();
+
+(function TestObjectWithPackedSmiElements() {
+ function createObjects() {
+ globalThis.foo = {
+ '0': 0, '1': 1, '2': 2, '3': 3
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('0123', '' + foo[0] + foo[1] + foo[2] + foo[3]);
+})();
+
+(function TestObjectWithHoleyElements() {
+ function createObjects() {
+ globalThis.foo = {
+ '1': 'a', '11': 'b', '111': 'c'
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('abc', foo[1] + foo[11] + foo[111]);
+})();
+
+(function TestObjectWithHoleySmiElements() {
+ function createObjects() {
+ globalThis.foo = {
+ '1': 0, '11': 1, '111': 2
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('012', '' + foo[1] + foo[11] + foo[111]);
+})();
+
+(function TestObjectWithPropertiesAndElements() {
+ function createObjects() {
+ globalThis.foo = {
+ 'prop1': 'value1', '1': 'a', 'prop2': 'value2', '11': 'b', '111': 'c'
+ };
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('abc', foo[1] + foo[11] + foo[111]);
+ assertEquals('value1value2', foo.prop1 + foo.prop2);
+})();
+
+(function TestObjectsWithSamePropertiesButDifferentElementsKind() {
+ function createObjects() {
+ globalThis.foo = {
+ 'prop1': 'value1', 'prop2': 'value2', '1': 'a', '11': 'b', '111': 'c'
+ };
+ globalThis.bar = {
+ 'prop1': 'value1', 'prop2': 'value2', '0': 0, '1': 0
+ }
+ }
+ const { foo, bar } = takeAndUseWebSnapshot(createObjects, ['foo', 'bar']);
+ assertFalse(%HasPackedElements(foo));
+ assertFalse(%HasSmiElements(foo));
+ assertEquals('abc', foo[1] + foo[11] + foo[111]);
+ assertEquals('value1value2', foo.prop1 + foo.prop2);
+ assertFalse(%HasPackedElements(bar));
+ assertFalse(%HasSmiElements(bar));
+ assertEquals('00', '' + bar[0] + bar[1]);
+ assertEquals('value1value2', bar.prop1 + bar.prop2);
+})();
+
+(function TestObjectWithEmptyMap() {
+ function createObjects() {
+ globalThis.foo = [{a:1}, {}, {b: 2}];
+ }
+ const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
+ assertEquals(1, foo[0].a);
+ assertEquals(2, foo[2].b);
+})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js
new file mode 100644
index 0000000000..c8f0c024c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js
@@ -0,0 +1,83 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax
+
+
+
+const external_1 = {external: 1};
+const external_2 = {external: 2};
+const object = {
+ a: [1,2],
+ b: external_1,
+ c: [external_1, external_2],
+ d: { d_a: external_2 }
+};
+
+(function testNoExternals() {
+ const snapshot = %WebSnapshotSerialize(object);
+ const deserialized = %WebSnapshotDeserialize(snapshot);
+ %HeapObjectVerify(deserialized);
+ assertEquals(deserialized, object);
+ assertEquals(deserialized.b, external_1);
+ assertNotSame(deserialized.b, external_1);
+ assertEquals(deserialized.d.d_a, external_2);
+ assertNotSame(deserialized.d.d_a, external_2);
+})();
+
+(function testOneExternals() {
+ const externals = [ external_1];
+ const snapshot = %WebSnapshotSerialize(object, externals);
+ const replaced_externals = [{replacement:1}]
+ const deserialized = %WebSnapshotDeserialize(snapshot, replaced_externals);
+ %HeapObjectVerify(deserialized);
+ assertEquals(deserialized.a, object.a);
+ assertSame(deserialized.b, replaced_externals[0]);
+ assertArrayEquals(deserialized.c, [replaced_externals[0], external_2]);
+ assertSame(deserialized.c[0], replaced_externals[0]);
+ assertNotSame(deserialized.c[1], external_2);
+ assertEquals(deserialized.d.d_a, external_2);
+ assertNotSame(deserialized.d.d_a, external_2);
+})();
+
+(function testTwoExternals() {
+ const externals = [external_1, external_2];
+ const snapshot = %WebSnapshotSerialize(object, externals);
+ const replaced_externals = [{replacement:1}, {replacement:2}]
+ const deserialized = %WebSnapshotDeserialize(snapshot, replaced_externals);
+ %HeapObjectVerify(deserialized);
+ assertEquals(deserialized.a, object.a);
+ assertSame(deserialized.b, replaced_externals[0]);
+ assertArrayEquals(deserialized.c, replaced_externals);
+ assertSame(deserialized.c[0], replaced_externals[0]);
+ assertSame(deserialized.c[1], replaced_externals[1]);
+ assertSame(deserialized.d.d_a, replaced_externals[1]);
+})();
+
+
+(function testApiObject() {
+ const api_object = new d8.dom.Div();
+ const source_1 = [{}, api_object];
+ assertThrows(() => %WebSnapshotSerialize(source_1));
+
+ let externals = [external_1]
+ const source_2 = [{}, external_1, api_object, api_object];
+ const snapshot_2 = %WebSnapshotSerialize(source_2, externals);
+ %HeapObjectVerify(externals);
+ // Check that the unhandled api object is added to the externals.
+ assertArrayEquals(externals, [external_1, api_object]);
+
+ assertThrows(() => %WebSnapshotDeserialize(snapshot_2));
+ assertThrows(() => %WebSnapshotDeserialize(snapshot_2, []));
+ assertThrows(() => %WebSnapshotDeserialize(snapshot_2, [external_1]));
+
+ const result_2 = %WebSnapshotDeserialize(snapshot_2, [external_1, api_object]);
+ %HeapObjectVerify(externals);
+ %HeapObjectVerify(result_2);
+ assertArrayEquals(result_2, source_2);
+ assertNotSame(result_2[0], source_2[0]);
+ assertSame(result_2[1], external_1);
+ assertSame(result_2[2], api_object);
+ assertSame(result_2[3], api_object);
+})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js
new file mode 100644
index 0000000000..9e99d0e618
--- /dev/null
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function use(exports) {
+ const result = Object.create(null);
+ exports.forEach(x => result[x] = globalThis[x]);
+ return result;
+}
+
+function takeAndUseWebSnapshot(createObjects, exports) {
+ // Take a snapshot in Realm r1.
+ const r1 = Realm.create();
+ Realm.eval(r1, createObjects, { type: 'function' });
+ const snapshot = Realm.takeWebSnapshot(r1, exports);
+ // Use the snapshot in Realm r2.
+ const r2 = Realm.create();
+ const success = Realm.useWebSnapshot(r2, snapshot);
+ assertTrue(success);
+ const result =
+ Realm.eval(r2, use, { type: 'function', arguments: [exports] });
+ %HeapObjectVerify(result);
+ return result;
+}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index f539889b9f..63f6e3b3ea 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -26,7 +26,8 @@ static const char* kHeader =
"# This file is automatically generated by mkgrokdump and should not\n"
"# be modified manually.\n"
"\n"
- "# List of known V8 instance types.\n";
+ "# List of known V8 instance types.\n"
+ "# yapf: disable\n\n";
// Debug builds emit debug code, affecting code object sizes.
#ifndef DEBUG
@@ -59,8 +60,8 @@ static void DumpKnownMap(FILE* out, i::Heap* heap, const char* space_name,
MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
if (root_name == nullptr) return;
- i::PrintF(out, " (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", space_name,
- root_ptr, map.instance_type(), root_name);
+ i::PrintF(out, " (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n",
+ space_name, root_ptr, map.instance_type(), root_name);
#undef MUTABLE_ROOT_LIST_CASE
#undef RO_ROOT_LIST_CASE
@@ -115,9 +116,9 @@ static int DumpHeapConstants(FILE* out, const char* argv0) {
// Start up V8.
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- if (!v8::V8::InitializeVirtualMemoryCage()) {
- FATAL("Could not initialize the virtual memory cage");
+#ifdef V8_SANDBOX
+ if (!v8::V8::InitializeSandbox()) {
+ FATAL("Could not initialize the sandbox");
}
#endif
v8::V8::Initialize();
@@ -150,12 +151,13 @@ static int DumpHeapConstants(FILE* out, const char* argv0) {
DumpKnownMap(out, heap, i::BaseSpace::GetSpaceName(i::RO_SPACE),
object);
}
- i::PagedSpaceObjectIterator iterator(heap, heap->map_space());
+
+ i::PagedSpace* space_for_maps = heap->space_for_maps();
+ i::PagedSpaceObjectIterator iterator(heap, space_for_maps);
for (i::HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
if (!object.IsMap()) continue;
- DumpKnownMap(out, heap, i::BaseSpace::GetSpaceName(i::MAP_SPACE),
- object);
+ DumpKnownMap(out, heap, space_for_maps->name(), object);
}
i::PrintF(out, "}\n");
}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 4073cec99e..164f612109 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -219,9 +219,11 @@
##################### FLAKY TESTS #####################
+ # BUG(v8:12558): These tests time out on Windows and also in debug mode.
+ 'js1_5/Regress/regress-360969-05': [PASS, SLOW, ['mode == debug', NO_VARIANTS], ['system == windows', SKIP]],
+ 'js1_5/Regress/regress-360969-06': [PASS, SLOW, ['mode == debug', NO_VARIANTS], ['system == windows', SKIP]],
+
# These tests time out in debug mode but pass in product mode
- 'js1_5/Regress/regress-360969-05': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
- 'js1_5/Regress/regress-360969-06': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
'js1_5/extensions/regress-365527': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
'js1_5/Regress/regress-280769-3': [PASS, ['mode == debug', FAIL]],
@@ -252,8 +254,8 @@
'ecma/Date/15.9.5.29-1': [PASS, FAIL],
'ecma/Date/15.9.5.28-1': [PASS, FAIL],
- # 1050186: Arm/MIPS vm is broken; probably unrelated to dates
- 'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel or arch == mips', FAIL]],
+ # v8:12655: Test keeps flaking, probably related to time issues
+ 'ecma/Array/15.4.4.5-3': [SKIP],
# These 4 tests made an incorrect assumption that the timezone offset of any
# given timezone has not changed over time. With icu-timezone-data enabled
diff --git a/deps/v8/test/test262/BUILD.gn b/deps/v8/test/test262/BUILD.gn
index c3d71866d2..68c8d1455d 100644
--- a/deps/v8/test/test262/BUILD.gn
+++ b/deps/v8/test/test262/BUILD.gn
@@ -13,7 +13,7 @@ group("v8_test262") {
data = [
"data/",
"detachArrayBuffer.js",
- "harness/",
+ "../../third_party/test262-harness/",
"harness-adapt.js",
"harness-adapt-donotevaluate.js",
"harness-agent.js",
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index c2caa58ac5..c0b6e15628 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -283,145 +283,2566 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=11111
'built-ins/ArrayBuffer/prototype/transfer/*': [FAIL],
'built-ins/ArrayBuffer/prototype/transfer/this-is-sharedarraybuffer': [PASS],
- 'built-ins/TypedArray/prototype/indexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/join/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/lastIndexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/map/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reverse/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-target-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-set-values-same-buffer-same-type-resized': [SKIP],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/sort/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/toLocaleString/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-different-type': [FAIL],
- 'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-same-type': [FAIL],
- 'built-ins/Array/prototype/every/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/filter/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/forEach/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/map/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/reduce/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/reduceRight/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/Array/prototype/some/callbackfn-resize-arraybuffer': [FAIL],
- 'built-ins/TypedArray/prototype/map/callbackfn-resize': [SKIP],
+ # See also https://github.com/tc39/test262/issues/3380
+ 'built-ins/TypedArray/prototype/map/callbackfn-resize': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12525
+ # regexp-v-flag not yet in Stage 3.
+ 'built-ins/RegExp/property-escapes/generated/strings/Basic_Emoji': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Keycap_Sequence': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Test': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Flag_Sequence': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Modifier_Sequence': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Tag_Sequence': [FAIL],
+ 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_ZWJ_Sequence': [FAIL],
+
+ # Reject invalid date
+ # https://github.com/tc39/test262/issues/3252
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/basic': [FAIL],
+
+ # Extra req of calendar name
+ # https://github.com/tc39/test262/pull/3261
+ # https://github.com/tc39/test262/issues/3260
+ 'built-ins/Temporal/Calendar/from/calendar-object-invalid': [FAIL],
+
+ # -0
+ 'built-ins/Temporal/PlainDate/prototype/subtract/balance-smaller-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/balance-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/negated/subclassing-ignored': [FAIL],
+
+ 'built-ins/Temporal/TimeZone/from/argument-valid': [SKIP],
+
+ # incorect calculation
+ # https://github.com/tc39/test262/pull/3250
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-higher-units': [FAIL],
+
+ # Calendar in TemporalTimeString
+ # https://github.com/tc39/test262/pull/3257
+ 'built-ins/Temporal/PlainTime/from/argument-string-with-calendar': [FAIL],
+
+ # PlainTime RelativeTime
+ # https://github.com/tc39/proposal-temporal/pull/1862
+ 'built-ins/Temporal/Duration/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Duration/compare/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/compare/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/compare/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/compare/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/timezone-getpossibleinstantsfor-iterable': [FAIL],
+
+ # Valid calendar in the test
+ # https://github.com/tc39/test262/pull/3261
+ 'built-ins/Temporal/Calendar/from/calendar-string-not-builtin': [FAIL],
+
+ # Calendar.dateAdd with null instead of undefined
+ # https://github.com/tc39/test262/issues/3262
+ 'built-ins/Temporal/Duration/prototype/add/calendar-dateadd-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/calendar-dateadd-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/calendar-dateadd-called-with-options-undefined': [SKIP],
+ 'built-ins/Temporal/TimeZone/basic': [SKIP],
+
+ # TimeZone name test should move to intl402
+ # https://github.com/tc39/test262/issues/3253
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-datetime': [FAIL],
+
+ # precision
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-string-datetime': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-undefined': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11544
- 'built-ins/Temporal/*': [FAIL],
- 'intl402/Temporal/*': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/mergeFields/arguments-not-object': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-string-zoneddatetime': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-negative-epochnanoseconds': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/round/balance-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/round/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-negative-epochnanoseconds': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/balance-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/subtract/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-zoneddatetime': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/total/options-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-string-zoneddatetime': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-negative-epochnanoseconds': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/order-of-operations': [FAIL],
+
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-days': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-months': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-months-weeks': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-weeks-days': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-weeks': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-years': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-years-months-days': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-years-months': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/add-years-weeks': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/balance-smaller-units': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/date-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/duration-argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/throw-range-error-from-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/throw-range-error-from-ToTemporalDuration': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/throw-type-error-from-GetOptionsObject': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/fields-not-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/throws-range-error': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/throws-type-error': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/throw-type-error-from-GetOptionsObject': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-monthCode-day': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-monthCode-day-need-constrain': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-month-day': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-month-day-need-constrain': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/largest-unit-day': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/largest-unit-month': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/largest-unit-week': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/largest-unit-year': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/no-options': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/throws-range-error-ToLargestTemporalUnit': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/throws-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/throws-type-error-GetOptionsObject': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/month-day': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/plain-date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/plain-date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/plain-date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/plain-date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/plain-date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/plain-date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/plain-date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/plain-date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/argument-iterable-not-array': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/argument-throws-duplicate-keys': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/argument-throws-invalid-keys': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/long-input': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/non-string-element-throws': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/repeated-throw': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/fields/reverse': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/mergeFields/arguments-empty-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/mergeFields/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/month-day': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/year-month': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/fields-not-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/month-day-throw-type-error': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/year-month': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/cross-year': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/date': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/date-time': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/fields-not-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/throw-range-error-ToTemporalDate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/year-month': [FAIL],
+ 'built-ins/Temporal/Duration/compare/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Duration/compare/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/Duration/compare/options-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-string-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-string-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-string-zoneddatetime-wrong-offset': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-undefined-throw-on-calendar-units': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/fractional-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-existing-object': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-non-string': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Duration/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/from/negative-inifinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/from/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/from/string-with-skipped-units': [FAIL],
+ 'built-ins/Temporal/Duration/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/abs/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/abs/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/options-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-string-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-string-zoneddatetime-wrong-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/negated/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/dateuntil-field': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-string-datetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-string-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-string-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-string-zoneddatetime-wrong-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-undefined-throw-on-calendar-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/round-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-disallowed-units-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-plurals-accepted-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-string-shorthand-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/options-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-zoneddatetime-wrong-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toJSON/negative-components': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/blank-duration-precision': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-nan': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/negative-components': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/precision': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/smallestunit-valid-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/dateuntil-field': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-string-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-string-zoneddatetime-wrong-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-undefined-throw-on-calendar-units': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-disallowed-units-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-plurals-accepted-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-string-shorthand-string': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/unit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/compare/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Instant/compare/instant-string': [FAIL],
+ 'built-ins/Temporal/Instant/compare/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/from/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochMicroseconds/basic': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochMicroseconds/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochMilliseconds/basic': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochMilliseconds/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochNanoseconds/basic': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochNanoseconds/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochSeconds/basic': [FAIL],
+ 'built-ins/Temporal/Instant/fromEpochSeconds/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/from/instant-string': [FAIL],
+ 'built-ins/Temporal/Instant/from/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/from/timezone-custom': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/argument-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/result-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/instant-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/options-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/smallestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/smallestunit-string-shorthand': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/round/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/instant-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/options-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/argument-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/result-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toJSON/basic': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toJSON/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toJSON/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toLocaleString/return-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/basic': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-nan': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/precision': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/smallestunit-valid-units': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-offset': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/plain-custom-timezone': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/instant-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/options-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Now/plainDateISO/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/calendar-object': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTimeISO/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTimeISO/time-zone-undefined': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/time-zone-undefined': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/plainTimeISO/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTimeISO/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTimeISO/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/PlainDate/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/calendar': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/use-internal-slots': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/limits': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDate/limits': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/balance-smaller-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/limits': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/dayOfWeek/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/dayOfYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/daysInWeek/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-object-valid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-call-different': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-call-same': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-no-call': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/monthsInYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-id-match': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-default': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/limits': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toLocaleString/return-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/time-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/time-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-always': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-auto': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-never': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/calendarname-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-getpossibleinstantsfor-iterable': [SKIP],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-id-match': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/days-in-month': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/days-in-year': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-default': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-higher-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/weeks-months': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/weekOfYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/withCalendar/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/withCalendar/missing-argument': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/plaindatelike-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/use-internal-slots': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/parser': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/smallestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/smallestunit-string-shorthand': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/round/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/balance-negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-dateuntil-called-with-copy-of-options': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-dateuntil-called-with-plaindate-calendar': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/round-negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toLocaleString/return-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainDate/limits': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-nan': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-valid-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/balance-negative-time-units': [SKIP],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/disambiguation-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/disambiguation-undefined': [SKIP],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/disambiguation-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/options-undefined': [SKIP],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/plain-custom-timezone': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-getpossibleinstantsfor-iterable': [SKIP],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/balance': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/balance-negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-dateuntil-called-with-copy-of-options': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-dateuntil-called-with-plaindate-calendar': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/round-negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withCalendar/missing-argument': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/time-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/fields-leap-day': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/fields-missing-properties': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/fields-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/fields-plainmonthday': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/fields-string': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/overflow': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/day/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/calendars': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/monthCode/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toJSON/calendarname': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/limits': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-always': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-auto': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-never': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/valueOf/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/basic': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/use-internal-slots': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-string-shorthand': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toLocaleString/return-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-nan': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-valid-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-primitive': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/plaindate-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-getpossibleinstantsfor-iterable': [SKIP],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/plaintimelike-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/use-internal-slots': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-arguments-extra-options': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-daysinmonth-wrong-value': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/arguments-missing-throws': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/mixed-calendar-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-as-expected': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/symmetry': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-arguments-extra-options': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-daysinmonth-wrong-value': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/negative-infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/argument-not-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/limits': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/TimeZone/from/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/TimeZone/from/argument-primitive': [FAIL],
+ 'built-ins/Temporal/TimeZone/from/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/from/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-not-datetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/balance-negative-time-units': [SKIP],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/disambiguation-invalid-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/disambiguation-undefined': [SKIP],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/disambiguation-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/options-undefined': [SKIP],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getNextTransition/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getNextTransition/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getNextTransition/instant-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/argument-not-absolute': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/instant-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/argument-not-absolute': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/argument-not-absolute-getOffsetNanosecondsFor-override': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/basic': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/instant-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-not-absolute': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-not-absolute-getOffsetNanosecondsFor-override': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-undefined': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/custom-timezone': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/instant-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/instant-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/pre-epoch': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-not-datetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/argument-zoneddatetime': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/instant-string': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/toJSON/tostring-call': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/toJSON/tostring-undefined-custom': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/toJSON/tostring-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/zoneddatetime-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/zoneddatetime-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/disambiguation-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/disambiguation-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/disambiguation-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/offset-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/offset-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/offset-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/options-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/options-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/calendar-returns-infinity': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfWeek/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfWeek/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfWeek/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfWeek/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfYear/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/dayOfYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInMonth/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInMonth/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInMonth/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInMonth/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInMonth/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInWeek/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInWeek/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInWeek/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInWeek/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInWeek/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInYear/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/daysInYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/zoneddatetime-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/zoneddatetime-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/field-names': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/field-prop-desc': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/field-traversal-order': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/getISOFields/offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hour/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/hoursInDay/timezone-getpossibleinstantsfor-iterable': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/inLeapYear/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/inLeapYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/inLeapYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/inLeapYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/inLeapYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/microsecond/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/microsecond/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/millisecond/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/millisecond/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/minute/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/calendar-returns-infinity': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthsInYear/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthsInYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthsInYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthsInYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthsInYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/nanosecond/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/basic': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offsetNanoseconds/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offsetNanoseconds/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offsetNanoseconds/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offsetNanoseconds/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offsetNanoseconds/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/offset/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/dateadd-options': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/div-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingincrement-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingincrement-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/smallestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/smallestunit-string-shorthand': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/round/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/second/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-dateuntil-called-with-copy-of-options': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit-plurals-accepted': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-non-integer': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingmode-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingmode-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/smallestunit-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/smallestunit-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/zoneddatetime-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/zoneddatetime-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/startOfDay/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/argument-string-negative-fractional-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/non-integer-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/options-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toInstant/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toLocaleString/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toLocaleString/return-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/plain-custom-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDateTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-result': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-result': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/calendarname-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/calendarname-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/calendarname-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-nan': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/offset-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/offset-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/offset-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/roundingmode-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/roundingmode-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/smallestunit-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/smallestunit-valid-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-dateuntil-called-with-copy-of-options': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/largestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/largestunit-plurals-accepted': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/largestunit-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/largestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/options-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-nan': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-non-integer': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingmode-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingmode-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingmode-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/smallestunit-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/smallestunit-plurals-accepted': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/smallestunit-undefined': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/smallestunit-wrong-type': [SKIP],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/sub-minute-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/zoneddatetime-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/zoneddatetime-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/weekOfYear/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/weekOfYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/weekOfYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/weekOfYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/weekOfYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/calendar-merge-fields-returns-primitive': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/missing-argument': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/disambiguation-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/disambiguation-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/disambiguation-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/offset-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/offset-property-sub-minute': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/offset-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/offset-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/options-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/overflow-invalid-string': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/overflow-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/overflow-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-balance-negative-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/plaintime-propertybag-no-time-units': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/time-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/read-time-fields-before-datefromfields': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/receiver-offset-broken': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/subclassing-ignored': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/timezone-string-datetime': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/calendar-returns-infinity': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/timezone-string-datetime': [FAIL],
'intl402/DateTimeFormat/prototype/formatRange/temporal-objects-resolved-time-zone': [FAIL],
'intl402/DateTimeFormat/prototype/formatRangeToParts/temporal-objects-resolved-time-zone': [FAIL],
'intl402/DateTimeFormat/prototype/format/temporal-objects-resolved-time-zone': [FAIL],
'intl402/DateTimeFormat/prototype/formatToParts/temporal-objects-resolved-time-zone': [FAIL],
- 'intl402/DateTimeFormat/prototype/formatRange/temporal-objects-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'intl402/DateTimeFormat/prototype/formatRangeToParts/temporal-objects-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'intl402/DateTimeFormat/prototype/format/temporal-objects-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'intl402/DateTimeFormat/prototype/formatToParts/temporal-objects-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/dateAdd/date-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/dateUntil/argument-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/day/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/dayOfWeek/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/dayOfYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/daysInMonth/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/daysInWeek/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/daysInYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/argument-string-with-utc-designator': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/branding': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/argument-string-with-utc-designator': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/branding': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/inLeapYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/monthCode/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/monthDayFromFields/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/month/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/monthsInYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/weekOfYear/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/year/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Duration/prototype/add/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Duration/prototype/round/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Duration/prototype/subtract/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Duration/prototype/total/relativeto-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toLocaleString/options-conflict': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/Now/plainDateTimeISO/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/PlainDate/compare/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDate/from/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toLocaleString/options-conflict': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toLocaleString/resolved-time-zone': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toLocaleString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/compare/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/from/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toLocaleString/options-conflict': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toLocaleString/resolved-time-zone': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toLocaleString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/toLocaleString/resolved-time-zone': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/toLocaleString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/PlainMonthDay/prototype/toPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toLocaleString/options-conflict': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toLocaleString/resolved-time-zone': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toLocaleString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toPlainDateTime/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toZonedDateTime/plaindate-infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/compare/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/from/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/toLocaleString/resolved-time-zone': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/toLocaleString/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/TimeZone/prototype/getInstantFor/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/instant-string': [FAIL],
+ 'intl402/Temporal/TimeZone/prototype/getOffsetStringFor/instant-string': [FAIL],
+ 'intl402/Temporal/TimeZone/prototype/getPlainDateTimeFor/instant-string': [FAIL],
+ 'intl402/Temporal/TimeZone/prototype/getPossibleInstantsFor/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/compare/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/from/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/equals/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/branding': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/branding': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/calendar-returns-infinity': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/since/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/toLocaleString/locales-undefined': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/toLocaleString/options-conflict': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/toLocaleString/options-undefined': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/until/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
+ 'intl402/Temporal/Duration/prototype/add/relativeto-string-datetime': [FAIL],
+ 'intl402/Temporal/Duration/prototype/round/relativeto-string-datetime': [FAIL],
+ 'intl402/Temporal/Duration/prototype/subtract/relativeto-string-datetime': [FAIL],
+ 'intl402/Temporal/Duration/prototype/total/relativeto-string-datetime': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toString/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toZonedDateTimeISO/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/Instant/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/PlainTime/prototype/toZonedDateTime/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/from/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/equals/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/since/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/until/timezone-string-datetime': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/year-zero': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/year-zero': [FAIL],
+ 'intl402/Temporal/Now/plainDate/calendar-string': [FAIL],
+ 'intl402/Temporal/Now/plainDateTime/calendar-string': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/from/argument-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/id/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/compare/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Duration/compare/calendar-possibly-required': [FAIL],
+ 'built-ins/Temporal/Duration/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-possibly-required': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-exact-number-of-digits': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-possibly-required': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/Instant/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/Instant/from/year-zero': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/calendar-undefined': [FAIL],
+ 'built-ins/Temporal/Now/plainDateISO/return-value': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/calendar-undefined': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTimeISO/return-value-calendar': [FAIL],
+ 'built-ins/Temporal/Now/plainTimeISO/return-value': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/calendar-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-number': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-object-valid': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-string-overflow': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-invalid-duration': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-missing-properties': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/argument-singular-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/balance-smaller-units-basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/add/overflow-reject': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/days-in-month': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/days-in-year': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/rounding-relative': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/smallestunit-higher-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/weeks-months': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-invalid-duration': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-missing-properties': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/argument-singular-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/balance-smaller-units-basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/subtract/overflow-reject': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/limits': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/largestunit-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/rounding-relative': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/smallestunit-higher-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainTime/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-number': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-object-leap-second': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-plaintime': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-leap-second': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-trailing-junk': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/overflow-reject': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-duration': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-higher-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/add/options-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/rounding-cross-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-hours': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-microseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-milliseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-minutes': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-nanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingincrement-seconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/round/smallestunit-missing': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/result-sub-second': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-hours': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-microseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-milliseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-minutes': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-nanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingincrement-seconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-duration': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-higher-units': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/subtract/options-ignored': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-auto': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-number': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/rounding-cross-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/smallestunit-fractionalseconddigits': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/result-sub-second': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-hours': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-microseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-milliseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-minutes': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-nanoseconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingincrement-seconds': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/valueOf/basic': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/compare-calendar': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/compare-reference-day': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-number': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-plainyearmonth': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-string-trailing-junk': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/limits': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/overflow-reject': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/limits': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-duration-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-lower-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/argument-object-plural': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/limits': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/month-length': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/daysInMonth/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/daysInYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-cast': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/compare-calendar': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/compare-reference-day': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/use-internal-slots': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/monthsInYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/argument-casting': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-auto': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-months': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-years': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-duration-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-lower-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/argument-object-plural': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/limits': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/month-length': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-always': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-auto': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-never': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/argument-casting': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/arguments-missing-throws': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-auto': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-disallowed-units': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-months': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-years': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/mixed-calendar-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-as-expected': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-ceil': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-floor': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-halfExpand': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingmode-trunc': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/valueOf/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/argument-calendar-field': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/argument-missing-fields': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/argument-timezone-field': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/basic': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/options-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getNextTransition/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/year-zero': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/id/branding': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-offset-not-agreeing-with-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-offset-not-agreeing-with-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/add/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-offset-not-agreeing-with-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-offset-not-agreeing-with-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/subtract/argument-string-fractional-units-rounding-mode': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-offset-not-agreeing-with-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/largestunit-smallestunit-mismatch': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/copy-properties-not-undefined': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/year-zero': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-no-implicit-midnight': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-with-time-designator': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/year-zero': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=10776
- 'intl402/NumberFormat/constructor-options-roundingMode-invalid': [FAIL],
- 'intl402/NumberFormat/constructor-options-throwing-getters-rounding-mode': [FAIL],
- 'intl402/NumberFormat/constructor-signDisplay-negative': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-ceil': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-expand': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-floor': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-ceil': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-even': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-floor': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-trunc': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-rounding-mode-trunc': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-currency-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-currency-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-currency-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-currency-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-currency-zh-TW': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-negative-zh-TW': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-currency-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-currency-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-currency-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-currency-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-currency-zh-TW': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-negative-zh-TW': [FAIL],
- 'intl402/NumberFormat/prototype/resolvedOptions/roundingMode': [FAIL],
- 'intl402/NumberFormat/prototype/format/useGrouping-extended-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/format/useGrouping-extended-en-IN': [FAIL],
- 'intl402/NumberFormat/prototype/format/useGrouping-extended-en-US': [FAIL],
- 'intl402/NumberFormat/test-option-useGrouping-extended': [FAIL],
+ 'intl402/NumberFormat/constructor-roundingIncrement': [FAIL],
+ # NumberFormat.prototype.formatRange
+ 'intl402/NumberFormat/prototype/formatRange/en-US': [FAIL],
+ 'intl402/NumberFormat/prototype/formatRange/pt-PT': [FAIL],
+
+ # https://github.com/tc39/test262/pull/3425
+ 'intl402/NumberFormat/prototype/formatRange/x-greater-than-y-throws': [FAIL],
+ 'intl402/NumberFormat/prototype/formatRangeToParts/x-greater-than-y-throws': [FAIL],
+
+ # String handling
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-1000': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-100': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-10': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-1': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-2000': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-200': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-20': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-2500': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-250': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-25': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-2': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-5000': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-500': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-50': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-5': [FAIL],
'intl402/NumberFormat/prototype/format/value-decimal-string': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-ceil': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-expand': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-floor': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-ceil': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-even': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-expand': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-floor': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-half-trunc': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-mode-trunc': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-priority-auto': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-priority-less-precision': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-priority-more-precision': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=11660
'intl402/DurationFormat/prototype/prototype_attributes': [FAIL],
- 'intl402/DurationFormat/prototype/toStringTag': [FAIL],
+ 'intl402/DurationFormat/instance/extensibility': [FAIL],
+ 'intl402/DurationFormat/instance/length': [FAIL],
+ 'intl402/DurationFormat/instance/name': [FAIL],
+ 'intl402/DurationFormat/instance/prop-desc': [FAIL],
+ 'intl402/DurationFormat/instance/prototype': [FAIL],
+ 'intl402/DurationFormat/prototype/constructor/prop-desc': [FAIL],
+ 'intl402/DurationFormat/prototype/constructor/value': [FAIL],
+ 'intl402/DurationFormat/prototype/format/length': [FAIL],
+ 'intl402/DurationFormat/prototype/format/name': [FAIL],
+ 'intl402/DurationFormat/prototype/format/prop-desc': [FAIL],
+ 'intl402/DurationFormat/prototype/format/throw-invoked-as-func': [FAIL],
+ 'intl402/DurationFormat/prototype/formatToParts/length': [FAIL],
+ 'intl402/DurationFormat/prototype/formatToParts/name': [FAIL],
+ 'intl402/DurationFormat/prototype/formatToParts/prop-desc': [FAIL],
+ 'intl402/DurationFormat/prototype/formatToParts/throw-invoked-as-func': [FAIL],
+ 'intl402/DurationFormat/prototype/resolvedOptions/length': [FAIL],
+ 'intl402/DurationFormat/prototype/resolvedOptions/name': [FAIL],
+ 'intl402/DurationFormat/prototype/resolvedOptions/prop-desc': [FAIL],
+ 'intl402/DurationFormat/prototype/resolvedOptions/throw-invoked-as-func': [FAIL],
+ 'intl402/DurationFormat/prototype/toStringTag/toString': [FAIL],
+ 'intl402/DurationFormat/prototype/toStringTag/toStringTag': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11989
- 'built-ins/ShadowRealm/constructor': [FAIL],
- 'built-ins/ShadowRealm/descriptor': [FAIL],
- 'built-ins/ShadowRealm/extensibility': [FAIL],
- 'built-ins/ShadowRealm/instance': [FAIL],
- 'built-ins/ShadowRealm/instance-extensibility': [FAIL],
- 'built-ins/ShadowRealm/length': [FAIL],
- 'built-ins/ShadowRealm/name': [FAIL],
- 'built-ins/ShadowRealm/proto': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/descriptor': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/errors-from-the-other-realm-is-wrapped-into-a-typeerror': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/globalthis-available-properties': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/globalthis-config-only-properties': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/globalthis-orginary-object': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/length': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/name': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/no-conditional-strict-mode': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/not-constructor': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/proto': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/returns-primitive-values': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/returns-proxy-callable-object': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/returns-symbol-values': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/throws-error-from-ctor-realm': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/throws-syntaxerror-on-bad-syntax': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/throws-typeerror-if-evaluation-resolves-to-non-primitive': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/throws-when-argument-is-not-a-string': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/validates-realm-object': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm-extended': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-from-return-values-share-no-identity': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-observing-their-scopes': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-accepts-callable-objects': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-can-resolve-callable-returns': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-new-wrapping-on-each-evaluation': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties-extended': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-multiple-different-realms-nested': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-multiple-different-realms': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-proto-from-caller-realm': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-proxied-observes-boundary': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-throws-typeerror-from-caller-realm': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-throws-typeerror-on-non-primitive-arguments': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-throws-typeerror-on-non-primitive-returns': [FAIL],
- 'built-ins/ShadowRealm/prototype/evaluate/nested-realms': [FAIL],
- 'built-ins/ShadowRealm/prototype/importValue/descriptor': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/exportName-tostring': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/import-value': [FAIL],
- 'built-ins/ShadowRealm/prototype/importValue/length': [FAIL],
- 'built-ins/ShadowRealm/prototype/importValue/name': [FAIL],
- 'built-ins/ShadowRealm/prototype/importValue/not-constructor': [FAIL],
- 'built-ins/ShadowRealm/prototype/importValue/proto': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/specifier-tostring': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/throws-if-import-value-does-not-exist': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/throws-typeerror-import-syntax-error': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/throws-typeerror-import-throws': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/validates-realm-object': [FAIL],
- 'built-ins/ShadowRealm/prototype/proto': [FAIL],
- 'built-ins/ShadowRealm/prototype/Symbol.toStringTag': [FAIL],
+ 'built-ins/ShadowRealm/WrappedFunction/length': [FAIL],
+ 'built-ins/ShadowRealm/WrappedFunction/length-throws-typeerror': [FAIL],
+ 'built-ins/ShadowRealm/WrappedFunction/name': [FAIL],
+ 'built-ins/ShadowRealm/WrappedFunction/name-throws-typeerror': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=12085
'language/statements/class/subclass/derived-class-return-override-catch-finally': [FAIL],
@@ -430,6 +2851,29 @@
'language/statements/class/subclass/derived-class-return-override-finally-super-arrow': [FAIL],
'language/statements/class/subclass/derived-class-return-override-for-of-arrow': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12524
+ 'built-ins/Date/prototype/setHours/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setMinutes/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setMonth/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setSeconds/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setUTCHours/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setUTCMinutes/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setUTCMonth/arg-coercion-order': [FAIL],
+ 'built-ins/Date/prototype/setUTCSeconds/arg-coercion-order': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12680
+ 'language/expressions/logical-assignment/left-hand-side-private-reference-method-short-circuit-nullish': [FAIL],
+ 'language/expressions/logical-assignment/left-hand-side-private-reference-method-short-circuit-or': [FAIL],
+ 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-and': [FAIL],
+ 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-nullish': [FAIL],
+ 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-or': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12044
+ 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12681
+ 'built-ins/Array/prototype/push/set-length-zero-array-length-is-non-writable': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -549,6 +2993,14 @@
'language/identifiers/start-unicode-9*': [FAIL],
}], # no_i18n == True
+['arch == arm64', {
+ # Problem in V8 Android Arm64 - N5X
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-1000': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-2000': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-2500': [SKIP],
+ 'intl402/NumberFormat/prototype/format/format-rounding-increment-5000': [SKIP],
+}], # 'arch == arm64'
+
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
# Causes stack overflow on simulators due to eager compilation of
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 5d2cecef4d..af4788f6a3 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -44,8 +44,7 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'Intl.Locale-info': '--harmony_intl_locale_info',
- 'Intl-enumeration': '--harmony_intl_enumeration',
+ 'Intl.NumberFormat-v3': '--harmony_intl_number_format_v3',
'Symbol.prototype.description': '--harmony-symbol-description',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
@@ -59,23 +58,27 @@ FEATURE_FLAGS = {
'Object.hasOwn': '--harmony-object-has-own',
'class-static-block': '--harmony-class-static-blocks',
'resizable-arraybuffer': '--harmony-rab-gsab',
+ 'Temporal': '--harmony-temporal',
'array-find-from-last': '--harmony_array_find_last',
+ 'ShadowRealm': '--harmony-shadow-realm',
}
SKIPPED_FEATURES = set([])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
+BASE_DIR = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
TEST_262_NATIVE_FILES = ["detachArrayBuffer.js"]
TEST_262_SUITE_PATH = ["data", "test"]
TEST_262_HARNESS_PATH = ["data", "harness"]
-TEST_262_TOOLS_PATH = ["harness", "src"]
+TEST_262_TOOLS_ABS_PATH = [BASE_DIR, "third_party", "test262-harness", "src"]
TEST_262_LOCAL_TESTS_PATH = ["local-tests", "test"]
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
- *TEST_262_TOOLS_PATH))
+sys.path.append(os.path.join(*TEST_262_TOOLS_ABS_PATH))
class VariantsGenerator(testsuite.VariantsGenerator):
@@ -136,15 +139,14 @@ class TestSuite(testsuite.TestSuite):
self.parse_test_record = self._load_parse_test_record()
def _load_parse_test_record(self):
- root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
+ root = os.path.join(*TEST_262_TOOLS_ABS_PATH)
f = None
try:
(f, pathname, description) = imp.find_module("parseTestRecord", [root])
module = imp.load_module("parseTestRecord", f, pathname, description)
return module.parseTestRecord
except:
- print ('Cannot load parseTestRecord; '
- 'you may need to gclient sync for test262')
+ print('Cannot load parseTestRecord')
raise
finally:
if f:
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 0b3be07e3b..661042399c 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -196,8 +196,8 @@ macro TestFunctionPointers(implicit context: Context)(): Boolean {
@export
macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
- let _var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
- let _var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
+ let _var1: int31 = FromConstexpr<bool>(42 == 0) ? FromConstexpr<int31>(0) : 1;
+ let _var2: int31 = FromConstexpr<bool>(42 == 0) ? FromConstexpr<int31>(1) : 0;
return True;
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index de1816be87..d20d3bf0df 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -16,12 +16,13 @@ if (is_fuchsia) {
fuchsia_package_runner("v8_unittests_fuchsia") {
testonly = true
+ use_cfv2 = false
package = ":v8_unittests_pkg"
package_name_override = "v8_unittests"
}
}
-v8_executable("v8_cppgc_shared_unittests") {
+v8_executable("v8_heap_base_unittests") {
testonly = true
configs = [
@@ -32,16 +33,19 @@ v8_executable("v8_cppgc_shared_unittests") {
sources = [ "heap/base/run-all-unittests.cc" ]
deps = [
- ":v8_cppgc_shared_unittests_sources",
+ ":v8_heap_base_unittests_sources",
"//testing/gmock",
"//testing/gtest",
]
}
-v8_source_set("v8_cppgc_shared_unittests_sources") {
+v8_source_set("v8_heap_base_unittests_sources") {
testonly = true
- sources = [ "heap/base/worklist-unittest.cc" ]
+ sources = [
+ "heap/base/active-system-pages-unittest.cc",
+ "heap/base/worklist-unittest.cc",
+ ]
configs = [
"../..:external_config",
@@ -49,7 +53,7 @@ v8_source_set("v8_cppgc_shared_unittests_sources") {
]
deps = [
- "../..:v8_cppgc_shared_for_testing",
+ "../..:v8_heap_base_for_testing",
"//testing/gmock",
"//testing/gtest",
]
@@ -74,7 +78,7 @@ if (cppgc_is_standalone) {
deps = [
":cppgc_unittests_sources",
- ":v8_cppgc_shared_unittests_sources",
+ ":v8_heap_base_unittests_sources",
"../..:cppgc_for_testing",
"//testing/gmock",
"//testing/gtest",
@@ -162,14 +166,13 @@ v8_source_set("cppgc_unittests_sources") {
v8_executable("unittests") {
testonly = true
- # TODO(machenbach): Translate from gyp.
- #['OS=="aix"', {
- # 'ldflags': [ '-Wl,-bbigtoc' ],
- #}],
+ if (current_os == "aix") {
+ ldflags = [ "-Wl,-bbigtoc" ]
+ }
deps = [
":unittests_sources",
- ":v8_cppgc_shared_unittests_sources",
+ ":v8_heap_base_unittests_sources",
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
@@ -233,6 +236,7 @@ v8_source_set("unittests_sources") {
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
"base/vector-unittest.cc",
+ "base/virtual-address-space-unittest.cc",
"base/vlq-base64-unittest.cc",
"base/vlq-unittest.cc",
"codegen/aligned-slot-allocator-unittest.cc",
@@ -375,7 +379,7 @@ v8_source_set("unittests_sources") {
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
"runtime/runtime-debug-unittest.cc",
- "security/virtual-memory-cage-unittest.cc",
+ "sandbox/sandbox-unittest.cc",
"strings/char-predicates-unittest.cc",
"strings/unicode-unittest.cc",
"tasks/background-compile-task-unittest.cc",
@@ -522,6 +526,7 @@ v8_source_set("unittests_sources") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../..:v8_shared_internal_headers",
+ "../..:v8_version",
"../../third_party/inspector_protocol:crdtp_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
diff --git a/deps/v8/test/unittests/api/interceptor-unittest.cc b/deps/v8/test/unittests/api/interceptor-unittest.cc
index a1f6cbdc36..635bf6a0b7 100644
--- a/deps/v8/test/unittests/api/interceptor-unittest.cc
+++ b/deps/v8/test/unittests/api/interceptor-unittest.cc
@@ -174,8 +174,8 @@ TEST_F(InterceptorLoggingTest, DispatchTest) {
EXPECT_EQ(Run("obj.foo"), "named getter");
EXPECT_EQ(Run("obj[42]"), "indexed getter");
- EXPECT_EQ(Run("obj.foo = null"), "named setter");
- EXPECT_EQ(Run("obj[42] = null"), "indexed setter");
+ EXPECT_EQ(Run("obj.foo = null"), "named setter, named descriptor");
+ EXPECT_EQ(Run("obj[42] = null"), "indexed setter, indexed descriptor");
EXPECT_EQ(Run("Object.getOwnPropertyDescriptor(obj, 'foo')"),
"named descriptor");
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index 882afea314..4858e08544 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -4,7 +4,7 @@
#include "src/base/platform/time.h"
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
#include <mach/mach_time.h>
#endif
#if V8_OS_POSIX
@@ -201,8 +201,7 @@ TEST(TimeDelta, FromAndIn) {
TimeDelta::FromMicroseconds(13).InMicroseconds());
}
-
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
TEST(TimeDelta, MachTimespec) {
TimeDelta null = TimeDelta();
EXPECT_EQ(null, TimeDelta::FromMachTimespec(null.ToMachTimespec()));
@@ -363,19 +362,14 @@ TEST(TimeTicks, NowResolution) {
}
TEST(TimeTicks, IsMonotonic) {
- TimeTicks previous_normal_ticks;
- TimeTicks previous_highres_ticks;
+ TimeTicks previous_ticks;
ElapsedTimer timer;
timer.Start();
while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
- TimeTicks normal_ticks = TimeTicks::Now();
- TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
- EXPECT_GE(normal_ticks, previous_normal_ticks);
- EXPECT_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
- EXPECT_GE(highres_ticks, previous_highres_ticks);
- EXPECT_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
- previous_normal_ticks = normal_ticks;
- previous_highres_ticks = highres_ticks;
+ TimeTicks ticks = TimeTicks::Now();
+ EXPECT_GE(ticks, previous_ticks);
+ EXPECT_GE((ticks - previous_ticks).InMicroseconds(), 0);
+ previous_ticks = ticks;
}
}
@@ -437,14 +431,14 @@ TEST(ElapsedTimer, StartStopArgs) {
DCHECK(!timer1.IsStarted());
DCHECK(!timer2.IsStarted());
- TimeTicks now = TimeTicks::HighResolutionNow();
+ TimeTicks now = TimeTicks::Now();
timer1.Start(now);
timer2.Start(now);
DCHECK(timer1.IsStarted());
DCHECK(timer2.IsStarted());
Sleep(wait_time);
- now = TimeTicks::HighResolutionNow();
+ now = TimeTicks::Now();
TimeDelta delta1 = timer1.Elapsed(now);
Sleep(wait_time);
TimeDelta delta2 = timer2.Elapsed(now);
@@ -454,20 +448,20 @@ TEST(ElapsedTimer, StartStopArgs) {
Sleep(wait_time);
EXPECT_NE(delta1, timer2.Elapsed());
- TimeTicks now2 = TimeTicks::HighResolutionNow();
+ TimeTicks now2 = TimeTicks::Now();
EXPECT_NE(timer1.Elapsed(now), timer1.Elapsed(now2));
EXPECT_NE(delta1, timer1.Elapsed(now2));
EXPECT_NE(delta2, timer2.Elapsed(now2));
EXPECT_GE(timer1.Elapsed(now2), timer2.Elapsed(now2));
- now = TimeTicks::HighResolutionNow();
+ now = TimeTicks::Now();
timer1.Pause(now);
timer2.Pause(now);
DCHECK(timer1.IsPaused());
DCHECK(timer2.IsPaused());
Sleep(wait_time);
- now = TimeTicks::HighResolutionNow();
+ now = TimeTicks::Now();
timer1.Resume(now);
DCHECK(!timer1.IsPaused());
DCHECK(timer2.IsPaused());
diff --git a/deps/v8/test/unittests/base/virtual-address-space-unittest.cc b/deps/v8/test/unittests/base/virtual-address-space-unittest.cc
new file mode 100644
index 0000000000..bd5a57ca0f
--- /dev/null
+++ b/deps/v8/test/unittests/base/virtual-address-space-unittest.cc
@@ -0,0 +1,266 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/virtual-address-space.h"
+
+#include "src/base/emulated-virtual-address-subspace.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+constexpr size_t KB = 1024;
+constexpr size_t MB = KB * 1024;
+
+void TestRandomPageAddressGeneration(v8::VirtualAddressSpace* space) {
+ space->SetRandomSeed(::testing::FLAGS_gtest_random_seed);
+ for (int i = 0; i < 10; i++) {
+ Address addr = space->RandomPageAddress();
+ EXPECT_GE(addr, space->base());
+ EXPECT_LT(addr, space->base() + space->size());
+ }
+}
+
+void TestBasicPageAllocation(v8::VirtualAddressSpace* space) {
+ // Allocation sizes in KB.
+ const size_t allocation_sizes[] = {4, 8, 12, 16, 32, 64, 128,
+ 256, 512, 768, 1024, 768, 512, 256,
+ 128, 64, 32, 16, 12, 8, 4};
+
+ std::vector<Address> allocations;
+ size_t alignment = space->allocation_granularity();
+ for (size_t i = 0; i < arraysize(allocation_sizes); i++) {
+ size_t size = allocation_sizes[i] * KB;
+ if (!IsAligned(size, space->allocation_granularity())) continue;
+ Address allocation =
+ space->AllocatePages(VirtualAddressSpace::kNoHint, size, alignment,
+ PagePermissions::kReadWrite);
+
+ ASSERT_NE(kNullAddress, allocation);
+ EXPECT_GE(allocation, space->base());
+ EXPECT_LT(allocation, space->base() + space->size());
+
+ allocations.push_back(allocation);
+
+ // Memory must be writable
+ *reinterpret_cast<size_t*>(allocation) = size;
+ }
+
+ // Windows has an allocation granularity of 64KB and macOS could have 16KB, so
+ // we won't necessarily have managed to obtain all allocations, but we
+ // should've gotten all that are >= 64KB.
+ EXPECT_GE(allocations.size(), 11UL);
+
+ for (Address allocation : allocations) {
+ //... and readable
+ size_t size = *reinterpret_cast<size_t*>(allocation);
+ space->FreePages(allocation, size);
+ }
+}
+
+void TestPageAllocationAlignment(v8::VirtualAddressSpace* space) {
+ // In multiples of the allocation_granularity.
+ const size_t alignments[] = {1, 2, 4, 8, 16, 32, 64};
+ const size_t size = space->allocation_granularity();
+
+ for (size_t i = 0; i < arraysize(alignments); i++) {
+ size_t alignment = alignments[i] * space->allocation_granularity();
+ Address allocation =
+ space->AllocatePages(VirtualAddressSpace::kNoHint, size, alignment,
+ PagePermissions::kReadWrite);
+
+ ASSERT_NE(kNullAddress, allocation);
+ EXPECT_TRUE(IsAligned(allocation, alignment));
+ EXPECT_GE(allocation, space->base());
+ EXPECT_LT(allocation, space->base() + space->size());
+
+ space->FreePages(allocation, size);
+ }
+}
+
+void TestParentSpaceCannotAllocateInChildSpace(v8::VirtualAddressSpace* parent,
+ v8::VirtualAddressSpace* child) {
+ child->SetRandomSeed(::testing::FLAGS_gtest_random_seed);
+
+ size_t chunksize = parent->allocation_granularity();
+ size_t alignment = chunksize;
+ Address start = child->base();
+ Address end = start + child->size();
+
+ for (int i = 0; i < 10; i++) {
+ Address hint = child->RandomPageAddress();
+ Address allocation = parent->AllocatePages(hint, chunksize, alignment,
+ PagePermissions::kNoAccess);
+ ASSERT_NE(kNullAddress, allocation);
+ EXPECT_TRUE(allocation < start || allocation >= end);
+
+ parent->FreePages(allocation, chunksize);
+ }
+}
+
+void TestSharedPageAllocation(v8::VirtualAddressSpace* space) {
+ const size_t size = 2 * space->allocation_granularity();
+
+ PlatformSharedMemoryHandle handle =
+ OS::CreateSharedMemoryHandleForTesting(size);
+ if (handle == kInvalidSharedMemoryHandle) return;
+
+ Address mapping1 =
+ space->AllocateSharedPages(VirtualAddressSpace::kNoHint, size,
+ PagePermissions::kReadWrite, handle, 0);
+ ASSERT_NE(kNullAddress, mapping1);
+ Address mapping2 =
+ space->AllocateSharedPages(VirtualAddressSpace::kNoHint, size,
+ PagePermissions::kReadWrite, handle, 0);
+ ASSERT_NE(kNullAddress, mapping2);
+ ASSERT_NE(mapping1, mapping2);
+
+ int value = 0x42;
+ EXPECT_EQ(0, *reinterpret_cast<int*>(mapping2));
+ *reinterpret_cast<int*>(mapping1) = value;
+ EXPECT_EQ(value, *reinterpret_cast<int*>(mapping2));
+
+ space->FreeSharedPages(mapping1, size);
+ space->FreeSharedPages(mapping2, size);
+
+ OS::DestroySharedMemoryHandle(handle);
+}
+
+TEST(VirtualAddressSpaceTest, TestPagePermissionSubsets) {
+ const PagePermissions kNoAccess = PagePermissions::kNoAccess;
+ const PagePermissions kRead = PagePermissions::kRead;
+ const PagePermissions kReadWrite = PagePermissions::kReadWrite;
+ const PagePermissions kReadWriteExecute = PagePermissions::kReadWriteExecute;
+ const PagePermissions kReadExecute = PagePermissions::kReadExecute;
+
+ EXPECT_TRUE(IsSubset(kNoAccess, kNoAccess));
+ EXPECT_FALSE(IsSubset(kRead, kNoAccess));
+ EXPECT_FALSE(IsSubset(kReadWrite, kNoAccess));
+ EXPECT_FALSE(IsSubset(kReadWriteExecute, kNoAccess));
+ EXPECT_FALSE(IsSubset(kReadExecute, kNoAccess));
+
+ EXPECT_TRUE(IsSubset(kNoAccess, kRead));
+ EXPECT_TRUE(IsSubset(kRead, kRead));
+ EXPECT_FALSE(IsSubset(kReadWrite, kRead));
+ EXPECT_FALSE(IsSubset(kReadWriteExecute, kRead));
+ EXPECT_FALSE(IsSubset(kReadExecute, kRead));
+
+ EXPECT_TRUE(IsSubset(kNoAccess, kReadWrite));
+ EXPECT_TRUE(IsSubset(kRead, kReadWrite));
+ EXPECT_TRUE(IsSubset(kReadWrite, kReadWrite));
+ EXPECT_FALSE(IsSubset(kReadWriteExecute, kReadWrite));
+ EXPECT_FALSE(IsSubset(kReadExecute, kReadWrite));
+
+ EXPECT_TRUE(IsSubset(kNoAccess, kReadWriteExecute));
+ EXPECT_TRUE(IsSubset(kRead, kReadWriteExecute));
+ EXPECT_TRUE(IsSubset(kReadWrite, kReadWriteExecute));
+ EXPECT_TRUE(IsSubset(kReadWriteExecute, kReadWriteExecute));
+ EXPECT_TRUE(IsSubset(kReadExecute, kReadWriteExecute));
+
+ EXPECT_TRUE(IsSubset(kNoAccess, kReadExecute));
+ EXPECT_TRUE(IsSubset(kRead, kReadExecute));
+ EXPECT_FALSE(IsSubset(kReadWrite, kReadExecute));
+ EXPECT_FALSE(IsSubset(kReadWriteExecute, kReadExecute));
+ EXPECT_TRUE(IsSubset(kReadExecute, kReadExecute));
+}
+
+TEST(VirtualAddressSpaceTest, TestRootSpace) {
+ VirtualAddressSpace rootspace;
+
+ TestRandomPageAddressGeneration(&rootspace);
+ TestBasicPageAllocation(&rootspace);
+ TestPageAllocationAlignment(&rootspace);
+ TestSharedPageAllocation(&rootspace);
+}
+
+TEST(VirtualAddressSpaceTest, TestSubspace) {
+ constexpr size_t kSubspaceSize = 32 * MB;
+ constexpr size_t kSubSubspaceSize = 16 * MB;
+
+ VirtualAddressSpace rootspace;
+
+ if (!rootspace.CanAllocateSubspaces()) return;
+ size_t subspace_alignment = rootspace.allocation_granularity();
+ auto subspace = rootspace.AllocateSubspace(VirtualAddressSpace::kNoHint,
+ kSubspaceSize, subspace_alignment,
+ PagePermissions::kReadWrite);
+ ASSERT_TRUE(subspace);
+ EXPECT_NE(kNullAddress, subspace->base());
+ EXPECT_EQ(kSubspaceSize, subspace->size());
+ EXPECT_EQ(PagePermissions::kReadWrite, subspace->max_page_permissions());
+
+ TestRandomPageAddressGeneration(subspace.get());
+ TestBasicPageAllocation(subspace.get());
+ TestPageAllocationAlignment(subspace.get());
+ TestParentSpaceCannotAllocateInChildSpace(&rootspace, subspace.get());
+ TestSharedPageAllocation(subspace.get());
+
+ // Test sub-subspaces
+ if (!subspace->CanAllocateSubspaces()) return;
+ size_t subsubspace_alignment = subspace->allocation_granularity();
+ auto subsubspace = subspace->AllocateSubspace(
+ VirtualAddressSpace::kNoHint, kSubSubspaceSize, subsubspace_alignment,
+ PagePermissions::kReadWrite);
+ ASSERT_TRUE(subsubspace);
+ EXPECT_NE(kNullAddress, subsubspace->base());
+ EXPECT_EQ(kSubSubspaceSize, subsubspace->size());
+ EXPECT_EQ(PagePermissions::kReadWrite, subsubspace->max_page_permissions());
+
+ TestRandomPageAddressGeneration(subsubspace.get());
+ TestBasicPageAllocation(subsubspace.get());
+ TestPageAllocationAlignment(subsubspace.get());
+ TestParentSpaceCannotAllocateInChildSpace(subspace.get(), subsubspace.get());
+ TestSharedPageAllocation(subsubspace.get());
+}
+
+TEST(VirtualAddressSpaceTest, TestEmulatedSubspace) {
+ constexpr size_t kSubspaceSize = 32 * MB;
+ // Size chosen so page allocation tests will obtain pages in both the mapped
+ // and the unmapped region.
+ constexpr size_t kSubspaceMappedSize = 1 * MB;
+
+ VirtualAddressSpace rootspace;
+
+ size_t subspace_alignment = rootspace.allocation_granularity();
+ ASSERT_TRUE(
+ IsAligned(kSubspaceMappedSize, rootspace.allocation_granularity()));
+ Address reservation = kNullAddress;
+ for (int i = 0; i < 10; i++) {
+ // Reserve the full size first at a random address, then free it again to
+ // ensure that there's enough free space behind the final reservation.
+ Address hint = rootspace.RandomPageAddress();
+ reservation = rootspace.AllocatePages(hint, kSubspaceSize,
+ rootspace.allocation_granularity(),
+ PagePermissions::kNoAccess);
+ ASSERT_NE(kNullAddress, reservation);
+ hint = reservation;
+ rootspace.FreePages(reservation, kSubspaceSize);
+ reservation =
+ rootspace.AllocatePages(hint, kSubspaceMappedSize, subspace_alignment,
+ PagePermissions::kNoAccess);
+ if (reservation == hint) {
+ break;
+ } else {
+ rootspace.FreePages(reservation, kSubspaceMappedSize);
+ reservation = kNullAddress;
+ }
+ }
+ ASSERT_NE(kNullAddress, reservation);
+
+ EmulatedVirtualAddressSubspace subspace(&rootspace, reservation,
+ kSubspaceMappedSize, kSubspaceSize);
+ EXPECT_EQ(reservation, subspace.base());
+ EXPECT_EQ(kSubspaceSize, subspace.size());
+ EXPECT_EQ(rootspace.max_page_permissions(), subspace.max_page_permissions());
+
+ TestRandomPageAddressGeneration(&subspace);
+ TestBasicPageAllocation(&subspace);
+ TestPageAllocationAlignment(&subspace);
+ // An emulated subspace does *not* guarantee that the parent space cannot
+ // allocate pages in it, so no TestParentSpaceCannotAllocateInChildSpace.
+ TestSharedPageAllocation(&subspace);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/codegen/register-configuration-unittest.cc b/deps/v8/test/unittests/codegen/register-configuration-unittest.cc
index 060370b156..cd96cfaa02 100644
--- a/deps/v8/test/unittests/codegen/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/codegen/register-configuration-unittest.cc
@@ -26,10 +26,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
- RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
- kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, general_codes,
- double_codes, RegisterConfiguration::OVERLAP);
+ RegisterConfiguration test(AliasingKind::kOverlap, kNumGeneralRegs,
+ kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, 0, general_codes,
+ double_codes);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
@@ -62,10 +62,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
int general_codes[] = {1, 2};
int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
- RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
- kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, general_codes,
- double_codes, RegisterConfiguration::COMBINE);
+ RegisterConfiguration test(AliasingKind::kCombine, kNumGeneralRegs,
+ kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, 0, general_codes,
+ double_codes);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
index 3af4f7b587..5013377530 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
@@ -115,8 +115,8 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
kSystemPointerSize);
}
- const RegList kCalleeSaveRegisters = 0;
- const RegList kCalleeSaveFPRegisters = 0;
+ const RegList kCalleeSaveRegisters;
+ const DoubleRegList kCalleeSaveFPRegisters;
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index d644906f1c..5d049e04af 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -24,6 +24,7 @@ InstructionSequenceTest::InstructionSequenceTest()
: sequence_(nullptr),
num_general_registers_(Register::kNumRegisters),
num_double_registers_(DoubleRegister::kNumRegisters),
+ num_simd128_registers_(Simd128Register::kNumRegisters),
instruction_blocks_(zone()),
current_block_(nullptr),
block_returns_(false) {}
@@ -69,11 +70,10 @@ int InstructionSequenceTest::GetAllocatableCode(int index,
const RegisterConfiguration* InstructionSequenceTest::config() {
if (!config_) {
config_.reset(new RegisterConfiguration(
- num_general_registers_, num_double_registers_, num_general_registers_,
- num_double_registers_, kAllocatableCodes.data(),
- kAllocatableCodes.data(),
- kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
- : RegisterConfiguration::COMBINE));
+ kFPAliasing, num_general_registers_, num_double_registers_,
+ num_simd128_registers_, num_general_registers_, num_double_registers_,
+ num_simd128_registers_, kAllocatableCodes.data(),
+ kAllocatableCodes.data(), kAllocatableCodes.data()));
}
return config_.get();
}
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 0a8768d063..f624b91ac4 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -279,6 +279,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
+ int num_simd128_registers_;
// Block building state.
InstructionBlocks instruction_blocks_;
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
index 0a36179a60..2cbc5fc353 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
@@ -85,7 +85,7 @@ TEST_F(InstructionTest, OperandInterference) {
EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
}
- if (kSimpleFPAliasing) {
+ if (kFPAliasing != AliasingKind::kCombine) {
// Simple FP aliasing: interfering registers of different reps have the same
// index.
for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
@@ -162,7 +162,7 @@ TEST_F(InstructionTest, PrepareInsertAfter) {
CHECK(Contains(&to_eliminate, d2, d0));
}
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
// Moves inserted after should cause all interfering moves to be eliminated.
auto s0 = AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kFloat32, 0);
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index cacff09652..021c88374b 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/bytecode-liveness-map.h"
#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -42,21 +43,6 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
save_flags_ = nullptr;
}
- std::string ToLivenessString(const BytecodeLivenessState* liveness) const {
- const BitVector& bit_vector = liveness->bit_vector();
-
- std::string out;
- out.resize(bit_vector.length());
- for (int i = 0; i < bit_vector.length(); ++i) {
- if (bit_vector.Contains(i)) {
- out[i] = 'L';
- } else {
- out[i] = '.';
- }
- }
- return out;
- }
-
void EnsureLivenessMatches(
Handle<BytecodeArray> bytecode,
const std::vector<std::pair<std::string, std::string>>&
@@ -69,12 +55,13 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
ss << std::setw(4) << iterator.current_offset() << " : ";
iterator.PrintTo(ss);
- EXPECT_EQ(liveness.first, ToLivenessString(analysis.GetInLivenessFor(
- iterator.current_offset())))
+ EXPECT_EQ(liveness.first,
+ ToString(*analysis.GetInLivenessFor(iterator.current_offset())))
<< " at bytecode " << ss.str();
- EXPECT_EQ(liveness.second, ToLivenessString(analysis.GetOutLivenessFor(
- iterator.current_offset())))
+ EXPECT_EQ(
+ liveness.second,
+ ToString(*analysis.GetOutLivenessFor(iterator.current_offset())))
<< " at bytecode " << ss.str();
iterator.Advance();
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 03960705e1..6f0a4b7d84 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -288,218 +288,6 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
IsBranch(cond2, control2)))))));
}
-TEST_F(EffectControlLinearizerTest, UnreachableThenBranch) {
- Schedule schedule(zone());
-
- // Create the graph.
- Node* unreachable = graph()->NewNode(common()->Unreachable(),
- graph()->start(), graph()->start());
- Node* branch =
- graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* true_throw = graph()->NewNode(common()->Throw(), unreachable, if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* false_throw =
- graph()->NewNode(common()->Throw(), unreachable, if_false);
-
- graph()->SetEnd(graph()->NewNode(common()->End(0)));
-
- // Build the basic block structure.
- BasicBlock* start = schedule.start();
- schedule.rpo_order()->push_back(start);
- start->set_rpo_number(0);
-
- BasicBlock* tblock = AddBlockToSchedule(&schedule);
- BasicBlock* fblock = AddBlockToSchedule(&schedule);
-
- // Populate the basic blocks with nodes.
- schedule.AddNode(start, graph()->start());
- schedule.AddNode(start, unreachable);
- schedule.AddBranch(start, branch, tblock, fblock);
-
- schedule.AddNode(tblock, if_true);
- schedule.AddThrow(tblock, true_throw);
- NodeProperties::MergeControlToEnd(graph(), common(), true_throw);
-
- schedule.AddNode(fblock, if_false);
- schedule.AddThrow(fblock, false_throw);
- NodeProperties::MergeControlToEnd(graph(), common(), false_throw);
-
- ASSERT_THAT(end(), IsEnd(IsThrow(), IsThrow()));
- ASSERT_THAT(end()->op()->ControlInputCount(), 2);
-
- // Run the state effect linearizer and machine lowering, maintaining the
- // schedule.
- LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), broker());
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
-}
-
-TEST_F(EffectControlLinearizerTest, UnreachableThenDiamond) {
- Schedule schedule(zone());
-
- // Create the graph.
- Node* unreachable = graph()->NewNode(common()->Unreachable(),
- graph()->start(), graph()->start());
- Node* branch =
- graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* throw_node = graph()->NewNode(common()->Throw(), unreachable, if_false);
- graph()->SetEnd(graph()->NewNode(common()->End(0)));
-
- // Build the basic block structure.
- BasicBlock* start = schedule.start();
- schedule.rpo_order()->push_back(start);
- start->set_rpo_number(0);
-
- BasicBlock* tblock = AddBlockToSchedule(&schedule);
- BasicBlock* fblock = AddBlockToSchedule(&schedule);
- BasicBlock* mblock = AddBlockToSchedule(&schedule);
-
- // Populate the basic blocks with nodes.
- schedule.AddNode(start, graph()->start());
- schedule.AddNode(start, unreachable);
- schedule.AddBranch(start, branch, tblock, fblock);
-
- schedule.AddNode(tblock, if_true);
- schedule.AddGoto(tblock, mblock);
-
- schedule.AddNode(fblock, if_false);
- schedule.AddGoto(fblock, mblock);
-
- schedule.AddNode(mblock, merge);
- schedule.AddThrow(mblock, throw_node);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
- ASSERT_THAT(end()->op()->ControlInputCount(), 1);
-
- // Run the state effect linearizer and machine lowering, maintaining the
- // schedule.
- LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), broker());
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
-}
-
-TEST_F(EffectControlLinearizerTest, UnreachableThenLoop) {
- Schedule schedule(zone());
-
- // Create the graph.
- Node* unreachable = graph()->NewNode(common()->Unreachable(),
- graph()->start(), graph()->start());
- Node* loop = graph()->NewNode(common()->Loop(1), graph()->start());
-
- Node* cond = Int32Constant(0);
- Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
- loop->AppendInput(zone(), if_false);
- NodeProperties::ChangeOp(loop, common()->Loop(2));
-
- Node* throw_node = graph()->NewNode(common()->Throw(), unreachable, if_false);
- graph()->SetEnd(graph()->NewNode(common()->End(0)));
-
- // Build the basic block structure.
- BasicBlock* start = schedule.start();
- schedule.rpo_order()->push_back(start);
- start->set_rpo_number(0);
-
- BasicBlock* lblock = AddBlockToSchedule(&schedule);
- BasicBlock* fblock = AddBlockToSchedule(&schedule);
- BasicBlock* tblock = AddBlockToSchedule(&schedule);
-
- // Populate the basic blocks with nodes.
- schedule.AddNode(start, graph()->start());
- schedule.AddNode(start, unreachable);
- schedule.AddGoto(start, lblock);
-
- schedule.AddNode(lblock, loop);
- schedule.AddNode(lblock, cond);
- schedule.AddBranch(lblock, branch, tblock, fblock);
-
- schedule.AddNode(fblock, if_false);
- schedule.AddGoto(fblock, lblock);
-
- schedule.AddNode(tblock, if_true);
- schedule.AddThrow(tblock, throw_node);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
- ASSERT_THAT(end()->op()->ControlInputCount(), 1);
-
- // Run the state effect linearizer and machine lowering, maintaining the
- // schedule.
- LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), broker());
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
-}
-
-TEST_F(EffectControlLinearizerTest, UnreachableInChangedBlockThenBranch) {
- Schedule schedule(zone());
-
- // Create the graph.
- Node* truncate = graph()->NewNode(simplified()->TruncateTaggedToWord32(),
- NumberConstant(1.1));
- Node* unreachable = graph()->NewNode(common()->Unreachable(),
- graph()->start(), graph()->start());
- Node* branch =
- graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* true_throw = graph()->NewNode(common()->Throw(), unreachable, if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* false_throw =
- graph()->NewNode(common()->Throw(), unreachable, if_false);
-
- graph()->SetEnd(graph()->NewNode(common()->End(0)));
-
- // Build the basic block structure.
- BasicBlock* start = schedule.start();
- schedule.rpo_order()->push_back(start);
- start->set_rpo_number(0);
-
- BasicBlock* tblock = AddBlockToSchedule(&schedule);
- BasicBlock* fblock = AddBlockToSchedule(&schedule);
-
- // Populate the basic blocks with nodes.
- schedule.AddNode(start, graph()->start());
- schedule.AddNode(start, truncate);
- schedule.AddNode(start, unreachable);
- schedule.AddBranch(start, branch, tblock, fblock);
-
- schedule.AddNode(tblock, if_true);
- schedule.AddThrow(tblock, true_throw);
- NodeProperties::MergeControlToEnd(graph(), common(), true_throw);
-
- schedule.AddNode(fblock, if_false);
- schedule.AddThrow(fblock, false_throw);
- NodeProperties::MergeControlToEnd(graph(), common(), false_throw);
-
- ASSERT_THAT(end(), IsEnd(IsThrow(), IsThrow()));
- ASSERT_THAT(end()->op()->ControlInputCount(), 2);
-
- // Run the state effect linearizer and machine lowering, maintaining the
- // schedule.
- LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), broker());
-
- ASSERT_THAT(end(), IsEnd(IsThrow()));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 4a197d557d..29ff218e9e 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -4,7 +4,6 @@
#include "test/unittests/compiler/graph-unittest.h"
-#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 9f5f4d5aa0..d9cd87c063 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -8,7 +8,6 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
@@ -46,8 +45,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
protected:
Reduction Reduce(Node* node) {
- JSHeapCopyReducer heap_copy_reducer(broker());
- CHECK(!heap_copy_reducer.Reduce(node).Changed());
MachineOperatorBuilder machine(zone());
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 3c76ac0701..cd9236adc4 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -43,14 +43,14 @@ class LinkageTailCall : public TestWithZone {
locations, // location_sig
stack_arguments,
Operator::kNoProperties, // properties
- 0, // callee-saved
- 0, // callee-saved fp
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSavedFp, // callee-saved fp
CallDescriptor::kNoFlags, // flags,
"", StackArgumentOrder::kDefault,
#if V8_ENABLE_WEBASSEMBLY
nullptr, // wasm function sig
#endif
- 0, // allocatable_registers
+ RegList{}, // allocatable_registers
stack_returns);
}
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 344ea3dfad..4a26bbc715 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -360,7 +360,7 @@ TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) {
EmitNop();
Instruction* first_instr = LastInstruction();
AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64));
- if (!kSimpleFPAliasing) {
+ if (kFPAliasing == AliasingKind::kCombine) {
// We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3.
// Add moves to registers s2 and s3.
AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32));
diff --git a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
index 9b7687b6c4..d578111829 100644
--- a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
@@ -1056,19 +1056,6 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
return os << acc.type;
}
-struct MemoryAccessImm2 {
- MachineType type;
- ArchOpcode store_opcode;
- ArchOpcode store_opcode_unaligned;
- bool (InstructionSelectorTest::Stream::*val_predicate)(
- const InstructionOperand*) const;
- const int32_t immediates[40];
-};
-
-std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) {
- return os << acc.type;
-}
-
// ----------------------------------------------------------------------------
// Loads and stores immediate values
// ----------------------------------------------------------------------------
@@ -1181,6 +1168,20 @@ const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}}};
+#ifdef RISCV_HAS_NO_UNALIGNED
+struct MemoryAccessImm2 {
+ MachineType type;
+ ArchOpcode store_opcode;
+ ArchOpcode store_opcode_unaligned;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) {
+ return os << acc.type;
+}
+
const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
{MachineType::Int16(),
kRiscvUsh,
@@ -1222,7 +1223,7 @@ const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
-89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
-
+#endif
} // namespace
using InstructionSelectorMemoryAccessTest =
@@ -1327,6 +1328,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
+#ifdef RISCV_HAS_NO_UNALIGNED
using InstructionSelectorMemoryAccessUnalignedImmTest =
InstructionSelectorTestWithParam<MemoryAccessImm2>;
@@ -1358,7 +1360,7 @@ TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessUnalignedImmTest,
::testing::ValuesIn(kMemoryAccessesImmUnaligned));
-
+#endif
// ----------------------------------------------------------------------------
// Load/store offsets more than 16 bits.
// ----------------------------------------------------------------------------
@@ -1601,6 +1603,45 @@ TEST_F(InstructionSelectorTest, Word64ReverseBytes) {
}
}
+TEST_F(InstructionSelectorTest, ExternalReferenceLoad1) {
+ // Test offsets we can use kMode_Root for.
+ const int64_t kOffsets[] = {0, 1, 4, INT32_MIN, INT32_MAX};
+ TRACED_FOREACH(int64_t, offset, kOffsets) {
+ StreamBuilder m(this, MachineType::Int64());
+ ExternalReference reference =
+ bit_cast<ExternalReference>(isolate()->isolate_root() + offset);
+ Node* const value =
+ m.Load(MachineType::Int64(), m.ExternalConstant(reference));
+ m.Return(value);
+
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvLd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Root, s[0]->addressing_mode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToInt64(s[0]->InputAt(0)), offset);
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, ExternalReferenceLoad2) {
+ // Offset too large, we cannot use kMode_Root.
+ StreamBuilder m(this, MachineType::Int64());
+ int64_t offset = 0x100000000;
+ ExternalReference reference =
+ bit_cast<ExternalReference>(isolate()->isolate_root() + offset);
+ Node* const value =
+ m.Load(MachineType::Int64(), m.ExternalConstant(reference));
+ m.Return(value);
+
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvLd, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Root, s[0]->addressing_mode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
index c24b2f2d97..9e1081346a 100644
--- a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/state-values-utils.h"
+
+#include "src/compiler/bytecode-liveness-map.h"
#include "src/utils/bit-vector.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -138,10 +140,10 @@ TEST_F(StateValuesIteratorTest, TreeFromVectorWithLiveness) {
inputs.push_back(Int32Constant(i));
}
// Generate the input liveness.
- BitVector liveness(count, zone());
+ BytecodeLivenessState liveness(count, zone());
for (int i = 0; i < count; i++) {
if (i % 3 == 0) {
- liveness.Add(i);
+ liveness.MarkRegisterLive(i);
}
}
@@ -156,7 +158,7 @@ TEST_F(StateValuesIteratorTest, TreeFromVectorWithLiveness) {
for (StateValuesAccess::iterator it =
StateValuesAccess(values_node).begin();
!it.done(); ++it) {
- if (liveness.Contains(i)) {
+ if (liveness.RegisterIsLive(i)) {
EXPECT_THAT(it.node(), IsInt32Constant(i));
} else {
EXPECT_EQ(it.node(), nullptr);
@@ -209,10 +211,10 @@ TEST_F(StateValuesIteratorTest, BuildTreeWithLivenessIdentical) {
inputs.push_back(Int32Constant(i));
}
// Generate the input liveness.
- BitVector liveness(count, zone());
+ BytecodeLivenessState liveness(count, zone());
for (int i = 0; i < count; i++) {
if (i % 3 == 0) {
- liveness.Add(i);
+ liveness.MarkRegisterLive(i);
}
}
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index eb79e26e62..736de80762 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -539,12 +539,12 @@ TEST_P(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
" results['stale_rejected_promise'] = true;"
"})");
microtask_queue()->RunMicrotasks(isolate());
- EXPECT_TRUE(
- JSReceiver::HasProperty(results, NameFromChars("stale_resolved_promise"))
- .FromJust());
- EXPECT_TRUE(
- JSReceiver::HasProperty(results, NameFromChars("stale_rejected_promise"))
- .FromJust());
+ EXPECT_TRUE(JSReceiver::HasProperty(isolate(), results,
+ NameFromChars("stale_resolved_promise"))
+ .FromJust());
+ EXPECT_TRUE(JSReceiver::HasProperty(isolate(), results,
+ NameFromChars("stale_rejected_promise"))
+ .FromJust());
// Set stale handlers to valid promises.
RunJS(
@@ -554,12 +554,12 @@ TEST_P(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
"Promise.reject("
" stale_handler.bind(null, results, 'stale_handler_reject'))");
microtask_queue()->RunMicrotasks(isolate());
- EXPECT_FALSE(
- JSReceiver::HasProperty(results, NameFromChars("stale_handler_resolve"))
- .FromJust());
- EXPECT_FALSE(
- JSReceiver::HasProperty(results, NameFromChars("stale_handler_reject"))
- .FromJust());
+ EXPECT_FALSE(JSReceiver::HasProperty(isolate(), results,
+ NameFromChars("stale_handler_resolve"))
+ .FromJust());
+ EXPECT_FALSE(JSReceiver::HasProperty(isolate(), results,
+ NameFromChars("stale_handler_reject"))
+ .FromJust());
}
TEST_P(MicrotaskQueueTest, DetachGlobal_Chain) {
diff --git a/deps/v8/test/unittests/heap/base/active-system-pages-unittest.cc b/deps/v8/test/unittests/heap/base/active-system-pages-unittest.cc
new file mode 100644
index 0000000000..f04f9a94cc
--- /dev/null
+++ b/deps/v8/test/unittests/heap/base/active-system-pages-unittest.cc
@@ -0,0 +1,81 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/base/active-system-pages.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace heap {
+namespace base {
+
+TEST(ActiveSystemPagesTest, Add) {
+ ActiveSystemPages pages;
+ const size_t kPageSizeBits = 0;
+ EXPECT_EQ(pages.Add(0, 1, kPageSizeBits), size_t{1});
+ EXPECT_EQ(pages.Add(1, 2, kPageSizeBits), size_t{1});
+ EXPECT_EQ(pages.Add(63, 64, kPageSizeBits), size_t{1});
+ EXPECT_EQ(pages.Size(kPageSizeBits), size_t{3});
+
+ // Try to add page a second time.
+ EXPECT_EQ(pages.Add(0, 2, kPageSizeBits), size_t{0});
+}
+
+TEST(ActiveSystemPagesTest, AddUnalignedRange) {
+ ActiveSystemPages pages;
+ const size_t kPageSizeBits = 12;
+ const size_t kPageSize = size_t{1} << kPageSizeBits;
+ const size_t kWordSize = 8;
+ EXPECT_EQ(pages.Add(0, kPageSize + kWordSize, kPageSizeBits), size_t{2});
+ EXPECT_EQ(pages.Add(3 * kPageSize - kWordSize, 3 * kPageSize, kPageSizeBits),
+ size_t{1});
+ EXPECT_EQ(pages.Add(kPageSize + kWordSize, 3 * kPageSize - kWordSize,
+ kPageSizeBits),
+ size_t{0});
+ EXPECT_EQ(pages.Size(kPageSizeBits), size_t{3} * kPageSize);
+}
+
+TEST(ActiveSystemPagesTest, AddFullBitset) {
+ ActiveSystemPages pages;
+ const size_t kPageSizeBits = 0;
+ EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{64});
+ EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{0});
+ EXPECT_EQ(pages.Size(kPageSizeBits), size_t{64});
+}
+
+TEST(ActiveSystemPagesTest, Reduce) {
+ ActiveSystemPages original;
+ const size_t kPageSizeBits = 0;
+ EXPECT_EQ(original.Add(0, 3, kPageSizeBits), size_t{3});
+
+ ActiveSystemPages updated;
+ EXPECT_EQ(updated.Add(1, 3, kPageSizeBits), size_t{2});
+
+ EXPECT_EQ(original.Reduce(updated), size_t{1});
+}
+
+TEST(ActiveSystemPagesTest, ReduceFullBitset) {
+ ActiveSystemPages original;
+ const size_t kPageSizeBits = 0;
+ EXPECT_EQ(original.Add(0, 64, kPageSizeBits), size_t{64});
+
+ ActiveSystemPages updated;
+ EXPECT_EQ(updated.Add(63, 64, kPageSizeBits), size_t{1});
+
+ EXPECT_EQ(original.Reduce(updated), size_t{63});
+}
+
+TEST(ActiveSystemPagesTest, Clear) {
+ ActiveSystemPages pages;
+ const size_t kPageSizeBits = 0;
+ EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{64});
+ EXPECT_EQ(pages.Clear(), size_t{64});
+ EXPECT_EQ(pages.Size(kPageSizeBits), size_t{0});
+
+ EXPECT_EQ(pages.Add(0, 2, kPageSizeBits), size_t{2});
+ EXPECT_EQ(pages.Clear(), size_t{2});
+ EXPECT_EQ(pages.Size(kPageSizeBits), size_t{0});
+}
+
+} // namespace base
+} // namespace heap
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
index 528548fdb8..080e32c20e 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
@@ -63,9 +63,10 @@ TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
v8::Context::Scope context_scope(context);
uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
- v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
- context, &wrappable_type,
- cppgc::MakeGarbageCollected<Wrappable>(allocation_handle()));
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, &wrappable_type, wrappable_object);
Wrappable::destructor_callcount = 0;
EXPECT_FALSE(api_object.IsEmpty());
EXPECT_EQ(0u, Wrappable::destructor_callcount);
@@ -93,81 +94,6 @@ TEST_F(UnifiedHeapTest, WriteBarrierV8ToCppReference) {
EXPECT_EQ(0u, Wrappable::destructor_callcount);
}
-#if !defined(_MSC_VER) || defined(__clang__)
-
-TEST_F(UnifiedHeapTest, WriteBarrierV8ToCppReferenceWithExplicitAPI) {
-// TODO(v8:12356): Remove test when fully removing the deprecated API.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- if (!FLAG_incremental_marking) return;
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- void* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, nullptr, nullptr);
- Wrappable::destructor_callcount = 0;
- WrapperHelper::ResetWrappableConnection(api_object);
- SimulateIncrementalMarking();
- {
- // The following snippet shows the embedder code for implementing a GC-safe
- // setter for JS to C++ references.
- WrapperHelper::SetWrappableConnection(api_object, wrappable, wrappable);
- JSHeapConsistency::WriteBarrierParams params;
- auto barrier_type = JSHeapConsistency::GetWriteBarrierType(
- api_object, 1, wrappable, params,
- [this]() -> cppgc::HeapHandle& { return cpp_heap().GetHeapHandle(); });
- EXPECT_EQ(JSHeapConsistency::WriteBarrierType::kMarking, barrier_type);
- JSHeapConsistency::DijkstraMarkingBarrier(
- params, cpp_heap().GetHeapHandle(), wrappable);
- }
- CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
- EXPECT_EQ(0u, Wrappable::destructor_callcount);
-#pragma GCC diagnostic pop
-}
-
-TEST_F(UnifiedHeapTest, WriteBarrierCppToV8Reference) {
-// TODO(v8:12165): Remove test when fully removing the deprecated API.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- if (!FLAG_incremental_marking) return;
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- cppgc::Persistent<Wrappable> wrappable =
- cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- Wrappable::destructor_callcount = 0;
- SimulateIncrementalMarking();
- // Pick a sentinel to compare against.
- void* kMagicAddress = &Wrappable::destructor_callcount;
- {
- // The following snippet shows the embedder code for implementing a GC-safe
- // setter for C++ to JS references.
- v8::HandleScope nested_scope(v8_isolate());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, nullptr, nullptr);
- // Setting only one field to avoid treating this as wrappable backref, see
- // `LocalEmbedderHeapTracer::ExtractWrapperInfo`.
- api_object->SetAlignedPointerInInternalField(1, kMagicAddress);
- wrappable->SetWrapper(v8_isolate(), api_object);
- JSHeapConsistency::WriteBarrierParams params;
-
- auto barrier_type = JSHeapConsistency::GetWriteBarrierType(
- wrappable->wrapper(), params,
- [this]() -> cppgc::HeapHandle& { return cpp_heap().GetHeapHandle(); });
- EXPECT_EQ(JSHeapConsistency::WriteBarrierType::kMarking, barrier_type);
- JSHeapConsistency::DijkstraMarkingBarrier(
- params, cpp_heap().GetHeapHandle(), wrappable->wrapper());
- }
- CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
- EXPECT_EQ(0u, Wrappable::destructor_callcount);
- EXPECT_EQ(kMagicAddress,
- wrappable->wrapper()->GetAlignedPointerFromInternalField(1));
-#pragma GCC diagnostic pop
-}
-
-#endif // !_MSC_VER || __clang__
-
#if DEBUG
namespace {
class Unreferenced : public cppgc::GarbageCollected<Unreferenced> {
@@ -187,7 +113,7 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
cpp_heap().stats_collector()->NotifySafePointForTesting();
{
cppgc::subtle::NoGarbageCollectionScope no_gc_scope(cpp_heap());
- cppgc::internal::FreeUnreferencedObject(cpp_heap(), unreferenced);
+ cppgc::subtle::FreeUnreferencedObject(cpp_heap(), *unreferenced);
// Force safepoint to make sure allocated size decrease due to freeing
// unreferenced object is reported to CppHeap. Due to
// NoGarbageCollectionScope, CppHeap will cache the reported decrease and
@@ -205,7 +131,6 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
}
#endif // DEBUG
-#if !V8_OS_FUCHSIA
TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
v8::HandleScope handle_scope(v8_isolate());
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
@@ -221,7 +146,6 @@ TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
auto local = holder.Get(v8_isolate());
EXPECT_TRUE(local->IsObject());
}
-#endif // !V8_OS_FUCHSIA
TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
auto heap = v8::CppHeap::Create(
@@ -241,7 +165,8 @@ TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
}
USE(object);
{
- js_heap.SetEmbedderStackStateForNextFinalization(
+ EmbedderStackStateScope stack_scope(
+ &js_heap, EmbedderStackStateScope::kExplicitInvocation,
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
CollectGarbage(OLD_SPACE);
cpp_heap.AsBase().sweeper().FinishIfRunning();
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
index 585a2d1a0e..84e44c84bd 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
@@ -29,7 +29,8 @@ UnifiedHeapTest::UnifiedHeapTest(
void UnifiedHeapTest::CollectGarbageWithEmbedderStack(
cppgc::Heap::SweepingType sweeping_type) {
- heap()->SetEmbedderStackStateForNextFinalization(
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
CollectGarbage(OLD_SPACE);
if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
@@ -39,7 +40,8 @@ void UnifiedHeapTest::CollectGarbageWithEmbedderStack(
void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack(
cppgc::Heap::SweepingType sweeping_type) {
- heap()->SetEmbedderStackStateForNextFinalization(
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
CollectGarbage(OLD_SPACE);
if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index b349b591ca..096de19f33 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -70,8 +70,9 @@ class EphemeronPairTest : public testing::TestWithHeap {
}
void InitializeMarker(HeapBase& heap, cppgc::Platform* platform) {
- marker_ = MarkerFactory::CreateAndStartMarking<Marker>(
- heap, platform, IncrementalPreciseMarkingConfig);
+ marker_ = std::make_unique<Marker>(heap, platform,
+ IncrementalPreciseMarkingConfig);
+ marker_->StartMarking();
}
Marker* marker() const { return marker_.get(); }
diff --git a/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
index 4084004887..2458f67381 100644
--- a/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
@@ -47,6 +47,7 @@ class DynamicallySized final : public GarbageCollected<DynamicallySized> {
} // namespace
TEST_F(ExplicitManagementTest, FreeRegularObjectToLAB) {
+#if !defined(CPPGC_YOUNG_GENERATION)
auto* o =
MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
const auto& space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
@@ -64,9 +65,11 @@ TEST_F(ExplicitManagementTest, FreeRegularObjectToLAB) {
// LAB is included in allocated object size, so no change is expected.
EXPECT_EQ(allocated_size_before, AllocatedObjectSize());
EXPECT_FALSE(space.free_list().ContainsForTesting({needle, size}));
+#endif //! defined(CPPGC_YOUNG_GENERATION)
}
TEST_F(ExplicitManagementTest, FreeRegularObjectToFreeList) {
+#if !defined(CPPGC_YOUNG_GENERATION)
auto* o =
MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
const auto& space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
@@ -82,9 +85,11 @@ TEST_F(ExplicitManagementTest, FreeRegularObjectToFreeList) {
EXPECT_EQ(lab.start(), nullptr);
EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
EXPECT_TRUE(space.free_list().ContainsForTesting({needle, size}));
+#endif //! defined(CPPGC_YOUNG_GENERATION)
}
TEST_F(ExplicitManagementTest, FreeLargeObject) {
+#if !defined(CPPGC_YOUNG_GENERATION)
auto* o = MakeGarbageCollected<DynamicallySized>(
GetHeap()->GetAllocationHandle(),
AdditionalBytes(kLargeObjectSizeThreshold));
@@ -98,9 +103,11 @@ TEST_F(ExplicitManagementTest, FreeLargeObject) {
subtle::FreeUnreferencedObject(GetHeapHandle(), *o);
EXPECT_FALSE(heap.page_backend()->Lookup(needle));
EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
+#endif //! defined(CPPGC_YOUNG_GENERATION)
}
TEST_F(ExplicitManagementTest, FreeBailsOutDuringGC) {
+#if !defined(CPPGC_YOUNG_GENERATION)
const size_t snapshot_before = AllocatedObjectSize();
auto* o =
MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
@@ -113,6 +120,7 @@ TEST_F(ExplicitManagementTest, FreeBailsOutDuringGC) {
ResetLinearAllocationBuffers();
subtle::FreeUnreferencedObject(GetHeapHandle(), *o);
EXPECT_EQ(snapshot_before, AllocatedObjectSize());
+#endif //! defined(CPPGC_YOUNG_GENERATION)
}
TEST_F(ExplicitManagementTest, GrowAtLAB) {
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
index a48f27f56c..529a42aef6 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
@@ -22,6 +22,8 @@ class MockGarbageCollector : public GarbageCollector {
MOCK_METHOD(void, StartIncrementalGarbageCollection,
(GarbageCollector::Config), (override));
MOCK_METHOD(size_t, epoch, (), (const, override));
+ MOCK_METHOD(const EmbedderStackState*, override_stack_state, (),
+ (const, override));
};
class MockTaskRunner : public cppgc::TaskRunner {
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
index ec6f6c3a61..0d248a0377 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
@@ -37,6 +37,9 @@ class FakeGarbageCollector : public GarbageCollector {
}
size_t epoch() const override { return callcount_; }
+ const EmbedderStackState* override_stack_state() const override {
+ return nullptr;
+ }
private:
StatsCollector* stats_collector_;
@@ -50,6 +53,8 @@ class MockGarbageCollector : public GarbageCollector {
MOCK_METHOD(void, StartIncrementalGarbageCollection,
(GarbageCollector::Config), (override));
MOCK_METHOD(size_t, epoch, (), (const, override));
+ MOCK_METHOD(const EmbedderStackState*, override_stack_state, (),
+ (const, override));
};
void FakeAllocate(StatsCollector* stats_collector, size_t bytes) {
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index a9987eb7ce..3262aa279d 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -40,8 +40,8 @@ class MarkerTest : public testing::TestWithHeap {
void InitializeMarker(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config) {
- marker_ =
- MarkerFactory::CreateAndStartMarking<Marker>(heap, platform, config);
+ marker_ = std::make_unique<Marker>(heap, platform, config);
+ marker_->StartMarking();
}
Marker* marker() const { return marker_.get(); }
@@ -421,8 +421,8 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
void InitializeMarker(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config) {
- GetMarkerRef() =
- MarkerFactory::CreateAndStartMarking<Marker>(heap, platform, config);
+ GetMarkerRef() = std::make_unique<Marker>(heap, platform, config);
+ GetMarkerRef()->StartMarking();
}
MarkerBase* marker() const { return Heap::From(GetHeap())->marker(); }
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
index a2768c86e5..7310134139 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
@@ -23,8 +23,10 @@ namespace {
class MarkingVisitorTest : public testing::TestWithHeap {
public:
MarkingVisitorTest()
- : marker_(MarkerFactory::CreateAndStartMarking<Marker>(
- *Heap::From(GetHeap()), GetPlatformHandle().get())) {}
+ : marker_(std::make_unique<Marker>(*Heap::From(GetHeap()),
+ GetPlatformHandle().get())) {
+ marker_->StartMarking();
+ }
~MarkingVisitorTest() override { marker_->ClearAllWorklistsForTesting(); }
Marker* GetMarker() { return marker_.get(); }
@@ -51,7 +53,7 @@ class TestMarkingVisitor : public MutatorMarkingVisitor {
marker->MutatorMarkingStateForTesting()) {}
~TestMarkingVisitor() { marking_state_.Publish(); }
- MarkingStateBase& marking_state() { return marking_state_; }
+ BasicMarkingState& marking_state() { return marking_state_; }
};
} // namespace
diff --git a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
index 41af4774cf..0b742a16c0 100644
--- a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
@@ -4,7 +4,11 @@
#if defined(CPPGC_YOUNG_GENERATION)
+#include <initializer_list>
+#include <vector>
+
#include "include/cppgc/allocation.h"
+#include "include/cppgc/explicit-management.h"
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/internal/caged-heap-local-data.h"
#include "include/cppgc/persistent.h"
@@ -18,6 +22,12 @@ namespace internal {
namespace {
+bool IsHeapObjectYoung(void* obj) {
+ return HeapObjectHeader::FromObject(obj).IsYoung();
+}
+
+bool IsHeapObjectOld(void* obj) { return !IsHeapObjectYoung(obj); }
+
class SimpleGCedBase : public GarbageCollected<SimpleGCedBase> {
public:
static size_t destructed_objects;
@@ -32,7 +42,7 @@ class SimpleGCedBase : public GarbageCollected<SimpleGCedBase> {
size_t SimpleGCedBase::destructed_objects;
template <size_t Size>
-class SimpleGCed final : public SimpleGCedBase {
+class SimpleGCed : public SimpleGCedBase {
char array[Size];
};
@@ -50,6 +60,8 @@ struct OtherType<Large> {
using Type = Small;
};
+} // namespace
+
class MinorGCTest : public testing::TestWithHeap {
public:
MinorGCTest() {
@@ -65,9 +77,18 @@ class MinorGCTest : public testing::TestWithHeap {
Heap::From(GetHeap())->CollectGarbage(
Heap::Config::MinorPreciseAtomicConfig());
}
+
void CollectMajor() {
Heap::From(GetHeap())->CollectGarbage(Heap::Config::PreciseAtomicConfig());
}
+
+ const auto& RememberedSlots() const {
+ return Heap::From(GetHeap())->remembered_set().remembered_slots_;
+ }
+
+ const auto& RememberedSourceObjects() const {
+ return Heap::From(GetHeap())->remembered_set().remembered_source_objects_;
+ }
};
template <typename SmallOrLarge>
@@ -76,11 +97,95 @@ class MinorGCTestForType : public MinorGCTest {
using Type = SmallOrLarge;
};
-} // namespace
-
using ObjectTypes = ::testing::Types<Small, Large>;
TYPED_TEST_SUITE(MinorGCTestForType, ObjectTypes);
+namespace {
+template <typename... Args>
+void RunMinorGCAndExpectObjectsPromoted(MinorGCTest& test, Args*... args) {
+ ([args] { EXPECT_TRUE(IsHeapObjectYoung(args)); }(), ...);
+ test.CollectMinor();
+ ([args] { EXPECT_TRUE(IsHeapObjectOld(args)); }(), ...);
+}
+
+struct ExpectRememberedSlotsAdded final {
+ ExpectRememberedSlotsAdded(
+ const MinorGCTest& test,
+ std::initializer_list<void*> slots_expected_to_be_remembered)
+ : remembered_slots_(test.RememberedSlots()),
+ slots_expected_to_be_remembered_(slots_expected_to_be_remembered),
+ initial_number_of_slots_(remembered_slots_.size()) {
+ // Check that the remembered set doesn't contain specified slots.
+ EXPECT_FALSE(std::includes(remembered_slots_.begin(),
+ remembered_slots_.end(),
+ slots_expected_to_be_remembered_.begin(),
+ slots_expected_to_be_remembered_.end()));
+ }
+
+ ~ExpectRememberedSlotsAdded() {
+ const size_t current_number_of_slots = remembered_slots_.size();
+ EXPECT_EQ(
+ initial_number_of_slots_ + slots_expected_to_be_remembered_.size(),
+ current_number_of_slots);
+ EXPECT_TRUE(std::includes(remembered_slots_.begin(),
+ remembered_slots_.end(),
+ slots_expected_to_be_remembered_.begin(),
+ slots_expected_to_be_remembered_.end()));
+ }
+
+ private:
+ const std::set<void*>& remembered_slots_;
+ std::set<void*> slots_expected_to_be_remembered_;
+ const size_t initial_number_of_slots_ = 0;
+};
+
+struct ExpectRememberedSlotsRemoved final {
+ ExpectRememberedSlotsRemoved(
+ const MinorGCTest& test,
+ std::initializer_list<void*> slots_expected_to_be_removed)
+ : remembered_slots_(test.RememberedSlots()),
+ slots_expected_to_be_removed_(slots_expected_to_be_removed),
+ initial_number_of_slots_(remembered_slots_.size()) {
+ DCHECK_GE(initial_number_of_slots_, slots_expected_to_be_removed_.size());
+ // Check that the remembered set does contain specified slots to be removed.
+ EXPECT_TRUE(std::includes(remembered_slots_.begin(),
+ remembered_slots_.end(),
+ slots_expected_to_be_removed_.begin(),
+ slots_expected_to_be_removed_.end()));
+ }
+
+ ~ExpectRememberedSlotsRemoved() {
+ const size_t current_number_of_slots = remembered_slots_.size();
+ EXPECT_EQ(initial_number_of_slots_ - slots_expected_to_be_removed_.size(),
+ current_number_of_slots);
+ EXPECT_FALSE(std::includes(remembered_slots_.begin(),
+ remembered_slots_.end(),
+ slots_expected_to_be_removed_.begin(),
+ slots_expected_to_be_removed_.end()));
+ }
+
+ private:
+ const std::set<void*>& remembered_slots_;
+ std::set<void*> slots_expected_to_be_removed_;
+ const size_t initial_number_of_slots_ = 0;
+};
+
+struct ExpectNoRememberedSlotsAdded final {
+ explicit ExpectNoRememberedSlotsAdded(const MinorGCTest& test)
+ : remembered_slots_(test.RememberedSlots()),
+ initial_remembered_slots_(remembered_slots_) {}
+
+ ~ExpectNoRememberedSlotsAdded() {
+ EXPECT_EQ(initial_remembered_slots_, remembered_slots_);
+ }
+
+ private:
+ const std::set<void*>& remembered_slots_;
+ std::set<void*> initial_remembered_slots_;
+};
+
+} // namespace
+
TYPED_TEST(MinorGCTestForType, MinorCollection) {
using Type = typename TestFixture::Type;
@@ -162,7 +267,7 @@ void InterGenerationalPointerTest(MinorGCTest* test, cppgc::Heap* heap) {
}
}
- const auto& set = Heap::From(heap)->remembered_slots();
+ const auto& set = test->RememberedSlots();
auto set_size_before = set.size();
// Issue generational barrier.
@@ -226,16 +331,239 @@ TYPED_TEST(MinorGCTestForType, OmitGenerationalBarrierForSentinels) {
TestFixture::CollectMinor();
EXPECT_FALSE(HeapObjectHeader::FromObject(old.Get()).IsYoung());
- const auto& set = Heap::From(this->GetHeap())->remembered_slots();
+ {
+ ExpectNoRememberedSlotsAdded _(*this);
+ // Try issuing generational barrier for nullptr.
+ old->next = static_cast<Type*>(nullptr);
+ }
+ {
+ ExpectNoRememberedSlotsAdded _(*this);
+ // Try issuing generational barrier for sentinel.
+ old->next = static_cast<Type*>(kSentinelPointer);
+ }
+}
+
+template <typename From, typename To>
+void TestRememberedSetInvalidation(MinorGCTest& test) {
+ Persistent<From> old = MakeGarbageCollected<From>(test.GetAllocationHandle());
+
+ test.CollectMinor();
+
+ auto* young = MakeGarbageCollected<To>(test.GetAllocationHandle());
+
+ {
+ ExpectRememberedSlotsAdded _(test, {old->next.GetSlotForTesting()});
+ // Issue the generational barrier.
+ old->next = young;
+ }
+
+ {
+ ExpectRememberedSlotsRemoved _(test, {old->next.GetSlotForTesting()});
+ // Release the persistent and free the old object.
+ auto* old_raw = old.Release();
+ subtle::FreeUnreferencedObject(test.GetHeapHandle(), *old_raw);
+ }
+
+ // Visiting remembered slots must not fail.
+ test.CollectMinor();
+}
+
+TYPED_TEST(MinorGCTestForType, RememberedSetInvalidationOnPromptlyFree) {
+ using Type1 = typename TestFixture::Type;
+ using Type2 = typename OtherType<Type1>::Type;
+ TestRememberedSetInvalidation<Type1, Type1>(*this);
+ TestRememberedSetInvalidation<Type1, Type2>(*this);
+}
+
+TEST_F(MinorGCTest, RememberedSetInvalidationOnShrink) {
+ using Member = Member<Small>;
+
+ static constexpr size_t kTrailingMembers = 64;
+ static constexpr size_t kBytesToAllocate = kTrailingMembers * sizeof(Member);
+
+ static constexpr size_t kFirstMemberToInvalidate = kTrailingMembers / 2;
+ static constexpr size_t kLastMemberToInvalidate = kTrailingMembers;
+
+ // Create an object with additional kBytesToAllocate bytes.
+ Persistent<Small> old = MakeGarbageCollected<Small>(
+ this->GetAllocationHandle(), AdditionalBytes(kBytesToAllocate));
+
+ auto get_member = [&old](size_t i) -> Member& {
+ return *reinterpret_cast<Member*>(reinterpret_cast<uint8_t*>(old.Get()) +
+ sizeof(Small) + i * sizeof(Member));
+ };
+
+ CollectMinor();
+
+ auto* young = MakeGarbageCollected<Small>(GetAllocationHandle());
+
+ const auto& set = RememberedSlots();
const size_t set_size_before_barrier = set.size();
- // Try issuing generational barrier for nullptr.
- old->next = static_cast<Type*>(nullptr);
- EXPECT_EQ(set_size_before_barrier, set.size());
+ // Issue the generational barriers.
+ for (size_t i = kFirstMemberToInvalidate; i < kLastMemberToInvalidate; ++i) {
+ // Construct the member.
+ new (&get_member(i)) Member;
+ // Issue the barrier.
+ get_member(i) = young;
+ }
+
+ // Check that barriers hit (kLastMemberToInvalidate -
+ // kFirstMemberToInvalidate) times.
+ EXPECT_EQ(set_size_before_barrier +
+ (kLastMemberToInvalidate - kFirstMemberToInvalidate),
+ set.size());
+
+ // Shrink the buffer for old object.
+ subtle::Resize(*old, AdditionalBytes(kBytesToAllocate / 2));
- // Try issuing generational barrier for sentinel.
- old->next = static_cast<Type*>(kSentinelPointer);
+ // Check that the reference was invalidated.
EXPECT_EQ(set_size_before_barrier, set.size());
+
+ // Visiting remembered slots must not fail.
+ CollectMinor();
+}
+
+namespace {
+
+template <typename Value>
+struct InlinedObject {
+ struct Inner {
+ Inner() = default;
+ explicit Inner(AllocationHandle& handle)
+ : ref(MakeGarbageCollected<Value>(handle)) {}
+
+ void Trace(Visitor* v) const { v->Trace(ref); }
+
+ double d = -1.;
+ Member<Value> ref;
+ };
+
+ InlinedObject() = default;
+ explicit InlinedObject(AllocationHandle& handle)
+ : ref(MakeGarbageCollected<Value>(handle)), inner(handle) {}
+
+ void Trace(cppgc::Visitor* v) const {
+ v->Trace(ref);
+ v->Trace(inner);
+ }
+
+ int a_ = -1;
+ Member<Value> ref;
+ Inner inner;
+};
+
+template <typename Value>
+class GCedWithInlinedArray
+ : public GarbageCollected<GCedWithInlinedArray<Value>> {
+ public:
+ static constexpr size_t kNumObjects = 16;
+
+ GCedWithInlinedArray(HeapHandle& heap_handle, AllocationHandle& alloc_handle)
+ : heap_handle_(heap_handle), alloc_handle_(alloc_handle) {}
+
+ using WriteBarrierParams = subtle::HeapConsistency::WriteBarrierParams;
+ using HeapConsistency = subtle::HeapConsistency;
+
+ void SetInPlaceRange(size_t from, size_t to) {
+ DCHECK_GT(to, from);
+ DCHECK_GT(kNumObjects, from);
+
+ for (; from != to; ++from)
+ new (&objects[from]) InlinedObject<Value>(alloc_handle_);
+
+ GenerationalBarrierForSourceObject(&objects[from]);
+ }
+
+ void Trace(cppgc::Visitor* v) const {
+ for (const auto& object : objects) v->Trace(object);
+ }
+
+ InlinedObject<Value> objects[kNumObjects];
+
+ private:
+ void GenerationalBarrierForSourceObject(void* object) {
+ DCHECK(object);
+ WriteBarrierParams params;
+ const auto barrier_type = HeapConsistency::GetWriteBarrierType(
+ object, params, [this]() -> HeapHandle& { return heap_handle_; });
+ EXPECT_EQ(HeapConsistency::WriteBarrierType::kGenerational, barrier_type);
+ HeapConsistency::GenerationalBarrierForSourceObject(params, object);
+ }
+
+ HeapHandle& heap_handle_;
+ AllocationHandle& alloc_handle_;
+};
+
+} // namespace
+
+TYPED_TEST(MinorGCTestForType, GenerationalBarrierDeferredTracing) {
+ using Type = typename TestFixture::Type;
+
+ Persistent<GCedWithInlinedArray<Type>> array =
+ MakeGarbageCollected<GCedWithInlinedArray<Type>>(
+ this->GetAllocationHandle(), this->GetHeapHandle(),
+ this->GetAllocationHandle());
+
+ this->CollectMinor();
+
+ EXPECT_TRUE(IsHeapObjectOld(array.Get()));
+
+ const auto& remembered_objects = this->RememberedSourceObjects();
+ {
+ ExpectNoRememberedSlotsAdded _(*this);
+ EXPECT_EQ(0u, remembered_objects.count(
+ &HeapObjectHeader::FromObject(array->objects)));
+
+ array->SetInPlaceRange(2, 4);
+
+ EXPECT_EQ(1u, remembered_objects.count(
+ &HeapObjectHeader::FromObject(array->objects)));
+ }
+
+ RunMinorGCAndExpectObjectsPromoted(
+ *this, array->objects[2].ref.Get(), array->objects[2].inner.ref.Get(),
+ array->objects[3].ref.Get(), array->objects[3].inner.ref.Get());
+
+ EXPECT_EQ(0u, remembered_objects.size());
+}
+
+namespace {
+class GCedWithCustomWeakCallback final
+ : public GarbageCollected<GCedWithCustomWeakCallback> {
+ public:
+ static size_t custom_callback_called;
+
+ void CustomWeakCallbackMethod(const LivenessBroker& broker) {
+ custom_callback_called++;
+ }
+
+ void Trace(cppgc::Visitor* visitor) const {
+ visitor->RegisterWeakCallbackMethod<
+ GCedWithCustomWeakCallback,
+ &GCedWithCustomWeakCallback::CustomWeakCallbackMethod>(this);
+ }
+};
+size_t GCedWithCustomWeakCallback::custom_callback_called = 0;
+} // namespace
+
+TEST_F(MinorGCTest, ReexecuteCustomCallback) {
+ // Create an object with additional kBytesToAllocate bytes.
+ Persistent<GCedWithCustomWeakCallback> old =
+ MakeGarbageCollected<GCedWithCustomWeakCallback>(GetAllocationHandle());
+
+ CollectMinor();
+ EXPECT_EQ(1u, GCedWithCustomWeakCallback::custom_callback_called);
+
+ CollectMinor();
+ EXPECT_EQ(2u, GCedWithCustomWeakCallback::custom_callback_called);
+
+ CollectMinor();
+ EXPECT_EQ(3u, GCedWithCustomWeakCallback::custom_callback_called);
+
+ CollectMajor();
+ // The callback must be called only once.
+ EXPECT_EQ(4u, GCedWithCustomWeakCallback::custom_callback_called);
}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
index d1c257b463..a1536ab996 100644
--- a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
@@ -115,6 +115,8 @@ TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
}
+// See the comment in globals.h when setting |kGuardPageSize| for details.
+#if !(defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS))
TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
// This tests that the testing allocator actually uses protected guard
// regions.
@@ -132,6 +134,7 @@ TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
#endif
}
+#endif // !(defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS))
namespace {
@@ -295,15 +298,19 @@ TEST(PageBackendTest, LookupNormal) {
PageBackend backend(allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
- EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ if (kGuardPageSize) {
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ }
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kPageSize -
2 * kGuardPageSize - 1));
EXPECT_EQ(nullptr,
backend.Lookup(writeable_base + kPageSize - 2 * kGuardPageSize));
- EXPECT_EQ(nullptr,
- backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
+ if (kGuardPageSize) {
+ EXPECT_EQ(nullptr,
+ backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
+ }
}
TEST(PageBackendTest, LookupLarge) {
@@ -312,7 +319,9 @@ TEST(PageBackendTest, LookupLarge) {
PageBackend backend(allocator, oom_handler);
constexpr size_t kSize = 7934;
Address writeable_base = backend.AllocateLargePageMemory(kSize);
- EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ if (kGuardPageSize) {
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ }
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kSize - 1));
diff --git a/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
index 9e23cb5681..cd0b49a9c9 100644
--- a/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
@@ -68,8 +68,9 @@ class V8_NODISCARD CppgcTracingScopesTest : public testing::TestWithHeap {
Config config = {Config::CollectionType::kMajor,
Config::StackState::kNoHeapPointers,
Config::MarkingType::kIncremental};
- GetMarkerRef() = MarkerFactory::CreateAndStartMarking<Marker>(
+ GetMarkerRef() = std::make_unique<Marker>(
Heap::From(GetHeap())->AsBase(), GetPlatformHandle().get(), config);
+ GetMarkerRef()->StartMarking();
DelegatingTracingControllerImpl::check_expectations = true;
}
diff --git a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
index cf45fe0248..70f971e62f 100644
--- a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
@@ -22,7 +22,8 @@ class GCed : public GarbageCollected<GCed> {
};
} // namespace
-TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
+TEST_F(TestingTest,
+ OverrideEmbeddertackStateScopeDoesNotOverrideExplicitCalls) {
{
auto* gced = MakeGarbageCollected<GCed>(GetHeap()->GetAllocationHandle());
WeakPersistent<GCed> weak{gced};
@@ -38,7 +39,7 @@ TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
EmbedderStackState::kMayContainHeapPointers);
internal::Heap::From(GetHeap())->CollectGarbage(
Heap::Config::PreciseAtomicConfig());
- EXPECT_TRUE(weak);
+ EXPECT_FALSE(weak);
}
{
auto* gced = MakeGarbageCollected<GCed>(GetHeap()->GetAllocationHandle());
@@ -47,7 +48,7 @@ TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
GetHeap()->GetHeapHandle(), EmbedderStackState::kNoHeapPointers);
internal::Heap::From(GetHeap())->CollectGarbage(
Heap::Config::ConservativeAtomicConfig());
- EXPECT_FALSE(weak);
+ EXPECT_TRUE(weak);
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index 39c92b95e9..7fe91397fc 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -48,7 +48,7 @@ class DelegatingTracingController : public TracingController {
};
class TestWithPlatform : public ::testing::Test {
- protected:
+ public:
static void SetUpTestSuite();
static void TearDownTestSuite();
@@ -67,7 +67,7 @@ class TestWithPlatform : public ::testing::Test {
};
class TestWithHeap : public TestWithPlatform {
- protected:
+ public:
TestWithHeap();
void PreciseGC() {
diff --git a/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc b/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc
index 4176a2d618..7159b2ba5c 100644
--- a/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <atomic>
+
#include "include/cppgc/allocation.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/marker.h"
@@ -24,8 +26,9 @@ class WeakContainerTest : public testing::TestWithHeap {
Config config = {Config::CollectionType::kMajor,
Config::StackState::kNoHeapPointers,
Config::MarkingType::kIncremental};
- GetMarkerRef() = MarkerFactory::CreateAndStartMarking<Marker>(
+ GetMarkerRef() = std::make_unique<Marker>(
Heap::From(GetHeap())->AsBase(), GetPlatformHandle().get(), config);
+ GetMarkerRef()->StartMarking();
}
void FinishMarking(Config::StackState stack_state) {
@@ -49,7 +52,10 @@ constexpr size_t SizeOf() {
class TraceableGCed : public GarbageCollected<TraceableGCed> {
public:
- void Trace(cppgc::Visitor*) const { n_trace_calls++; }
+ void Trace(cppgc::Visitor*) const {
+ reinterpret_cast<std::atomic<size_t>*>(&n_trace_calls)
+ ->fetch_add(1, std::memory_order_relaxed);
+ }
static size_t n_trace_calls;
};
size_t TraceableGCed::n_trace_calls = 0u;
diff --git a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
index 6bec4bb46a..28892b2b1d 100644
--- a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
@@ -162,10 +162,11 @@ class WriteBarrierTest : public testing::TestWithHeap {
public:
WriteBarrierTest() : internal_heap_(Heap::From(GetHeap())) {
DCHECK_NULL(GetMarkerRef().get());
- GetMarkerRef() = MarkerFactory::CreateAndStartMarking<Marker>(
- *internal_heap_, GetPlatformHandle().get(),
- IncrementalMarkingScope::kIncrementalConfig);
+ GetMarkerRef() =
+ std::make_unique<Marker>(*internal_heap_, GetPlatformHandle().get(),
+ IncrementalMarkingScope::kIncrementalConfig);
marker_ = GetMarkerRef().get();
+ marker_->StartMarking();
}
~WriteBarrierTest() override {
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index 0f8da917e5..df4ad206f5 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -120,8 +120,10 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(isolate());
local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EmbedderStackStateScope scope =
+ EmbedderStackStateScope::ExplicitScopeForTesting(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
EXPECT_CALL(
remote_tracer,
EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
@@ -134,8 +136,10 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, TemporaryEmbedderStackState) {
local_tracer.SetRemoteTracer(&remote_tracer);
// Default is unknown, see above.
{
- EmbedderStackStateScope scope(
- &local_tracer, EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EmbedderStackStateScope scope =
+ EmbedderStackStateScope::ExplicitScopeForTesting(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
EXPECT_CALL(remote_tracer,
EnterFinalPause(
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
@@ -150,12 +154,15 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
local_tracer.SetRemoteTracer(&remote_tracer);
// Default is unknown, see above.
{
- EmbedderStackStateScope scope(
- &local_tracer, EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EmbedderStackStateScope scope =
+ EmbedderStackStateScope::ExplicitScopeForTesting(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
{
- EmbedderStackStateScope nested_scope(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ EmbedderStackStateScope nested_scope =
+ EmbedderStackStateScope::ExplicitScopeForTesting(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
EXPECT_CALL(
remote_tracer,
EnterFinalPause(
@@ -173,8 +180,10 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(isolate());
local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EmbedderStackStateScope scope =
+ EmbedderStackStateScope::ExplicitScopeForTesting(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
EXPECT_CALL(
remote_tracer,
EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index c8ec9ca563..19c8e37585 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -53,14 +53,61 @@ TEST(GCTracer, AverageSpeed) {
namespace {
-void SampleAndAddAllocaton(v8::internal::GCTracer* tracer, double time_ms,
- size_t per_space_counter_bytes) {
+void SampleAndAddAllocation(GCTracer* tracer, double time_ms,
+ size_t per_space_counter_bytes) {
// Increment counters of all spaces.
tracer->SampleAllocation(time_ms, per_space_counter_bytes,
per_space_counter_bytes, per_space_counter_bytes);
tracer->AddAllocation(time_ms);
}
+enum class StartTracingMode {
+ kAtomic,
+ kIncremental,
+ kIncrementalStart,
+ kIncrementalEnterPause,
+};
+
+void StartTracing(GCTracer* tracer, GarbageCollector collector,
+ StartTracingMode mode) {
+ DCHECK_IMPLIES(mode != StartTracingMode::kAtomic,
+ !Heap::IsYoungGenerationCollector(collector));
+ // Start the cycle for incremental marking.
+ if (mode == StartTracingMode::kIncremental ||
+ mode == StartTracingMode::kIncrementalStart) {
+ tracer->StartCycle(collector, GarbageCollectionReason::kTesting,
+ "collector unittest",
+ GCTracer::MarkingType::kIncremental);
+ }
+ // If just that was requested, no more to be done.
+ if (mode == StartTracingMode::kIncrementalStart) return;
+ // Else, we enter the observable pause.
+ tracer->StartObservablePause();
+ // Start an atomic GC cycle.
+ if (mode == StartTracingMode::kAtomic) {
+ tracer->StartCycle(collector, GarbageCollectionReason::kTesting,
+ "collector unittest", GCTracer::MarkingType::kAtomic);
+ }
+ // We enter the atomic pause.
+ tracer->StartAtomicPause();
+ // Update the current event for an incremental GC cycle.
+ if (mode != StartTracingMode::kAtomic) {
+ tracer->UpdateCurrentEvent(GarbageCollectionReason::kTesting,
+ "collector unittest");
+ }
+}
+
+void StopTracing(GCTracer* tracer, GarbageCollector collector) {
+ tracer->StopAtomicPause();
+ tracer->StopObservablePause();
+ tracer->UpdateStatistics(collector);
+ if (Heap::IsYoungGenerationCollector(collector)) {
+ tracer->StopCycle(collector);
+ } else {
+ tracer->NotifySweepingCompleted();
+ }
+}
+
} // namespace
TEST_F(GCTracerTest, AllocationThroughput) {
@@ -70,17 +117,17 @@ TEST_F(GCTracerTest, AllocationThroughput) {
const int time1 = 100;
const size_t counter1 = 1000;
- SampleAndAddAllocaton(tracer, time1, counter1);
+ SampleAndAddAllocation(tracer, time1, counter1);
const int time2 = 200;
const size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2);
+ SampleAndAddAllocation(tracer, time2, counter2);
// Will only consider the current sample.
EXPECT_EQ(2 * (counter2 - counter1) / (time2 - time1),
static_cast<size_t>(
tracer->AllocationThroughputInBytesPerMillisecond(100)));
const int time3 = 1000;
const size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3);
+ SampleAndAddAllocation(tracer, time3, counter3);
// Only consider last sample.
EXPECT_EQ(2 * (counter3 - counter2) / (time3 - time2),
static_cast<size_t>(
@@ -97,10 +144,10 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
const int time1 = 100;
const size_t counter1 = 1000;
- SampleAndAddAllocaton(tracer, time1, counter1);
+ SampleAndAddAllocation(tracer, time1, counter1);
const int time2 = 200;
const size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2);
+ SampleAndAddAllocation(tracer, time2, counter2);
const size_t expected_throughput1 = (counter2 - counter1) / (time2 - time1);
EXPECT_EQ(expected_throughput1,
static_cast<size_t>(
@@ -114,7 +161,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
tracer->EmbedderAllocationThroughputInBytesPerMillisecond()));
const int time3 = 1000;
const size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3);
+ SampleAndAddAllocation(tracer, time3, counter3);
const size_t expected_throughput2 = (counter3 - counter1) / (time3 - time1);
EXPECT_EQ(expected_throughput2,
static_cast<size_t>(
@@ -134,10 +181,10 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
const int time1 = 100;
const size_t counter1 = 1000;
- SampleAndAddAllocaton(tracer, time1, counter1);
+ SampleAndAddAllocation(tracer, time1, counter1);
const int time2 = 200;
const size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2);
+ SampleAndAddAllocation(tracer, time2, counter2);
const size_t expected_throughput1 = (counter2 - counter1) / (time2 - time1);
EXPECT_EQ(
expected_throughput1,
@@ -149,7 +196,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100)));
const int time3 = 1000;
const size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3);
+ SampleAndAddAllocation(tracer, time3, counter3);
const size_t expected_throughput2 = (counter3 - counter2) / (time3 - time2);
// Only consider last sample.
EXPECT_EQ(
@@ -177,12 +224,12 @@ TEST_F(GCTracerTest, RegularScope) {
tracer->ResetForTesting();
EXPECT_DOUBLE_EQ(0.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
- // Sample not added because it's not within a started tracer.
+ // Sample not added because the cycle has not started.
+ tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 10);
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kAtomic);
tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(100.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
}
@@ -194,12 +241,10 @@ TEST_F(GCTracerTest, IncrementalScope) {
0.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
// Sample is added because its ScopeId is listed as incremental sample.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- // Switch to incremental MC to enable writing back incremental scopes.
- tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncremental);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
200.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
}
@@ -211,15 +256,12 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
// Round 1.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 50);
// Scavenger has no impact on incremental marking details.
- tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
- "collector unittest");
- tracer->Stop(GarbageCollector::SCAVENGER);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- // Switch to incremental MC to enable writing back incremental scopes.
- tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
+ StopTracing(tracer, GarbageCollector::SCAVENGER);
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncremental);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
100,
tracer->current_
@@ -239,12 +281,10 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
// Round 2. Numbers should be reset.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 15);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- // Switch to incremental MC to enable writing back incremental scopes.
- tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncremental);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
122,
tracer->current_
@@ -267,6 +307,8 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
tracer->ResetForTesting();
// Round 1.
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncrementalStart);
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(1000000 / 100,
@@ -276,24 +318,21 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
// Scavenger has no impact on incremental marking details.
- tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
- "collector unittest");
- tracer->Stop(GarbageCollector::SCAVENGER);
+ StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
+ StopTracing(tracer, GarbageCollector::SCAVENGER);
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(300, tracer->incremental_marking_duration_);
EXPECT_EQ(3000000u, tracer->incremental_marking_bytes_);
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- // Switch to incremental MC.
- tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(400, tracer->incremental_marking_duration_);
EXPECT_EQ(4000000u, tracer->incremental_marking_bytes_);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncrementalEnterPause);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_EQ(400, tracer->current_.incremental_marking_duration);
EXPECT_EQ(4000000u, tracer->current_.incremental_marking_bytes);
EXPECT_EQ(0, tracer->incremental_marking_duration_);
@@ -302,12 +341,12 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
// Round 2.
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncrementalStart);
tracer->AddIncrementalMarkingStep(2000, 1000);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
- // Switch to incremental MC.
- tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kIncrementalEnterPause);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ((4000000.0 / 400 + 1000.0 / 2000) / 2,
static_cast<double>(
tracer->IncrementalMarkingSpeedInBytesPerMillisecond()));
@@ -352,13 +391,12 @@ TEST_F(GCTracerTest, MutatorUtilization) {
TEST_F(GCTracerTest, BackgroundScavengerScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
- "collector unittest");
+ StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
tracer->AddScopeSampleBackground(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10);
tracer->AddScopeSampleBackground(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1);
- tracer->Stop(GarbageCollector::SCAVENGER);
+ StopTracing(tracer, GarbageCollector::SCAVENGER);
EXPECT_DOUBLE_EQ(
11, tracer->current_
.scopes[GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]);
@@ -367,8 +405,8 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
TEST_F(GCTracerTest, BackgroundMinorMCScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- tracer->Start(GarbageCollector::MINOR_MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
+ StartTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR,
+ StartTracingMode::kAtomic);
tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
10);
tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
@@ -381,7 +419,7 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 30);
tracer->AddScopeSampleBackground(
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 3);
- tracer->Stop(GarbageCollector::MINOR_MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
11,
tracer->current_.scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING]);
@@ -401,14 +439,13 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
200);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 10);
// Scavenger should not affect the major mark-compact scopes.
- tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
- "collector unittest");
- tracer->Stop(GarbageCollector::SCAVENGER);
+ StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
+ StopTracing(tracer, GarbageCollector::SCAVENGER);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 20);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 1);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 2);
- tracer->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting, "collector unittest");
+ StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
+ StartTracingMode::kAtomic);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
30);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
@@ -417,7 +454,7 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40);
tracer->AddScopeSampleBackground(
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4);
- tracer->Stop(GarbageCollector::MARK_COMPACTOR);
+ StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
111, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
EXPECT_DOUBLE_EQ(
@@ -505,7 +542,8 @@ TEST_F(GCTracerTest, RecordMarkCompactHistograms) {
tracer->current_.scopes[GCTracer::Scope::MC_MARK] = 5;
tracer->current_.scopes[GCTracer::Scope::MC_PROLOGUE] = 6;
tracer->current_.scopes[GCTracer::Scope::MC_SWEEP] = 7;
- tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_finalize());
+ tracer->RecordGCPhasesHistograms(
+ GCTracer::RecordGCPhasesInfo::Mode::Finalize);
EXPECT_EQ(1, GcHistogram::Get("V8.GCFinalizeMC.Clear")->Total());
EXPECT_EQ(2, GcHistogram::Get("V8.GCFinalizeMC.Epilogue")->Total());
EXPECT_EQ(3, GcHistogram::Get("V8.GCFinalizeMC.Evacuate")->Total());
@@ -524,33 +562,12 @@ TEST_F(GCTracerTest, RecordScavengerHistograms) {
tracer->ResetForTesting();
tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS] = 1;
tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL] = 2;
- tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_scavenger());
+ tracer->RecordGCPhasesHistograms(
+ GCTracer::RecordGCPhasesInfo::Mode::Scavenger);
EXPECT_EQ(1, GcHistogram::Get("V8.GCScavenger.ScavengeRoots")->Total());
EXPECT_EQ(2, GcHistogram::Get("V8.GCScavenger.ScavengeMain")->Total());
GcHistogram::CleanUp();
}
-TEST_F(GCTracerTest, RecordGCSumHistograms) {
- if (FLAG_stress_incremental_marking) return;
- isolate()->SetCreateHistogramFunction(&GcHistogram::CreateHistogram);
- isolate()->SetAddHistogramSampleFunction(&GcHistogram::AddHistogramSample);
- GCTracer* tracer = i_isolate()->heap()->tracer();
- tracer->ResetForTesting();
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_START]
- .duration = 1;
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_SWEEPING]
- .duration = 2;
- tracer->AddIncrementalMarkingStep(3.0, 1024);
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .duration = 4;
- const double atomic_pause_duration = 5.0;
- tracer->RecordGCSumCounters(atomic_pause_duration);
- EXPECT_EQ(15, GcHistogram::Get("V8.GCMarkCompactor")->Total());
- GcHistogram::CleanUp();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 18192c3b53..82b5eee57d 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -93,7 +93,7 @@ TEST(Heap, HeapSizeFromPhysicalMemory) {
TEST_F(HeapTest, ASLR) {
#if V8_TARGET_ARCH_X64
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
Heap* heap = i_isolate()->heap();
std::set<void*> hints;
for (int i = 0; i < 1000; i++) {
@@ -114,7 +114,7 @@ TEST_F(HeapTest, ASLR) {
EXPECT_LE(diff, kRegionMask);
}
}
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
#endif // V8_TARGET_ARCH_X64
}
diff --git a/deps/v8/test/unittests/heap/heap-utils.cc b/deps/v8/test/unittests/heap/heap-utils.cc
index 66ad8d98c8..24dce15db0 100644
--- a/deps/v8/test/unittests/heap/heap-utils.cc
+++ b/deps/v8/test/unittests/heap/heap-utils.cc
@@ -19,7 +19,8 @@ void HeapInternalsBase::SimulateIncrementalMarking(Heap* heap,
i::MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
SafepointScope scope(heap);
- collector->EnsureSweepingCompleted();
+ collector->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
if (marking->IsStopped()) {
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 1414c7b0b8..b57a4b197c 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -19,7 +19,7 @@ using LocalHeapTest = TestWithIsolate;
TEST_F(LocalHeapTest, Initialize) {
Heap* heap = i_isolate()->heap();
- CHECK(heap->safepoint()->ContainsAnyLocalHeap());
+ heap->safepoint()->AssertMainThreadIsOnlyThread();
}
TEST_F(LocalHeapTest, Current) {
diff --git a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
index 1a011a65a7..a48cc6a446 100644
--- a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
@@ -45,9 +45,9 @@ TEST_F(MarkingWorklistTest, PushPopEmbedder) {
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
- worklists.PushEmbedder(pushed_object);
+ worklists.PushWrapper(pushed_object);
HeapObject popped_object;
- EXPECT_TRUE(worklists.PopEmbedder(&popped_object));
+ EXPECT_TRUE(worklists.PopWrapper(&popped_object));
EXPECT_EQ(popped_object, pushed_object);
}
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 1c0ef3ae65..1519a1a8c1 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -239,7 +239,8 @@ TEST(TypedSlotSet, Iterate) {
static const int kDelta = 10000001;
int added = 0;
for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
- SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
+ SlotType type =
+ static_cast<SlotType>(i % static_cast<uint8_t>(SlotType::kCleared));
set.Insert(type, i);
++added;
}
@@ -247,7 +248,8 @@ TEST(TypedSlotSet, Iterate) {
set.Iterate(
[&iterated](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(addr);
- EXPECT_EQ(i % CLEARED_SLOT, static_cast<uint32_t>(type));
+ EXPECT_EQ(i % static_cast<uint8_t>(SlotType::kCleared),
+ static_cast<uint32_t>(type));
EXPECT_EQ(0u, i % kDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
@@ -271,7 +273,8 @@ TEST(TypedSlotSet, ClearInvalidSlots) {
const int kHostDelta = 100;
uint32_t entries = 10;
for (uint32_t i = 0; i < entries; i++) {
- SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
+ SlotType type =
+ static_cast<SlotType>(i % static_cast<uint8_t>(SlotType::kCleared));
set.Insert(type, i * kHostDelta);
}
@@ -299,8 +302,8 @@ TEST(TypedSlotSet, Merge) {
TypedSlotSet set0(0), set1(0);
static const uint32_t kEntries = 10000;
for (uint32_t i = 0; i < kEntries; i++) {
- set0.Insert(FULL_EMBEDDED_OBJECT_SLOT, 2 * i);
- set1.Insert(FULL_EMBEDDED_OBJECT_SLOT, 2 * i + 1);
+ set0.Insert(SlotType::kEmbeddedObjectFull, 2 * i);
+ set1.Insert(SlotType::kEmbeddedObjectFull, 2 * i + 1);
}
uint32_t count = 0;
set0.Merge(&set1);
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index b0b6371ca1..aba0bdb964 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -233,9 +233,9 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
PagePermissionsMap page_permissions_;
};
-// This test is currently incompatible with the VirtualMemoryCage. Enable it
-// once the VirtualMemorySpace interface is stable.
-#if !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
+// This test is currently incompatible with the sandbox. Enable it
+// once the VirtualAddressSpace interface is stable.
+#if !V8_OS_FUCHSIA && !V8_SANDBOX
class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
@@ -255,15 +255,14 @@ class SequentialUnmapperTest : public TestWithIsolate {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Reinitialize the process-wide pointer cage so it can pick up the
// TrackingPageAllocator.
- // The pointer cage must be destroyed before the virtual memory cage.
+ // The pointer cage must be destroyed before the sandbox.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- // Reinitialze the virtual memory cage so it uses the TrackingPageAllocator.
- GetProcessWideVirtualMemoryCage()->TearDown();
+#ifdef V8_SANDBOX
+ // Reinitialze the sandbox so it uses the TrackingPageAllocator.
+ GetProcessWideSandbox()->TearDown();
constexpr bool use_guard_regions = false;
- CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
- tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
- use_guard_regions));
+ CHECK(GetProcessWideSandbox()->Initialize(
+ tracking_page_allocator_, kSandboxMinimumSize, use_guard_regions));
#endif
IsolateAllocator::InitializeOncePerProcess();
#endif
@@ -277,8 +276,8 @@ class SequentialUnmapperTest : public TestWithIsolate {
// freed until process teardown.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- GetProcessWideVirtualMemoryCage()->TearDown();
+#ifdef V8_SANDBOX
+ GetProcessWideSandbox()->TearDown();
#endif
i::FLAG_concurrent_sweeping = old_flag_;
CHECK(tracking_page_allocator_->IsEmpty());
@@ -313,6 +312,7 @@ bool SequentialUnmapperTest::old_flag_;
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
+ MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
@@ -320,7 +320,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
- allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
+ allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
@@ -342,6 +342,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
+ MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
@@ -350,7 +351,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
- allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
+ allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();
@@ -364,7 +365,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif // V8_COMPRESS_POINTERS
}
-#endif // !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
+#endif // !V8_OS_FUCHSIA && !V8_SANDBOX
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 38f308376f..a4707381b1 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -111,7 +111,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
feedback_spec.AddKeyedStoreICSlot(LanguageMode::kSloppy);
FeedbackSlot strict_keyed_store_slot =
feedback_spec.AddKeyedStoreICSlot(LanguageMode::kStrict);
- FeedbackSlot store_own_slot = feedback_spec.AddStoreOwnICSlot();
+ FeedbackSlot define_named_own_slot = feedback_spec.AddDefineNamedOwnICSlot();
FeedbackSlot store_array_element_slot =
feedback_spec.AddStoreInArrayLiteralICSlot();
@@ -142,16 +142,16 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.LoadNamedProperty(reg, name, load_slot.ToInt())
.LoadNamedPropertyFromSuper(reg, name, load_slot.ToInt())
.LoadKeyedProperty(reg, keyed_load_slot.ToInt())
- .StoreNamedProperty(reg, name, sloppy_store_slot.ToInt(),
- LanguageMode::kSloppy)
- .StoreKeyedProperty(reg, reg, sloppy_keyed_store_slot.ToInt(),
- LanguageMode::kSloppy)
- .StoreNamedProperty(reg, name, strict_store_slot.ToInt(),
- LanguageMode::kStrict)
- .StoreKeyedProperty(reg, reg, strict_keyed_store_slot.ToInt(),
- LanguageMode::kStrict)
- .StoreNamedOwnProperty(reg, name, store_own_slot.ToInt())
- .DefineKeyedProperty(reg, reg, store_own_slot.ToInt())
+ .SetNamedProperty(reg, name, sloppy_store_slot.ToInt(),
+ LanguageMode::kSloppy)
+ .SetKeyedProperty(reg, reg, sloppy_keyed_store_slot.ToInt(),
+ LanguageMode::kSloppy)
+ .SetNamedProperty(reg, name, strict_store_slot.ToInt(),
+ LanguageMode::kStrict)
+ .SetKeyedProperty(reg, reg, strict_keyed_store_slot.ToInt(),
+ LanguageMode::kStrict)
+ .DefineNamedOwnProperty(reg, name, define_named_own_slot.ToInt())
+ .DefineKeyedOwnProperty(reg, reg, define_named_own_slot.ToInt())
.StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
// Emit Iterator-protocol operations
@@ -367,8 +367,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.LoadLiteral(Smi::FromInt(20000000));
const AstRawString* wide_name = ast_factory.GetOneByteString("var_wide_name");
- builder.StoreDataPropertyInLiteral(reg, reg,
- DataPropertyInLiteralFlag::kNoFlags, 0);
+ builder.DefineKeyedOwnPropertyInLiteral(
+ reg, reg, DefineKeyedOwnPropertyInLiteralFlag::kNoFlags, 0);
// Emit wide context operations.
builder.LoadContextSlot(reg, 1024, 0, BytecodeArrayBuilder::kMutableSlot)
@@ -449,7 +449,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Generate BytecodeArray.
Handle<ScopeInfo> scope_info =
factory->NewScopeInfo(ScopeInfo::kVariablePartIndex);
- scope_info->set_flags(0);
+ int flags = ScopeInfo::IsEmptyBit::encode(true);
+ scope_info->set_flags(flags);
scope_info->set_context_local_count(0);
scope_info->set_parameter_count(0);
scope.SetScriptScopeInfo(scope_info);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 1060e4ee95..a15594c8f1 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -38,7 +38,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t load_feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -180,14 +180,14 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kGetNamedProperty);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), load_feedback_slot);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index c2eda59d07..77ecbb99ee 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -38,7 +38,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -90,7 +90,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -142,7 +142,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -199,7 +199,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -256,7 +256,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -339,7 +339,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -358,9 +358,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
iterator -= 3;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
- offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset -= Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kGetNamedProperty);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -370,7 +370,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
ASSERT_TRUE(iterator.IsValid());
iterator += 2;
- offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
@@ -396,7 +396,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset +=
@@ -434,7 +434,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -586,7 +586,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kGetNamedProperty);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -594,7 +594,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
@@ -687,7 +687,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
- Register param = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2);
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -796,8 +796,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
+ offset -= Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kGetNamedProperty);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 35a63cf1bc..e0db6e43a8 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -22,53 +22,41 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
struct BytecodesAndResult {
const uint8_t bytecode[32];
const size_t length;
- int parameter_count;
const char* output;
};
const BytecodesAndResult cases[] = {
- {{B(LdaSmi), U8(1)}, 2, 0, " LdaSmi [1]"},
- {{B(Wide), B(LdaSmi), U16(1000)}, 4, 0, " LdaSmi.Wide [1000]"},
- {{B(ExtraWide), B(LdaSmi), U32(100000)},
- 6,
- 0,
- "LdaSmi.ExtraWide [100000]"},
- {{B(LdaSmi), U8(-1)}, 2, 0, " LdaSmi [-1]"},
- {{B(Wide), B(LdaSmi), U16(-1000)}, 4, 0, " LdaSmi.Wide [-1000]"},
+ {{B(LdaSmi), U8(1)}, 2, " LdaSmi [1]"},
+ {{B(Wide), B(LdaSmi), U16(1000)}, 4, " LdaSmi.Wide [1000]"},
+ {{B(ExtraWide), B(LdaSmi), U32(100000)}, 6, "LdaSmi.ExtraWide [100000]"},
+ {{B(LdaSmi), U8(-1)}, 2, " LdaSmi [-1]"},
+ {{B(Wide), B(LdaSmi), U16(-1000)}, 4, " LdaSmi.Wide [-1000]"},
{{B(ExtraWide), B(LdaSmi), U32(-100000)},
6,
- 0,
"LdaSmi.ExtraWide [-100000]"},
- {{B(Star), R8(5)}, 2, 0, " Star r5"},
- {{B(Wide), B(Star), R16(136)}, 4, 0, " Star.Wide r136"},
+ {{B(Star), R8(5)}, 2, " Star r5"},
+ {{B(Wide), B(Star), R16(136)}, 4, " Star.Wide r136"},
{{B(Wide), B(CallAnyReceiver), R16(134), R16(135), U16(10), U16(177)},
10,
- 0,
"CallAnyReceiver.Wide r134, r135-r144, [177]"},
{{B(ForInPrepare), R8(10), U8(11)},
3,
- 0,
" ForInPrepare r10-r12, [11]"},
{{B(CallRuntime), U16(Runtime::FunctionId::kIsSmi), R8(0), U8(0)},
5,
- 0,
" CallRuntime [IsSmi], r0-r0"},
{{B(Ldar),
- static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
+ static_cast<uint8_t>(Register::FromParameterIndex(2).ToOperand())},
2,
- 3,
" Ldar a1"},
{{B(Wide), B(CreateObjectLiteral), U16(513), U16(1027), U8(165)},
7,
- 0,
"CreateObjectLiteral.Wide [513], [1027], #165"},
{{B(ExtraWide), B(JumpIfNull), U32(123456789)},
6,
- 0,
"JumpIfNull.ExtraWide [123456789]"},
{{B(CallJSRuntime), U8(Context::BOOLEAN_FUNCTION_INDEX), R8(0), U8(0)},
4,
- 0,
" CallJSRuntime [boolean_function], r0-r0"}};
for (size_t i = 0; i < arraysize(cases); ++i) {
@@ -88,8 +76,7 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
// Generate decoded byte output.
std::stringstream actual_ss;
- BytecodeDecoder::Decode(actual_ss, cases[i].bytecode,
- cases[i].parameter_count);
+ BytecodeDecoder::Decode(actual_ss, cases[i].bytecode);
// Compare.
CHECK_EQ(actual_ss.str(), expected_ss.str());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
index d789412760..cd4d8e6850 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
@@ -41,10 +41,10 @@ TEST_F(BytecodeNodeTest, Constructor3) {
TEST_F(BytecodeNodeTest, Constructor4) {
uint32_t operands[] = {0x11, 0x22, 0x33};
- BytecodeNode node(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ BytecodeNode node(Bytecode::kGetNamedProperty, operands[0], operands[1],
operands[2]);
CHECK_EQ(node.operand_count(), 3);
- CHECK_EQ(node.bytecode(), Bytecode::kLdaNamedProperty);
+ CHECK_EQ(node.bytecode(), Bytecode::kGetNamedProperty);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK_EQ(node.operand(2), operands[2]);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 7c20e69b3e..e0ca71e678 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -92,7 +92,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
Initialize(3, 1);
- Register parameter = Register::FromParameterIndex(1, 3);
+ Register parameter = Register::FromParameterIndex(1);
optimizer()->DoLdar(parameter);
CHECK_EQ(write_count(), 0u);
Register temp = NewTemporary();
@@ -155,7 +155,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
Initialize(3, 1);
- Register parameter = Register::FromParameterIndex(1, 3);
+ Register parameter = Register::FromParameterIndex(1);
optimizer()->DoLdar(parameter);
CHECK_EQ(write_count(), 0u);
Register local = Register(0);
@@ -175,7 +175,7 @@ TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
TEST_F(BytecodeRegisterOptimizerTest, SingleTemporaryNotMaterializedForInput) {
Initialize(3, 1);
- Register parameter = Register::FromParameterIndex(1, 3);
+ Register parameter = Register::FromParameterIndex(1);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->DoMov(parameter, temp0);
@@ -193,7 +193,7 @@ TEST_F(BytecodeRegisterOptimizerTest, SingleTemporaryNotMaterializedForInput) {
TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Initialize(3, 1);
- Register parameter = Register::FromParameterIndex(1, 3);
+ Register parameter = Register::FromParameterIndex(1);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 1aadb5a6c9..04c9726772 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -41,10 +41,10 @@ TEST(OperandConversion, Parameters) {
for (size_t p = 0; p < count; p++) {
int parameter_count = parameter_counts[p];
for (int i = 0; i < parameter_count; i++) {
- Register r = Register::FromParameterIndex(i, parameter_count);
+ Register r = Register::FromParameterIndex(i);
uint32_t operand_value = r.ToOperand();
Register s = Register::FromOperand(operand_value);
- CHECK_EQ(i, s.ToParameterIndex(parameter_count));
+ CHECK_EQ(i, s.ToParameterIndex());
}
}
}
@@ -67,7 +67,7 @@ TEST(OperandConversion, RegistersParametersNoOverlap) {
}
for (int i = 0; i < parameter_count; i += 1) {
- Register r = Register::FromParameterIndex(i, parameter_count);
+ Register r = Register::FromParameterIndex(i);
uint32_t operand = r.ToOperand();
uint8_t index = static_cast<uint8_t>(operand);
CHECK_LT(index, operand_count.size());
diff --git a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
index 8b00a40798..01110840b5 100644
--- a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
@@ -27,7 +27,7 @@ class TestTask : public v8::Task {
};
double RealTime() {
- return base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ return base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
diff --git a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
index 2694920329..2f03b17327 100644
--- a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
+++ b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
@@ -56,7 +56,7 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
static void TearDownTestCase() {
TestWithIsolate::TearDownTestCase();
// Restore the original time source.
- RuntimeCallTimer::Now = &base::TimeTicks::HighResolutionNow;
+ RuntimeCallTimer::Now = &base::TimeTicks::Now;
}
RuntimeCallStats* stats() {
@@ -111,10 +111,10 @@ class V8_NODISCARD NativeTimeScope {
public:
NativeTimeScope() {
CHECK_EQ(RuntimeCallTimer::Now, &RuntimeCallStatsTestNow);
- RuntimeCallTimer::Now = &base::TimeTicks::HighResolutionNow;
+ RuntimeCallTimer::Now = &base::TimeTicks::Now;
}
~NativeTimeScope() {
- CHECK_EQ(RuntimeCallTimer::Now, &base::TimeTicks::HighResolutionNow);
+ CHECK_EQ(RuntimeCallTimer::Now, &base::TimeTicks::Now);
RuntimeCallTimer::Now = &RuntimeCallStatsTestNow;
}
};
diff --git a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
index a9fb66dfd4..a7e3ce7980 100644
--- a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
+++ b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
@@ -50,7 +50,7 @@ TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
BytecodeOffset bailout_id(1);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
bailout_id);
@@ -66,7 +66,7 @@ TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
HeapObject code_entry;
osr_cache->Get(OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&code_entry);
- EXPECT_EQ(code_entry, ToCodeT(*code));
+ EXPECT_EQ(code_entry, *code);
Smi osr_offset_entry;
osr_cache->Get(OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&osr_offset_entry);
EXPECT_EQ(osr_offset_entry.value(), bailout_id.ToInt());
@@ -83,7 +83,7 @@ TEST_F(TestWithNativeContext, GrowCodeCache) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
@@ -108,7 +108,7 @@ TEST_F(TestWithNativeContext, GrowCodeCache) {
HeapObject code_entry;
osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&code_entry);
- EXPECT_EQ(code_entry, ToCodeT(*code));
+ EXPECT_EQ(code_entry, *code);
Smi osr_offset_entry;
osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)
->ToSmi(&osr_offset_entry);
@@ -126,7 +126,7 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
@@ -138,7 +138,7 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
GetSource(&source1, 1);
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
- Handle<Code> code1(function1->code(), isolate);
+ Handle<CodeT> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
BytecodeOffset(bailout_id));
@@ -172,7 +172,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
int bailout_id = 0;
// Add max_capacity - 1 entries.
@@ -189,7 +189,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
GetSource(&source1, 1);
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
- Handle<Code> code1(function1->code(), isolate);
+ Handle<CodeT> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
@@ -204,7 +204,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
EXPECT_EQ(object, *shared1);
osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
- EXPECT_EQ(object, ToCodeT(*code1));
+ EXPECT_EQ(object, *code1);
osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
@@ -213,7 +213,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
GetSource(&source2, 2);
Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
- Handle<Code> code2(function2->code(), isolate);
+ Handle<CodeT> code2(function2->code(), isolate);
bailout_id++;
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
BytecodeOffset(bailout_id));
@@ -227,7 +227,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
EXPECT_EQ(object, *shared2);
osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
- EXPECT_EQ(object, ToCodeT(*code2));
+ EXPECT_EQ(object, *code2);
osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
}
@@ -243,7 +243,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
int num_entries = kInitialEntries * 2;
int expected_length = kInitialLength * 2;
@@ -267,7 +267,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
GetSource(&source1, 1);
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
- Handle<Code> code1(function1->code(), isolate);
+ Handle<CodeT> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
@@ -282,7 +282,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
EXPECT_EQ(object, *shared1);
osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
- EXPECT_EQ(object, ToCodeT(*code1));
+ EXPECT_EQ(object, *code1);
osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
@@ -290,7 +290,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
GetSource(&source2, 2);
Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
- Handle<Code> code2(function2->code(), isolate);
+ Handle<CodeT> code2(function2->code(), isolate);
bailout_id++;
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
BytecodeOffset(bailout_id));
@@ -304,7 +304,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
EXPECT_EQ(object, *shared2);
osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
- EXPECT_EQ(object, ToCodeT(*code2));
+ EXPECT_EQ(object, *code2);
osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
}
@@ -320,13 +320,13 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
base::ScopedVector<char> source1(1024);
GetSource(&source1, 1);
Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
- Handle<Code> deopt_code(deopt_function->code(), isolate);
+ Handle<CodeT> deopt_code(deopt_function->code(), isolate);
int num_entries = kInitialEntries * 2;
int expected_length = kInitialLength * 2;
@@ -379,13 +379,13 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
Isolate* isolate = function->GetIsolate();
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<Code> code(function->code(), isolate);
+ Handle<CodeT> code(function->code(), isolate);
base::ScopedVector<char> source1(1024);
GetSource(&source1, 1);
Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
- Handle<Code> deopt_code(deopt_function->code(), isolate);
+ Handle<CodeT> deopt_code(deopt_function->code(), isolate);
int num_entries = kInitialEntries + 1;
int expected_length = kInitialLength * 2;
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 9f6ad56288..d3bec38c40 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -14,6 +14,7 @@
#include "include/v8-local-handle.h"
#include "include/v8-primitive-object.h"
#include "include/v8-template.h"
+#include "include/v8-value-serializer-version.h"
#include "include/v8-value-serializer.h"
#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
@@ -193,6 +194,24 @@ class ValueSerializerTest : public TestWithIsolate {
return result;
}
+ template <typename Lambda>
+ void DecodeTestFutureVersions(std::vector<uint8_t>&& data, Lambda test) {
+ DecodeTestUpToVersion(v8::CurrentValueSerializerFormatVersion(),
+ std::move(data), test);
+ }
+
+ template <typename Lambda>
+ void DecodeTestUpToVersion(int last_version, std::vector<uint8_t>&& data,
+ Lambda test) {
+ // Check that there is at least one version to test.
+ CHECK_LE(data[1], last_version);
+ for (int version = data[1]; version <= last_version; ++version) {
+ data[1] = version;
+ Local<Value> value = DecodeTest(data);
+ test(value);
+ }
+ }
+
Local<Value> DecodeTestForVersion0(const std::vector<uint8_t>& data) {
Local<Context> context = deserialization_context();
Context::Scope scope(context);
@@ -304,17 +323,21 @@ TEST_F(ValueSerializerTest, RoundTripOddball) {
TEST_F(ValueSerializerTest, DecodeOddball) {
// What this code is expected to generate.
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x5F});
- EXPECT_TRUE(value->IsUndefined());
- value = DecodeTest({0xFF, 0x09, 0x54});
- EXPECT_TRUE(value->IsTrue());
- value = DecodeTest({0xFF, 0x09, 0x46});
- EXPECT_TRUE(value->IsFalse());
- value = DecodeTest({0xFF, 0x09, 0x30});
- EXPECT_TRUE(value->IsNull());
+ DecodeTestFutureVersions({0xFF, 0x09, 0x5F}, [](Local<Value> value) {
+ EXPECT_TRUE(value->IsUndefined());
+ });
+ DecodeTestFutureVersions({0xFF, 0x09, 0x54}, [](Local<Value> value) {
+ EXPECT_TRUE(value->IsTrue());
+ });
+ DecodeTestFutureVersions({0xFF, 0x09, 0x46}, [](Local<Value> value) {
+ EXPECT_TRUE(value->IsFalse());
+ });
+ DecodeTestFutureVersions({0xFF, 0x09, 0x30}, [](Local<Value> value) {
+ EXPECT_TRUE(value->IsNull());
+ });
// What v9 of the Blink code generates.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x5F, 0x00});
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x5F, 0x00});
EXPECT_TRUE(value->IsUndefined());
value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x54, 0x00});
EXPECT_TRUE(value->IsTrue());
@@ -443,43 +466,55 @@ TEST_F(ValueSerializerTest, RoundTripNumber) {
TEST_F(ValueSerializerTest, DecodeNumber) {
// 42 zig-zag encoded (signed)
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x49, 0x54});
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ DecodeTestFutureVersions({0xFF, 0x09, 0x49, 0x54}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
// 42 varint encoded (unsigned)
- value = DecodeTest({0xFF, 0x09, 0x55, 0x2A});
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ DecodeTestFutureVersions({0xFF, 0x09, 0x55, 0x2A}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
// 160 zig-zag encoded (signed)
- value = DecodeTest({0xFF, 0x09, 0x49, 0xC0, 0x02});
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ DecodeTestFutureVersions({0xFF, 0x09, 0x49, 0xC0, 0x02},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
// 160 varint encoded (unsigned)
- value = DecodeTest({0xFF, 0x09, 0x55, 0xA0, 0x01});
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ DecodeTestFutureVersions({0xFF, 0x09, 0x55, 0xA0, 0x01},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
#if defined(V8_TARGET_LITTLE_ENDIAN)
// IEEE 754 doubles, little-endian byte order
- value = DecodeTest(
- {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBF});
- ASSERT_TRUE(value->IsNumber());
- EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBF},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ });
// quiet NaN
- value = DecodeTest(
- {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F});
- ASSERT_TRUE(value->IsNumber());
- EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
// signaling NaN
- value = DecodeTest(
- {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x7F});
- ASSERT_TRUE(value->IsNumber());
- EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x7F},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
#endif
// TODO(jbroman): Equivalent test for big-endian machines.
}
@@ -508,46 +543,58 @@ TEST_F(ValueSerializerTest, RoundTripBigInt) {
}
TEST_F(ValueSerializerTest, DecodeBigInt) {
- Local<Value> value = DecodeTest({
- 0xFF, 0x0D, // Version 13
- 0x5A, // BigInt
- 0x08, // Bitfield: sign = false, bytelength = 4
- 0x2A, 0x00, 0x00, 0x00, // Digit: 42
- });
- ASSERT_TRUE(value->IsBigInt());
- ExpectScriptTrue("result === 42n");
-
- value = DecodeTest({
- 0xFF, 0x0D, // Version 13
- 0x7A, // BigIntObject
- 0x11, // Bitfield: sign = true, bytelength = 8
- 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // Digit: 42
- });
- ASSERT_TRUE(value->IsBigIntObject());
- ExpectScriptTrue("result == -42n");
-
- value = DecodeTest({
- 0xFF, 0x0D, // Version 13
- 0x5A, // BigInt
- 0x10, // Bitfield: sign = false, bytelength = 8
- 0xEF, 0xCD, 0xAB, 0x90, 0x78, 0x56, 0x34, 0x12 // Digit(s).
- });
- ExpectScriptTrue("result === 0x1234567890abcdefn");
-
- value = DecodeTest({0xFF, 0x0D, // Version 13
- 0x5A, // BigInt
- 0x17, // Bitfield: sign = true, bytelength = 11
- 0xEF, 0xCD, 0xAB, 0x90, // Digits.
- 0x78, 0x56, 0x34, 0x12, 0x33, 0x44, 0x55});
- ExpectScriptTrue("result === -0x5544331234567890abcdefn");
-
- value = DecodeTest({
- 0xFF, 0x0D, // Version 13
- 0x5A, // BigInt
- 0x02, // Bitfield: sign = false, bytelength = 1
- 0x2A, // Digit: 42
- });
- ExpectScriptTrue("result === 42n");
+ DecodeTestFutureVersions(
+ {
+ 0xFF, 0x0D, // Version 13
+ 0x5A, // BigInt
+ 0x08, // Bitfield: sign = false, bytelength = 4
+ 0x2A, 0x00, 0x00, 0x00, // Digit: 42
+ },
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsBigInt());
+ ExpectScriptTrue("result === 42n");
+ });
+
+ DecodeTestFutureVersions(
+ {
+ 0xFF, 0x0D, // Version 13
+ 0x7A, // BigIntObject
+ 0x11, // Bitfield: sign = true, bytelength = 8
+ 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // Digit: 42
+ },
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsBigIntObject());
+ ExpectScriptTrue("result == -42n");
+ });
+
+ DecodeTestFutureVersions(
+ {
+ 0xFF, 0x0D, // Version 13
+ 0x5A, // BigInt
+ 0x10, // Bitfield: sign = false, bytelength = 8
+ 0xEF, 0xCD, 0xAB, 0x90, 0x78, 0x56, 0x34, 0x12 // Digit(s).
+ },
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result === 0x1234567890abcdefn");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0D, // Version 13
+ 0x5A, // BigInt
+ 0x17, // Bitfield: sign = true, bytelength = 11
+ 0xEF, 0xCD, 0xAB, 0x90, // Digits.
+ 0x78, 0x56, 0x34, 0x12, 0x33, 0x44, 0x55},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result === -0x5544331234567890abcdefn");
+ });
+ DecodeTestFutureVersions(
+ {
+ 0xFF, 0x0D, // Version 13
+ 0x5A, // BigInt
+ 0x02, // Bitfield: sign = false, bytelength = 1
+ 0x2A, // Digit: 42
+ },
+ [this](Local<Value> value) { ExpectScriptTrue("result === 42n"); });
}
// String constants (in UTF-8) used for string encoding tests.
@@ -581,63 +628,83 @@ TEST_F(ValueSerializerTest, RoundTripString) {
TEST_F(ValueSerializerTest, DecodeString) {
// Decoding the strings above from UTF-8.
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x53, 0x00});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
-
- value = DecodeTest({0xFF, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x09, 0x53, 0x00}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
- value =
- DecodeTest({0xFF, 0x09, 0x53, 0x07, 'Q', 'u', 0xC3, 0xA9, 'b', 'e', 'c'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
-
- value = DecodeTest({0xFF, 0x09, 0x53, 0x04, 0xF0, 0x9F, 0x91, 0x8A});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(2, String::Cast(*value)->Length());
- EXPECT_EQ(kEmojiString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x53, 0x07, 'Q', 'u', 0xC3, 0xA9, 'b', 'e', 'c'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+
+ DecodeTestFutureVersions({0xFF, 0x09, 0x53, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
// And from Latin-1 (for the ones that fit).
- value = DecodeTest({0xFF, 0x0A, 0x22, 0x00});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
-
- value = DecodeTest({0xFF, 0x0A, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x0A, 0x22, 0x00}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
- value = DecodeTest({0xFF, 0x0A, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x0A, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0A, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
// And from two-byte strings (endianness dependent).
#if defined(V8_TARGET_LITTLE_ENDIAN)
- value = DecodeTest({0xFF, 0x09, 0x63, 0x00});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
-
- value = DecodeTest({0xFF, 0x09, 0x63, 0x0A, 'H', '\0', 'e', '\0', 'l', '\0',
- 'l', '\0', 'o', '\0'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
-
- value = DecodeTest({0xFF, 0x09, 0x63, 0x0C, 'Q', '\0', 'u', '\0', 0xE9, '\0',
- 'b', '\0', 'e', '\0', 'c', '\0'});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x09, 0x63, 0x00}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
- value = DecodeTest({0xFF, 0x09, 0x63, 0x04, 0x3D, 0xD8, 0x4A, 0xDC});
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(2, String::Cast(*value)->Length());
- EXPECT_EQ(kEmojiString, Utf8Value(value));
+ DecodeTestFutureVersions({0xFF, 0x09, 0x63, 0x0A, 'H', '\0', 'e', '\0', 'l',
+ '\0', 'l', '\0', 'o', '\0'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+
+ DecodeTestFutureVersions({0xFF, 0x09, 0x63, 0x0C, 'Q', '\0', 'u', '\0', 0xE9,
+ '\0', 'b', '\0', 'e', '\0', 'c', '\0'},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+
+ DecodeTestFutureVersions({0xFF, 0x09, 0x63, 0x04, 0x3D, 0xD8, 0x4A, 0xDC},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
#endif
// TODO(jbroman): The same for big-endian systems.
}
@@ -726,58 +793,73 @@ TEST_F(ValueSerializerTest, RoundTripDictionaryObject) {
TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
// Empty object.
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x7B, 0x00, 0x00});
- ASSERT_TRUE(value->IsObject());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
- ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 0");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x7B, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 0");
+ });
// String key.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x49, 0x54, 0x7B, 0x01});
- ASSERT_TRUE(value->IsObject());
- ExpectScriptTrue("result.hasOwnProperty('a')");
- ExpectScriptTrue("result.a === 42");
- ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x54, 0x7B, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('a')");
+ ExpectScriptTrue("result.a === 42");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+ });
// Integer key (treated as a string, but may be encoded differently).
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x54,
- 0x3F, 0x01, 0x53, 0x01, 0x61, 0x7B, 0x01});
- ASSERT_TRUE(value->IsObject());
- ExpectScriptTrue("result.hasOwnProperty('42')");
- ExpectScriptTrue("result[42] === 'a'");
- ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x7B, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('42')");
+ ExpectScriptTrue("result[42] === 'a'");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+ });
// Key order must be preserved.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x78, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01, 0x53, 0x01,
- 0x79, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x49, 0x06, 0x7B, 0x03});
- ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === 'x,y,a'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x78, 0x3F, 0x01,
+ 0x49, 0x02, 0x3F, 0x01, 0x53, 0x01, 0x79, 0x3F, 0x01, 0x49, 0x04, 0x3F,
+ 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x7B, 0x03},
+ [this](Local<Value> value) {
+ ExpectScriptTrue(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'");
+ });
// A harder case of enumeration order.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x02,
- 0x3F, 0x01, 0x49, 0x00, 0x3F, 0x01, 0x55, 0xFE, 0xFF,
- 0xFF, 0xFF, 0x0F, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
- 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01,
- 0x53, 0x0A, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36, 0x37,
- 0x32, 0x39, 0x35, 0x3F, 0x01, 0x49, 0x02, 0x7B, 0x04});
- ExpectScriptTrue(
- "Object.getOwnPropertyNames(result).toString() === "
- "'1,4294967294,a,4294967295'");
- ExpectScriptTrue("result.a === 2");
- ExpectScriptTrue("result[0xFFFFFFFF] === 1");
- ExpectScriptTrue("result[0xFFFFFFFE] === 3");
- ExpectScriptTrue("result[1] === 0");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01,
+ 0x49, 0x00, 0x3F, 0x01, 0x55, 0xFE, 0xFF, 0xFF, 0xFF, 0x0F, 0x3F,
+ 0x01, 0x49, 0x06, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49,
+ 0x04, 0x3F, 0x01, 0x53, 0x0A, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
+ 0x37, 0x32, 0x39, 0x35, 0x3F, 0x01, 0x49, 0x02, 0x7B, 0x04},
+ [this](Local<Value> value) {
+ ExpectScriptTrue(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'");
+ ExpectScriptTrue("result.a === 2");
+ ExpectScriptTrue("result[0xFFFFFFFF] === 1");
+ ExpectScriptTrue("result[0xFFFFFFFE] === 3");
+ ExpectScriptTrue("result[1] === 0");
+ });
// This detects a fairly subtle case: the object itself must be in the map
// before its properties are deserialized, so that references to it can be
// resolved.
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x04, 0x73,
- 0x65, 0x6C, 0x66, 0x3F, 0x01, 0x5E, 0x00, 0x7B, 0x01, 0x00});
- ASSERT_TRUE(value->IsObject());
- ExpectScriptTrue("result === result.self");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x04, 0x73,
+ 0x65, 0x6C, 0x66, 0x3F, 0x01, 0x5E, 0x00, 0x7B, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result === result.self");
+ });
}
TEST_F(ValueSerializerTest, InvalidDecodeObjectWithInvalidKeyType) {
@@ -986,85 +1068,100 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
TEST_F(ValueSerializerTest, DecodeArray) {
// A simple array of integers.
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x05, 0x3F, 0x01, 0x49, 0x02,
- 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
- 0x49, 0x08, 0x3F, 0x01, 0x49, 0x0A, 0x24, 0x00, 0x05, 0x00});
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(5u, Array::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Array.prototype");
- ExpectScriptTrue("result.toString() === '1,2,3,4,5'");
-
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x05, 0x3F, 0x01, 0x49, 0x02,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
+ 0x49, 0x08, 0x3F, 0x01, 0x49, 0x0A, 0x24, 0x00, 0x05, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Array.prototype");
+ ExpectScriptTrue("result.toString() === '1,2,3,4,5'");
+ });
// A long (sparse) array.
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0xE8, 0x07, 0x3F, 0x01, 0x49, 0x54, 0x40, 0x01, 0xE8, 0x07});
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1000u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result[500] === 42");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0xE8, 0x07, 0x3F, 0x01, 0x49, 0x54, 0x40, 0x01, 0xE8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[500] === 42");
+ });
// Duplicate reference.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x6F,
- 0x7B, 0x00, 0x3F, 0x02, 0x5E, 0x01, 0x24, 0x00, 0x02});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result[0] === result[1]");
-
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F,
+ 0x02, 0x5E, 0x01, 0x24, 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result[1]");
+ });
// Duplicate reference in a sparse array.
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F, 0x02, 0x49, 0xE8,
- 0x07, 0x3F, 0x02, 0x5E, 0x01, 0x40, 0x02, 0xE8, 0x07, 0x00});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- ExpectScriptTrue("typeof result[1] === 'object'");
- ExpectScriptTrue("result[1] === result[500]");
-
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F, 0x02, 0x49, 0xE8,
+ 0x07, 0x3F, 0x02, 0x5E, 0x01, 0x40, 0x02, 0xE8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[1] === 'object'");
+ ExpectScriptTrue("result[1] === result[500]");
+ });
// Self reference.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x01, 0x3F, 0x01, 0x5E,
- 0x00, 0x24, 0x00, 0x01, 0x00});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result[0] === result");
-
+ DecodeTestFutureVersions({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x01, 0x3F, 0x01,
+ 0x5E, 0x00, 0x24, 0x00, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result");
+ });
// Self reference in a sparse array.
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0x8E, 0x08, 0x3F, 0x01, 0x5E, 0x00, 0x40, 0x01, 0xE8, 0x07});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result[519] === result");
-
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x8E, 0x08, 0x3F, 0x01, 0x5E, 0x00, 0x40, 0x01, 0xE8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[519] === result");
+ });
// Array with additional properties.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01,
- 0x49, 0x02, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01,
- 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53,
- 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result.toString() === '1,2'");
- ExpectScriptTrue("result.foo === 'bar'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x49, 0x02, 0x3F,
+ 0x01, 0x49, 0x04, 0x3F, 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F,
+ 0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === '1,2'");
+ ExpectScriptTrue("result.foo === 'bar'");
+ });
// Sparse array with additional properties.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01,
- 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53, 0x03,
- 0x62, 0x61, 0x72, 0x40, 0x01, 0xE8, 0x07, 0x00});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- ExpectScriptTrue("result.toString() === ','.repeat(999)");
- ExpectScriptTrue("result.foo === 'bar'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01,
+ 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53, 0x03,
+ 0x62, 0x61, 0x72, 0x40, 0x01, 0xE8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === ','.repeat(999)");
+ ExpectScriptTrue("result.foo === 'bar'");
+ });
// The distinction between holes and undefined elements must be maintained.
// Note that since the previous output from Chrome fails this test, an
// encoding using the sparse format was constructed instead.
- value =
- DecodeTest({0xFF, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5F, 0x40, 0x01, 0x02});
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- ExpectScriptTrue("typeof result[0] === 'undefined'");
- ExpectScriptTrue("typeof result[1] === 'undefined'");
- ExpectScriptTrue("!result.hasOwnProperty(0)");
- ExpectScriptTrue("result.hasOwnProperty(1)");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5F, 0x40, 0x01, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[0] === 'undefined'");
+ ExpectScriptTrue("typeof result[1] === 'undefined'");
+ ExpectScriptTrue("!result.hasOwnProperty(0)");
+ ExpectScriptTrue("result.hasOwnProperty(1)");
+ });
}
TEST_F(ValueSerializerTest, DecodeInvalidOverLargeArray) {
@@ -1242,19 +1339,25 @@ TEST_F(ValueSerializerTest, RoundTripDenseArrayContainingUndefined) {
ExpectScriptTrue("result[0] === undefined");
}
-TEST_F(ValueSerializerTest, DecodeDenseArrayContainingUndefined) {
+TEST_F(ValueSerializerTest,
+ DecodeDenseArrayContainingUndefinedBackwardCompatibility) {
// In previous versions, "undefined" in a dense array signified absence of the
// element (for compatibility). In new versions, it has a separate encoding.
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01});
- ExpectScriptTrue("!(0 in result)");
+ DecodeTestUpToVersion(
+ 10, {0xFF, 0x09, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
+ [this](Local<Value> value) { ExpectScriptTrue("!(0 in result)"); });
+}
- value = DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01});
- ExpectScriptTrue("0 in result");
- ExpectScriptTrue("result[0] === undefined");
+TEST_F(ValueSerializerTest, DecodeDenseArrayContainingUndefined) {
+ DecodeTestFutureVersions({0xFF, 0x0B, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("0 in result");
+ ExpectScriptTrue("result[0] === undefined");
+ });
- value = DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x2D, 0x24, 0x00, 0x01});
- ExpectScriptTrue("!(0 in result)");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0B, 0x41, 0x01, 0x2D, 0x24, 0x00, 0x01},
+ [this](Local<Value> value) { ExpectScriptTrue("!(0 in result)"); });
}
TEST_F(ValueSerializerTest, RoundTripDate) {
@@ -1279,44 +1382,64 @@ TEST_F(ValueSerializerTest, RoundTripDate) {
TEST_F(ValueSerializerTest, DecodeDate) {
Local<Value> value;
#if defined(V8_TARGET_LITTLE_ENDIAN)
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
- 0x80, 0x84, 0x2E, 0x41, 0x00});
- ASSERT_TRUE(value->IsDate());
- EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45,
- 0x27, 0x89, 0x87, 0xC2, 0x00});
- ASSERT_TRUE(value->IsDate());
- ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xF8, 0x7F, 0x00});
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84, 0x2E,
+ 0x41, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89, 0x87,
+ 0xC2, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8,
+ 0x7F, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
#else
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x41, 0x2E, 0x84, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x00});
- ASSERT_TRUE(value->IsDate());
- EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0xC2, 0x87, 0x89, 0x27,
- 0x45, 0x20, 0x00, 0x00, 0x00});
- ASSERT_TRUE(value->IsDate());
- ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x7F, 0xF8, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00});
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0x41, 0x2E, 0x84, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0xC2, 0x87, 0x89, 0x27, 0x45, 0x20, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x44, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
#endif
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
- 0x01, 0x61, 0x3F, 0x01, 0x44, 0x00, 0x20, 0x39,
- 0x50, 0x37, 0x6A, 0x75, 0x42, 0x3F, 0x02, 0x53,
- 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
- ExpectScriptTrue("result.a instanceof Date");
- ExpectScriptTrue("result.a === result.b");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
+ 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6A, 0x75, 0x42, 0x3F,
+ 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof Date");
+ ExpectScriptTrue("result.a === result.b");
+ });
}
TEST_F(ValueSerializerTest, RoundTripValueObjects) {
@@ -1366,72 +1489,100 @@ TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
}
TEST_F(ValueSerializerTest, DecodeValueObjects) {
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x79, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
- ExpectScriptTrue("result.valueOf() === true");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x78, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
- ExpectScriptTrue("result.valueOf() === false");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
- 0x01, 0x61, 0x3F, 0x01, 0x79, 0x3F, 0x02, 0x53,
- 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
- ExpectScriptTrue("result.a instanceof Boolean");
- ExpectScriptTrue("result.a === result.b");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x79, 0x00}, [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === true");
+ });
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x78, 0x00}, [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === false");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x79, 0x3F, 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof Boolean");
+ ExpectScriptTrue("result.a === result.b");
+ });
#if defined(V8_TARGET_LITTLE_ENDIAN)
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x45, 0xC0, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
- ExpectScriptTrue("result.valueOf() === -42");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xF8, 0x7F, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
- ExpectScriptTrue("Number.isNaN(result.valueOf())");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
+ 0xC0, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("result.valueOf() === -42");
+ });
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8,
+ 0x7F, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("Number.isNaN(result.valueOf())");
+ });
#else
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0xC0, 0x45, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
- ExpectScriptTrue("result.valueOf() === -42");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x7F, 0xF8, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
- ExpectScriptTrue("Number.isNaN(result.valueOf())");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0xC0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("result.valueOf() === -42");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("Number.isNaN(result.valueOf())");
+ });
#endif
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
- 0x01, 0x61, 0x3F, 0x01, 0x6E, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x18, 0x40, 0x3F, 0x02, 0x53,
- 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
- ExpectScriptTrue("result.a instanceof Number");
- ExpectScriptTrue("result.a === result.b");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x07, 0x51, 0x75, 0xC3,
- 0xA9, 0x62, 0x65, 0x63, 0x00});
- ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
- ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
- ExpectScriptTrue("result.length === 6");
-
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x04, 0xF0, 0x9F, 0x91, 0x8A});
- ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
- ExpectScriptTrue("result.valueOf() === '\\ud83d\\udc4a'");
- ExpectScriptTrue("result.length === 2");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x73, 0x00, 0x3F, 0x02, 0x53, 0x01,
- 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
- ExpectScriptTrue("result.a instanceof String");
- ExpectScriptTrue("result.a === result.b");
-
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
+ 0x01, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3F,
+ 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof Number");
+ ExpectScriptTrue("result.a === result.b");
+ });
+
+ DecodeTestUpToVersion(
+ 11,
+ {0xFF, 0x09, 0x3F, 0x00, 0x73, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62, 0x65,
+ 0x63, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
+ ExpectScriptTrue("result.length === 6");
+ });
+
+ DecodeTestUpToVersion(
+ 11, {0xFF, 0x09, 0x3F, 0x00, 0x73, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === '\\ud83d\\udc4a'");
+ ExpectScriptTrue("result.length === 2");
+ });
+
+ DecodeTestUpToVersion(11,
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x73, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof String");
+ ExpectScriptTrue("result.a === result.b");
+ });
// String object containing a Latin-1 string.
- value =
- DecodeTest({0xFF, 0x0C, 0x73, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'});
- ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
- ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
- ExpectScriptTrue("result.length === 6");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0C, 0x73, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
+ ExpectScriptTrue("result.length === 6");
+ });
}
TEST_F(ValueSerializerTest, RoundTripRegExp) {
@@ -1454,49 +1605,63 @@ TEST_F(ValueSerializerTest, RoundTripRegExp) {
}
TEST_F(ValueSerializerTest, DecodeRegExp) {
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x01});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
- ExpectScriptTrue("result.toString() === '/foo/g'");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x07, 0x51, 0x75, 0xC3,
- 0xA9, 0x62, 0x65, 0x63, 0x02});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
-
- value = DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x04, 0xF0, 0x9F, 0x91, 0x8A, 0x11, 0x00});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("result.toString() === '/\\ud83d\\udc4a/gu'");
-
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61,
- 0x3F, 0x01, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x03, 0x3F, 0x02,
- 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
- ExpectScriptTrue("result.a instanceof RegExp");
- ExpectScriptTrue("result.a === result.b");
-
+ DecodeTestUpToVersion(
+ 11, {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/g'");
+ });
+ DecodeTestUpToVersion(
+ 11,
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62, 0x65,
+ 0x63, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
+ });
+ DecodeTestUpToVersion(
+ 11,
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x04, 0xF0, 0x9F, 0x91, 0x8A, 0x11, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/\\ud83d\\udc4a/gu'");
+ });
+
+ DecodeTestUpToVersion(
+ 11, {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61,
+ 0x3F, 0x01, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x03, 0x3F, 0x02,
+ 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof RegExp");
+ ExpectScriptTrue("result.a === result.b");
+ });
// RegExp containing a Latin-1 string.
- value = DecodeTest(
- {0xFF, 0x0C, 0x52, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c', 0x02});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0C, 0x52, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c', 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
+ });
}
// Tests that invalid flags are not accepted by the deserializer.
TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x1F});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
- ExpectScriptTrue("result.toString() === '/foo/gimuy'");
-
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x3F});
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
- ExpectScriptTrue("result.toString() === '/foo/gimsuy'");
+ DecodeTestUpToVersion(
+ 11, {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x1F},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/gimuy'");
+ });
+
+ DecodeTestUpToVersion(
+ 11, {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x3F},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/gimsuy'");
+ });
InvalidDecodeTest(
{0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0xFF});
@@ -1510,10 +1675,14 @@ TEST_F(ValueSerializerTest, DecodeLinearRegExp) {
0x03, 0x66, 0x6F, 0x6F, 0x6D};
i::FLAG_enable_experimental_regexp_engine = true;
- Local<Value> value = DecodeTest(regexp_encoding);
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
- ExpectScriptTrue("result.toString() === '/foo/glmsy'");
+ // DecodeTestUpToVersion will overwrite the version number in the data but
+ // it's fine.
+ DecodeTestUpToVersion(
+ 11, std::move(regexp_encoding), [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/glmsy'");
+ });
i::FLAG_enable_experimental_regexp_engine = false;
InvalidDecodeTest(regexp_encoding);
@@ -1526,10 +1695,12 @@ TEST_F(ValueSerializerTest, DecodeHasIndicesRegExp) {
std::vector<uint8_t> regexp_encoding = {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03,
0x66, 0x6F, 0x6F, 0xAD, 0x01};
- Local<Value> value = DecodeTest(regexp_encoding);
- ASSERT_TRUE(value->IsRegExp());
- ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
- ExpectScriptTrue("result.toString() === '/foo/dgmsy'");
+ DecodeTestUpToVersion(
+ 11, std::move(regexp_encoding), [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/dgmsy'");
+ });
}
TEST_F(ValueSerializerTest, RoundTripMap) {
@@ -1554,28 +1725,34 @@ TEST_F(ValueSerializerTest, RoundTripMap) {
}
TEST_F(ValueSerializerTest, DecodeMap) {
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x54, 0x3F,
- 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3A, 0x02});
- ASSERT_TRUE(value->IsMap());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Map.prototype");
- ExpectScriptTrue("result.size === 1");
- ExpectScriptTrue("result.get(42) === 'foo'");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x5E, 0x00,
- 0x3F, 0x01, 0x5E, 0x00, 0x3A, 0x02, 0x00});
- ASSERT_TRUE(value->IsMap());
- ExpectScriptTrue("result.size === 1");
- ExpectScriptTrue("result.get(result) === result");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
+ 0x03, 0x66, 0x6F, 0x6F, 0x3A, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Map.prototype");
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(42) === 'foo'");
+ });
+
+ DecodeTestFutureVersions({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x5E,
+ 0x00, 0x3F, 0x01, 0x5E, 0x00, 0x3A, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(result) === result");
+ });
// Iteration order must be preserved.
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x02, 0x3F,
- 0x01, 0x49, 0x00, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
- 0x49, 0x00, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x00,
- 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x00, 0x3A, 0x08});
- ASSERT_TRUE(value->IsMap());
- ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x02, 0x3F,
+ 0x01, 0x49, 0x00, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x00, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x00,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x00, 0x3A, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
+ });
}
TEST_F(ValueSerializerTest, RoundTripMapWithTrickyGetters) {
@@ -1630,27 +1807,33 @@ TEST_F(ValueSerializerTest, RoundTripSet) {
}
TEST_F(ValueSerializerTest, DecodeSet) {
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x54, 0x3F,
- 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x2C, 0x02});
- ASSERT_TRUE(value->IsSet());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Set.prototype");
- ExpectScriptTrue("result.size === 2");
- ExpectScriptTrue("result.has(42)");
- ExpectScriptTrue("result.has('foo')");
-
- value = DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x5E, 0x00, 0x2C, 0x01, 0x00});
- ASSERT_TRUE(value->IsSet());
- ExpectScriptTrue("result.size === 1");
- ExpectScriptTrue("result.has(result)");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
+ 0x03, 0x66, 0x6F, 0x6F, 0x2C, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Set.prototype");
+ ExpectScriptTrue("result.size === 2");
+ ExpectScriptTrue("result.has(42)");
+ ExpectScriptTrue("result.has('foo')");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x5E, 0x00, 0x2C, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.has(result)");
+ });
// Iteration order must be preserved.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49,
- 0x02, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
- 0x49, 0x06, 0x3F, 0x01, 0x49, 0x04, 0x2C, 0x04});
- ASSERT_TRUE(value->IsSet());
- ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x04, 0x2C, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
+ });
}
TEST_F(ValueSerializerTest, RoundTripSetWithTrickyGetters) {
@@ -1696,22 +1879,30 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
}
TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00});
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
- ExpectScriptTrue("Object.getPrototypeOf(result) === ArrayBuffer.prototype");
-
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x03, 0x00, 0x80, 0xFF, 0x00});
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
- ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x42, 0x00, 0x3F, 0x02, 0x53, 0x01,
- 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
- ExpectScriptTrue("result.a instanceof ArrayBuffer");
- ExpectScriptTrue("result.a === result.b");
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00}, [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ArrayBuffer.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x42, 0x03, 0x00, 0x80, 0xFF, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x42, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.a instanceof ArrayBuffer");
+ ExpectScriptTrue("result.a === result.b");
+ });
}
TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
@@ -1859,92 +2050,272 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Check that the right type comes out the other side for every kind of typed
- // array.
- Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42,
- 0x02, 0x00, 0x00, 0x56, 0x42, 0x00, 0x02});
- ASSERT_TRUE(value->IsUint8Array());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Uint8Array.prototype");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00,
- 0x00, 0x56, 0x62, 0x00, 0x02});
- ASSERT_TRUE(value->IsInt8Array());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Int8Array.prototype");
+ // array (version 14 and above).
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42,
+ 0x00, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint8Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56, 0x62,
+ 0x00, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int8Array.prototype");
+ });
#if defined(V8_TARGET_LITTLE_ENDIAN)
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x56, 0x57, 0x00, 0x04});
- ASSERT_TRUE(value->IsUint16Array());
- EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Uint16Array.prototype");
-
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x56, 0x77, 0x00, 0x04});
- ASSERT_TRUE(value->IsInt16Array());
- EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Int16Array.prototype");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x57, 0x00, 0x04, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint16Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x77, 0x00, 0x04, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int16Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint32Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int32Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Float32Array.prototype");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat64Array());
+ EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Float64Array.prototype");
+ });
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08});
- ASSERT_TRUE(value->IsUint32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Uint32Array.prototype");
+#endif // V8_TARGET_LITTLE_ENDIAN
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08});
- ASSERT_TRUE(value->IsInt32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Int32Array.prototype");
+ // Check that values of various kinds are suitably preserved.
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01, 0x80, 0xFF, 0x56,
+ 0x42, 0x00, 0x03, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '1,128,255'");
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x80, 0x56, 0x77, 0x00, 0x06, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '0,256,-32768'");
+ });
+
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
+ 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '0,-0.5,NaN,Infinity'");
+ });
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08});
- ASSERT_TRUE(value->IsFloat32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Float32Array.prototype");
+#endif // V8_TARGET_LITTLE_ENDIAN
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10});
- ASSERT_TRUE(value->IsFloat64Array());
- EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- ExpectScriptTrue("Object.getPrototypeOf(result) === Float64Array.prototype");
+ // Array buffer views sharing a buffer should do so on the other side.
+ // Similarly, multiple references to the same typed array should be resolved.
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3F,
+ 0x01, 0x3F, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x42, 0x00, 0x20, 0x00, 0x3F, 0x03, 0x53, 0x04, 0x75, 0x38,
+ 0x5F, 0x32, 0x3F, 0x03, 0x5E, 0x02, 0x3F, 0x03, 0x53, 0x03, 0x66, 0x33,
+ 0x32, 0x3F, 0x03, 0x3F, 0x03, 0x5E, 0x01, 0x56, 0x66, 0x04, 0x14, 0x00,
+ 0x3F, 0x04, 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04, 0x00},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.u8 instanceof Uint8Array");
+ ExpectScriptTrue("result.u8 === result.u8_2");
+ ExpectScriptTrue("result.f32 instanceof Float32Array");
+ ExpectScriptTrue("result.u8.buffer === result.f32.buffer");
+ ExpectScriptTrue("result.f32.byteOffset === 4");
+ ExpectScriptTrue("result.f32.length === 5");
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeTypedArrayBackwardsCompatiblity) {
+ // Check that we can still decode TypedArrays in the version <= 13 format.
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42,
+ 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint8Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56, 0x62,
+ 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int8Array.prototype");
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x57, 0x00, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint16Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x77, 0x00, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int16Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13, {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Uint32Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13, {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Int32Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13, {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Float32Array.prototype");
+ });
+
+ DecodeTestUpToVersion(
+ 13, {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat64Array());
+ EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === Float64Array.prototype");
+ });
#endif // V8_TARGET_LITTLE_ENDIAN
// Check that values of various kinds are suitably preserved.
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01,
- 0x80, 0xFF, 0x56, 0x42, 0x00, 0x03});
- ExpectScriptTrue("result.toString() === '1,128,255'");
+ DecodeTestUpToVersion(13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01,
+ 0x80, 0xFF, 0x56, 0x42, 0x00, 0x03},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '1,128,255'");
+ });
#if defined(V8_TARGET_LITTLE_ENDIAN)
- value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00,
- 0x00, 0x00, 0x01, 0x00, 0x80, 0x56, 0x77, 0x00, 0x06});
- ExpectScriptTrue("result.toString() === '0,256,-32768'");
-
- value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
- 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10});
- ExpectScriptTrue("result.toString() === '0,-0.5,NaN,Infinity'");
-
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x80, 0x56, 0x77, 0x00, 0x06},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '0,256,-32768'");
+ });
+
+ DecodeTestUpToVersion(
+ 13, {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
+ 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.toString() === '0,-0.5,NaN,Infinity'");
+ });
#endif // V8_TARGET_LITTLE_ENDIAN
// Array buffer views sharing a buffer should do so on the other side.
// Similarly, multiple references to the same typed array should be resolved.
- value = DecodeTest(
+ DecodeTestUpToVersion(
+ 13,
{0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3F,
0x01, 0x3F, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1952,13 +2323,15 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x56, 0x42, 0x00, 0x20, 0x00, 0x3F, 0x03, 0x53, 0x04, 0x75, 0x38,
0x5F, 0x32, 0x3F, 0x03, 0x5E, 0x02, 0x3F, 0x03, 0x53, 0x03, 0x66, 0x33,
0x32, 0x3F, 0x03, 0x3F, 0x03, 0x5E, 0x01, 0x56, 0x66, 0x04, 0x14, 0x00,
- 0x3F, 0x04, 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04});
- ExpectScriptTrue("result.u8 instanceof Uint8Array");
- ExpectScriptTrue("result.u8 === result.u8_2");
- ExpectScriptTrue("result.f32 instanceof Float32Array");
- ExpectScriptTrue("result.u8.buffer === result.f32.buffer");
- ExpectScriptTrue("result.f32.byteOffset === 4");
- ExpectScriptTrue("result.f32.length === 5");
+ 0x3F, 0x04, 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04},
+ [this](Local<Value> value) {
+ ExpectScriptTrue("result.u8 instanceof Uint8Array");
+ ExpectScriptTrue("result.u8 === result.u8_2");
+ ExpectScriptTrue("result.f32 instanceof Float32Array");
+ ExpectScriptTrue("result.u8.buffer === result.f32.buffer");
+ ExpectScriptTrue("result.f32.byteOffset === 4");
+ ExpectScriptTrue("result.f32.length === 5");
+ });
}
TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
@@ -1986,7 +2359,8 @@ TEST_F(ValueSerializerTest, RoundTripDataView) {
EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
- // TODO(v8:11111): Use API functions for testing these, once they're exposed
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed
// via the API.
i::Handle<i::JSDataView> i_dv = v8::Utils::OpenHandle(DataView::Cast(*value));
EXPECT_EQ(false, i_dv->is_length_tracking());
@@ -1994,14 +2368,32 @@ TEST_F(ValueSerializerTest, RoundTripDataView) {
}
TEST_F(ValueSerializerTest, DecodeDataView) {
- Local<Value> value =
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x56, 0x3F, 0x01, 0x02, 0x00});
- ASSERT_TRUE(value->IsDataView());
- EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
- EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
- EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
- ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0E, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x3F, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === DataView.prototype");
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeDataViewBackwardsCompatibility) {
+ DecodeTestUpToVersion(
+ 13,
+ {0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x3F, 0x01, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === DataView.prototype");
+ });
}
TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty1) {
@@ -2200,7 +2592,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(*input_buffer());
input = Utils::Convert<i::WasmMemoryObject, Value>(
- i::WasmMemoryObject::New(i_isolate, obj, kMaxPages));
+ i::WasmMemoryObject::New(i_isolate, obj, kMaxPages).ToHandleChecked());
}
RoundTripTest(input);
ExpectScriptTrue("result instanceof WebAssembly.Memory");
@@ -2453,9 +2845,11 @@ TEST_F(ValueSerializerTestWithHostObject, DecodeSimpleHostObject) {
EXPECT_TRUE(ReadExampleHostObjectTag());
return NewHostObject(deserialization_context(), 0, nullptr);
}));
- DecodeTest({0xFF, 0x0D, 0x5C, kExampleHostObjectTag});
- ExpectScriptTrue(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
+ DecodeTestFutureVersions(
+ {0xFF, 0x0D, 0x5C, kExampleHostObjectTag}, [this](Local<Value> value) {
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
+ });
}
class ValueSerializerTestWithHostArrayBufferView
diff --git a/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
index e1e70f5f8d..80c6e6f4dd 100644
--- a/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
+++ b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
@@ -64,7 +64,7 @@ TEST_F(BackingStoreTest, CopyWasmMemory) {
EXPECT_EQ(1 * wasm::kWasmPageSize, bs1->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, bs1->byte_capacity());
- auto bs2 = bs1->CopyWasmMemory(isolate(), 3);
+ auto bs2 = bs1->CopyWasmMemory(isolate(), 3, 3);
EXPECT_TRUE(bs2->is_wasm_memory());
EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_length());
EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_capacity());
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 3cef764855..8437ac0acb 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -21,8 +21,8 @@ class DefaultPlatformEnvironment final : public ::testing::Environment {
0, v8::platform::IdleTaskSupport::kEnabled);
ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::InitializePlatform(platform_.get());
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- ASSERT_TRUE(v8::V8::InitializeVirtualMemoryCage());
+#ifdef V8_SANDBOX
+ ASSERT_TRUE(v8::V8::InitializeSandbox());
#endif
cppgc::InitializeProcess(platform_->GetPageAllocator());
v8::V8::Initialize();
diff --git a/deps/v8/test/unittests/sandbox/sandbox-unittest.cc b/deps/v8/test/unittests/sandbox/sandbox-unittest.cc
new file mode 100644
index 0000000000..8818d197c4
--- /dev/null
+++ b/deps/v8/test/unittests/sandbox/sandbox-unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/sandbox/sandbox.h"
+
+#include <vector>
+
+#include "src/base/virtual-address-space.h"
+#include "test/unittests/test-utils.h"
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+namespace v8 {
+namespace internal {
+
+TEST(SandboxTest, Initialization) {
+ base::VirtualAddressSpace vas;
+
+ Sandbox sandbox;
+
+ EXPECT_FALSE(sandbox.is_initialized());
+ EXPECT_FALSE(sandbox.is_disabled());
+ EXPECT_FALSE(sandbox.is_partially_reserved());
+ EXPECT_EQ(sandbox.size(), 0UL);
+
+ EXPECT_TRUE(sandbox.Initialize(&vas));
+
+ EXPECT_TRUE(sandbox.is_initialized());
+ EXPECT_NE(sandbox.base(), 0UL);
+ EXPECT_GT(sandbox.size(), 0UL);
+
+ sandbox.TearDown();
+
+ EXPECT_FALSE(sandbox.is_initialized());
+}
+
+TEST(SandboxTest, InitializationWithSize) {
+ base::VirtualAddressSpace vas;
+ // This test only works if virtual memory subspaces can be allocated.
+ if (!vas.CanAllocateSubspaces()) return;
+
+ Sandbox sandbox;
+ size_t size = kSandboxMinimumSize;
+ const bool use_guard_regions = false;
+ EXPECT_TRUE(sandbox.Initialize(&vas, size, use_guard_regions));
+
+ EXPECT_TRUE(sandbox.is_initialized());
+ EXPECT_FALSE(sandbox.is_partially_reserved());
+ EXPECT_EQ(sandbox.size(), size);
+
+ sandbox.TearDown();
+}
+
+TEST(SandboxTest, PartiallyReservedSandboxInitialization) {
+ base::VirtualAddressSpace vas;
+ Sandbox sandbox;
+ // Total size of the sandbox.
+ size_t size = kSandboxSize;
+ // Size of the virtual memory that is actually reserved at the start of the
+ // sandbox.
+ size_t reserved_size = 2 * vas.allocation_granularity();
+ EXPECT_TRUE(
+ sandbox.InitializeAsPartiallyReservedSandbox(&vas, size, reserved_size));
+
+ EXPECT_TRUE(sandbox.is_initialized());
+ EXPECT_TRUE(sandbox.is_partially_reserved());
+ EXPECT_NE(sandbox.base(), 0UL);
+ EXPECT_EQ(sandbox.size(), size);
+
+ sandbox.TearDown();
+
+ EXPECT_FALSE(sandbox.is_initialized());
+}
+
+TEST(SandboxTest, Contains) {
+ base::VirtualAddressSpace vas;
+ Sandbox sandbox;
+ EXPECT_TRUE(sandbox.Initialize(&vas));
+
+ Address base = sandbox.base();
+ size_t size = sandbox.size();
+ base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
+
+ EXPECT_TRUE(sandbox.Contains(base));
+ EXPECT_TRUE(sandbox.Contains(base + size - 1));
+ for (int i = 0; i < 10; i++) {
+ size_t offset = rng.NextInt64() % size;
+ EXPECT_TRUE(sandbox.Contains(base + offset));
+ }
+
+ EXPECT_FALSE(sandbox.Contains(base - 1));
+ EXPECT_FALSE(sandbox.Contains(base + size));
+ for (int i = 0; i < 10; i++) {
+ Address addr = rng.NextInt64();
+ if (addr < base || addr >= base + size) {
+ EXPECT_FALSE(sandbox.Contains(addr));
+ }
+ }
+
+ sandbox.TearDown();
+}
+
+void TestPageAllocationInSandbox(Sandbox& sandbox) {
+ const size_t kAllocatinSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
+ constexpr int kNumAllocations = arraysize(kAllocatinSizesInPages);
+
+ VirtualAddressSpace* vas = sandbox.address_space();
+ size_t allocation_granularity = vas->allocation_granularity();
+ std::vector<Address> allocations;
+ for (int i = 0; i < kNumAllocations; i++) {
+ size_t length = allocation_granularity * kAllocatinSizesInPages[i];
+ size_t alignment = allocation_granularity;
+ Address ptr = vas->AllocatePages(VirtualAddressSpace::kNoHint, length,
+ alignment, PagePermissions::kNoAccess);
+ EXPECT_NE(ptr, kNullAddress);
+ EXPECT_TRUE(sandbox.Contains(ptr));
+ allocations.push_back(ptr);
+ }
+
+ for (int i = 0; i < kNumAllocations; i++) {
+ size_t length = allocation_granularity * kAllocatinSizesInPages[i];
+ vas->FreePages(allocations[i], length);
+ }
+}
+
+TEST(SandboxTest, PageAllocation) {
+ base::VirtualAddressSpace vas;
+ Sandbox sandbox;
+ EXPECT_TRUE(sandbox.Initialize(&vas));
+
+ TestPageAllocationInSandbox(sandbox);
+
+ sandbox.TearDown();
+}
+
+TEST(SandboxTest, PartiallyReservedSandboxPageAllocation) {
+ base::VirtualAddressSpace vas;
+ Sandbox sandbox;
+ size_t size = kSandboxSize;
+ // Only reserve two pages so the test will allocate memory inside and outside
+ // of the reserved region.
+ size_t reserved_size = 2 * vas.allocation_granularity();
+ EXPECT_TRUE(
+ sandbox.InitializeAsPartiallyReservedSandbox(&vas, size, reserved_size));
+
+ TestPageAllocationInSandbox(sandbox);
+
+ sandbox.TearDown();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SANDBOX_IS_AVAILABLE
diff --git a/deps/v8/test/unittests/security/virtual-memory-cage-unittest.cc b/deps/v8/test/unittests/security/virtual-memory-cage-unittest.cc
deleted file mode 100644
index 8ee4381b0f..0000000000
--- a/deps/v8/test/unittests/security/virtual-memory-cage-unittest.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "src/base/virtual-address-space.h"
-#include "src/security/vm-cage.h"
-#include "test/unittests/test-utils.h"
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-namespace v8 {
-namespace internal {
-
-TEST(VirtualMemoryCageTest, Initialization) {
- base::VirtualAddressSpace vas;
-
- V8VirtualMemoryCage cage;
-
- EXPECT_FALSE(cage.is_initialized());
- EXPECT_FALSE(cage.is_disabled());
- EXPECT_FALSE(cage.is_fake_cage());
- EXPECT_EQ(cage.size(), 0UL);
-
- EXPECT_TRUE(cage.Initialize(&vas));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_NE(cage.base(), 0UL);
- EXPECT_GT(cage.size(), 0UL);
-
- cage.TearDown();
-
- EXPECT_FALSE(cage.is_initialized());
-}
-
-TEST(VirtualMemoryCageTest, InitializationWithSize) {
- base::VirtualAddressSpace vas;
- // This test only works if virtual memory subspaces can be allocated.
- if (!vas.CanAllocateSubspaces()) return;
-
- V8VirtualMemoryCage cage;
- size_t size = kVirtualMemoryCageMinimumSize;
- const bool use_guard_regions = false;
- EXPECT_TRUE(cage.Initialize(&vas, size, use_guard_regions));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_FALSE(cage.is_fake_cage());
- EXPECT_EQ(cage.size(), size);
-
- cage.TearDown();
-}
-
-TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
- base::VirtualAddressSpace vas;
- V8VirtualMemoryCage cage;
- // Total size of the fake cage.
- size_t size = kVirtualMemoryCageSize;
- // Size of the virtual memory that is actually reserved at the start of the
- // cage.
- size_t reserved_size = 2 * vas.allocation_granularity();
- EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_TRUE(cage.is_fake_cage());
- EXPECT_NE(cage.base(), 0UL);
- EXPECT_EQ(cage.size(), size);
-
- cage.TearDown();
-
- EXPECT_FALSE(cage.is_initialized());
-}
-
-TEST(VirtualMemloryCageTest, Contains) {
- base::VirtualAddressSpace vas;
- V8VirtualMemoryCage cage;
- EXPECT_TRUE(cage.Initialize(&vas));
-
- Address base = cage.base();
- size_t size = cage.size();
- base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
-
- EXPECT_TRUE(cage.Contains(base));
- EXPECT_TRUE(cage.Contains(base + size - 1));
- for (int i = 0; i < 10; i++) {
- size_t offset = rng.NextInt64() % size;
- EXPECT_TRUE(cage.Contains(base + offset));
- }
-
- EXPECT_FALSE(cage.Contains(base - 1));
- EXPECT_FALSE(cage.Contains(base + size));
- for (int i = 0; i < 10; i++) {
- Address addr = rng.NextInt64();
- if (addr < base || addr >= base + size) {
- EXPECT_FALSE(cage.Contains(addr));
- }
- }
-
- cage.TearDown();
-}
-
-void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
- const size_t kAllocatinSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
- constexpr int kNumAllocations = arraysize(kAllocatinSizesInPages);
-
- VirtualAddressSpace* vas = cage.virtual_address_space();
- size_t allocation_granularity = vas->allocation_granularity();
- std::vector<Address> allocations;
- for (int i = 0; i < kNumAllocations; i++) {
- size_t length = allocation_granularity * kAllocatinSizesInPages[i];
- size_t alignment = allocation_granularity;
- Address ptr = vas->AllocatePages(VirtualAddressSpace::kNoHint, length,
- alignment, PagePermissions::kNoAccess);
- EXPECT_NE(ptr, kNullAddress);
- EXPECT_TRUE(cage.Contains(ptr));
- allocations.push_back(ptr);
- }
-
- for (int i = 0; i < kNumAllocations; i++) {
- size_t length = allocation_granularity * kAllocatinSizesInPages[i];
- EXPECT_TRUE(vas->FreePages(allocations[i], length));
- }
-}
-
-TEST(VirtualMemoryCageTest, PageAllocation) {
- base::VirtualAddressSpace vas;
- V8VirtualMemoryCage cage;
- EXPECT_TRUE(cage.Initialize(&vas));
-
- TestCagePageAllocation(cage);
-
- cage.TearDown();
-}
-
-TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
- base::VirtualAddressSpace vas;
- V8VirtualMemoryCage cage;
- size_t size = kVirtualMemoryCageSize;
- // Only reserve two pages so the test will allocate memory inside and outside
- // of the reserved region.
- size_t reserved_size = 2 * vas.allocation_granularity();
- EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
-
- TestCagePageAllocation(cage);
-
- cage.TearDown();
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 759c7d1003..0bdf866ae0 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -79,7 +79,7 @@ TEST_F(BackgroundCompileTaskTest, SyntaxError) {
std::unique_ptr<BackgroundCompileTask> task(
NewBackgroundCompileTask(isolate(), shared));
- task->Run();
+ task->RunOnMainThread(isolate());
ASSERT_FALSE(Compiler::FinalizeBackgroundCompileTask(
task.get(), isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -105,7 +105,7 @@ TEST_F(BackgroundCompileTaskTest, CompileAndRun) {
std::unique_ptr<BackgroundCompileTask> task(
NewBackgroundCompileTask(isolate(), shared));
- task->Run();
+ task->RunOnMainThread(isolate());
ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
task.get(), isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
@@ -131,7 +131,7 @@ TEST_F(BackgroundCompileTaskTest, CompileFailure) {
std::unique_ptr<BackgroundCompileTask> task(
NewBackgroundCompileTask(isolate(), shared, 100));
- task->Run();
+ task->RunOnMainThread(isolate());
ASSERT_FALSE(Compiler::FinalizeBackgroundCompileTask(
task.get(), isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -201,7 +201,7 @@ TEST_F(BackgroundCompileTaskTest, EagerInnerFunctions) {
std::unique_ptr<BackgroundCompileTask> task(
NewBackgroundCompileTask(isolate(), shared));
- task->Run();
+ task->RunOnMainThread(isolate());
ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
task.get(), isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
@@ -231,7 +231,7 @@ TEST_F(BackgroundCompileTaskTest, LazyInnerFunctions) {
// There's already a task for this SFI.
- task->Run();
+ task->RunOnMainThread(isolate());
ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
task.get(), isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index f6a3eef8e1..5299766823 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -20,6 +20,8 @@ constexpr const char* kTestTorquePrelude = R"(
type void;
type never;
+type IntegerLiteral constexpr 'IntegerLiteral';
+
namespace torque_internal {
struct Reference<T: type> {
const object: HeapObject;
@@ -112,6 +114,8 @@ extern macro TaggedToHeapObject(Object): HeapObject
extern macro Float64SilenceNaN(float64): float64;
extern macro IntPtrConstant(constexpr int31): intptr;
+extern macro ConstexprIntegerLiteralToInt32(constexpr IntegerLiteral): constexpr int32;
+extern macro SmiFromInt32(int32): Smi;
macro FromConstexpr<To: type, From: type>(o: From): To;
FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
@@ -133,6 +137,15 @@ FromConstexpr<bool, constexpr bool>(b: constexpr bool): bool {
FromConstexpr<int32, constexpr int31>(i: constexpr int31): int32 {
return %FromConstexpr<int32>(i);
}
+FromConstexpr<int32, constexpr int32>(i: constexpr int32): int32 {
+ return %FromConstexpr<int32>(i);
+}
+FromConstexpr<int32, constexpr IntegerLiteral>(i: constexpr IntegerLiteral): int32 {
+ return FromConstexpr<int32>(ConstexprIntegerLiteralToInt32(i));
+}
+FromConstexpr<Smi, constexpr IntegerLiteral>(i: constexpr IntegerLiteral): Smi {
+ return SmiFromInt32(FromConstexpr<int32>(i));
+}
macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
labels CastError {
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 66026f79db..0f317d90d6 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -79,6 +79,10 @@
# Tests that need to run sequentially (e.g. due to memory consumption).
'MachineOperatorReducerTest.Word32EqualWithShiftedMaskedValueAndConstant': [PASS, HEAVY],
'SequentialUnmapperTest.UnmapOnTeardown': [PASS, HEAVY],
+
+ #https://crbug.com/v8/12638
+ # Test seems super flaky, disabling for now
+ 'SandboxTest.PartiallyReservedSandboxPageAllocation':[SKIP],
}], # tsan == True
##############################################################################
diff --git a/deps/v8/test/unittests/utils/allocation-unittest.cc b/deps/v8/test/unittests/utils/allocation-unittest.cc
index ac72ab38f1..b6f8d8699f 100644
--- a/deps/v8/test/unittests/utils/allocation-unittest.cc
+++ b/deps/v8/test/unittests/utils/allocation-unittest.cc
@@ -35,7 +35,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
}
struct sigaction old_action_;
// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
struct sigaction old_bus_action_;
#endif
@@ -46,7 +46,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, &old_action_);
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
sigaction(SIGBUS, &action, &old_bus_action_);
#endif
}
@@ -54,7 +54,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TearDown() override {
// Be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_action_, nullptr);
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
sigaction(SIGBUS, &old_bus_action_, nullptr);
#endif
}
@@ -102,7 +102,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
page_allocator, nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
- CHECK(FreePages(page_allocator, buffer, page_size));
+ FreePages(page_allocator, buffer, page_size);
}
};
@@ -141,7 +141,7 @@ TEST(AllocationTest, AllocateAndFree) {
page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
page_size, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
- CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
+ v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize);
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
@@ -151,8 +151,7 @@ TEST(AllocationTest, AllocateAndFree) {
kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
- CHECK(v8::internal::FreePages(page_allocator, aligned_mem_addr,
- kAllocationSize));
+ v8::internal::FreePages(page_allocator, aligned_mem_addr, kAllocationSize);
}
TEST(AllocationTest, ReserveMemory) {
@@ -172,7 +171,7 @@ TEST(AllocationTest, ReserveMemory) {
addr[v8::internal::KB - 1] = 2;
CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kNoAccess));
- CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
+ v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 894c0d0611..b2afc18bc5 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -48,7 +48,7 @@ static const byte kCodeSetLocal0[] = {WASM_LOCAL_SET(0, WASM_ZERO)};
static const byte kCodeTeeLocal0[] = {WASM_LOCAL_TEE(0, WASM_ZERO)};
static const ValueType kValueTypes[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
- kWasmExternRef};
+ kWasmAnyRef};
static const MachineType machineTypes[] = {
MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
@@ -86,24 +86,19 @@ class TestModuleBuilder {
CHECK_LE(mod.globals.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.globals.size() - 1);
}
- byte AddSignature(const FunctionSig* sig) {
- mod.add_signature(sig, kNoSuperType);
+ byte AddSignature(const FunctionSig* sig, uint32_t supertype = kNoSuperType) {
+ mod.add_signature(sig, supertype);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.types.size() - 1);
}
byte AddFunction(const FunctionSig* sig, bool declared = true) {
byte sig_index = AddSignature(sig);
- mod.functions.push_back(
- {sig, // sig
- static_cast<uint32_t>(mod.functions.size()), // func_index
- sig_index, // sig_index
- {0, 0}, // code
- 0, // feedback slots
- false, // import
- false, // export
- declared}); // declared
- CHECK_LE(mod.functions.size(), kMaxByteSizedLeb128);
- return static_cast<byte>(mod.functions.size() - 1);
+ return AddFunctionImpl(sig, sig_index, declared);
+ }
+ byte AddFunction(uint32_t sig_index, bool declared = true) {
+ DCHECK(mod.has_signature(sig_index));
+ return AddFunctionImpl(mod.types[sig_index].function_sig, sig_index,
+ declared);
}
byte AddImport(const FunctionSig* sig) {
byte result = AddFunction(sig);
@@ -136,13 +131,13 @@ class TestModuleBuilder {
type_builder.AddField(field.first, field.second);
}
mod.add_struct_type(type_builder.Build(), supertype);
- return static_cast<byte>(mod.type_kinds.size() - 1);
+ return static_cast<byte>(mod.types.size() - 1);
}
byte AddArray(ValueType type, bool mutability) {
ArrayType* array = mod.signature_zone->New<ArrayType>(type, mutability);
mod.add_array_type(array, kNoSuperType);
- return static_cast<byte>(mod.type_kinds.size() - 1);
+ return static_cast<byte>(mod.types.size() - 1);
}
void InitializeMemory(MemoryType mem_type = kMemory32) {
@@ -159,20 +154,15 @@ class TestModuleBuilder {
}
byte AddPassiveElementSegment(wasm::ValueType type) {
- mod.elem_segments.emplace_back(type, false);
- auto& init = mod.elem_segments.back();
- // Add 5 empty elements.
- for (uint32_t j = 0; j < 5; j++) {
- init.entries.push_back(WasmElemSegment::Entry(
- WasmElemSegment::Entry::kRefNullEntry, type.heap_representation()));
- }
+ mod.elem_segments.emplace_back(type, WasmElemSegment::kStatusPassive,
+ WasmElemSegment::kExpressionElements);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
byte AddDeclarativeElementSegment() {
- mod.elem_segments.emplace_back(kWasmFuncRef, true);
- mod.elem_segments.back().entries.push_back(WasmElemSegment::Entry(
- WasmElemSegment::Entry::kRefNullEntry, HeapType::kFunc));
+ mod.elem_segments.emplace_back(kWasmFuncRef,
+ WasmElemSegment::kStatusDeclarative,
+ WasmElemSegment::kExpressionElements);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
@@ -186,6 +176,21 @@ class TestModuleBuilder {
WasmModule* module() { return &mod; }
private:
+ byte AddFunctionImpl(const FunctionSig* sig, uint32_t sig_index,
+ bool declared) {
+ mod.functions.push_back(
+ {sig, // sig
+ static_cast<uint32_t>(mod.functions.size()), // func_index
+ sig_index, // sig_index
+ {0, 0}, // code
+ 0, // feedback slots
+ false, // import
+ false, // export
+ declared}); // declared
+ CHECK_LE(mod.functions.size(), kMaxByteSizedLeb128);
+ return static_cast<byte>(mod.functions.size() - 1);
+ }
+
AccountingAllocator allocator;
WasmModule mod;
};
@@ -346,11 +351,9 @@ TEST_F(FunctionBodyDecoderTest, Int32Const1) {
}
TEST_F(FunctionBodyDecoderTest, RefFunc) {
- WASM_FEATURE_SCOPE(reftypes);
-
builder.AddFunction(sigs.v_ii());
builder.AddFunction(sigs.ii_v());
- ExpectValidates(sigs.a_v(), {kExprRefFunc, 1});
+ ExpectValidates(sigs.c_v(), {kExprRefFunc, 1});
}
TEST_F(FunctionBodyDecoderTest, EmptyFunction) {
@@ -1102,7 +1105,6 @@ TEST_F(FunctionBodyDecoderTest, Unreachable_select2) {
}
TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(return_call);
@@ -1172,13 +1174,6 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
WASM_GC_OP(kExprRefCast), struct_index,
struct_index, kExprDrop});
- ExpectValidates(sigs.v_v(),
- {WASM_UNREACHABLE, WASM_GC_OP(kExprRttSub), array_index,
- WASM_GC_OP(kExprRttSub), array_index, kExprDrop});
- ExpectValidates(sigs.v_v(),
- {WASM_UNREACHABLE, WASM_GC_OP(kExprRttFreshSub), array_index,
- WASM_GC_OP(kExprRttFreshSub), array_index, kExprDrop});
-
ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, kExprBrOnNull, 0, WASM_DROP});
ExpectValidates(&sig_v_s, {WASM_UNREACHABLE, WASM_LOCAL_GET(0), kExprBrOnNull,
@@ -1517,7 +1512,6 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
}
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
- WASM_FEATURE_SCOPE(reftypes);
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
{ \
@@ -1729,8 +1723,8 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallsWithTooFewArguments) {
TEST_F(FunctionBodyDecoderTest, ReturnCallWithSubtype) {
WASM_FEATURE_SCOPE(return_call);
- auto sig = MakeSig::Returns(kWasmExternRef);
- auto callee_sig = MakeSig::Returns(kWasmExternNonNullableRef);
+ auto sig = MakeSig::Returns(kWasmAnyRef);
+ auto callee_sig = MakeSig::Returns(kWasmAnyNonNullableRef);
builder.AddFunction(&callee_sig);
ExpectValidates(&sig, {WASM_RETURN_CALL_FUNCTION0(0)});
@@ -1935,7 +1929,6 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs1) {
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs2) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
byte table_type_index = builder.AddSignature(sigs.i_i());
byte table_index =
@@ -1953,7 +1946,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs2) {
"call_indirect: Immediate signature #1 is not a subtype of "
"immediate table #0");
- byte non_function_table_index = builder.InitializeTable(kWasmExternRef);
+ byte non_function_table_index = builder.InitializeTable(kWasmAnyRef);
ExpectFailure(
sigs.i_v(),
{WASM_CALL_INDIRECT_TABLE(non_function_table_index, table_type_index,
@@ -1963,27 +1956,28 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs2) {
}
TEST_F(FunctionBodyDecoderTest, TablesWithFunctionSubtyping) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
EXPERIMENTAL_FLAG_SCOPE(gc);
byte empty_struct = builder.AddStruct({});
- byte super_struct = builder.AddStruct({F(kWasmI32, false)});
- byte sub_struct = builder.AddStruct({F(kWasmI32, false), F(kWasmF64, false)});
+ byte super_struct = builder.AddStruct({F(kWasmI32, false)}, empty_struct);
+ byte sub_struct =
+ builder.AddStruct({F(kWasmI32, false), F(kWasmF64, false)}, super_struct);
- byte table_type = builder.AddSignature(
- FunctionSig::Build(zone(), {ValueType::Ref(super_struct, kNullable)},
- {ValueType::Ref(sub_struct, kNullable)}));
byte table_supertype = builder.AddSignature(
FunctionSig::Build(zone(), {ValueType::Ref(empty_struct, kNullable)},
{ValueType::Ref(sub_struct, kNullable)}));
+ byte table_type = builder.AddSignature(
+ FunctionSig::Build(zone(), {ValueType::Ref(super_struct, kNullable)},
+ {ValueType::Ref(sub_struct, kNullable)}),
+ table_supertype);
auto function_sig =
FunctionSig::Build(zone(), {ValueType::Ref(sub_struct, kNullable)},
{ValueType::Ref(super_struct, kNullable)});
- byte function_type = builder.AddSignature(function_sig);
+ byte function_type = builder.AddSignature(function_sig, table_type);
- byte function = builder.AddFunction(function_sig);
+ byte function = builder.AddFunction(function_type);
byte table = builder.InitializeTable(ValueType::Ref(table_type, kNullable));
@@ -2183,18 +2177,17 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
}
TEST_F(FunctionBodyDecoderTest, TableSet) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
byte tab_type = builder.AddSignature(sigs.i_i());
- byte tab_ref1 = builder.AddTable(kWasmExternRef, 10, true, 20);
+ byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
- byte tab_ref2 = builder.AddTable(kWasmExternRef, 10, false, 20);
+ byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
byte tab_typed_func =
builder.AddTable(ValueType::Ref(tab_type, kNullable), 10, false, 20);
- ValueType sig_types[]{kWasmExternRef, kWasmFuncRef, kWasmI32,
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32,
ValueType::Ref(tab_type, kNonNullable)};
FunctionSig sig(0, 4, sig_types);
byte local_ref = 0;
@@ -2240,18 +2233,17 @@ TEST_F(FunctionBodyDecoderTest, TableSet) {
}
TEST_F(FunctionBodyDecoderTest, TableGet) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
byte tab_type = builder.AddSignature(sigs.i_i());
- byte tab_ref1 = builder.AddTable(kWasmExternRef, 10, true, 20);
+ byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
- byte tab_ref2 = builder.AddTable(kWasmExternRef, 10, false, 20);
+ byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
byte tab_typed_func =
builder.AddTable(ValueType::Ref(tab_type, kNullable), 10, false, 20);
- ValueType sig_types[]{kWasmExternRef, kWasmFuncRef, kWasmI32,
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32,
ValueType::Ref(tab_type, kNullable)};
FunctionSig sig(0, 4, sig_types);
byte local_ref = 0;
@@ -2308,11 +2300,10 @@ TEST_F(FunctionBodyDecoderTest, TableGet) {
}
TEST_F(FunctionBodyDecoderTest, MultiTableCallIndirect) {
- WASM_FEATURE_SCOPE(reftypes);
- byte tab_ref = builder.AddTable(kWasmExternRef, 10, true, 20);
+ byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
byte tab_func = builder.AddTable(kWasmFuncRef, 20, true, 30);
- ValueType sig_types[]{kWasmExternRef, kWasmFuncRef, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte sig_index = builder.AddSignature(sigs.i_v());
@@ -2547,7 +2538,6 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheck) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2562,7 +2552,6 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2577,7 +2566,6 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll3) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2608,7 +2596,6 @@ TEST_F(FunctionBodyDecoderTest, Break_Unify) {
}
TEST_F(FunctionBodyDecoderTest, BreakIf_cond_type) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType types[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2622,7 +2609,6 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_cond_type) {
}
TEST_F(FunctionBodyDecoderTest, BreakIf_val_type) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType types[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j],
@@ -2693,14 +2679,14 @@ TEST_F(FunctionBodyDecoderTest, BrTable2b) {
}
TEST_F(FunctionBodyDecoderTest, BrTableSubtyping) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- byte supertype1 = builder.AddStruct({F(kWasmI8, true), F(kWasmI16, false)});
- byte supertype2 = builder.AddStruct({F(kWasmI8, true)});
+ byte supertype1 = builder.AddStruct({F(kWasmI8, true)});
+ byte supertype2 =
+ builder.AddStruct({F(kWasmI8, true), F(kWasmI16, false)}, supertype1);
byte subtype = builder.AddStruct(
- {F(kWasmI8, true), F(kWasmI16, false), F(kWasmI32, true)});
+ {F(kWasmI8, true), F(kWasmI16, false), F(kWasmI32, true)}, supertype2);
ExpectValidates(
sigs.v_v(),
{WASM_BLOCK_R(wasm::ValueType::Ref(supertype1, kNonNullable),
@@ -2863,8 +2849,7 @@ TEST_F(FunctionBodyDecoderTest, Select) {
}
TEST_F(FunctionBodyDecoderTest, Select_needs_value_type) {
- WASM_FEATURE_SCOPE(reftypes);
- ExpectFailure(sigs.e_e(),
+ ExpectFailure(sigs.a_a(),
{WASM_SELECT(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0), WASM_ZERO)});
ExpectFailure(sigs.c_c(),
{WASM_SELECT(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0), WASM_ZERO)});
@@ -2884,7 +2869,7 @@ TEST_F(FunctionBodyDecoderTest, Select_fail2) {
ValueType type = kValueTypes[i];
if (type == kWasmI32) continue;
// Select without specified type is only allowed for number types.
- if (type == kWasmExternRef) continue;
+ if (type == kWasmAnyRef) continue;
ValueType types[] = {type, kWasmI32, type};
FunctionSig sig(1, 2, types);
@@ -2915,7 +2900,6 @@ TEST_F(FunctionBodyDecoderTest, Select_TypeCheck) {
}
TEST_F(FunctionBodyDecoderTest, SelectWithType) {
- WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(sigs.i_i(), {WASM_SELECT_I(WASM_LOCAL_GET(0),
WASM_LOCAL_GET(0), WASM_ZERO)});
ExpectValidates(sigs.f_ff(),
@@ -2924,16 +2908,15 @@ TEST_F(FunctionBodyDecoderTest, SelectWithType) {
{WASM_SELECT_D(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO)});
ExpectValidates(sigs.l_l(),
{WASM_SELECT_L(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO)});
- ExpectValidates(sigs.e_e(),
- {WASM_SELECT_R(WASM_REF_NULL(kExternRefCode),
- WASM_REF_NULL(kExternRefCode), WASM_ZERO)});
+ ExpectValidates(sigs.a_a(),
+ {WASM_SELECT_R(WASM_REF_NULL(kAnyRefCode),
+ WASM_REF_NULL(kAnyRefCode), WASM_ZERO)});
ExpectValidates(sigs.c_c(),
{WASM_SELECT_A(WASM_REF_NULL(kFuncRefCode),
WASM_REF_NULL(kFuncRefCode), WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, SelectWithType_fail) {
- WASM_FEATURE_SCOPE(reftypes);
ExpectFailure(sigs.i_i(), {WASM_SELECT_F(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
WASM_ZERO)});
ExpectFailure(sigs.f_ff(),
@@ -3366,10 +3349,7 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
TEST_F(FunctionBodyDecoderTest, TableInitWrongType) {
uint32_t table_index = builder.InitializeTable(wasm::kWasmFuncRef);
- uint32_t element_index =
- builder.AddPassiveElementSegment(wasm::kWasmExternRef);
-
- WASM_FEATURE_SCOPE(reftypes);
+ uint32_t element_index = builder.AddPassiveElementSegment(wasm::kWasmAnyRef);
ExpectFailure(sigs.v_v(), {WASM_TABLE_INIT(table_index, element_index,
WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
@@ -3397,8 +3377,6 @@ TEST_F(FunctionBodyDecoderTest, ElemDrop) {
TEST_F(FunctionBodyDecoderTest, TableInitDeclarativeElem) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddDeclarativeElementSegment();
-
- WASM_FEATURE_SCOPE(reftypes);
byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
@@ -3410,27 +3388,18 @@ TEST_F(FunctionBodyDecoderTest, TableInitDeclarativeElem) {
TEST_F(FunctionBodyDecoderTest, DeclarativeElemDrop) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddDeclarativeElementSegment();
-
- WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(sigs.v_v(), {WASM_ELEM_DROP(0)});
ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
}
TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
- builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i());
-
- ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
- WASM_FEATURE_SCOPE(reftypes);
- ExpectValidates(sigs.a_v(), {WASM_REF_FUNC(function_index)});
+ ExpectValidates(sigs.c_v(), {WASM_REF_FUNC(function_index)});
}
TEST_F(FunctionBodyDecoderTest, RefFuncUndeclared) {
- builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i(), false);
-
- WASM_FEATURE_SCOPE(reftypes);
- ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
+ ExpectFailure(sigs.c_v(), {WASM_REF_FUNC(function_index)});
}
TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
@@ -3455,84 +3424,67 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
TEST_F(FunctionBodyDecoderTest, TableCopyWrongType) {
uint32_t dst_table_index = builder.InitializeTable(wasm::kWasmFuncRef);
- uint32_t src_table_index = builder.InitializeTable(wasm::kWasmExternRef);
-
- WASM_FEATURE_SCOPE(reftypes);
+ uint32_t src_table_index = builder.InitializeTable(wasm::kWasmAnyRef);
ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(dst_table_index, src_table_index,
WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableGrow) {
byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
- byte tab_ref = builder.AddTable(kWasmExternRef, 10, true, 20);
+ byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
- ExpectFailure(
- sigs.i_c(),
- {WASM_TABLE_GROW(tab_func, WASM_REF_NULL(kFuncRefCode), WASM_ONE)});
- WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(
sigs.i_c(),
{WASM_TABLE_GROW(tab_func, WASM_REF_NULL(kFuncRefCode), WASM_ONE)});
ExpectValidates(
- sigs.i_e(),
- {WASM_TABLE_GROW(tab_ref, WASM_REF_NULL(kExternRefCode), WASM_ONE)});
+ sigs.i_a(),
+ {WASM_TABLE_GROW(tab_ref, WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
// FuncRef table cannot be initialized with an ExternRef value.
- ExpectFailure(sigs.i_e(),
+ ExpectFailure(sigs.i_a(),
{WASM_TABLE_GROW(tab_func, WASM_LOCAL_GET(0), WASM_ONE)});
// ExternRef table cannot be initialized with a FuncRef value.
ExpectFailure(sigs.i_c(),
{WASM_TABLE_GROW(tab_ref, WASM_LOCAL_GET(0), WASM_ONE)});
// Check that the table index gets verified.
ExpectFailure(
- sigs.i_e(),
- {WASM_TABLE_GROW(tab_ref + 2, WASM_REF_NULL(kExternRefCode), WASM_ONE)});
+ sigs.i_a(),
+ {WASM_TABLE_GROW(tab_ref + 2, WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
}
TEST_F(FunctionBodyDecoderTest, TableSize) {
int tab = builder.AddTable(kWasmFuncRef, 10, true, 20);
-
- ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(tab)});
- WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(sigs.i_v(), {WASM_TABLE_SIZE(tab)});
ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(tab + 2)});
}
TEST_F(FunctionBodyDecoderTest, TableFill) {
byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
- byte tab_ref = builder.AddTable(kWasmExternRef, 10, true, 20);
-
- ExpectFailure(sigs.v_c(),
- {WASM_TABLE_FILL(tab_func, WASM_ONE,
- WASM_REF_NULL(kFuncRefCode), WASM_ONE)});
- WASM_FEATURE_SCOPE(reftypes);
+ byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
ExpectValidates(sigs.v_c(),
{WASM_TABLE_FILL(tab_func, WASM_ONE,
WASM_REF_NULL(kFuncRefCode), WASM_ONE)});
- ExpectValidates(sigs.v_e(),
+ ExpectValidates(sigs.v_a(),
{WASM_TABLE_FILL(tab_ref, WASM_ONE,
- WASM_REF_NULL(kExternRefCode), WASM_ONE)});
+ WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
// FuncRef table cannot be initialized with an ExternRef value.
- ExpectFailure(sigs.v_e(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
+ ExpectFailure(sigs.v_a(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
WASM_LOCAL_GET(0), WASM_ONE)});
// ExternRef table cannot be initialized with a FuncRef value.
ExpectFailure(sigs.v_c(), {WASM_TABLE_FILL(tab_ref, WASM_ONE,
WASM_LOCAL_GET(0), WASM_ONE)});
// Check that the table index gets verified.
- ExpectFailure(sigs.v_e(),
+ ExpectFailure(sigs.v_a(),
{WASM_TABLE_FILL(tab_ref + 2, WASM_ONE,
- WASM_REF_NULL(kExternRefCode), WASM_ONE)});
+ WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
}
TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
- {
- WASM_FEATURE_SCOPE(reftypes);
- ExpectFailure(sigs.i_v(), {WASM_TABLE_GROW(0, WASM_REF_NULL(kExternRefCode),
- WASM_ONE)});
- ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(0)});
- ExpectFailure(sigs.i_e(),
- {WASM_TABLE_FILL(0, WASM_ONE, WASM_REF_NULL(kExternRefCode),
- WASM_ONE)});
- }
+ ExpectFailure(sigs.i_v(),
+ {WASM_TABLE_GROW(0, WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
+ ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(0)});
+ ExpectFailure(
+ sigs.i_a(),
+ {WASM_TABLE_FILL(0, WASM_ONE, WASM_REF_NULL(kAnyRefCode), WASM_ONE)});
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
ExpectFailure(sigs.v_v(),
{WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
@@ -3541,10 +3493,9 @@ TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
}
TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
- WASM_FEATURE_SCOPE(reftypes);
{
TestModuleBuilder builder;
- builder.AddTable(kWasmExternRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added one table, therefore table.copy on table 0 should work.
@@ -3565,8 +3516,8 @@ TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
}
{
TestModuleBuilder builder;
- builder.AddTable(kWasmExternRef, 10, true, 20);
- builder.AddTable(kWasmExternRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added two tables, therefore table.copy on table 0 should work.
@@ -3590,11 +3541,10 @@ TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
}
TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
- WASM_FEATURE_SCOPE(reftypes);
{
TestModuleBuilder builder;
- builder.AddTable(kWasmExternRef, 10, true, 20);
- builder.AddPassiveElementSegment(wasm::kWasmExternRef);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment(wasm::kWasmAnyRef);
module = builder.module();
// We added one table, therefore table.init on table 0 should work.
int table_index = 0;
@@ -3607,9 +3557,9 @@ TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
}
{
TestModuleBuilder builder;
- builder.AddTable(kWasmExternRef, 10, true, 20);
- builder.AddTable(kWasmExternRef, 10, true, 20);
- builder.AddPassiveElementSegment(wasm::kWasmExternRef);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment(wasm::kWasmAnyRef);
module = builder.module();
// We added two tables, therefore table.init on table 0 should work.
int table_index = 0;
@@ -3623,7 +3573,6 @@ TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
}
TEST_F(FunctionBodyDecoderTest, UnpackPackedTypes) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
{
@@ -3659,7 +3608,6 @@ ValueType optref(byte type_index) {
}
TEST_F(FunctionBodyDecoderTest, StructNewDefaultWithRtt) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
{
@@ -3692,11 +3640,10 @@ TEST_F(FunctionBodyDecoderTest, StructNewDefaultWithRtt) {
}
TEST_F(FunctionBodyDecoderTest, NominalStructSubtyping) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
byte structural_type = builder.AddStruct({F(kWasmI32, true)});
- byte nominal_type = builder.AddStruct({F(kWasmI32, true)}, kGenericSuperType);
+ byte nominal_type = builder.AddStruct({F(kWasmI32, true)});
AddLocals(optref(structural_type), 1);
AddLocals(optref(nominal_type), 1);
// Try to assign a nominally-typed value to a structurally-typed local.
@@ -3720,22 +3667,19 @@ TEST_F(FunctionBodyDecoderTest, NominalStructSubtyping) {
TEST_F(FunctionBodyDecoderTest, DefaultableLocal) {
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
- AddLocals(kWasmExternRef, 1);
+ AddLocals(kWasmAnyRef, 1);
ExpectValidates(sigs.v_v(), {});
}
TEST_F(FunctionBodyDecoderTest, NonDefaultableLocal) {
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
- AddLocals(ValueType::Ref(HeapType::kExtern, kNonNullable), 1);
+ AddLocals(ValueType::Ref(HeapType::kAny, kNonNullable), 1);
ExpectFailure(sigs.v_v(), {}, kAppendEnd,
"Cannot define function-level local of non-defaultable type");
}
TEST_F(FunctionBodyDecoderTest, AllowingNonDefaultableLocals) {
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(nn_locals);
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
ValueType rep = ref(struct_type_index);
@@ -3770,7 +3714,6 @@ TEST_F(FunctionBodyDecoderTest, AllowingNonDefaultableLocals) {
TEST_F(FunctionBodyDecoderTest, UnsafeNonDefaultableLocals) {
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(unsafe_nn_locals);
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
ValueType rep = ref(struct_type_index);
@@ -3801,7 +3744,6 @@ TEST_F(FunctionBodyDecoderTest, UnsafeNonDefaultableLocals) {
}
TEST_F(FunctionBodyDecoderTest, RefEq) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(eh);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(simd);
@@ -3820,10 +3762,9 @@ TEST_F(FunctionBodyDecoderTest, RefEq) {
kWasmF32,
kWasmF64,
kWasmS128,
- kWasmExternRef,
kWasmFuncRef,
kWasmAnyRef,
- ValueType::Ref(HeapType::kExtern, kNonNullable),
+ ValueType::Ref(HeapType::kAny, kNonNullable),
ValueType::Ref(HeapType::kFunc, kNonNullable)};
for (ValueType type1 : eqref_subtypes) {
@@ -3848,7 +3789,6 @@ TEST_F(FunctionBodyDecoderTest, RefEq) {
}
TEST_F(FunctionBodyDecoderTest, RefAsNonNull) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(eh);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(simd);
@@ -3856,9 +3796,8 @@ TEST_F(FunctionBodyDecoderTest, RefAsNonNull) {
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
- uint32_t heap_types[] = {
- struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
- HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ uint32_t heap_types[] = {struct_type_index, array_type_index, HeapType::kFunc,
+ HeapType::kEq, HeapType::kAny, HeapType::kI31};
ValueType non_compatible_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
kWasmS128};
@@ -3889,16 +3828,14 @@ TEST_F(FunctionBodyDecoderTest, RefAsNonNull) {
}
TEST_F(FunctionBodyDecoderTest, RefNull) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(eh);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
- uint32_t type_reprs[] = {
- struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
- HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ uint32_t type_reprs[] = {struct_type_index, array_type_index, HeapType::kFunc,
+ HeapType::kEq, HeapType::kAny, HeapType::kI31};
// It works with heap types.
for (uint32_t type_repr : type_reprs) {
const ValueType type = ValueType::Ref(type_repr, kNullable);
@@ -3911,22 +3848,19 @@ TEST_F(FunctionBodyDecoderTest, RefNull) {
}
TEST_F(FunctionBodyDecoderTest, RefIsNull) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(eh);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- ExpectValidates(sigs.i_i(),
- {WASM_REF_IS_NULL(WASM_REF_NULL(kExternRefCode))});
+ ExpectValidates(sigs.i_i(), {WASM_REF_IS_NULL(WASM_REF_NULL(kAnyRefCode))});
ExpectFailure(
sigs.i_i(), {WASM_REF_IS_NULL(WASM_LOCAL_GET(0))}, kAppendEnd,
"ref.is_null[0] expected reference type, found local.get of type i32");
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
- uint32_t heap_types[] = {
- struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
- HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ uint32_t heap_types[] = {struct_type_index, array_type_index, HeapType::kFunc,
+ HeapType::kEq, HeapType::kAny, HeapType::kI31};
for (uint32_t heap_type : heap_types) {
const ValueType types[] = {kWasmI32, ValueType::Ref(heap_type, kNullable)};
@@ -3945,7 +3879,6 @@ TEST_F(FunctionBodyDecoderTest, RefIsNull) {
}
TEST_F(FunctionBodyDecoderTest, BrOnNull) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -3967,9 +3900,9 @@ TEST_F(FunctionBodyDecoderTest, BrOnNull) {
}
TEST_F(FunctionBodyDecoderTest, BrOnNonNull) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(experimental_wasm_gc);
const ValueType reps[] = {ValueType::Ref(HeapType::kFunc, kNonNullable),
ValueType::Ref(HeapType::kFunc, kNullable)};
@@ -3992,7 +3925,6 @@ TEST_F(FunctionBodyDecoderTest, BrOnNonNull) {
}
TEST_F(FunctionBodyDecoderTest, GCStruct) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4045,8 +3977,8 @@ TEST_F(FunctionBodyDecoderTest, GCStruct) {
WASM_RTT_CANON(array_type_index)),
kExprDrop},
kAppendEnd,
- "struct.new_with_rtt[1] expected rtt with depth for type 0, "
- "found rtt.canon of type (rtt 0 1)");
+ "struct.new_with_rtt[1] expected type (rtt 0), found "
+ "rtt.canon of type (rtt 1)");
// Out-of-bounds index.
ExpectFailure(sigs.v_v(),
{WASM_STRUCT_NEW_WITH_RTT(42, WASM_I32V(0),
@@ -4125,7 +4057,6 @@ TEST_F(FunctionBodyDecoderTest, GCStruct) {
}
TEST_F(FunctionBodyDecoderTest, GCArray) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4139,6 +4070,9 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
ValueType reps_c_r[] = {kWasmFuncRef, array_type};
ValueType reps_f_r[] = {kWasmF32, array_type};
ValueType reps_i_r[] = {kWasmI32, array_type};
+ ValueType reps_i_a[] = {kWasmI32, kWasmArrayRef};
+ ValueType reps_i_s[] = {kWasmI32,
+ ValueType::Ref(struct_type_index, kNonNullable)};
const FunctionSig sig_c_r(1, 1, reps_c_r);
const FunctionSig sig_v_r(0, 1, &array_type);
const FunctionSig sig_v_r2(0, 1, &immutable_array_type);
@@ -4146,6 +4080,8 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
const FunctionSig sig_f_r(1, 1, reps_f_r);
const FunctionSig sig_v_cr(0, 2, reps_c_r);
const FunctionSig sig_i_r(1, 1, reps_i_r);
+ const FunctionSig sig_i_a(1, 1, reps_i_a);
+ const FunctionSig sig_i_s(1, 1, reps_i_s);
/** array.new_with_rtt **/
ExpectValidates(&sig_r_v,
@@ -4162,8 +4098,8 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
// Mistyped initializer.
ExpectFailure(&sig_r_v,
{WASM_ARRAY_NEW_WITH_RTT(
- array_type_index, WASM_REF_NULL(kExternRefCode),
- WASM_I32V(10), WASM_RTT_CANON(array_type_index))},
+ array_type_index, WASM_REF_NULL(kAnyRefCode), WASM_I32V(10),
+ WASM_RTT_CANON(array_type_index))},
kAppendEnd,
"array.new_with_rtt[0] expected type funcref, found ref.null "
"of type externref");
@@ -4180,8 +4116,8 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
array_type_index, WASM_REF_NULL(kFuncRefCode), WASM_I32V(5),
WASM_RTT_CANON(struct_type_index))},
kAppendEnd,
- "array.new_with_rtt[2] expected rtt with depth for type 0, "
- "found rtt.canon of type (rtt 0 1)");
+ "array.new_with_rtt[2] expected type (rtt 0), found "
+ "rtt.canon of type (rtt 1)");
// Wrong type index.
ExpectFailure(
sigs.v_v(),
@@ -4254,16 +4190,16 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
"array.set[2] expected type funcref, found i64.const of type i64");
/** array.len **/
- ExpectValidates(&sig_i_r,
- {WASM_ARRAY_LEN(array_type_index, WASM_LOCAL_GET(0))});
+ // Works both with conrete array types and arrayref.
+ ExpectValidates(&sig_i_r, {WASM_ARRAY_LEN(WASM_LOCAL_GET(0))});
+ ExpectValidates(&sig_i_a, {WASM_ARRAY_LEN(WASM_LOCAL_GET(0))});
// Wrong return type.
- ExpectFailure(&sig_f_r, {WASM_ARRAY_LEN(array_type_index, WASM_LOCAL_GET(0))},
- kAppendEnd,
+ ExpectFailure(&sig_f_r, {WASM_ARRAY_LEN(WASM_LOCAL_GET(0))}, kAppendEnd,
"type error in fallthru[0] (expected f32, got i32)");
- // Non-array type index.
- ExpectFailure(&sig_i_r,
- {WASM_ARRAY_LEN(struct_type_index, WASM_LOCAL_GET(0))},
- kAppendEnd, "invalid array index: 1");
+ // Non-array argument.
+ ExpectFailure(&sig_i_s, {WASM_ARRAY_LEN(WASM_LOCAL_GET(0))}, kAppendEnd,
+ "array.len[0] expected type (ref null array), found local.get "
+ "of type (ref 1)");
// Immutable array.
// Allocating and reading is OK:
@@ -4282,7 +4218,6 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
}
TEST_F(FunctionBodyDecoderTest, PackedFields) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4367,7 +4302,6 @@ TEST_F(FunctionBodyDecoderTest, PackedFields) {
}
TEST_F(FunctionBodyDecoderTest, PackedTypesAsLocals) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
AddLocals(kWasmI8, 1);
@@ -4375,7 +4309,6 @@ TEST_F(FunctionBodyDecoderTest, PackedTypesAsLocals) {
}
TEST_F(FunctionBodyDecoderTest, RttCanon) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(eh);
@@ -4384,109 +4317,13 @@ TEST_F(FunctionBodyDecoderTest, RttCanon) {
uint8_t struct_type_index = builder.AddStruct({F(kWasmI64, true)});
for (uint32_t type_index : {array_type_index, struct_type_index}) {
- ValueType rtt1 = ValueType::Rtt(type_index, 0);
+ ValueType rtt1 = ValueType::Rtt(type_index);
FunctionSig sig1(1, 0, &rtt1);
ExpectValidates(&sig1, {WASM_RTT_CANON(type_index)});
-
- // rtt.canon should fail for incorrect depth.
- ValueType rtt2 = ValueType::Rtt(type_index, 1);
- FunctionSig sig2(1, 0, &rtt2);
- ExpectFailure(&sig2, {WASM_RTT_CANON(type_index)}, kAppendEnd,
- "type error in fallthru[0]");
- }
-}
-
-TEST_F(FunctionBodyDecoderTest, RttSub) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- uint8_t array_type_index = builder.AddArray(kWasmI8, true);
- uint8_t super_struct_type_index = builder.AddStruct({F(kWasmI16, true)});
- uint8_t sub_struct_type_index =
- builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)});
-
- // Trivial type error.
- ExpectFailure(
- sigs.v_v(), {WASM_RTT_SUB(array_type_index, WASM_I32V(42)), kExprDrop},
- kAppendEnd, "rtt.sub[0] expected rtt for a supertype of type 0");
- ExpectFailure(
- sigs.v_v(),
- {WASM_RTT_FRESH_SUB(array_type_index, WASM_I32V(42)), kExprDrop},
- kAppendEnd, "rtt.fresh_sub[0] expected rtt for a supertype of type 0");
-
- {
- ValueType type = ValueType::Rtt(array_type_index, 1);
- FunctionSig sig(1, 0, &type);
- // Can build an rtt.sub with self type for an array type.
- ExpectValidates(&sig, {WASM_RTT_SUB(array_type_index,
- WASM_RTT_CANON(array_type_index))});
- ExpectValidates(&sig,
- {WASM_RTT_FRESH_SUB(array_type_index,
- WASM_RTT_CANON(array_type_index))});
- // Fails when argument to rtt.sub is not a supertype.
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_SUB(super_struct_type_index,
- WASM_RTT_CANON(array_type_index)),
- kExprDrop},
- kAppendEnd,
- "rtt.sub[0] expected rtt for a supertype of type 1");
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_FRESH_SUB(super_struct_type_index,
- WASM_RTT_CANON(array_type_index)),
- kExprDrop},
- kAppendEnd,
- "rtt.fresh_sub[0] expected rtt for a supertype of type 1");
- }
-
- {
- ValueType type = ValueType::Rtt(super_struct_type_index, 1);
- FunctionSig sig(1, 0, &type);
- // Can build an rtt.sub with self type for a struct type.
- ExpectValidates(&sig,
- {WASM_RTT_SUB(super_struct_type_index,
- WASM_RTT_CANON(super_struct_type_index))});
- ExpectValidates(
- &sig, {WASM_RTT_FRESH_SUB(super_struct_type_index,
- WASM_RTT_CANON(super_struct_type_index))});
- // Fails when argument to rtt.sub is not a supertype.
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_SUB(super_struct_type_index,
- WASM_RTT_CANON(array_type_index))},
- kAppendEnd,
- "rtt.sub[0] expected rtt for a supertype of type 1");
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_FRESH_SUB(super_struct_type_index,
- WASM_RTT_CANON(array_type_index))},
- kAppendEnd,
- "rtt.fresh_sub[0] expected rtt for a supertype of type 1");
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_SUB(super_struct_type_index,
- WASM_RTT_CANON(sub_struct_type_index))},
- kAppendEnd,
- "rtt.sub[0] expected rtt for a supertype of type 1");
- ExpectFailure(sigs.v_v(),
- {WASM_RTT_FRESH_SUB(super_struct_type_index,
- WASM_RTT_CANON(sub_struct_type_index))},
- kAppendEnd,
- "rtt.fresh_sub[0] expected rtt for a supertype of type 1");
- }
-
- {
- // Can build an rtt from a stuct supertype.
- ValueType type = ValueType::Rtt(sub_struct_type_index, 1);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(sub_struct_type_index,
- WASM_RTT_CANON(super_struct_type_index))});
- ExpectValidates(
- &sig, {WASM_RTT_FRESH_SUB(sub_struct_type_index,
- WASM_RTT_CANON(super_struct_type_index))});
}
}
TEST_F(FunctionBodyDecoderTest, RefTestCast) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4580,12 +4417,13 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
}
TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(experimental_wasm_gc);
byte super_struct = builder.AddStruct({F(kWasmI16, true)});
- byte sub_struct = builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)});
+ byte sub_struct =
+ builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)}, super_struct);
ValueType supertype = ValueType::Ref(super_struct, kNullable);
ValueType subtype = ValueType::Ref(sub_struct, kNullable);
@@ -4648,9 +4486,9 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
}
TEST_F(FunctionBodyDecoderTest, BrOnAbstractType) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(experimental_wasm_gc);
ValueType kNonNullableFunc = ValueType::Ref(HeapType::kFunc, kNonNullable);
@@ -4705,7 +4543,6 @@ TEST_F(FunctionBodyDecoderTest, BrOnAbstractType) {
}
TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4724,7 +4561,6 @@ TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
}
TEST_F(FunctionBodyDecoderTest, MergeNullableTypes) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -4746,7 +4582,6 @@ TEST_F(FunctionBodyDecoderTest, MergeNullableTypes) {
// This tests that num_locals_ in decoder remains consistent, even if we fail
// mid-DecodeLocals().
TEST_F(FunctionBodyDecoderTest, Regress_1154439) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
AddLocals(kWasmI32, 1);
AddLocals(kWasmI64, 1000000);
@@ -5057,7 +4892,6 @@ class TypeReaderTest : public TestWithZone {
TEST_F(TypeReaderTest, HeapTypeDecodingTest) {
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
HeapType heap_func = HeapType(HeapType::kFunc);
@@ -5160,7 +4994,6 @@ TEST_F(LocalDeclDecoderTest, WrongLocalDeclsCount2) {
}
TEST_F(LocalDeclDecoderTest, OneLocal) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
const byte data[] = {1, 1, static_cast<byte>(type.value_type_code())};
@@ -5175,7 +5008,6 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
}
TEST_F(LocalDeclDecoderTest, FiveLocals) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
const byte data[] = {1, 5, static_cast<byte>(type.value_type_code())};
@@ -5241,7 +5073,6 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
}
TEST_F(LocalDeclDecoderTest, InvalidTypeIndex) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
const byte* data = nullptr;
diff --git a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
index 743bc5b2af..21a124fead 100644
--- a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
+++ b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
@@ -51,12 +51,9 @@ class MemoryProtectionTest : public TestWithNativeContext {
mode_ = mode;
bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback;
FLAG_wasm_memory_protection_keys = enable_pku;
- if (enable_pku) {
- GetWasmCodeManager()->InitializeMemoryProtectionKeyForTesting();
- // The key is initially write-protected.
- CHECK_IMPLIES(GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
- !GetWasmCodeManager()->MemoryProtectionKeyWritable());
- }
+ // The key is initially write-protected.
+ CHECK_IMPLIES(GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
+ !GetWasmCodeManager()->MemoryProtectionKeyWritable());
bool enable_mprotect =
mode == kMprotect || mode == kPkuWithMprotectFallback;
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index b2bb0742b5..f33a8d8c1d 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -31,7 +31,7 @@ namespace module_decoder_unittest {
#define WASM_INIT_EXPR_F32(val) WASM_F32(val), kExprEnd
#define WASM_INIT_EXPR_I64(val) WASM_I64(val), kExprEnd
#define WASM_INIT_EXPR_F64(val) WASM_F64(val), kExprEnd
-#define WASM_INIT_EXPR_EXTERN_REF_NULL WASM_REF_NULL(kExternRefCode), kExprEnd
+#define WASM_INIT_EXPR_EXTERN_REF_NULL WASM_REF_NULL(kAnyRefCode), kExprEnd
#define WASM_INIT_EXPR_FUNC_REF_NULL WASM_REF_NULL(kFuncRefCode), kExprEnd
#define WASM_INIT_EXPR_REF_FUNC(val) WASM_REF_FUNC(val), kExprEnd
#define WASM_INIT_EXPR_GLOBAL(index) WASM_GLOBAL_GET(index), kExprEnd
@@ -76,9 +76,11 @@ namespace module_decoder_unittest {
'H', 'i', 'n', 't', 's'), \
ADD_COUNT(__VA_ARGS__))
-#define SECTION_BRANCH_HINTS(...) \
- SECTION(Unknown, \
- ADD_COUNT('b', 'r', 'a', 'n', 'c', 'h', 'H', 'i', 'n', 't', 's'), \
+#define SECTION_BRANCH_HINTS(...) \
+ SECTION(Unknown, \
+ ADD_COUNT('m', 'e', 't', 'a', 'd', 'a', 't', 'a', '.', 'c', 'o', \
+ 'd', 'e', '.', 'b', 'r', 'a', 'n', 'c', 'h', '_', 'h', \
+ 'i', 'n', 't'), \
__VA_ARGS__)
#define FAIL_IF_NO_EXPERIMENTAL_EH(data) \
@@ -183,12 +185,12 @@ struct ValueTypePair {
uint8_t code;
ValueType type;
} kValueTypes[] = {
- {kI32Code, kWasmI32}, // --
- {kI64Code, kWasmI64}, // --
- {kF32Code, kWasmF32}, // --
- {kF64Code, kWasmF64}, // --
- {kFuncRefCode, kWasmFuncRef}, // --
- {kExternRefCode, kWasmExternRef}, // --
+ {kI32Code, kWasmI32}, // --
+ {kI64Code, kWasmI64}, // --
+ {kF32Code, kWasmF32}, // --
+ {kF64Code, kWasmF64}, // --
+ {kFuncRefCode, kWasmFuncRef}, // --
+ {kAnyRefCode, kWasmAnyRef}, // --
};
class WasmModuleVerifyTest : public TestWithIsolateAndZone {
@@ -300,7 +302,6 @@ TEST_F(WasmModuleVerifyTest, S128Global) {
}
TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -308,7 +309,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
SECTION(Global, // --
ENTRY_COUNT(2), // --
- kExternRefCode, // local type
+ kAnyRefCode, // local type
0, // immutable
WASM_INIT_EXPR_EXTERN_REF_NULL, // init
kFuncRefCode, // local type
@@ -335,7 +336,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
EXPECT_EQ(0u, result.value()->data_segments.size());
const WasmGlobal* global = &result.value()->globals[0];
- EXPECT_EQ(kWasmExternRef, global->type);
+ EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
global = &result.value()->globals[1];
@@ -345,7 +346,6 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
}
TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -389,7 +389,6 @@ TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
}
TEST_F(WasmModuleVerifyTest, InvalidFuncRefGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -405,19 +404,18 @@ TEST_F(WasmModuleVerifyTest, InvalidFuncRefGlobal) {
}
TEST_F(WasmModuleVerifyTest, ExternRefGlobalWithGlobalInit) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
SECTION(Import, // --
ENTRY_COUNT(1), // number of imports
ADD_COUNT('m'), // module name
ADD_COUNT('f'), // global name
kExternalGlobal, // import kind
- kExternRefCode, // type
+ kAnyRefCode, // type
0), // mutability
SECTION(Global, // --
ENTRY_COUNT(1),
- kExternRefCode, // local type
- 0, // immutable
+ kAnyRefCode, // local type
+ 0, // immutable
WASM_INIT_EXPR_GLOBAL(0)),
};
@@ -431,25 +429,24 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobalWithGlobalInit) {
const WasmGlobal* global = &result.value()->globals.back();
- EXPECT_EQ(kWasmExternRef, global->type);
+ EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
}
}
TEST_F(WasmModuleVerifyTest, NullGlobalWithGlobalInit) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
SECTION(Import, // --
ENTRY_COUNT(1), // number of imports
ADD_COUNT('m'), // module name
ADD_COUNT('n'), // global name
kExternalGlobal, // import kind
- kExternRefCode, // type
+ kAnyRefCode, // type
0), // mutability
SECTION(Global, // --
ENTRY_COUNT(1),
- kExternRefCode, // local type
- 0, // immutable
+ kAnyRefCode, // local type
+ 0, // immutable
WASM_INIT_EXPR_GLOBAL(0)),
};
@@ -464,7 +461,7 @@ TEST_F(WasmModuleVerifyTest, NullGlobalWithGlobalInit) {
const WasmGlobal* global = &result.value()->globals.back();
- EXPECT_EQ(kWasmExternRef, global->type);
+ EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
}
}
@@ -500,8 +497,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
kI32Code, // type
1) // mutable
};
- EXPECT_FAILURE_WITH_MSG(no_initializer_no_end,
- "Initializer expression is missing 'end'");
+ EXPECT_FAILURE_WITH_MSG(no_initializer_no_end, "Beyond end of code");
static const byte no_initializer[] = {
SECTION(Global, //--
@@ -569,20 +565,19 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
"Invalid global index: 1");
{
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte referencing_undefined_global_nested[] = {
SECTION(Type, ENTRY_COUNT(1), WASM_ARRAY_DEF(kI32Code, true)),
- SECTION(Global, ENTRY_COUNT(2), // --
- WASM_RTT_WITH_DEPTH(1, 0), // type
- 0, // mutable
- WASM_RTT_SUB(0, // init value
- WASM_GLOBAL_GET(1)), // --
- kExprEnd, // --
- WASM_RTT_WITH_DEPTH(0, 0), // type
- 0, // mutable
- WASM_RTT_CANON(0), kExprEnd) // init value
+ SECTION(Global, ENTRY_COUNT(2), // --
+ kRefCode, 0, // type
+ 0, // mutable
+ WASM_ARRAY_NEW_DEFAULT(0, // init value
+ WASM_GLOBAL_GET(1)), // --
+ kExprEnd, // --
+ kI32Code, // type
+ 0, // mutable
+ WASM_I32V(10), kExprEnd) // init value
};
EXPECT_FAILURE_WITH_MSG(referencing_undefined_global_nested,
"Invalid global index: 1");
@@ -766,7 +761,6 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
}
TEST_F(WasmModuleVerifyTest, RefNullGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
WASM_REF_NULL(kFuncRefCode), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
@@ -774,7 +768,6 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobal) {
}
TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid1) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kOptRefCode, 0, 1,
WASM_REF_NULL(0), kExprEnd)};
@@ -783,7 +776,6 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid1) {
}
TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid2) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
kExprRefNull, U32V_5(1000001), kExprEnd)};
@@ -794,271 +786,33 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid2) {
}
TEST_F(WasmModuleVerifyTest, RttCanonGlobalStruct) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(0, 0), 0,
- WASM_RTT_CANON(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, RttCanonGlobalTypeError) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 0), 1,
- WASM_RTT_CANON(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(
- result,
- "type error in init. expression[0] (expected (rtt 1 0), got (rtt 0 0))");
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttSubOfCanon) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfCanon) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_FRESH_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttSubOfSubOfCanon) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(2, 1), 1,
- WASM_RTT_SUB(1, WASM_RTT_SUB(1, WASM_RTT_CANON(0))), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfSubOfCanon) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(2, 1), 1,
- WASM_RTT_FRESH_SUB(1, WASM_RTT_SUB(1, WASM_RTT_CANON(0))),
- kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfFreshSubOfCanon) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(2, 1), 1,
- WASM_RTT_FRESH_SUB(1, WASM_RTT_FRESH_SUB(1, WASM_RTT_CANON(0))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT(0), 0, WASM_RTT_CANON(0),
kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
}
-TEST_F(WasmModuleVerifyTest, GlobalRttSubOfGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Import, // section header
- ENTRY_COUNT(1), // number of imports
- ADD_COUNT('m'), // module name
- ADD_COUNT('f'), // global name
- kExternalGlobal, // import kind
- WASM_RTT_WITH_DEPTH(0, 0), // type
- 0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_SUB(1, WASM_GLOBAL_GET(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
- STRUCT_FIELD(kI32Code, true))),
- SECTION(Import, // section header
- ENTRY_COUNT(1), // number of imports
- ADD_COUNT('m'), // module name
- ADD_COUNT('f'), // global name
- kExternalGlobal, // import kind
- WASM_RTT_WITH_DEPTH(0, 0), // type
- 0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_FRESH_SUB(1, WASM_GLOBAL_GET(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttSubOfGlobalTypeError) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Import, // section header
- ENTRY_COUNT(1), // number of imports
- ADD_COUNT('m'), // module name
- ADD_COUNT('f'), // global name
- kExternalGlobal, // import kind
- kI32Code, // type
- 0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 0), 1,
- WASM_RTT_SUB(0, WASM_GLOBAL_GET(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result,
- "rtt.sub[0] expected rtt for a supertype of type 0, found "
- "global.get of type i32");
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfGlobalTypeError) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Import, // section header
- ENTRY_COUNT(1), // number of imports
- ADD_COUNT('m'), // module name
- ADD_COUNT('f'), // global name
- kExternalGlobal, // import kind
- kI32Code, // type
- 0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 0), 1,
- WASM_RTT_FRESH_SUB(0, WASM_GLOBAL_GET(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result,
- "rtt.fresh_sub[0] expected rtt for a supertype of type 0, "
- "found global.get of type i32");
-}
-
-#if !V8_OS_FUCHSIA
-TEST_F(WasmModuleVerifyTest, GlobalRttSubIllegalParent) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(2),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kF32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result,
- "rtt.sub[0] expected rtt for a supertype of type 1, found "
- "rtt.canon of type (rtt 0 0)");
-}
-
-TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubIllegalParent) {
- WASM_FEATURE_SCOPE(reftypes);
+TEST_F(WasmModuleVerifyTest, RttCanonGlobalTypeError) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
-
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kF32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
- WASM_RTT_FRESH_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result,
- "rtt.fresh_sub[0] expected rtt for a supertype of type 1, "
- "found rtt.canon of type (rtt 0 0)");
-}
-#endif // !V8_OS_FUCHSIA
-
-TEST_F(WasmModuleVerifyTest, RttSubGlobalTypeError) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(0, 0), 1,
- WASM_RTT_SUB(0, WASM_RTT_CANON(0)), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(
- result,
- "type error in init. expression[0] (expected (rtt 0 0), got (rtt 1 0))");
-}
-
-TEST_F(WasmModuleVerifyTest, RttFreshSubGlobalTypeError) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
-
- static const byte data[] = {
- SECTION(Type, ENTRY_COUNT(1),
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(0, 0), 1,
- WASM_RTT_FRESH_SUB(0, WASM_RTT_CANON(0)), kExprEnd)};
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT(0), 1, WASM_RTT_CANON(1),
+ kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(
result,
- "type error in init. expression[0] (expected (rtt 0 0), got (rtt 1 0))");
+ "type error in init. expression[0] (expected (rtt 0), got (rtt 1))");
}
TEST_F(WasmModuleVerifyTest, StructNewInitExpr) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -1073,12 +827,12 @@ TEST_F(WasmModuleVerifyTest, StructNewInitExpr) {
static const byte global_args[] = {
SECTION(Type, ENTRY_COUNT(1), // --
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(3), // --
- kI32Code, 0, // type, mutability
- WASM_INIT_EXPR_I32V_1(10), // --
- kRttWithDepthCode, 1, 0, 0, // type, mutability
- WASM_RTT_SUB(0, WASM_RTT_CANON(0)), kExprEnd, // --
- kRefCode, 0, 0, // type, mutability
+ SECTION(Global, ENTRY_COUNT(3), // --
+ kI32Code, 0, // type, mutability
+ WASM_INIT_EXPR_I32V_1(10), // --
+ kRttCode, 0, 0, // type, mutability
+ WASM_RTT_CANON(0), kExprEnd, // --
+ kRefCode, 0, 0, // type, mutability
WASM_INIT_EXPR_STRUCT_NEW(0, WASM_GLOBAL_GET(0),
WASM_GLOBAL_GET(1)))};
EXPECT_VERIFIES(global_args);
@@ -1102,12 +856,11 @@ TEST_F(WasmModuleVerifyTest, StructNewInitExpr) {
kRefCode, 0, 0, // type, mutability
WASM_INIT_EXPR_STRUCT_NEW(0, WASM_I32V(42), WASM_RTT_CANON(1)))};
EXPECT_FAILURE_WITH_MSG(subexpr_type_error,
- "struct.new_with_rtt[1] expected rtt with depth for "
- "type 0, found rtt.canon of type (rtt 0 1)");
+ "struct.new_with_rtt[1] expected type (rtt 0), found "
+ "rtt.canon of type (rtt 1)");
}
TEST_F(WasmModuleVerifyTest, ArrayInitInitExpr) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -1169,7 +922,6 @@ TEST_F(WasmModuleVerifyTest, ArrayInitInitExpr) {
}
TEST_F(WasmModuleVerifyTest, EmptyStruct) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte empty_struct[] = {SECTION(Type, ENTRY_COUNT(1), // --
@@ -1180,7 +932,6 @@ TEST_F(WasmModuleVerifyTest, EmptyStruct) {
}
TEST_F(WasmModuleVerifyTest, InvalidStructTypeDef) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte all_good[] = {
@@ -1253,25 +1004,24 @@ TEST_F(WasmModuleVerifyTest, InvalidStructTypeDef) {
}
TEST_F(WasmModuleVerifyTest, NominalStructTypeDef) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
// Inheritance: t1 <: t2 <: t0
static const byte all_good[] = {
SECTION(Type, ENTRY_COUNT(3), // --
- kWasmStructSubtypeCode, // type #0
+ kWasmStructNominalCode, // type #0
1, // field count
kI32Code, 1, // mut i32
kDataRefCode, // root of type hierarchy
- kWasmStructSubtypeCode, // type #1
+ kWasmStructNominalCode, // type #1
2, // field count
kI32Code, 1, // mut i32 (inherited)
kI64Code, 1, // mut i32 (added)
2, // supertype
- kWasmStructSubtypeCode, // type #2
+ kWasmStructNominalCode, // type #2
1, // field count
kI32Code, 1, // mut i32 (inherited)
0)}; // supertype
@@ -1279,26 +1029,26 @@ TEST_F(WasmModuleVerifyTest, NominalStructTypeDef) {
ModuleResult result = DecodeModule(all_good, all_good + sizeof(all_good));
EXPECT_OK(result);
WasmModule* module = result.value().get();
- EXPECT_EQ(kGenericSuperType, module->supertype(0));
+ EXPECT_EQ(kNoSuperType, module->supertype(0));
EXPECT_EQ(2u, module->supertype(1));
EXPECT_EQ(0u, module->supertype(2));
static const byte self_or_mutual_ref[] = {
SECTION(Type, ENTRY_COUNT(4), // --
- kWasmStructSubtypeCode, 0, // empty struct
+ kWasmStructNominalCode, 0, // empty struct
kDataRefCode, // root of hierarchy
- kWasmStructSubtypeCode, // type1
+ kWasmStructNominalCode, // type1
1, // field count
kOptRefCode, 1, 1, // mut optref type1
0, // supertype
- kWasmStructSubtypeCode, // type 2
+ kWasmStructNominalCode, // type 2
1, // field count
kOptRefCode, 3, 1, // mut optref type3
0, // supertype
- kWasmStructSubtypeCode, // type 3
+ kWasmStructNominalCode, // type 3
1, // field count
kOptRefCode, 2, 1, // mut optref type2
0)}; // supertype
@@ -1307,17 +1057,17 @@ TEST_F(WasmModuleVerifyTest, NominalStructTypeDef) {
static const byte mutual_ref_with_subtyping[] = {
SECTION(Type,
ENTRY_COUNT(3), // --
- kWasmStructSubtypeCode, //
+ kWasmStructNominalCode, //
1, // field count
kOptRefCode, 0, 0, // ref type0
kDataRefCode, // root of hierarchy
- kWasmStructSubtypeCode, // --
+ kWasmStructNominalCode, // --
1, // field count
kOptRefCode, 2, 0, // ref type2
0, // supertype
- kWasmStructSubtypeCode, // --
+ kWasmStructNominalCode, // --
1, // field count
kOptRefCode, 1, 0, // ref type1
0)}; // supertype
@@ -1325,49 +1075,51 @@ TEST_F(WasmModuleVerifyTest, NominalStructTypeDef) {
static const byte inheritance_cycle[] = {
SECTION(Type, ENTRY_COUNT(2), // --
- kWasmStructSubtypeCode, 0, 1, // no fields, supertype 1
- kWasmStructSubtypeCode, 0, 0)}; // no fields, supertype 0
+ kWasmStructNominalCode, 0, 1, // no fields, supertype 1
+ kWasmStructNominalCode, 0, 0)}; // no fields, supertype 0
EXPECT_FAILURE_WITH_MSG(inheritance_cycle, "cyclic inheritance");
static const byte invalid_field[] = {
SECTION(Type, ENTRY_COUNT(2), // --
kWasmStructTypeCode, U32V_1(1), kI32Code, 1, // t0: [i32]
- kWasmStructSubtypeCode, U32V_1(2), // t1:
+ kWasmStructNominalCode, U32V_1(2), // t1:
kI64Code, 1, // i64 (invalid inheritance)
kI32Code, 1, U32V_1(0))}; // i32 (added), supertype 0
- EXPECT_FAILURE_WITH_MSG(invalid_field, "invalid explicit supertype");
+ EXPECT_FAILURE_WITH_MSG(
+ invalid_field, "mixing nominal and isorecursive types is not allowed");
static const byte structural_supertype[] = {
SECTION(Type, ENTRY_COUNT(2), // --
kWasmStructTypeCode, 0, // empty struct
- kWasmStructSubtypeCode, 0, // also empty
+ kWasmStructNominalCode, 0, // also empty
0)}; // supertype is structural type
- EXPECT_FAILURE_WITH_MSG(structural_supertype, "invalid explicit supertype");
+ EXPECT_FAILURE_WITH_MSG(
+ structural_supertype,
+ "mixing nominal and isorecursive types is not allowed");
static const byte supertype_oob[] = {
SECTION(Type, ENTRY_COUNT(1), // --
- kWasmStructSubtypeCode,
+ kWasmStructNominalCode,
0, // empty struct
13)}; // supertype with invalid index
EXPECT_FAILURE_WITH_MSG(supertype_oob, "Type index 13 is out of bounds");
}
TEST_F(WasmModuleVerifyTest, NominalFunctionTypeDef) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
EXPERIMENTAL_FLAG_SCOPE(gc); // Needed for subtype checking.
static const byte all_good[] = {
SECTION(Type, ENTRY_COUNT(2), // --
- kWasmFunctionSubtypeCode, // type #0
+ kWasmFunctionNominalCode, // type #0
1, // params count
kRefCode, 0, // ref #0
1, // results count
kOptRefCode, 0, // optref #0
kFuncRefCode, // root of type hierarchy
- kWasmFunctionSubtypeCode, // type #1
+ kWasmFunctionNominalCode, // type #1
1, // params count
kOptRefCode, 0, // refined (contravariant)
1, // results count
@@ -1377,12 +1129,11 @@ TEST_F(WasmModuleVerifyTest, NominalFunctionTypeDef) {
ModuleResult result = DecodeModule(all_good, all_good + sizeof(all_good));
EXPECT_OK(result);
WasmModule* module = result.value().get();
- EXPECT_EQ(kGenericSuperType, module->supertype(0));
+ EXPECT_EQ(kNoSuperType, module->supertype(0));
EXPECT_EQ(0u, module->supertype(1));
}
TEST_F(WasmModuleVerifyTest, InvalidArrayTypeDef) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte all_good[] = {
@@ -1648,7 +1399,6 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
TEST_F(WasmModuleVerifyTest, CanonicalTypeIds) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
SECTION(Type, // --
@@ -1667,7 +1417,6 @@ TEST_F(WasmModuleVerifyTest, CanonicalTypeIds) {
const WasmModule* module = result.value().get();
EXPECT_EQ(5u, module->types.size());
- EXPECT_EQ(5u, module->type_kinds.size());
EXPECT_EQ(5u, module->canonicalized_type_ids.size());
EXPECT_EQ(2u, module->signature_map.size());
@@ -1999,7 +1748,6 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
// Test that if we have multiple tables, in the element section we can target
// and initialize all tables.
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2031,7 +1779,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
// Test that if we have multiple tables, both imported and module-defined, in
// the element section we can target and initialize all tables.
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2088,7 +1835,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
// Test that the order in which tables are targeted in the element secion
// can be arbitrary.
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2124,7 +1870,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
// Test that the order in which tables are targeted in the element secion can
// be arbitrary. In this test, tables can be both imported and module-defined.
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2179,7 +1924,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
}
TEST_F(WasmModuleVerifyTest, ElementSectionInitExternRefTableWithFuncRef) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2187,7 +1931,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitExternRefTableWithFuncRef) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kExternRefCode, 0, 5, // table 0
+ kAnyRefCode, 0, 5, // table 0
kFuncRefCode, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
@@ -2213,7 +1957,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitExternRefTableWithFuncRef) {
}
TEST_F(WasmModuleVerifyTest, ElementSectionInitFuncRefTableWithFuncRefNull) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), // section header
@@ -2232,7 +1975,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitFuncRefTableWithFuncRefNull) {
}
TEST_F(WasmModuleVerifyTest, ElementSectionInitFuncRefTableWithExternRefNull) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), // section header
@@ -2249,14 +1991,12 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitFuncRefTableWithExternRefNull) {
EXPECT_FAILURE_WITH_MSG(
data,
- "Invalid type in the init expression. The expected "
- "type is 'funcref', but the actual type is 'externref'.");
+ "type error in init. expression[0] (expected funcref, got externref)");
}
TEST_F(WasmModuleVerifyTest, ElementSectionDontInitExternRefImportedTable) {
// Test that imported tables of type ExternRef cannot be initialized in the
// elements section.
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -2271,7 +2011,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitExternRefImportedTable) {
ADD_COUNT('m'), // module name
ADD_COUNT('s'), // table name
kExternalTable, // import kind
- kExternRefCode, // elem_type
+ kAnyRefCode, // elem_type
0, // no maximum field
10), // initial size
// funcs ---------------------------------------------------------------
@@ -2299,14 +2039,72 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitExternRefImportedTable) {
}
TEST_F(WasmModuleVerifyTest, ElementSectionGlobalGetOutOfBounds) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
SECTION(Element, ENTRY_COUNT(1),
0x05, // Mode: Passive with expressions-as-elements
kFuncRefCode, // type
ENTRY_COUNT(1), // element count
kExprGlobalGet, 0x00, kExprEnd)}; // init. expression
- EXPECT_FAILURE_WITH_MSG(data, "Out-of-bounds global index 0");
+ EXPECT_FAILURE_WITH_MSG(data, "Invalid global index: 0");
+}
+
+// Make sure extended constants do not work without the experimental feature.
+TEST_F(WasmModuleVerifyTest, ExtendedConstantsFail) {
+ static const byte data[] = {
+ SECTION(Import, ENTRY_COUNT(1), // one import
+ 0x01, 'm', 0x01, 'g', // module, name
+ kExternalGlobal, kI32Code, 0), // type, mutability
+ SECTION(Global, ENTRY_COUNT(1), // one defined global
+ kI32Code, 0, // type, mutability
+ // initializer
+ kExprGlobalGet, 0x00, kExprGlobalGet, 0x00, kExprI32Add,
+ kExprEnd)};
+ EXPECT_FAILURE_WITH_MSG(data,
+ "opcode i32.add is not allowed in init. expressions");
+}
+
+TEST_F(WasmModuleVerifyTest, ExtendedConstantsI32) {
+ WASM_FEATURE_SCOPE(extended_const);
+ static const byte data[] = {
+ SECTION(Import, ENTRY_COUNT(1), // one import
+ 0x01, 'm', 0x01, 'g', // module, name
+ kExternalGlobal, kI32Code, 0), // type, mutability
+ SECTION(Global, ENTRY_COUNT(1), // one defined global
+ kI32Code, 0, // type, mutability
+ // initializer
+ kExprGlobalGet, 0x00, kExprGlobalGet, 0x00, kExprI32Add,
+ kExprGlobalGet, 0x00, kExprI32Sub, kExprGlobalGet, 0x00,
+ kExprI32Mul, kExprEnd)};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExtendedConstantsI64) {
+ WASM_FEATURE_SCOPE(extended_const);
+ static const byte data[] = {
+ SECTION(Import, ENTRY_COUNT(1), // one import
+ 0x01, 'm', 0x01, 'g', // module, name
+ kExternalGlobal, kI64Code, 0), // type, mutability
+ SECTION(Global, ENTRY_COUNT(1), // one defined global
+ kI64Code, 0, // type, mutability
+ // initializer
+ kExprGlobalGet, 0x00, kExprGlobalGet, 0x00, kExprI64Add,
+ kExprGlobalGet, 0x00, kExprI64Sub, kExprGlobalGet, 0x00,
+ kExprI64Mul, kExprEnd)};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExtendedConstantsTypeError) {
+ WASM_FEATURE_SCOPE(extended_const);
+ static const byte data[] = {
+ SECTION(Import, ENTRY_COUNT(1), // one import
+ 0x01, 'm', 0x01, 'g', // module, name
+ kExternalGlobal, kI32Code, 0), // type, mutability
+ SECTION(Global, ENTRY_COUNT(1), // one defined global
+ kI32Code, 0, // type, mutability
+ // initializer
+ kExprGlobalGet, 0x00, kExprI64Const, 1, kExprI32Add, kExprEnd)};
+ EXPECT_FAILURE_WITH_MSG(
+ data, "i32.add[1] expected type i32, found i64.const of type i64");
}
TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
@@ -2331,29 +2129,14 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
EXPECT_FAILURE(data);
}
-TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
- static const byte data[] = {
- SECTION(Table, // table section
- ENTRY_COUNT(2), // 2 tables
- kFuncRefCode, // table 1: type
- 0, // table 1: no maximum
- 10, // table 1: minimum size
- kFuncRefCode, // table 2: type
- 0, // table 2: no maximum
- 10), // table 2: minimum size
- };
- EXPECT_FAILURE(data);
-}
-
-TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
- WASM_FEATURE_SCOPE(reftypes);
+TEST_F(WasmModuleVerifyTest, MultipleTables) {
static const byte data[] = {
SECTION(Table, // table section
ENTRY_COUNT(2), // 2 tables
kFuncRefCode, // table 1: type
0, // table 1: no maximum
10, // table 1: minimum size
- kExternRefCode, // table 2: type
+ kAnyRefCode, // table 2: type
0, // table 2: no maximum
11), // table 2: minimum size
};
@@ -2367,11 +2150,10 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
EXPECT_EQ(kWasmFuncRef, result.value()->tables[0].type);
EXPECT_EQ(11u, result.value()->tables[1].initial_size);
- EXPECT_EQ(kWasmExternRef, result.value()->tables[1].type);
+ EXPECT_EQ(kWasmAnyRef, result.value()->tables[1].type);
}
TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
@@ -2387,7 +2169,6 @@ TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
}
TEST_F(WasmModuleVerifyTest, NullableTableIllegalInitializer) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
@@ -2405,7 +2186,6 @@ TEST_F(WasmModuleVerifyTest, NullableTableIllegalInitializer) {
}
TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -2415,7 +2195,7 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
{kOptRefCode, 1},
{kOptRefCode, kI31RefCode},
{kI31RefCode},
- {kRttWithDepthCode, 2, 0}};
+ {kRttCode, 0}};
for (Vec type : table_types) {
Vec data = {
@@ -2439,7 +2219,6 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
}
TEST_F(WasmModuleVerifyTest, NonNullableTable) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
@@ -2457,7 +2236,6 @@ TEST_F(WasmModuleVerifyTest, NonNullableTable) {
}
TEST_F(WasmModuleVerifyTest, NonNullableTableNoInitializer) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
@@ -2511,10 +2289,10 @@ TEST_F(WasmModuleVerifyTest, BranchHinting) {
WASM_FEATURE_SCOPE(branch_hinting);
static const byte data[] = {
TYPE_SECTION(1, SIG_ENTRY_v_v), FUNCTION_SECTION(2, 0, 0),
- SECTION_BRANCH_HINTS(ENTRY_COUNT(2), 0 /*func_index*/, 0 /*reserved*/,
- ENTRY_COUNT(1), 1 /*likely*/, 2 /* if offset*/,
- 1 /*func_index*/, 0 /*reserved*/, ENTRY_COUNT(1),
- 0 /*unlikely*/, 4 /* br_if offset*/),
+ SECTION_BRANCH_HINTS(ENTRY_COUNT(2), 0 /*func_index*/, ENTRY_COUNT(1),
+ 3 /* if offset*/, 1 /*reserved*/, 1 /*likely*/,
+ 1 /*func_index*/, ENTRY_COUNT(1),
+ 5 /* br_if offset*/, 1 /*reserved*/, 0 /*unlikely*/),
SECTION(Code, ENTRY_COUNT(2),
ADD_COUNT(0, /*no locals*/
WASM_IF(WASM_I32V_1(1), WASM_NOP), WASM_END),
@@ -2526,9 +2304,9 @@ TEST_F(WasmModuleVerifyTest, BranchHinting) {
EXPECT_EQ(2u, result.value()->branch_hints.size());
EXPECT_EQ(WasmBranchHint::kLikely,
- result.value()->branch_hints[0].GetHintFor(2));
+ result.value()->branch_hints[0].GetHintFor(3));
EXPECT_EQ(WasmBranchHint::kUnlikely,
- result.value()->branch_hints[1].GetHintFor(4));
+ result.value()->branch_hints[1].GetHintFor(5));
}
class WasmSignatureDecodeTest : public TestWithZone {
@@ -2552,7 +2330,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
}
TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
@@ -2566,7 +2343,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
}
TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair param_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
@@ -2580,7 +2356,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
}
TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
@@ -2598,7 +2373,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
}
TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair p0_type = kValueTypes[i];
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
@@ -2617,7 +2391,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
}
TEST_F(WasmSignatureDecodeTest, Ok_tt_tt) {
- WASM_FEATURE_SCOPE(reftypes);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair p0_type = kValueTypes[i];
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
@@ -2668,21 +2441,6 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
}
}
-TEST_F(WasmSignatureDecodeTest, Fail_externref_without_flag) {
- // Disable ExternRef support and check that decoding fails.
- WASM_FEATURE_SCOPE_VAL(reftypes, false);
- byte ref_types[] = {kFuncRefCode, kExternRefCode};
- for (byte invalid_type : ref_types) {
- for (size_t i = 0;; i++) {
- byte data[] = {SIG_ENTRY_x_xx(kI32Code, kI32Code, kI32Code)};
- if (i >= arraysize(data)) break;
- data[i] = invalid_type;
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
- EXPECT_EQ(nullptr, sig);
- }
- }
-}
-
TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte kInvalidType = 76;
for (size_t i = 0;; i++) {
@@ -3490,11 +3248,11 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentExternRef) {
// table declaration -----------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), kFuncRefCode, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE_WITH_ELEMENTS, kExternRefCode,
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE_WITH_ELEMENTS, kAnyRefCode,
U32V_1(0)),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- EXPECT_FAILURE(data);
+ EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, PassiveElementSegmentWithIndices) {
@@ -3528,13 +3286,10 @@ TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentFuncRef) {
U32V_1(0)), // func ref count
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(reftypes);
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentWithInvalidIndex) {
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 -----------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -3634,10 +3389,10 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
+/* TODO(7748): Add support for rec. groups.
TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {SECTION(
Type, ENTRY_COUNT(3),
@@ -3649,10 +3404,9 @@ TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_ARRAY_DEF(WASM_OPT_REF(0), true))};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
-}
+}*/
TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInGlobal) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kRefCode, 0, WASM_REF_NULL(0), kExprEnd)};
@@ -3661,7 +3415,6 @@ TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInGlobal) {
}
TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInType) {
- WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
@@ -3671,10 +3424,21 @@ TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInType) {
EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
}
+// TODO(7748): Add support for rec. groups.
+TEST_F(WasmModuleVerifyTest, ForwardSupertype) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1),
+ kWasmSubtypeCode, ENTRY_COUNT(1), 0,
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kRefCode, true)))};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "type 0: forward-declared supertype 0");
+}
+
TEST_F(WasmModuleVerifyTest, IllegalPackedFields) {
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kI16Code, 0, WASM_INIT_EXPR_I32V_1(13))};
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index 19ebd8e998..2dea26453a 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -14,36 +14,44 @@ namespace subtyping_unittest {
class WasmSubtypingTest : public ::testing::Test {};
using FieldInit = std::pair<ValueType, bool>;
-ValueType ref(uint32_t index) { return ValueType::Ref(index, kNonNullable); }
-ValueType optRef(uint32_t index) { return ValueType::Ref(index, kNullable); }
+constexpr ValueType ref(uint32_t index) {
+ return ValueType::Ref(index, kNonNullable);
+}
+constexpr ValueType optRef(uint32_t index) {
+ return ValueType::Ref(index, kNullable);
+}
FieldInit mut(ValueType type) { return FieldInit(type, true); }
FieldInit immut(ValueType type) { return FieldInit(type, false); }
-void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields) {
+void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields,
+ uint32_t supertype = kNoSuperType) {
StructType::Builder builder(module->signature_zone.get(),
static_cast<uint32_t>(fields.size()));
for (FieldInit field : fields) {
builder.AddField(field.first, field.second);
}
- return module->add_struct_type(builder.Build(), kNoSuperType);
+ return module->add_struct_type(builder.Build(), supertype);
}
-void DefineArray(WasmModule* module, FieldInit element_type) {
+void DefineArray(WasmModule* module, FieldInit element_type,
+ uint32_t supertype = kNoSuperType) {
module->add_array_type(module->signature_zone->New<ArrayType>(
element_type.first, element_type.second),
- kNoSuperType);
+ supertype);
}
void DefineSignature(WasmModule* module,
std::initializer_list<ValueType> params,
- std::initializer_list<ValueType> returns) {
+ std::initializer_list<ValueType> returns,
+ uint32_t supertype = kNoSuperType) {
module->add_signature(
FunctionSig::Build(module->signature_zone.get(), returns, params),
- kNoSuperType);
+ supertype);
}
TEST_F(WasmSubtypingTest, Subtyping) {
+ FLAG_SCOPE(experimental_wasm_gc);
v8::internal::AccountingAllocator allocator;
WasmModule module1_(std::make_unique<Zone>(&allocator, ZONE_NAME));
WasmModule module2_(std::make_unique<Zone>(&allocator, ZONE_NAME));
@@ -54,41 +62,50 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Set up two identical modules.
for (WasmModule* module : {module1, module2}) {
/* 0 */ DefineStruct(module, {mut(ref(2)), immut(optRef(2))});
- /* 1 */ DefineStruct(module, {mut(ref(2)), immut(ref(2))});
+ /* 1 */ DefineStruct(module, {mut(ref(2)), immut(ref(2))}, 0);
/* 2 */ DefineArray(module, immut(ref(0)));
- /* 3 */ DefineArray(module, immut(ref(1)));
- /* 4 */ DefineStruct(module,
- {mut(ref(2)), immut(ref(3)), immut(kWasmF64)});
+ /* 3 */ DefineArray(module, immut(ref(1)), 2);
+ /* 4 */ DefineStruct(module, {mut(ref(2)), immut(ref(3)), immut(kWasmF64)},
+ 1);
/* 5 */ DefineStruct(module, {mut(optRef(2)), immut(ref(2))});
/* 6 */ DefineArray(module, mut(kWasmI32));
/* 7 */ DefineArray(module, immut(kWasmI32));
/* 8 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(8))});
- /* 9 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(8))});
+ /* 9 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(8))}, 8);
/* 10 */ DefineSignature(module, {}, {});
/* 11 */ DefineSignature(module, {kWasmI32}, {kWasmI32});
/* 12 */ DefineSignature(module, {kWasmI32, kWasmI32}, {kWasmI32});
/* 13 */ DefineSignature(module, {ref(1)}, {kWasmI32});
- /* 14 */ DefineSignature(module, {ref(0)}, {kWasmI32});
- /* 15 */ DefineSignature(module, {ref(0)}, {ref(4)});
+ /* 14 */ DefineSignature(module, {ref(0)}, {kWasmI32}, 13);
+ /* 15 */ DefineSignature(module, {ref(0)}, {ref(4)}, 16);
/* 16 */ DefineSignature(module, {ref(0)}, {ref(0)});
}
- ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
- kWasmS128};
- ValueType ref_types[] = {kWasmExternRef, kWasmFuncRef, kWasmEqRef,
- kWasmI31Ref, kWasmDataRef, kWasmAnyRef,
- optRef(0), ref(0), optRef(2),
- ref(2), optRef(11), ref(11)};
+ constexpr ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
+ kWasmS128};
+ constexpr ValueType ref_types[] = {kWasmFuncRef, kWasmEqRef, kWasmI31Ref,
+ kWasmDataRef, kWasmArrayRef, kWasmAnyRef,
+ optRef(0), ref(0), optRef(2),
+ ref(2), optRef(11), ref(11)};
#define SUBTYPE(type1, type2) \
EXPECT_TRUE(IsSubtypeOf(type1, type2, module1, module))
-#define NOT_SUBTYPE(type1, type2) \
- EXPECT_FALSE(IsSubtypeOf(type1, type2, module1, module))
#define SUBTYPE_IFF(type1, type2, condition) \
EXPECT_EQ(IsSubtypeOf(type1, type2, module1, module), condition)
+#define NOT_SUBTYPE(type1, type2) \
+ EXPECT_FALSE(IsSubtypeOf(type1, type2, module1, module))
+// Use only with indexed types.
+#define VALID_SUBTYPE(type1, type2) \
+ EXPECT_TRUE(ValidSubtypeDefinition(type1.ref_index(), type2.ref_index(), \
+ module1, module)); \
+ EXPECT_TRUE(IsSubtypeOf(type1, type2, module1, module));
+#define NOT_VALID_SUBTYPE(type1, type2) \
+ EXPECT_FALSE(ValidSubtypeDefinition(type1.ref_index(), type2.ref_index(), \
+ module1, module));
// Type judgements across modules should work the same as within one module.
- for (WasmModule* module : {module1, module2}) {
+ // TODO(7748): add module2 once we have a cross-module story.
+ for (WasmModule* module : {module1 /* , module2 */}) {
// Value types are unrelated, except if they are equal.
for (ValueType subtype : numeric_types) {
for (ValueType supertype : numeric_types) {
@@ -108,13 +125,15 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Concrete reference types, i31ref and dataref are subtypes of eqref,
// externref/funcref/anyref/functions are not.
SUBTYPE_IFF(ref_type, kWasmEqRef,
- ref_type != kWasmFuncRef && ref_type != kWasmExternRef &&
- ref_type != kWasmAnyRef && ref_type != optRef(11) &&
- ref_type != ref(11));
+ ref_type != kWasmFuncRef && ref_type != kWasmAnyRef &&
+ ref_type != optRef(11) && ref_type != ref(11));
// Non-nullable struct/array types are subtypes of dataref.
- SUBTYPE_IFF(
- ref_type, kWasmDataRef,
- ref_type == kWasmDataRef || ref_type == ref(0) || ref_type == ref(2));
+ SUBTYPE_IFF(ref_type, kWasmDataRef,
+ ref_type == kWasmDataRef || ref_type == kWasmArrayRef ||
+ ref_type == ref(0) || ref_type == ref(2));
+ // Non-nullable array types are subtypes of arrayref.
+ SUBTYPE_IFF(ref_type, kWasmArrayRef,
+ ref_type == kWasmArrayRef || ref_type == ref(2));
// Functions are subtypes of funcref.
SUBTYPE_IFF(ref_type, kWasmFuncRef,
ref_type == kWasmFuncRef || ref_type == optRef(11) ||
@@ -128,89 +147,60 @@ TEST_F(WasmSubtypingTest, Subtyping) {
}
// The rest of ref. types are unrelated.
- for (ValueType type_1 : {kWasmExternRef, kWasmFuncRef, kWasmI31Ref}) {
- for (ValueType type_2 : {kWasmExternRef, kWasmFuncRef, kWasmI31Ref}) {
+ for (ValueType type_1 : {kWasmFuncRef, kWasmI31Ref, kWasmArrayRef}) {
+ for (ValueType type_2 : {kWasmFuncRef, kWasmI31Ref, kWasmArrayRef}) {
SUBTYPE_IFF(type_1, type_2, type_1 == type_2);
}
}
// Unrelated refs are unrelated.
- NOT_SUBTYPE(ref(0), ref(2));
- NOT_SUBTYPE(optRef(3), optRef(1));
+ NOT_VALID_SUBTYPE(ref(0), ref(2));
+ NOT_VALID_SUBTYPE(optRef(3), optRef(1));
// ref is a subtype of optref for the same struct/array.
- SUBTYPE(ref(0), optRef(0));
- SUBTYPE(ref(2), optRef(2));
+ VALID_SUBTYPE(ref(0), optRef(0));
+ VALID_SUBTYPE(ref(2), optRef(2));
// optref is not a subtype of ref for the same struct/array.
NOT_SUBTYPE(optRef(0), ref(0));
NOT_SUBTYPE(optRef(2), ref(2));
// ref is a subtype of optref if the same is true for the underlying
// structs/arrays.
- SUBTYPE(ref(3), optRef(2));
+ VALID_SUBTYPE(ref(3), optRef(2));
// Prefix subtyping for structs.
- SUBTYPE(optRef(4), optRef(0));
+ VALID_SUBTYPE(optRef(4), optRef(0));
// Mutable fields are invariant.
- NOT_SUBTYPE(ref(0), ref(5));
+ NOT_VALID_SUBTYPE(ref(0), ref(5));
// Immutable fields are covariant.
- SUBTYPE(ref(1), ref(0));
+ VALID_SUBTYPE(ref(1), ref(0));
// Prefix subtyping + immutable field covariance for structs.
- SUBTYPE(optRef(4), optRef(1));
+ VALID_SUBTYPE(optRef(4), optRef(1));
// No subtyping between mutable/immutable fields.
- NOT_SUBTYPE(ref(7), ref(6));
- NOT_SUBTYPE(ref(6), ref(7));
+ NOT_VALID_SUBTYPE(ref(7), ref(6));
+ NOT_VALID_SUBTYPE(ref(6), ref(7));
// Recursive types.
- SUBTYPE(ref(9), ref(8));
+ VALID_SUBTYPE(ref(9), ref(8));
// Identical rtts are subtypes of each other.
- SUBTYPE(ValueType::Rtt(5, 3), ValueType::Rtt(5, 3));
SUBTYPE(ValueType::Rtt(5), ValueType::Rtt(5));
// Rtts of unrelated types are unrelated.
- NOT_SUBTYPE(ValueType::Rtt(1, 1), ValueType::Rtt(2, 1));
NOT_SUBTYPE(ValueType::Rtt(1), ValueType::Rtt(2));
- NOT_SUBTYPE(ValueType::Rtt(1, 0), ValueType::Rtt(2));
- // Rtts of different depth are unrelated.
- NOT_SUBTYPE(ValueType::Rtt(5, 1), ValueType::Rtt(5, 3));
- NOT_SUBTYPE(ValueType::Rtt(5, 8), ValueType::Rtt(5, 3));
// Rtts of identical types are subtype-related.
- SUBTYPE(ValueType::Rtt(8, 1), ValueType::Rtt(9, 1));
- SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(9));
+ // TODO(7748): Implement type canonicalization.
+ // SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(9));
// Rtts of subtypes are not related.
- NOT_SUBTYPE(ValueType::Rtt(1, 1), ValueType::Rtt(0, 1));
NOT_SUBTYPE(ValueType::Rtt(1), ValueType::Rtt(0));
- // rtt(t, d) <: rtt(t)
- for (uint8_t depth : {0, 1, 5}) {
- SUBTYPE(ValueType::Rtt(1, depth), ValueType::Rtt(1));
- }
-
- // Function subtyping depends on the selected wasm features.
- // Without wasm-gc:
+ // Function subtyping;
// Unrelated function types are unrelated.
- NOT_SUBTYPE(ref(10), ref(11));
+ NOT_VALID_SUBTYPE(ref(10), ref(11));
// Function type with different parameter counts are unrelated.
- NOT_SUBTYPE(ref(12), ref(11));
- // Parameter contravariance does not hold.
- NOT_SUBTYPE(ref(14), ref(13));
- // Return type covariance does not hold.
- NOT_SUBTYPE(ref(15), ref(16));
- // Only identical types are subtype-related.
- SUBTYPE(ref(10), ref(10));
- SUBTYPE(ref(11), ref(11));
-
- {
- // With wasm-gc:
- EXPERIMENTAL_FLAG_SCOPE(gc);
- // Unrelated function types are unrelated.
- NOT_SUBTYPE(ref(10), ref(11));
- // Function type with different parameter counts are unrelated.
- NOT_SUBTYPE(ref(12), ref(11));
- // Parameter contravariance holds.
- SUBTYPE(ref(14), ref(13));
- // Return type covariance holds.
- SUBTYPE(ref(15), ref(16));
- // Identical types are subtype-related.
- SUBTYPE(ref(10), ref(10));
- SUBTYPE(ref(11), ref(11));
- }
+ NOT_VALID_SUBTYPE(ref(12), ref(11));
+ // Parameter contravariance holds.
+ VALID_SUBTYPE(ref(14), ref(13));
+ // Return type covariance holds.
+ VALID_SUBTYPE(ref(15), ref(16));
+ // Identical types are subtype-related.
+ VALID_SUBTYPE(ref(10), ref(10));
+ VALID_SUBTYPE(ref(11), ref(11));
}
#undef SUBTYPE
#undef NOT_SUBTYPE
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 0356782c17..37f22072b5 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -7,7 +7,7 @@
#if V8_OS_LINUX || V8_OS_FREEBSD
#include <signal.h>
#include <ucontext.h>
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
#include <signal.h>
#include <sys/ucontext.h>
#elif V8_OS_WIN
@@ -40,7 +40,7 @@ namespace wasm {
namespace {
constexpr Register scratch = r10;
bool g_test_handler_executed = false;
-#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
+#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
struct sigaction g_old_segv_action;
struct sigaction g_old_fpe_action;
struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
@@ -96,7 +96,7 @@ class TrapHandlerTest : public TestWithIsolate,
}
void InstallFallbackHandler() {
-#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
+#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
// Set up a signal handler to recover from the expected crash.
struct sigaction action;
action.sa_sigaction = SignalHandler;
@@ -124,7 +124,7 @@ class TrapHandlerTest : public TestWithIsolate,
// Clean up the trap handler
trap_handler::RemoveTrapHandler();
if (!g_test_handler_executed) {
-#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
+#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
// The test handler cleans up the signal handler setup in the test. If the
// test handler was not called, we have to do the cleanup ourselves.
EXPECT_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
@@ -155,7 +155,7 @@ class TrapHandlerTest : public TestWithIsolate,
reinterpret_cast<Address>(desc.buffer + recovery_offset);
}
-#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
+#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
static void SignalHandler(int signal, siginfo_t* info, void* context) {
if (g_use_as_first_chance_handler) {
if (v8::TryHandleWebAssemblyTrapPosix(signal, info, context)) {
@@ -174,7 +174,7 @@ class TrapHandlerTest : public TestWithIsolate,
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
#if V8_OS_LINUX
uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
-#elif V8_OS_MACOSX
+#elif V8_OS_DARWIN
uc->uc_mcontext->__ss.__rip = g_recovery_address;
#elif V8_OS_FREEBSD
uc->uc_mcontext.mc_rip = g_recovery_address;
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index b9970cc097..2eafdcec54 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -22,7 +22,7 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
ValueType params[kMaxCount];
for (size_t i = 0; i < kMaxCount; i += 2) {
- params[i] = kWasmExternRef;
+ params[i] = kWasmAnyRef;
EXPECT_TRUE(i + 1 < kMaxCount);
params[i + 1] = kWasmI32;
}
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 2d6fdcee3a..72b604a8d4 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -188,9 +188,9 @@ TEST_F(WasmCapiTest, DirectCallCapiFunction) {
ValType::make(::wasm::ANYREF)));
own<Func> func = Func::make(store(), cpp_sig.get(), PlusOne);
Extern* imports[] = {func.get()};
- ValueType wasm_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
- kWasmExternRef, kWasmI32, kWasmI64, kWasmF32,
- kWasmF64, kWasmExternRef};
+ ValueType wasm_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
+ kWasmAnyRef, kWasmI32, kWasmI64, kWasmF32,
+ kWasmF64, kWasmAnyRef};
FunctionSig wasm_sig(5, 5, wasm_types);
int func_index = builder()->AddImport(base::CStrVector("func"), &wasm_sig);
builder()->ExportImportedFunction(base::CStrVector("func"), func_index);
diff --git a/deps/v8/test/wasm-api-tests/hostref.cc b/deps/v8/test/wasm-api-tests/hostref.cc
index fb6181b26a..7e5af0221b 100644
--- a/deps/v8/test/wasm-api-tests/hostref.cc
+++ b/deps/v8/test/wasm-api-tests/hostref.cc
@@ -23,9 +23,9 @@ own<Trap> IdentityCallback(const Val args[], Val results[]) {
} // namespace
TEST_F(WasmCapiTest, HostRef) {
- ValueType rr_reps[] = {kWasmExternRef, kWasmExternRef};
- ValueType ri_reps[] = {kWasmExternRef, kWasmI32};
- ValueType ir_reps[] = {kWasmI32, kWasmExternRef};
+ ValueType rr_reps[] = {kWasmAnyRef, kWasmAnyRef};
+ ValueType ri_reps[] = {kWasmAnyRef, kWasmI32};
+ ValueType ir_reps[] = {kWasmI32, kWasmAnyRef};
// Naming convention: result_params_sig.
FunctionSig r_r_sig(1, 1, rr_reps);
FunctionSig v_r_sig(0, 1, rr_reps);
@@ -35,9 +35,9 @@ TEST_F(WasmCapiTest, HostRef) {
uint32_t func_index = builder()->AddImport(base::CStrVector("f"), &r_r_sig);
const bool kMutable = true;
uint32_t global_index = builder()->AddExportedGlobal(
- kWasmExternRef, kMutable, WasmInitExpr::RefNullConst(HeapType::kExtern),
+ kWasmAnyRef, kMutable, WasmInitExpr::RefNullConst(HeapType::kAny),
base::CStrVector("global"));
- uint32_t table_index = builder()->AddTable(kWasmExternRef, 10);
+ uint32_t table_index = builder()->AddTable(kWasmAnyRef, 10);
builder()->AddExport(base::CStrVector("table"), kExternalTable, table_index);
byte global_set_code[] = {WASM_GLOBAL_SET(global_index, WASM_LOCAL_GET(0))};
AddExportedFunction(base::CStrVector("global.set"), global_set_code,
diff --git a/deps/v8/test/wasm-api-tests/reflect.cc b/deps/v8/test/wasm-api-tests/reflect.cc
index 9831e693bb..ccbc2d723c 100644
--- a/deps/v8/test/wasm-api-tests/reflect.cc
+++ b/deps/v8/test/wasm-api-tests/reflect.cc
@@ -35,8 +35,8 @@ void ExpectName(const char* expected, const ::wasm::Name& name) {
TEST_F(WasmCapiTest, Reflect) {
// Create a module exporting a function, a global, a table, and a memory.
byte code[] = {WASM_UNREACHABLE};
- ValueType types[] = {kWasmI32, kWasmExternRef, kWasmI32,
- kWasmI64, kWasmF32, kWasmF64};
+ ValueType types[] = {kWasmI32, kWasmAnyRef, kWasmI32,
+ kWasmI64, kWasmF32, kWasmF64};
FunctionSig sig(2, 4, types);
AddExportedFunction(base::CStrVector(kFuncName), code, sizeof(code), &sig);
diff --git a/deps/v8/test/wasm-api-tests/serialize.cc b/deps/v8/test/wasm-api-tests/serialize.cc
index 1b82b47884..c74bbbd707 100644
--- a/deps/v8/test/wasm-api-tests/serialize.cc
+++ b/deps/v8/test/wasm-api-tests/serialize.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
+#include "src/wasm/c-api.h"
#include "test/wasm-api-tests/wasm-api-test.h"
namespace v8 {
@@ -29,8 +31,24 @@ TEST_F(WasmCapiTest, Serialize) {
vec<byte_t> serialized = module()->serialize();
EXPECT_TRUE(serialized); // Serialization succeeded.
+
+ // We reset the module and collect it to make sure the NativeModuleCache does
+ // not contain it anymore. Otherwise deserialization will not happen.
+ ResetModule();
+ i::Isolate* isolate =
+ reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
+ isolate->heap()->PreciseCollectAllGarbage(
+ i::Heap::kForcedGC, i::GarbageCollectionReason::kTesting,
+ v8::kNoGCCallbackFlags);
+ isolate->heap()->PreciseCollectAllGarbage(
+ i::Heap::kForcedGC, i::GarbageCollectionReason::kTesting,
+ v8::kNoGCCallbackFlags);
own<Module> deserialized = Module::deserialize(store(), serialized);
+ // Try to serialize the module again. This can fail if deserialization does
+ // not set up a clean state.
+ deserialized->serialize();
+
own<FuncType> callback_type =
FuncType::make(ownvec<ValType>::make(), ownvec<ValType>::make());
own<Func> callback = Func::make(store(), callback_type.get(), Callback);
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-test.h b/deps/v8/test/wasm-api-tests/wasm-api-test.h
index 434dcf7690..92fe8dbab3 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-test.h
+++ b/deps/v8/test/wasm-api-tests/wasm-api-test.h
@@ -121,6 +121,8 @@ class WasmCapiTest : public ::testing::Test {
return table;
}
+ void ResetModule() { module_.reset(); }
+
void Shutdown() {
exports_.reset();
instance_.reset();
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index 77bb70be11..4892fd8257 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-032b07ce80fd04f7b3f143d1d00eb7454156e2e3 \ No newline at end of file
+2d699b7feef41ef625bf790a1c583710a6f8a83e \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index a2abfe3b0f..a057f8e717 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -8,18 +8,15 @@
# This test can only be executed in the browser
'wpt/idlharness': [SKIP],
# Failing WPT tests
+ 'wpt/exception/getArg.tentative': [FAIL],
'wpt/exception/toString.tentative': [FAIL],
'wpt/exception/type.tentative': [FAIL],
'wpt/function/constructor.tentative': [FAIL],
'wpt/function/table.tentative': [FAIL],
'wpt/function/type.tentative': [FAIL],
-
- # Failing tests after update
- 'proposals/js-types/global/type': [FAIL],
- 'proposals/js-types/module/exports': [FAIL],
- 'proposals/js-types/module/imports': [FAIL],
- 'proposals/js-types/table/constructor': [FAIL],
- 'proposals/js-types/table/type': [FAIL],
+ 'wpt/global/type.tentative': [FAIL],
+ 'wpt/interface': [FAIL],
+ 'wpt/table/type.tentative': [FAIL],
# Outdated proposal tests.
'proposals/js-types/table/get-set': [FAIL],
@@ -33,7 +30,6 @@
# TODO(v8:10556): Remove sub-typing in the reference-types implementation
'proposals/js-types/constructor/instantiate': [FAIL],
- 'proposals/js-types/global/constructor': [FAIL],
'proposals/js-types/global/value-get-set': [FAIL],
'proposals/js-types/instance/constructor': [FAIL],
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index caa4a20b6f..90dbf9c2d7 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -47,7 +47,7 @@ class TestCase(testcase.D8TestCase):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
- return ['--experimental-wasm-reftypes']
+ return []
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index e6fc7039fb..6c83797a22 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-1cce5fa8f76a93b77bf022aefbe2832a8f4e0588 \ No newline at end of file
+7e099a5b3671628e2a64798d3c3a0228e1d1883b \ No newline at end of file
diff --git a/deps/v8/third_party/google_benchmark/BUILD.gn b/deps/v8/third_party/google_benchmark/BUILD.gn
index 72d8393f4d..fc42dac851 100644
--- a/deps/v8/third_party/google_benchmark/BUILD.gn
+++ b/deps/v8/third_party/google_benchmark/BUILD.gn
@@ -7,7 +7,10 @@ import("../../gni/v8.gni")
if (v8_enable_google_benchmark) {
config("benchmark_config") {
- include_dirs = [ "src/include" ]
+ include_dirs = [
+ "src/include",
+ "precompiled_headers",
+ ]
}
source_set("google_benchmark") {
@@ -25,6 +28,7 @@ if (v8_enable_google_benchmark) {
"src/src/benchmark_register.h",
"src/src/benchmark_runner.cc",
"src/src/benchmark_runner.h",
+ "src/src/check.cc",
"src/src/check.h",
"src/src/colorprint.cc",
"src/src/colorprint.h",
diff --git a/deps/v8/third_party/google_benchmark/precompiled_headers/benchmark/export.h b/deps/v8/third_party/google_benchmark/precompiled_headers/benchmark/export.h
new file mode 100644
index 0000000000..a69f5a6ddc
--- /dev/null
+++ b/deps/v8/third_party/google_benchmark/precompiled_headers/benchmark/export.h
@@ -0,0 +1,31 @@
+// Copyright 2022 The V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_GOOGLE_BENCHMARK_PRECOMPILED_HEADERS_BENCHMARK_EXPORT_H
+#define THIRD_PARTY_GOOGLE_BENCHMARK_PRECOMPILED_HEADERS_BENCHMARK_EXPORT_H
+
+// This is a precompiled header as export.h is usually generated by the
+// libraries build system.
+
+#ifndef BENCHMARK_EXPORT
+#define BENCHMARK_EXPORT
+#endif
+
+#ifndef BENCHMARK_NO_EXPORT
+#define BENCHMARK_NO_EXPORT
+#endif
+
+#ifndef BENCHMARK_DEPRECATED
+#define BENCHMARK_DEPRECATED __attribute__((__deprecated__))
+#endif
+
+#ifndef BENCHMARK_DEPRECATED_EXPORT
+#define BENCHMARK_DEPRECATED_EXPORT BENCHMARK_EXPORT BENCHMARK_DEPRECATED
+#endif
+
+#ifndef BENCHMARK_DEPRECATED_NO_EXPORT
+#define BENCHMARK_DEPRECATED_NO_EXPORT BENCHMARK_NO_EXPORT BENCHMARK_DEPRECATED
+#endif
+
+#endif // THIRD_PARTY_GOOGLE_BENCHMARK_PRECOMPILED_HEADERS_BENCHMARK_EXPORT_H
diff --git a/deps/v8/third_party/googletest/BUILD.gn b/deps/v8/third_party/googletest/BUILD.gn
index 4d393efd95..bc82c635da 100644
--- a/deps/v8/third_party/googletest/BUILD.gn
+++ b/deps/v8/third_party/googletest/BUILD.gn
@@ -51,6 +51,7 @@ config("gmock_config") {
source_set("gtest") {
testonly = true
sources = [
+ "src/googletest/include/gtest/gtest-assertion-result.h",
"src/googletest/include/gtest/gtest-death-test.h",
"src/googletest/include/gtest/gtest-matchers.h",
"src/googletest/include/gtest/gtest-message.h",
@@ -70,6 +71,7 @@ source_set("gtest") {
"src/googletest/include/gtest/internal/gtest-type-util.h",
#"src/googletest/src/gtest-all.cc", # Not needed by our build.
+ "src/googletest/src/gtest-assertion-result.cc",
"src/googletest/src/gtest-death-test.cc",
"src/googletest/src/gtest-filepath.cc",
"src/googletest/src/gtest-internal-inl.h",
diff --git a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
index 25766ee78d..b22030a8a8 100644
--- a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+++ b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -27,8 +27,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Google C++ Testing and Mocking Framework definitions useful in production code.
+// Google C++ Testing and Mocking Framework definitions useful in production
+// code.
#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
#define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/deps/v8/third_party/inspector_protocol/BUILD.gn b/deps/v8/third_party/inspector_protocol/BUILD.gn
index 94b7fe2677..d3fb166a0f 100644
--- a/deps/v8/third_party/inspector_protocol/BUILD.gn
+++ b/deps/v8/third_party/inspector_protocol/BUILD.gn
@@ -31,7 +31,6 @@ v8_source_set("crdtp") {
"crdtp/protocol_core.h",
"crdtp/serializable.cc",
"crdtp/serializable.h",
- "crdtp/serializer_traits.h",
"crdtp/span.cc",
"crdtp/span.h",
"crdtp/status.cc",
@@ -63,7 +62,6 @@ v8_source_set("crdtp_test") {
"crdtp/glue_test.cc",
"crdtp/json_test.cc",
"crdtp/serializable_test.cc",
- "crdtp/serializer_traits_test.cc",
"crdtp/span_test.cc",
"crdtp/status_test.cc",
"crdtp/status_test_support.cc",
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index c2870d7a39..a9c4531440 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 32cf5f2bf4dc20c73ead291e68d2e2f6b638cd57
+Revision: 5221cbfa7f940d56ae8b79bf34c446a56781dd56
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index 92ca530e05..11a93e7622 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -3,6 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import os
import os.path
import sys
import argparse
@@ -144,6 +145,7 @@ def dash_to_camelcase(word):
def to_snake_case(name):
+ name = re.sub(r"([A-Z]{2,})([A-Z][a-z])", r"\1_\2", name)
return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower()
@@ -693,6 +695,11 @@ def main():
sys.exit()
for file_name, content in outputs.items():
+ # Remove output file first to account for potential case changes.
+ try:
+ os.remove(file_name)
+ except OSError:
+ pass
out_file = open(file_name, "w")
out_file.write(content)
out_file.close()
diff --git a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
index f98bebcd5e..7070a80a46 100755
--- a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
+++ b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
@@ -10,6 +10,13 @@ import sys
import pdl
+def open_to_write(path):
+ if sys.version_info >= (3,0):
+ return open(path, 'w', encoding='utf-8')
+ else:
+ return open(path, 'wb')
+
+
def main(argv):
parser = argparse.ArgumentParser(description=(
"Converts from .pdl to .json by invoking the pdl Python module."))
@@ -25,8 +32,7 @@ def main(argv):
pdl_string = input_file.read()
protocol = pdl.loads(pdl_string, file_name, args.map_binary_to_string)
input_file.close()
-
- output_file = open(os.path.normpath(args.json_file), 'wb')
+ output_file = open_to_write(os.path.normpath(args.json_file))
json.dump(protocol, output_file, indent=4, separators=(',', ': '))
output_file.close()
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/cbor.cc b/deps/v8/third_party/inspector_protocol/crdtp/cbor.cc
index 5be7d61c55..d64fbbffac 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/cbor.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/cbor.cc
@@ -695,8 +695,9 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
// inspector protocol, it's not a CBOR limitation), so we check
// against the signed max, so that the allowable values are
// 0, 1, 2, ... 2^31 - 1.
- if (!bytes_read || std::numeric_limits<int32_t>::max() <
- token_start_internal_value_) {
+ if (!bytes_read ||
+ static_cast<uint64_t>(std::numeric_limits<int32_t>::max()) <
+ static_cast<uint64_t>(token_start_internal_value_)) {
SetError(Error::CBOR_INVALID_INT32);
return;
}
@@ -713,8 +714,9 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
// We check the payload in token_start_internal_value_ against
// that range (2^31-1 is also known as
// std::numeric_limits<int32_t>::max()).
- if (!bytes_read || token_start_internal_value_ >
- std::numeric_limits<int32_t>::max()) {
+ if (!bytes_read ||
+ static_cast<uint64_t>(token_start_internal_value_) >
+ static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) {
SetError(Error::CBOR_INVALID_INT32);
return;
}
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/protocol_core_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/protocol_core_test.cc
index 501075c073..fcf9c2f991 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/protocol_core_test.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/protocol_core_test.cc
@@ -10,31 +10,10 @@
#include "maybe.h"
#include "status_test_support.h"
#include "test_platform.h"
+#include "test_string_traits.h"
namespace v8_crdtp {
-// Test-only. Real-life bindings use UTF8/16 conversions as needed.
-template <>
-struct ProtocolTypeTraits<std::string> {
- static bool Deserialize(DeserializerState* state, std::string* value) {
- if (state->tokenizer()->TokenTag() == cbor::CBORTokenTag::STRING8) {
- auto cbor_span = state->tokenizer()->GetString8();
- value->assign(reinterpret_cast<const char*>(cbor_span.data()),
- cbor_span.size());
- return true;
- }
- state->RegisterError(Error::BINDINGS_STRING8_VALUE_EXPECTED);
- return false;
- }
-
- static void Serialize(const std::string& value, std::vector<uint8_t>* bytes) {
- cbor::EncodeString8(
- span<uint8_t>(reinterpret_cast<const uint8_t*>(value.data()),
- value.size()),
- bytes);
- }
-};
-
namespace {
using ::testing::Eq;
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits.h b/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits.h
deleted file mode 100644
index 0bb4d90ada..0000000000
--- a/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRDTP_SERIALIZER_TRAITS_H_
-#define V8_CRDTP_SERIALIZER_TRAITS_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-#include "cbor.h"
-#include "maybe.h"
-#include "span.h"
-
-namespace v8_crdtp {
-// =============================================================================
-// SerializerTraits - Encodes field values of protocol objects in CBOR.
-// =============================================================================
-//
-// A family of serialization functions which are used by FieldSerializerTraits
-// (below) to encode field values in CBOR. Conceptually, it's this:
-//
-// Serialize(bool value, std::vector<uint8_t>* out);
-// Serialize(int32_t value, std::vector<uint8_t>* out);
-// Serialize(double value, std::vector<uint8_t>* out);
-// ...
-//
-// However, if this was to use straight-forward overloading, implicit
-// type conversions would lead to ambiguity - e.g., a bool could be
-// represented as an int32_t, but it should really be encoded as a bool.
-// The template parameterized / specialized structs accomplish this.
-//
-// SerializerTraits<bool>::Serialize(bool value, std::vector<uint8_t>* out);
-// SerializerTraits<int32>::Serialize(int32_t value, std::vector<uint8_t>* out);
-// SerializerTraits<double>::Serialize(double value, std::vector<uint8_t>* out);
-template <typename T>
-struct SerializerTraits {
- // |Serializable| (defined in serializable.h) already knows how to serialize
- // to CBOR, so we can just delegate. This covers domain specific types,
- // protocol::Binary, etc.
- // However, we use duck-typing here, because Exported, which is part of the V8
- // headers also comes with AppendSerialized, and logically it's the same type,
- // but it lives in a different namespace (v8_inspector::protocol::Exported).
- template <
- typename LikeSerializable,
- typename std::enable_if_t<std::is_member_pointer<decltype(
- &LikeSerializable::AppendSerialized)>{},
- int> = 0>
- static void Serialize(const LikeSerializable& value,
- std::vector<uint8_t>* out) {
- value.AppendSerialized(out);
- }
-};
-
-// This covers std::string, which is assumed to be UTF-8.
-// The two other string implementations that are used in the protocol bindings:
-// - WTF::String, for which the SerializerTraits specialization is located
-// in third_party/blink/renderer/core/inspector/v8-inspector-string.h.
-// - v8_inspector::String16, implemented in v8/src/inspector/string-16.h
-// along with its SerializerTraits specialization.
-template <>
-struct SerializerTraits<std::string> {
- static void Serialize(const std::string& str, std::vector<uint8_t>* out) {
- cbor::EncodeString8(SpanFrom(str), out);
- }
-};
-
-template <>
-struct SerializerTraits<bool> {
- static void Serialize(bool value, std::vector<uint8_t>* out) {
- out->push_back(value ? cbor::EncodeTrue() : cbor::EncodeFalse());
- }
-};
-
-template <>
-struct SerializerTraits<int32_t> {
- static void Serialize(int32_t value, std::vector<uint8_t>* out) {
- cbor::EncodeInt32(value, out);
- }
-};
-
-template <>
-struct SerializerTraits<double> {
- static void Serialize(double value, std::vector<uint8_t>* out) {
- cbor::EncodeDouble(value, out);
- }
-};
-
-template <typename T>
-struct SerializerTraits<std::vector<T>> {
- static void Serialize(const std::vector<T>& value,
- std::vector<uint8_t>* out) {
- out->push_back(cbor::EncodeIndefiniteLengthArrayStart());
- for (const T& element : value)
- SerializerTraits<T>::Serialize(element, out);
- out->push_back(cbor::EncodeStop());
- }
-};
-
-template <typename T>
-struct SerializerTraits<std::unique_ptr<T>> {
- static void Serialize(const std::unique_ptr<T>& value,
- std::vector<uint8_t>* out) {
- SerializerTraits<T>::Serialize(*value, out);
- }
-};
-
-// =============================================================================
-// FieldSerializerTraits - Encodes fields of protocol objects in CBOR
-// =============================================================================
-//
-// The generated code (see TypeBuilder_cpp.template) invokes SerializeField,
-// which then instantiates the FieldSerializerTraits to emit the appropriate
-// existence checks / dereference for the field value. This avoids emitting
-// the field name if the value for an optional field isn't set.
-template <typename T>
-struct FieldSerializerTraits {
- static void Serialize(span<uint8_t> field_name,
- const T& field_value,
- std::vector<uint8_t>* out) {
- cbor::EncodeString8(field_name, out);
- SerializerTraits<T>::Serialize(field_value, out);
- }
-};
-
-template <typename T>
-struct FieldSerializerTraits<detail::PtrMaybe<T>> {
- static void Serialize(span<uint8_t> field_name,
- const detail::PtrMaybe<T>& field_value,
- std::vector<uint8_t>* out) {
- if (!field_value.isJust())
- return;
- cbor::EncodeString8(field_name, out);
- SerializerTraits<T>::Serialize(*field_value.fromJust(), out);
- }
-};
-
-template <typename T>
-struct FieldSerializerTraits<detail::ValueMaybe<T>> {
- static void Serialize(span<uint8_t> field_name,
- const detail::ValueMaybe<T>& field_value,
- std::vector<uint8_t>* out) {
- if (!field_value.isJust())
- return;
- cbor::EncodeString8(field_name, out);
- SerializerTraits<T>::Serialize(field_value.fromJust(), out);
- }
-};
-
-template <typename T>
-void SerializeField(span<uint8_t> field_name,
- const T& field_value,
- std::vector<uint8_t>* out) {
- FieldSerializerTraits<T>::Serialize(field_name, field_value, out);
-}
-} // namespace v8_crdtp
-
-#endif // V8_CRDTP_SERIALIZER_TRAITS_H_
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits_test.cc
deleted file mode 100644
index e5543bd930..0000000000
--- a/deps/v8/third_party/inspector_protocol/crdtp/serializer_traits_test.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "serializer_traits.h"
-
-#include <array>
-#include "serializable.h"
-#include "test_platform.h"
-
-// The purpose of this test is to ensure that the
-// {Field}SerializerTraits<X>::Serialize methods invoke the appropriate
-// functions from cbor.h; so, it's usually sufficient to compare with what
-// cbor.h function invocations would produce, rather than making assertions on
-// the specific bytes emitted by the SerializerTraits code.
-
-namespace v8_crdtp {
-namespace {
-// =============================================================================
-// SerializerTraits - Encodes field values of protocol objects in CBOR.
-// =============================================================================
-
-TEST(SerializerTraits, Bool) {
- std::vector<uint8_t> out;
- SerializerTraits<bool>::Serialize(true, &out);
- SerializerTraits<bool>::Serialize(false, &out);
- EXPECT_THAT(out,
- testing::ElementsAre(cbor::EncodeTrue(), cbor::EncodeFalse()));
-}
-
-TEST(SerializerTraits, Double) {
- std::vector<uint8_t> out;
- SerializerTraits<double>::Serialize(1.00001, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeDouble(1.00001, &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(SerializerTraits, Int32) {
- std::vector<uint8_t> out;
- SerializerTraits<int32_t>::Serialize(42, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeInt32(42, &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(SerializerTraits, VectorOfInt32) {
- std::vector<int32_t> ints = {1, 2, 3};
-
- std::vector<uint8_t> out;
- SerializerTraits<std::vector<int32_t>>::Serialize(ints, &out);
-
- std::vector<uint8_t> expected;
- expected.push_back(cbor::EncodeIndefiniteLengthArrayStart());
- for (int32_t v : ints)
- cbor::EncodeInt32(v, &expected);
- expected.push_back(cbor::EncodeStop());
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-// Foo is an example for a domain specific type.
-class Foo : public Serializable {
- public:
- Foo(int32_t value) : value(value) {}
-
- int32_t value;
-
- void AppendSerialized(std::vector<uint8_t>* out) const override {
- // In production, this would be generated code which emits a
- // CBOR map that has STRING8 keys corresponding to the field names
- // and field values encoded using SerializerTraits::Serialize.
- //
- // For the test we simplify this drastically and just emit the field
- // value, for conveniently testing std::vector<std::unique_ptr<Foo>>,
- // as well as the convenience methods for raw pointer and unique_ptr.
- SerializerTraits<int32_t>::Serialize(value, out);
- }
-};
-
-TEST(SerializerTraits, VectorOfDomainSpecificType) {
- std::vector<std::unique_ptr<Foo>> foos;
- foos.push_back(std::make_unique<Foo>(1));
- foos.push_back(std::make_unique<Foo>(2));
- foos.push_back(std::make_unique<Foo>(3));
-
- std::vector<uint8_t> out;
- SerializerTraits<std::vector<std::unique_ptr<Foo>>>::Serialize(foos, &out);
-
- std::vector<uint8_t> expected;
- expected.push_back(cbor::EncodeIndefiniteLengthArrayStart());
- for (int32_t v : {1, 2, 3})
- cbor::EncodeInt32(v, &expected);
- expected.push_back(cbor::EncodeStop());
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(SerializerTraits, ConstRefAndUniquePtr) {
- // Shows that SerializerTraits<Foo> allows unique_ptr.
- Foo foo(42);
- auto bar = std::make_unique<Foo>(21);
-
- std::vector<uint8_t> out;
- // In this case, |foo| is taken as a const Foo&.
- SerializerTraits<Foo>::Serialize(foo, &out);
- // In this case, |bar| is taken as a const std::unique_ptr<Foo>&.
- SerializerTraits<std::unique_ptr<Foo>>::Serialize(bar, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeInt32(42, &expected);
- cbor::EncodeInt32(21, &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(SerializerTraits, UTF8String) {
- std::string msg = "Hello, 🌎.";
-
- std::vector<uint8_t> out;
- SerializerTraits<std::string>::Serialize(msg, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom(msg), &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-// A trivial model of Exported.
-// (see
-// https://cs.chromium.org/chromium/src/out/Debug/gen/v8/include/inspector/Debugger.h).
-struct Exported {
- std::string msg;
- void AppendSerialized(std::vector<uint8_t>* out) const {
- cbor::EncodeString8(SpanFrom(msg), out);
- }
-};
-
-TEST(SerializerTraits, Exported) {
- Exported exported;
- exported.msg = "Hello, world.";
-
- std::vector<uint8_t> out;
- SerializerTraits<Exported>::Serialize(exported, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom(exported.msg), &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-// =============================================================================
-// FieldSerializerTraits - Encodes fields of protocol objects in CBOR
-// =============================================================================
-TEST(FieldSerializerTraits, RequiredField) {
- std::string value = "Hello, world.";
-
- std::vector<uint8_t> out;
- SerializeField(SpanFrom("msg"), value, &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom("msg"), &expected);
- cbor::EncodeString8(SpanFrom(value), &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-template <typename T>
-class FieldSerializerTraits_MaybeTest : public ::testing::Test {};
-using MaybeTypes =
- ::testing::Types<detail::ValueMaybe<bool>,
- detail::ValueMaybe<double>,
- detail::ValueMaybe<int32_t>,
- detail::ValueMaybe<std::string>,
- detail::PtrMaybe<Foo>,
- detail::PtrMaybe<std::vector<std::unique_ptr<Foo>>>>;
-TYPED_TEST_SUITE(FieldSerializerTraits_MaybeTest, MaybeTypes);
-
-TYPED_TEST(FieldSerializerTraits_MaybeTest, NoOutputForFieldsIfNotJust) {
- std::vector<uint8_t> out;
- SerializeField(SpanFrom("maybe"), TypeParam(), &out);
- EXPECT_THAT(out, testing::ElementsAreArray(std::vector<uint8_t>()));
-}
-
-TEST(FieldSerializerTraits, MaybeBool) {
- std::vector<uint8_t> out;
- SerializeField(SpanFrom("true"), detail::ValueMaybe<bool>(true), &out);
- SerializeField(SpanFrom("false"), detail::ValueMaybe<bool>(false), &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom("true"), &expected);
- expected.push_back(cbor::EncodeTrue());
- cbor::EncodeString8(SpanFrom("false"), &expected);
- expected.push_back(cbor::EncodeFalse());
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(FieldSerializerTraits, MaybeDouble) {
- std::vector<uint8_t> out;
- SerializeField(SpanFrom("dbl"), detail::ValueMaybe<double>(3.14), &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom("dbl"), &expected);
- cbor::EncodeDouble(3.14, &expected);
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-
-TEST(FieldSerializerTraits, MaybePtrFoo) {
- std::vector<uint8_t> out;
- SerializeField(SpanFrom("foo"),
- detail::PtrMaybe<Foo>(std::make_unique<Foo>(42)), &out);
-
- std::vector<uint8_t> expected;
- cbor::EncodeString8(SpanFrom("foo"), &expected);
- cbor::EncodeInt32(42, &expected); // Simplified relative to production.
-
- EXPECT_THAT(out, testing::ElementsAreArray(expected));
-}
-} // namespace
-} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
index 54f25e1c9c..10b6c4ba2e 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -12,11 +12,8 @@
{% endif %}
#include {{format_include(config.lib.string_header)}}
-#include <cstddef>
#include <memory>
#include <vector>
-#include <unordered_map>
-#include <unordered_set>
#include "{{config.crdtp.dir}}/error_support.h"
#include "{{config.crdtp.dir}}/dispatch.h"
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
index a16b522c38..998808be6c 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
@@ -18,7 +18,7 @@ namespace {{namespace}} {
{% endfor %}
{% for namespace in config.protocol.namespace %}
-} // namespce
+} // namespace
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
index 8208009009..53087c914c 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -10,6 +10,11 @@
//#include "Allocator.h"
//#include "Forward.h"
+#include <memory>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
#include {{format_include(config.protocol.package, "Forward")}}
{% for namespace in config.protocol.namespace %}
@@ -165,7 +170,9 @@ public:
static std::unique_ptr<DictionaryValue> cast(std::unique_ptr<Value> value)
{
- return std::unique_ptr<DictionaryValue>(DictionaryValue::cast(value.release()));
+ DictionaryValue* dictionaryValue = cast(value.get());
+ if (dictionaryValue) value.release();
+ return std::unique_ptr<DictionaryValue>(dictionaryValue);
}
void AppendSerialized(std::vector<uint8_t>* bytes) const override;
@@ -231,7 +238,9 @@ public:
static std::unique_ptr<ListValue> cast(std::unique_ptr<Value> value)
{
- return std::unique_ptr<ListValue>(ListValue::cast(value.release()));
+ ListValue* listValue = cast(value.get());
+ if (listValue) value.release();
+ return std::unique_ptr<ListValue>(listValue);
}
~ListValue() override;
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
index 96a0c8e3f3..4bb10dcc29 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -27,205 +27,97 @@ using {{"::".join(config.protocol.namespace)}}::StringUtil;
namespace {{namespace}} {
{% endfor %}
+// In Chromium, we do not support big endian architectures, so no conversion is needed
+// to interpret UTF16LE.
+// static
+String StringUtil::fromUTF16LE(const uint16_t* data, size_t length) {
+ std::string utf8;
+ base::UTF16ToUTF8(reinterpret_cast<const char16_t*>(data), length, &utf8);
+ return utf8;
+}
+
std::unique_ptr<protocol::Value> toProtocolValue(
- const base::Value* value, int depth) {
- if (!value || !depth)
+ const base::Value& value, int depth) {
+ if (!depth)
return nullptr;
- if (value->is_none())
+ if (value.is_none())
return protocol::Value::null();
- if (value->is_bool())
- return protocol::FundamentalValue::create(value->GetBool());
- if (value->is_int())
- return protocol::FundamentalValue::create(value->GetInt());
- if (value->is_double())
- return protocol::FundamentalValue::create(value->GetDouble());
- if (value->is_string())
- return protocol::StringValue::create(value->GetString());
- if (value->is_list()) {
- std::unique_ptr<protocol::ListValue> result = protocol::ListValue::create();
- for (const base::Value& item : value->GetList()) {
- std::unique_ptr<protocol::Value> converted =
- toProtocolValue(&item, depth - 1);
- if (converted)
+ if (value.is_bool())
+ return protocol::FundamentalValue::create(value.GetBool());
+ if (value.is_int())
+ return protocol::FundamentalValue::create(value.GetInt());
+ if (value.is_double())
+ return protocol::FundamentalValue::create(value.GetDouble());
+ if (value.is_string())
+ return protocol::StringValue::create(value.GetString());
+ if (value.is_list()) {
+ auto result = protocol::ListValue::create();
+ for (const base::Value& item : value.GetList()) {
+ if (auto converted = toProtocolValue(item, depth - 1)) {
result->pushValue(std::move(converted));
+ }
}
return result;
}
- if (value->is_dict()) {
- std::unique_ptr<protocol::DictionaryValue> result =
- protocol::DictionaryValue::create();
- for (const auto& it : value->DictItems()) {
- std::unique_ptr<protocol::Value> converted =
- toProtocolValue(&it.second, depth - 1);
- if (converted)
- result->setValue(it.first, std::move(converted));
+ if (value.is_dict()) {
+ auto result = protocol::DictionaryValue::create();
+ for (auto kv : value.DictItems()) {
+ if (auto converted = toProtocolValue(kv.second, depth - 1)) {
+ result->setValue(kv.first, std::move(converted));
+ }
}
return result;
}
return nullptr;
}
-std::unique_ptr<base::Value> toBaseValue(Value* value, int depth) {
+base::Value toBaseValue(Value* value, int depth) {
if (!value || !depth)
- return nullptr;
- if (value->type() == Value::TypeNull)
- return std::make_unique<base::Value>();
+ return base::Value();
if (value->type() == Value::TypeBoolean) {
bool inner;
value->asBoolean(&inner);
- return base::WrapUnique(new base::Value(inner));
+ return base::Value(inner);
}
if (value->type() == Value::TypeInteger) {
int inner;
value->asInteger(&inner);
- return base::WrapUnique(new base::Value(inner));
+ return base::Value(inner);
}
if (value->type() == Value::TypeDouble) {
double inner;
value->asDouble(&inner);
- return base::WrapUnique(new base::Value(inner));
+ return base::Value(inner);
}
if (value->type() == Value::TypeString) {
std::string inner;
value->asString(&inner);
- return base::WrapUnique(new base::Value(inner));
+ return base::Value(inner);
}
if (value->type() == Value::TypeArray) {
ListValue* list = ListValue::cast(value);
- std::unique_ptr<base::Value> result(new base::Value(
- base::Value::Type::LIST));
+ base::Value result(base::Value::Type::LIST);
for (size_t i = 0; i < list->size(); i++) {
- std::unique_ptr<base::Value> converted =
- toBaseValue(list->at(i), depth - 1);
- if (converted)
- result->Append(std::move(*converted));
+ base::Value converted = toBaseValue(list->at(i), depth - 1);
+ if (!converted.is_none())
+ result.Append(std::move(converted));
}
return result;
}
if (value->type() == Value::TypeObject) {
DictionaryValue* dict = DictionaryValue::cast(value);
- std::unique_ptr<base::Value> result(new base::Value(
- base::Value::Type::DICTIONARY));
+ base::Value result(base::Value::Type::DICTIONARY);
for (size_t i = 0; i < dict->size(); i++) {
DictionaryValue::Entry entry = dict->at(i);
- std::unique_ptr<base::Value> converted =
- toBaseValue(entry.second, depth - 1);
- if (converted)
- result->SetKey(entry.first, std::move(*converted));
+ base::Value converted = toBaseValue(entry.second, depth - 1);
+ if (!converted.is_none())
+ result.SetKey(entry.first, std::move(converted));
}
return result;
}
- return nullptr;
-}
-
-// In Chromium, we do not support big endian architectures, so no conversion is needed
-// to interpret UTF16LE.
-// static
-String StringUtil::fromUTF16LE(const uint16_t* data, size_t length) {
- std::string utf8;
- base::UTF16ToUTF8(reinterpret_cast<const char16_t*>(data), length, &utf8);
- return utf8;
-}
-
-bool StringUtil::ReadString(DeserializerState* state, String* value) {
- auto* tokenizer = state->tokenizer();
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING8) {
- const auto str = tokenizer->GetString8();
- *value = StringUtil::fromUTF8(str.data(), str.size());
- return true;
- }
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING16) {
- const auto str = tokenizer->GetString16WireRep();
- *value = StringUtil::fromUTF16LE(reinterpret_cast<const uint16_t*>(str.data()), str.size() / 2);
- return true;
- }
- state->RegisterError(Error::BINDINGS_STRING_VALUE_EXPECTED);
- return false;
-}
-
-void StringUtil::WriteString(const String& str, std::vector<uint8_t>* bytes) {
- cbor::EncodeString8(span<uint8_t>(StringUtil::CharactersUTF8(str),
- StringUtil::CharacterCount(str)),
- bytes);
-}
-
-Binary::Binary() : bytes_(new base::RefCountedBytes) {}
-Binary::Binary(const Binary& binary) : bytes_(binary.bytes_) {}
-Binary::Binary(scoped_refptr<base::RefCountedMemory> bytes) : bytes_(bytes) {}
-Binary::~Binary() {}
-
-void Binary::AppendSerialized(std::vector<uint8_t>* out) const {
- crdtp::cbor::EncodeBinary(crdtp::span<uint8_t>(data(), size()), out);
-}
-
-String Binary::toBase64() const {
- std::string encoded;
- base::Base64Encode(
- base::StringPiece(reinterpret_cast<const char*>(bytes_->front()),
- bytes_->size()),
- &encoded);
- return encoded;
-}
-
-// static
-Binary Binary::fromBase64(const String& base64, bool* success) {
- std::string decoded;
- *success = base::Base64Decode(base::StringPiece(base64), &decoded);
- if (*success) {
- return Binary::fromString(std::move(decoded));
- }
- return Binary();
-}
-
-// static
-Binary Binary::fromRefCounted(scoped_refptr<base::RefCountedMemory> memory) {
- return Binary(memory);
-}
-
-// static
-Binary Binary::fromVector(std::vector<uint8_t> data) {
- return Binary(base::RefCountedBytes::TakeVector(&data));
-}
-
-// static
-Binary Binary::fromString(std::string data) {
- return Binary(base::RefCountedString::TakeString(&data));
-}
-
-// static
-Binary Binary::fromSpan(const uint8_t* data, size_t size) {
- return Binary(scoped_refptr<base::RefCountedBytes>(
- new base::RefCountedBytes(data, size)));
+ return base::Value();
}
{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
+} // namespace {{namespace}} {
{% endfor %}
-
-namespace {{config.crdtp.namespace}} {
-
-// static
-bool ProtocolTypeTraits<Binary>::Deserialize(DeserializerState* state, Binary* value) {
- auto* tokenizer = state->tokenizer();
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::BINARY) {
- const span<uint8_t> bin = tokenizer->GetBinary();
- *value = Binary::fromSpan(bin.data(), bin.size());
- return true;
- }
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING8) {
- const auto str_span = tokenizer->GetString8();
- String str = StringUtil::fromUTF8(str_span.data(), str_span.size());
- bool success = false;
- *value = Binary::fromBase64(str, &success);
- return success;
- }
- state->RegisterError(Error::BINDINGS_BINARY_VALUE_EXPECTED);
- return false;
-}
-
-// static
-void ProtocolTypeTraits<Binary>::Serialize(const Binary& value, std::vector<uint8_t>* bytes) {
- value.AppendSerialized(bytes);
-}
-
-} // namespace {{config.crdtp.namespace}}
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
index 8c28ab033e..6e40de2b55 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
@@ -7,28 +7,12 @@
#ifndef {{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H
#define {{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted_memory.h"
-#include "{{config.crdtp.dir}}/serializable.h"
-#include "{{config.crdtp.dir}}/protocol_core.h"
+#include "{{config.crdtp.dir}}/chromium/protocol_traits.h"
{% if config.lib.export_header %}
#include "{{config.lib.export_header}}"
{% endif %}
-namespace base {
-class Value;
-}
-
-namespace {{config.crdtp.namespace}} {
-class DeserializerState;
-}
-
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
@@ -36,6 +20,7 @@ namespace {{namespace}} {
class Value;
using String = std::string;
+using Binary = crdtp::Binary;
class {{config.lib.export_macro}} StringUtil {
public:
@@ -51,67 +36,13 @@ class {{config.lib.export_macro}} StringUtil {
}
static const uint16_t* CharactersUTF16(const String& s) { return nullptr; }
static size_t CharacterCount(const String& s) { return s.size(); }
-
- static bool ReadString({{config.crdtp.namespace}}::DeserializerState* state, String* str);
- static void WriteString(const String& str, std::vector<uint8_t>* bytes);
};
-// A read-only sequence of uninterpreted bytes with reference-counted storage.
-class {{config.lib.export_macro}} Binary : public {{config.crdtp.namespace}}::Serializable {
- public:
- Binary(const Binary&);
- Binary();
- ~Binary();
-
- // Implements Serializable.
- void AppendSerialized(std::vector<uint8_t>* out) const override;
-
- const uint8_t* data() const { return bytes_->front(); }
- size_t size() const { return bytes_->size(); }
- scoped_refptr<base::RefCountedMemory> bytes() const { return bytes_; }
-
- String toBase64() const;
-
- static Binary fromBase64(const String& base64, bool* success);
- static Binary fromRefCounted(scoped_refptr<base::RefCountedMemory> memory);
- static Binary fromVector(std::vector<uint8_t> data);
- static Binary fromString(std::string data);
- static Binary fromSpan(const uint8_t* data, size_t size);
-
- private:
- explicit Binary(scoped_refptr<base::RefCountedMemory> bytes);
- scoped_refptr<base::RefCountedMemory> bytes_;
-};
+std::unique_ptr<Value> toProtocolValue(const base::Value& value, int depth);
+base::Value toBaseValue(Value* value, int depth);
-std::unique_ptr<Value> toProtocolValue(const base::Value* value, int depth);
-std::unique_ptr<base::Value> toBaseValue(Value* value, int depth);
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
-namespace {{config.crdtp.namespace}} {
-
-template <>
-struct ProtocolTypeTraits<{{"::".join(config.protocol.namespace)}}::String> {
- static bool Deserialize(DeserializerState* state, {{"::".join(config.protocol.namespace)}}::String* value) {
- return {{"::".join(config.protocol.namespace)}}::StringUtil::ReadString(state, value);
- }
- static void Serialize(const {{"::".join(config.protocol.namespace)}}::String& value, std::vector<uint8_t>* bytes) {
- {{"::".join(config.protocol.namespace)}}::StringUtil::WriteString(value, bytes);
- }
-};
-
-template <>
-struct ProtocolTypeTraits<{{"::".join(config.protocol.namespace)}}::Binary> {
- static bool Deserialize(DeserializerState* state, {{"::".join(config.protocol.namespace)}}::Binary* value);
- static void Serialize(const {{"::".join(config.protocol.namespace)}}::Binary& value, std::vector<uint8_t>* bytes);
-};
-
-template <>
-struct {{config.crdtp.namespace}}::detail::MaybeTypedef<{{"::".join(config.protocol.namespace)}}::Binary> {
- typedef ValueMaybe<{{"::".join(config.protocol.namespace)}}::Binary> type;
-};
-
-} // {{config.crdtp.namespace}}
-
#endif // !defined({{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H)
diff --git a/deps/v8/third_party/test262-harness/LICENSE b/deps/v8/third_party/test262-harness/LICENSE
new file mode 100644
index 0000000000..e0c79c800e
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/LICENSE
@@ -0,0 +1,28 @@
+The << Software identified by reference to the Ecma Standard* (Software)>> is protected by copyright and is being
+made available under the BSD License, included below. This Software may be subject to third party rights (rights
+from parties other than Ecma International), including patent rights, and no licenses under such third party rights
+are granted under this license even if the third party concerned is a member of Ecma International. SEE THE ECMA
+CODE OF CONDUCT IN PATENT MATTERS AVAILABLE AT http://www.ecma-international.org/memento/codeofconduct.htm FOR
+INFORMATION REGARDING THE LICENSING OF PATENT CLAIMS THAT ARE REQUIRED TO IMPLEMENT ECMA INTERNATIONAL STANDARDS*.
+
+Copyright <<2012>> <<Ecma International>>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
+following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
+ disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
+ following disclaimer in the documentation and/or other materials provided with the distribution.
+3. Neither the name of the authors nor Ecma International may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE ECMA INTERNATIONAL "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL ECMA INTERNATIONAL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+* Ecma International Standards hereafter means Ecma International Standards as well as Ecma Technical Reports \ No newline at end of file
diff --git a/deps/v8/third_party/test262-harness/OWNERS b/deps/v8/third_party/test262-harness/OWNERS
new file mode 100644
index 0000000000..e713ef5e50
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/OWNERS
@@ -0,0 +1,2 @@
+file:../../INFRA_OWNERS
+syg@chromium.org
diff --git a/deps/v8/third_party/test262-harness/README.md b/deps/v8/third_party/test262-harness/README.md
new file mode 100644
index 0000000000..e25602906f
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/README.md
@@ -0,0 +1,19 @@
+# Test262 Python Harness
+
+### Usage
+
+Loaded as a module, this project defines a single function, `parseTestRecord`.
+This function creates an object representation of the metadata encoded in the
+"frontmatter" of the provided Test262 test source code.
+
+`test262.py` is an executable designed to execute Test262 tests. It is exposed
+for public use. For usage instructions, invoke this executable with the
+`--help` flag, as in:
+
+ $ test262.py --help
+
+### Tests
+
+Run the following command from the root of this projcet:
+
+ $ python -m unittest discover test
diff --git a/deps/v8/third_party/test262-harness/README.v8 b/deps/v8/third_party/test262-harness/README.v8
new file mode 100644
index 0000000000..2f09a4a1bb
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/README.v8
@@ -0,0 +1,15 @@
+Name: test262-harness
+Short Name: test262-harness
+URL: https://github.com/test262-utils/test262-harness-py
+Version: 0
+Revision: 278bcfaed0dcaa13936831fb1769d15e7c1e3b2b
+Date: 2022-02-18
+License: BSD
+License File: LICENSE
+Security Critical: no
+
+Description:
+Provides the parseTestRecord function in Python for the test262 runner.
+
+Local modifications:
+src/_monkeyYaml.py was made Python 3 compatible
diff --git a/deps/v8/third_party/test262-harness/__init__.py b/deps/v8/third_party/test262-harness/__init__.py
new file mode 100644
index 0000000000..0587339fdf
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/__init__.py
@@ -0,0 +1 @@
+from src.parseTestRecord import parseTestRecord
diff --git a/deps/v8/third_party/test262-harness/excludelist.xml b/deps/v8/third_party/test262-harness/excludelist.xml
new file mode 100644
index 0000000000..39cffbe061
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/excludelist.xml
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<excludeList>
+</excludeList>
diff --git a/deps/v8/third_party/test262-harness/src/__init__.py b/deps/v8/third_party/test262-harness/src/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/__init__.py
diff --git a/deps/v8/third_party/test262-harness/src/_common.py b/deps/v8/third_party/test262-harness/src/_common.py
new file mode 100644
index 0000000000..d982e55bd1
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/_common.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2012 Ecma International. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+#--Imports---------------------------------------------------------------------
+import parseTestRecord
+
+#--Stubs-----------------------------------------------------------------------
+
+#--Globals---------------------------------------------------------------------
+
+#--Helpers--------------------------------------------------------------------#
+
+def convertDocString(docString):
+ envelope = parseTestRecord.parseTestRecord(docString, '')
+ envelope.pop('header', None)
+ envelope.pop('test', None)
+
+ return envelope
diff --git a/deps/v8/third_party/test262-harness/src/_monkeyYaml.py b/deps/v8/third_party/test262-harness/src/_monkeyYaml.py
new file mode 100644
index 0000000000..b988c433c1
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/_monkeyYaml.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# Copyright 2014 by Sam Mikes. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+# This code provides a fallback parser that can handle the subset of
+# YAML used in test262 frontmatter
+
+import re
+
+mYamlKV = re.compile(r"(.*?):(.*)")
+mYamlIntDigits = re.compile(r"^[-0-9]*$")
+mYamlFloatDigits = re.compile(r"^[-.0-9eE]*$")
+mYamlListPattern = re.compile(r"^\[(.*)\]$")
+mYamlMultilineList = re.compile(r"^ *- (.*)$")
+mYamlStringValue = re.compile(r"^('|\").*\1$")
+
+def load(str):
+ return myReadDict(str.splitlines())[1]
+
+def myReadDict(lines, indent=""):
+ dict = None
+ key = None
+ emptyLines = 0
+
+ while lines:
+ if not lines[0].startswith(indent):
+ break
+
+ line = lines.pop(0)
+ if myIsAllSpaces(line):
+ emptyLines += 1
+ continue
+ result = mYamlKV.match(line)
+
+ if result:
+ if not dict:
+ dict = {}
+ key = result.group(1).strip()
+ value = result.group(2).strip()
+ (lines, value) = myReadValue(lines, value, indent)
+ dict[key] = value
+ else:
+ if dict and key and key in dict:
+ c = " " if emptyLines == 0 else "\n" * emptyLines
+ dict[key] += c + line.strip()
+ else:
+ raise Exception("monkeyYaml is confused at " + line)
+ emptyLines = 0
+ return lines, dict
+
+def myReadValue(lines, value, indent):
+ if value == ">" or value == "|":
+ (lines, value) = myMultiline(lines, value == "|")
+ value = value + "\n"
+ return (lines, value)
+ if lines and not value:
+ if myMaybeList(lines[0]):
+ return myMultilineList(lines, value)
+ indentMatch = re.match("(" + indent + r"\s+)", lines[0])
+ if indentMatch:
+ if ":" in lines[0]:
+ return myReadDict(lines, indentMatch.group(1))
+ return myMultiline(lines, False)
+ return lines, myReadOneLine(value)
+
+def myMaybeList(value):
+ return mYamlMultilineList.match(value)
+
+def myMultilineList(lines, value):
+ # assume no explcit indentor (otherwise have to parse value)
+ value = []
+ indent = 0
+ while lines:
+ line = lines.pop(0)
+ leading = myLeadingSpaces(line)
+ if myIsAllSpaces(line):
+ pass
+ elif leading < indent:
+ lines.insert(0, line)
+ break;
+ else:
+ indent = indent or leading
+ value += [myReadOneLine(myRemoveListHeader(indent, line))]
+ return (lines, value)
+
+def myRemoveListHeader(indent, line):
+ line = line[indent:]
+ return mYamlMultilineList.match(line).group(1)
+
+def myReadOneLine(value):
+ if mYamlListPattern.match(value):
+ return myFlowList(value)
+ elif mYamlIntDigits.match(value):
+ try:
+ value = int(value)
+ except ValueError:
+ pass
+ elif mYamlFloatDigits.match(value):
+ try:
+ value = float(value)
+ except ValueError:
+ pass
+ elif mYamlStringValue.match(value):
+ value = value[1:-1]
+ return value
+
+def myFlowList(value):
+ result = mYamlListPattern.match(value)
+ values = result.group(1).split(",")
+ return [myReadOneLine(v.strip()) for v in values]
+
+def myMultiline(lines, preserveNewlines=False):
+ # assume no explcit indentor (otherwise have to parse value)
+ value = ""
+ indent = myLeadingSpaces(lines[0])
+ wasEmpty = None
+
+ while lines:
+ line = lines.pop(0)
+ isEmpty = myIsAllSpaces(line)
+
+ if isEmpty:
+ if preserveNewlines:
+ value += "\n"
+ elif myLeadingSpaces(line) < indent:
+ lines.insert(0, line)
+ break;
+ else:
+ if preserveNewlines:
+ if wasEmpty != None:
+ value += "\n"
+ else:
+ if wasEmpty == False:
+ value += " "
+ elif wasEmpty == True:
+ value += "\n"
+ value += line[(indent):]
+
+ wasEmpty = isEmpty
+
+ return (lines, value)
+
+def myIsAllSpaces(line):
+ return len(line.strip()) == 0
+
+def myLeadingSpaces(line):
+ return len(line) - len(line.lstrip(' '))
diff --git a/deps/v8/third_party/test262-harness/src/_packager.py b/deps/v8/third_party/test262-harness/src/_packager.py
new file mode 100644
index 0000000000..b44f4c5dee
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/_packager.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2012 Ecma International. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+#--Imports---------------------------------------------------------------------
+import argparse
+import os
+import sys
+import xml.dom.minidom
+import base64
+import datetime
+import shutil
+import re
+import json
+import stat
+
+from _common import convertDocString
+
+#--Stubs-----------------------------------------------------------------------
+def generateHarness(harnessType, jsonFile, description):
+ pass
+
+
+#------------------------------------------------------------------------------
+from _packagerConfig import *
+
+#--Globals---------------------------------------------------------------------
+
+__parser = argparse.ArgumentParser(description= \
+ 'Tool used to generate the test262 website')
+__parser.add_argument('version', action='store',
+ help='Version of the test suite.')
+__parser.add_argument('--type', action='store', default=DEFAULT_TESTCASE_TEMPLATE,
+ help='Type of test case runner to generate.')
+__parser.add_argument('--console', action='store_true', default=False,
+ help='Type of test case runner to generate.')
+ARGS = __parser.parse_args()
+
+if not os.path.exists(EXCLUDED_FILENAME):
+ print "Cannot generate (JSON) test262 tests without a file," + \
+ " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
+ sys.exit(1)
+EXCLUDE_LIST = xml.dom.minidom.parse(EXCLUDED_FILENAME)
+EXCLUDE_LIST = EXCLUDE_LIST.getElementsByTagName("test")
+EXCLUDE_LIST = [x.getAttribute("id") for x in EXCLUDE_LIST]
+
+#a list of all ES5 test chapter directories
+TEST_SUITE_SECTIONS = []
+
+#total number of tests accross the entire set of tests.
+TOTAL_TEST_COUNT = 0
+
+#List of all *.json files containing encoded test cases
+SECTIONS_LIST = []
+
+
+#--Sanity checks--------------------------------------------------------------#
+if not os.path.exists(TEST262_CASES_DIR):
+ print "Cannot generate (JSON) test262 tests when the path containing said tests, %s, does not exist!" % TEST262_CASES_DIR
+ sys.exit(1)
+
+if not os.path.exists(TEST262_HARNESS_DIR):
+ print "Cannot copy the test harness from a path, %s, that does not exist!" % TEST262_HARNESS_DIR
+ sys.exit(1)
+
+if not os.path.exists(TEST262_WEB_CASES_DIR):
+ os.mkdir(TEST262_WEB_CASES_DIR)
+
+if not os.path.exists(TEST262_WEB_HARNESS_DIR):
+ os.mkdir(TEST262_WEB_HARNESS_DIR)
+
+if not hasattr(ARGS, "version"):
+ print "A test262 suite version must be specified from the command-line to run this script!"
+ sys.exit(1)
+
+#--Helpers--------------------------------------------------------------------#
+def createDepDirs(dirName):
+ #base case
+ if dirName==os.path.dirname(dirName):
+ if not os.path.exists(dirName):
+ os.mkdir(dirName)
+ else:
+ if not os.path.exists(dirName):
+ createDepDirs(os.path.dirname(dirName))
+ os.mkdir(dirName)
+
+def test262PathToConsoleFile(path):
+ stuff = os.path.join(TEST262_CONSOLE_CASES_DIR,
+ path.replace("/", os.path.sep))
+ createDepDirs(os.path.dirname(stuff))
+ return stuff
+
+def getJSCount(dirName):
+ '''
+ Returns the total number of *.js files (recursively) under a given
+ directory, dirName.
+ '''
+ retVal = 0
+ if os.path.isfile(dirName) and dirName.endswith(".js"):
+ retVal = 1
+ elif os.path.isdir(dirName):
+ tempList = [os.path.join(dirName, x) for x in os.listdir(dirName)]
+ for x in tempList:
+ retVal += getJSCount(x)
+ #else:
+ # raise Exception("getJSCount: encountered a non-file/non-dir!")
+ return retVal
+
+#------------------------------------------------------------------------------
+def dirWalker(dirName):
+ '''
+ Populates TEST_SUITE_SECTIONS with ES5 test directories based
+ upon the number of test files per directory.
+ '''
+ global TEST_SUITE_SECTIONS
+ #First check to see if it has test files directly inside it
+ temp = [os.path.join(dirName, x) for x in os.listdir(dirName) \
+ if not os.path.isdir(os.path.join(dirName, x))]
+ if len(temp)!=0:
+ TEST_SUITE_SECTIONS.append(dirName)
+ return
+
+ #Next check to see if all *.js files under this directory exceed our max
+ #for a JSON file
+ temp = getJSCount(dirName)
+ if temp==0:
+ print "ERROR: expected there to be JavaScript tests under dirName!"
+ sys.exit(1)
+ #TODO - commenting out this elif/else clause seems to be causing *.json
+ #naming conflicts WRT Sputnik test dirs.
+ # elif temp < MAX_CASES_PER_JSON:
+ TEST_SUITE_SECTIONS.append(dirName)
+ return
+ #TODO else:
+ # #Max has been exceeded. We need to look at each subdir individually
+ # temp = os.listdir(dirName)
+ # for tempSubdir in temp:
+ # dirWalker(os.path.join(dirName, tempSubdir))
+
+#------------------------------------------------------------------------------
+def isTestStarted(line):
+ '''
+ Used to detect if we've gone past extraneous test comments in a test case.
+
+ Note this is a naive approach on the sense that "/*abc*/" could be on one
+ line. However, we know for a fact this is not the case in IE Test Center
+ or Sputnik tests.
+ '''
+ if re.search("^\s*//", line)!=None: #//blah
+ return False
+ elif ("//" in line) and ("Copyright " in line):
+ #BOM hack
+ return False
+ elif re.match("^\s*$", line)!=None: #newlines
+ return False
+ return True
+
+#------------------------------------------------------------------------------
+def getAllJSFiles(dirName):
+ retVal = []
+ for fullPath,dontCare,files in os.walk(dirName):
+ retVal += [os.path.join(fullPath,b) for b in files if b.endswith(".js")]
+ return retVal
+
+#--MAIN------------------------------------------------------------------------
+for temp in os.listdir(TEST262_CASES_DIR):
+ temp = os.path.join(TEST262_CASES_DIR, temp)
+ if not os.path.exists(temp):
+ print "The expected ES5 test directory,", temp, "did not exist!"
+ sys.exit(1)
+
+ if temp.find("/.") != -1:
+ # skip hidden files on Unix, such as ".DS_Store" on Mac
+ continue
+
+ if not ONE_JSON_PER_CHAPTER:
+ dirWalker(temp)
+ else:
+ TEST_SUITE_SECTIONS.append(temp)
+
+for chapter in TEST_SUITE_SECTIONS:
+ chapterName = chapter.rsplit(os.path.sep, 1)[1]
+ print "Generating test cases for ES5 chapter:", chapterName
+ #create dictionaries for all our tests and a section
+ testsList = {}
+ sect = {}
+ sect["name"] = "Chapter - " + chapterName
+
+ #create an array for tests in a chapter
+ tests = []
+ sourceFiles = getAllJSFiles(chapter)
+
+ if len(sourceFiles)!=0:
+ excluded = 0
+ testCount = 0
+ for test in sourceFiles:
+ #TODO - use something other than the hard-coded 'TestCases' below
+ testPath = "TestCases" + \
+ test.split(TEST262_CASES_DIR, 1)[1].replace("\\", "/")
+ testName=test.rsplit(".", 1)[0]
+ testName=testName.rsplit(os.path.sep, 1)[1]
+ if EXCLUDE_LIST.count(testName)==0:
+ # dictionary for each test
+ testDict = {}
+ testDict["path"] = testPath
+
+ tempFile = open(test, "rb")
+ scriptCode = tempFile.readlines()
+ tempFile.close()
+ scriptCodeContent=""
+ #Rip out license headers that add unnecessary bytes to
+ #the JSON'ized test cases
+ inBeginning = True
+
+ #Hack to preserve the BOM
+ if "Copyright " in scriptCode[0]:
+ scriptCodeContent += scriptCode[0]
+ for line in scriptCode:
+ if inBeginning:
+ isStarted = isTestStarted(line)
+ if not isStarted:
+ continue
+ inBeginning = False
+ scriptCodeContent += line
+
+ if scriptCodeContent==scriptCode[0]:
+ print "WARNING (" + test + \
+ "): unable to strip comments/license header/etc."
+ scriptCodeContent = "".join(scriptCode)
+ scriptCodeContentB64 = base64.b64encode(scriptCodeContent)
+
+ #add the test encoded code node to our test dictionary
+ testDict["code"] = scriptCodeContentB64
+ #now close the dictionary for the test
+
+ #now get the metadata added.
+ tempDict = convertDocString("".join(scriptCode))
+ for tempKey in tempDict.keys():
+ #path is set from the file path above; the "@path" property
+ #in comments is redundant
+ if not (tempKey in ["path"]):
+ testDict[tempKey] = tempDict[tempKey]
+
+ #this adds the test to our tests array
+ tests.append(testDict)
+
+ if ARGS.console:
+ with open(test262PathToConsoleFile(testDict["path"]),
+ "w") as fConsole:
+ fConsole.write(scriptCodeContent)
+ with open(test262PathToConsoleFile(testDict["path"][:-3] + \
+ "_metadata.js"),
+ "w") as fConsoleMeta:
+ metaDict = testDict.copy()
+ del metaDict["code"]
+ fConsoleMeta.write("testDescrip = " + str(metaDict))
+ testCount += 1
+ else:
+ print "Excluded:", testName
+ excluded = excluded + 1
+
+ #we have completed our tests
+ # add section node, number of tests and the tests themselves.
+ sect["numTests"] = str(len(sourceFiles)-excluded)
+ sect["tests"] = tests
+
+ #create a node for the tests and add it to our testsLists
+ testsList["testsCollection"] = sect
+ with open(os.path.join(TEST262_WEB_CASES_DIR, chapterName + ".json"),
+ "w") as f:
+ json.dump(testsList, f, separators=(',',':'), sort_keys=True,
+ indent=0)
+
+
+ if TESTCASELIST_PER_JSON:
+ CHAPTER_TEST_CASES_JSON = {}
+ CHAPTER_TEST_CASES_JSON["numTests"] = int(sect["numTests"])
+ CHAPTER_TEST_CASES_JSON["testSuite"] = \
+ [WEBSITE_CASES_PATH + chapterName + ".json"]
+ with open(os.path.join(TEST262_WEB_CASES_DIR,
+ "testcases_%s.json" % chapterName),
+ "w") as f:
+ json.dump(CHAPTER_TEST_CASES_JSON, f, separators=(',',':'),
+ sort_keys=True, indent=0)
+ generateHarness(ARGS.type, "testcases_%s.json" % chapterName,
+ chapterName.replace("ch", "Chapter "))
+
+ #add the name of the chapter test to our complete list
+ tempBool = True
+ for tempRe in WEBSITE_EXCLUDE_RE_LIST:
+ if tempRe.search(chapterName)!=None:
+ tempBool = False
+ if tempBool:
+ SECTIONS_LIST.append(WEBSITE_CASES_PATH + chapterName + ".json")
+ TOTAL_TEST_COUNT += int(sect["numTests"])
+
+
+#we now have the list of files for each chapter
+#create a root node for our suite
+TEST_CASES_JSON = {}
+TEST_CASES_JSON["numTests"] = TOTAL_TEST_COUNT
+TEST_CASES_JSON["testSuite"] = SECTIONS_LIST
+with open(os.path.join(TEST262_WEB_CASES_DIR, "default.json"), "w") as f:
+ json.dump(TEST_CASES_JSON, f, separators=(',',':'), sort_keys=True, indent=0)
+generateHarness(ARGS.type, "default.json", "Chapters 1-16")
+
+#Overall description of this version of the test suite
+SUITE_DESCRIP_JSON = {}
+SUITE_DESCRIP_JSON["version"] = ARGS.version
+SUITE_DESCRIP_JSON["date"] = str(datetime.datetime.now().date())
+with open(os.path.join(TEST262_WEB_CASES_DIR, "suiteDescrip.json"), "w") as f:
+ json.dump(SUITE_DESCRIP_JSON, f, separators=(',',':'), sort_keys=True)
+
+#Deploy test harness to website as well
+print ""
+print "Deploying test harness files to 'TEST262_WEB_HARNESS_DIR'..."
+if TEST262_HARNESS_DIR!=TEST262_WEB_HARNESS_DIR:
+ for filename in [x for x in os.listdir(TEST262_HARNESS_DIR) \
+ if x.endswith(".js")]:
+ toFilenameList = [ os.path.join(TEST262_WEB_HARNESS_DIR, filename)]
+ if ARGS.console:
+ toFilenameList.append(os.path.join(TEST262_CONSOLE_HARNESS_DIR,
+ filename))
+
+ for toFilename in toFilenameList:
+ if not os.path.exists(os.path.dirname(toFilename)):
+ os.mkdir(os.path.dirname(toFilename))
+ fileExists = os.path.exists(toFilename)
+ if fileExists:
+ SC_HELPER.edit(toFilename)
+ shutil.copy(os.path.join(TEST262_HARNESS_DIR, filename),
+ toFilename)
+ if not fileExists:
+ SC_HELPER.add(toFilename)
+
+print "Done."
diff --git a/deps/v8/third_party/test262-harness/src/_packagerConfig.py b/deps/v8/third_party/test262-harness/src/_packagerConfig.py
new file mode 100644
index 0000000000..0098239f57
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/_packagerConfig.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2012 Ecma International. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+#--Imports---------------------------------------------------------------------
+import os
+import subprocess
+import stat
+import re
+
+#--Globals---------------------------------------------------------------------
+MAX_CASES_PER_JSON = 1000
+
+WEBSITE_SHORT_NAME = "website"
+CONSOLE_SHORT_NAME = "console"
+
+DEFAULT_TESTCASE_TEMPLATE="test262"
+
+ONE_JSON_PER_CHAPTER = False
+TESTCASELIST_PER_JSON = True
+
+#Path to the root of the Hg repository (relative to this file's location)
+TEST262_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
+TEST262_ROOT = os.path.abspath(TEST262_ROOT)
+
+#Directory full of test cases we want to port to the website's test
+#harness runner
+TEST262_CASES_DIR = os.path.join(TEST262_ROOT, "test")
+
+#Directory containing test harness files to be ported over to the
+#website. Note that only *.js files will be migrated from this dir.
+TEST262_HARNESS_DIR = os.path.join(TEST262_ROOT, "harness")
+
+#Directory full of website test cases (ported over from TEST262_CASES_DIR)
+TEST262_WEB_CASES_DIR = os.path.join(TEST262_ROOT, WEBSITE_SHORT_NAME, "json")
+TEST262_CONSOLE_CASES_DIR = os.path.join(TEST262_ROOT, CONSOLE_SHORT_NAME)
+
+#Directory containing the website's test harness (ported over from
+#TEST262_HARNESS_DIR)
+TEST262_WEB_HARNESS_DIR = os.path.join(TEST262_ROOT, WEBSITE_SHORT_NAME,
+ "harness")
+TEST262_CONSOLE_HARNESS_DIR = os.path.join(TEST262_ROOT, CONSOLE_SHORT_NAME,
+ "harness")
+
+#Path to the ported test case files on the actual website as opposed
+#to the Hg layout
+WEBSITE_CASES_PATH = "json/"
+
+#The name of a file which contains a list of tests which should be
+#disabled in test262. These tests are either invalid as-per ES5 or
+#have issues with the test262 web harness.
+EXCLUDED_FILENAME = os.path.join(TEST262_ROOT, "excludelist.xml")
+
+WEBSITE_EXCLUDE_RE_LIST = ["bestPractice", "intl402"]
+WEBSITE_EXCLUDE_RE_LIST = [ re.compile(x) for x in WEBSITE_EXCLUDE_RE_LIST]
+
+#------------------------------------------------------------------------------
+
+TEMPLATE_LINES = None
+__lastHarnessType = None
+
+def generateHarness(harnessType, jsonName, title):
+ global TEMPLATE_LINES
+ global __lastHarnessType
+
+ #TODO: temp hack to make experimental internationalization tests work
+ if jsonName=="testcases_intl402.json":
+ harnessType = "intl402"
+ elif jsonName=="testcases_bestPractice.json":
+ harnessType = "bestPractice"
+
+ if TEMPLATE_LINES==None or harnessType!=__lastHarnessType:
+ __lastHarnessType = harnessType
+ TEMPLATE_LINES = []
+ with open(os.path.join(os.getcwd(), "templates",
+ "runner." + harnessType + ".html"), "r") as f:
+ TEMPLATE_LINES = f.readlines()
+ fileName = os.path.join(TEST262_ROOT, WEBSITE_SHORT_NAME,
+ jsonName.replace(".json", ".html"))
+ fileNameExists = False
+ if os.path.exists(fileName):
+ SC_HELPER.edit(fileName)
+ fileNameExists = True
+ with open(fileName, "w") as f:
+ for line in TEMPLATE_LINES:
+ if "var TEST_LIST_PATH =" in line:
+ f.write(" var TEST_LIST_PATH = \"json/" + jsonName + \
+ "\";" + os.linesep)
+ #elif "ECMAScript 5" in line:
+ # f.write(line.replace("ECMAScript 5",
+ # "ECMAScript 5: %s" % title))
+ else:
+ f.write(line)
+ if not fileNameExists:
+ SC_HELPER.add(fileName)
+
+#------------------------------------------------------------------------------
+class SCAbstraction(object):
+ '''
+ A class which abstracts working with source control systems in relation to
+ generated test262 files. Useful when test262 is also used internally by
+ browser implementors.
+ '''
+ def edit(self, filename):
+ '''
+ Source control edit of a file. For Mercurial, just make sure it's
+ writable.
+ '''
+ if not(os.stat(filename).st_mode & stat.S_IWRITE):
+ os.chmod(filename, stat.S_IWRITE)
+
+ def add(self, filename):
+ '''
+ Source control add of a file.
+ '''
+ subprocess.call(["git", "add", filename])
+
+SC_HELPER = SCAbstraction()
diff --git a/deps/v8/third_party/test262-harness/src/parseTestRecord.py b/deps/v8/third_party/test262-harness/src/parseTestRecord.py
new file mode 100644
index 0000000000..1c2aba80af
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/parseTestRecord.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+# Copyright 2011 by Google, Inc. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+# TODO: resolve differences with common.py and unify into one file.
+
+
+from __future__ import print_function
+
+import os
+import re
+import imp
+
+from _monkeyYaml import load as yamlLoad
+
+#def onerror(message):
+# print(message)
+
+# Matches trailing whitespace and any following blank lines.
+_BLANK_LINES = r"([ \t]*[\r\n]{1,2})*"
+
+# Matches the YAML frontmatter block.
+_YAML_PATTERN = re.compile(r"/\*---(.*)---\*/" + _BLANK_LINES, re.DOTALL)
+
+# Matches all known variants for the license block.
+# https://github.com/tc39/test262/blob/705d78299cf786c84fa4df473eff98374de7135a/tools/lint/lib/checks/license.py
+_LICENSE_PATTERN = re.compile(
+ r'// Copyright( \([C]\))? (\w+) .+\. {1,2}All rights reserved\.[\r\n]{1,2}' +
+ r'(' +
+ r'// This code is governed by the( BSD)? license found in the LICENSE file\.' +
+ r'|' +
+ r'// See LICENSE for details.' +
+ r'|' +
+ r'// Use of this source code is governed by a BSD-style license that can be[\r\n]{1,2}' +
+ r'// found in the LICENSE file\.' +
+ r'|' +
+ r'// See LICENSE or https://github\.com/tc39/test262/blob/(master|HEAD)/LICENSE' +
+ r')' + _BLANK_LINES, re.IGNORECASE)
+
+def yamlAttrParser(testRecord, attrs, name, onerror = print):
+ parsed = yamlLoad(attrs)
+ if parsed is None:
+ onerror("Failed to parse yaml in name %s" % name)
+ return
+
+ for key in parsed:
+ value = parsed[key]
+ if key == "info":
+ key = "commentary"
+ testRecord[key] = value
+
+ if 'flags' in testRecord:
+ for flag in testRecord['flags']:
+ testRecord[flag] = ""
+
+def findLicense(src):
+ match = _LICENSE_PATTERN.search(src)
+ if not match:
+ return None
+
+ return match.group(0)
+
+def findAttrs(src):
+ match = _YAML_PATTERN.search(src)
+ if not match:
+ return (None, None)
+
+ return (match.group(0), match.group(1).strip())
+
+def parseTestRecord(src, name, onerror = print):
+ # Find the license block.
+ header = findLicense(src)
+
+ # Find the YAML frontmatter.
+ (frontmatter, attrs) = findAttrs(src)
+
+ # YAML frontmatter is required for all tests.
+ if frontmatter is None:
+ onerror("Missing frontmatter: %s" % name)
+
+ # The license shuold be placed before the frontmatter and there shouldn't be
+ # any extra content between the license and the frontmatter.
+ if header is not None and frontmatter is not None:
+ headerIdx = src.index(header)
+ frontmatterIdx = src.index(frontmatter)
+ if headerIdx > frontmatterIdx:
+ onerror("Unexpected license after frontmatter: %s" % name)
+
+ # Search for any extra test content, but ignore whitespace only or comment lines.
+ extra = src[headerIdx + len(header) : frontmatterIdx]
+ if extra and any(line.strip() and not line.lstrip().startswith("//") for line in extra.split("\n")):
+ onerror("Unexpected test content between license and frontmatter: %s" % name)
+
+ # Remove the license and YAML parts from the actual test content.
+ test = src
+ if frontmatter is not None:
+ test = test.replace(frontmatter, '')
+ if header is not None:
+ test = test.replace(header, '')
+
+ testRecord = {}
+ testRecord['header'] = header.strip() if header else ''
+ testRecord['test'] = test
+
+ if attrs:
+ yamlAttrParser(testRecord, attrs, name, onerror)
+
+ # Report if the license block is missing in non-generated tests.
+ if header is None and "generated" not in testRecord and "hashbang" not in name:
+ onerror("No license found in: %s" % name)
+
+ return testRecord
diff --git a/deps/v8/third_party/test262-harness/src/templates/runner.bestPractice.html b/deps/v8/third_party/test262-harness/src/templates/runner.bestPractice.html
new file mode 100644
index 0000000000..ab1174d51c
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/templates/runner.bestPractice.html
@@ -0,0 +1,170 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html dir="ltr" xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="Content-Type" />
+<script type="text/javascript" src="scripts/lib/jquery-1.4.2.min.js"></script>
+<script type="text/javascript" src="scripts/sections.js"></script>
+<script type="text/javascript">
+ //Globals
+ var STANDARD = "ECMA-262";
+ var TEST_LIST_PATH = "json/testcaseslist.json";
+</script>
+<script type="text/javascript" src="scripts/sth.js"></script>
+<script type="text/javascript" src="harness/sta.js"></script>
+<script type="text/javascript" src="scripts/lib/jqueryprogressbar.js"></script>
+<script type="text/javascript" src="scripts/helper.js"></script>
+<script type="text/javascript" src="scripts/lib/jquery.base64.js"></script>
+<script type="text/javascript">
+ //To support all the browsers
+ $(window).resize(ResizeLoadIndicator);
+ $(window).load(ResizeLoadIndicator);
+ function ResizeLoadIndicator() {
+ $(".indicatorContainer .disabledBackground").css({ height: ($(window).height() - 20) + "px" });
+ }
+
+ $(".indicatorContainer").click(function(e) {
+ if (!e) { var e = window.event; }
+ e.cancelBubble = true;
+ if (e.stopPropagation) { e.stopPropagation(); }
+ });
+</script>
+
+<title>ECMAScript Best Practices – test262</title>
+<link href="styles/style.css" media="screen" rel="stylesheet" title="CSS" type="text/css" />
+</head>
+<body>
+ <div class="indicatorContainer">
+ <!--Blank div to disable back portion when indicator is shown-->
+ <div class="disabledBackground"></div>
+ <div id="loadingIndicator">
+ <div>
+ <img src="./images/spinner.gif" alt="Loading..." />
+ <span>Loading...</span>
+ </div>
+ </div>
+ </div>
+
+ <div class="wrapper">
+ <!-- This Container holds the main header -->
+ <div class="mainHeader">
+ <p><span id="ECMAscript">ECMA</span><span id="ecmaSCRIPT">Script</span> <strong>Best Practices</strong> test262
+ <a id="ecmascriptbacklink" href='javascript:void(window.open("http://www.ecmascript.org/"));'>ECMAScript.org</a></p>
+ </div>
+ <!-- This Container holds the Navigation -->
+ <div class="navBar">
+ <ul>
+ <li><a href="#" class="selected nav-link" id="home">Home</a></li>
+ <li><a href="#" class="nav-link" id="run">Run</a></li>
+ <li><a href="#" class="nav-link test-report-link" id="results">Results</a></li>
+ <li><a href="#" class="nav-link" id="development">Development</a></li>
+ </ul>
+ </div>
+ <div class="content-container" id="contentContainer">
+ <!-- This is the Main Content Container -->
+ <div class="content-home">
+ <p class="headers">What is test262 <i>Best Practices</i>?</p>
+ <p class="content"><i>test262 Best Practices</i> is a supplemental test suite to <a href="default.html">test262</a> containing test cases that are <b>not</b> required by the ECMAScript specification, but deemed best practices for JavaScript implementers by Ecma's TC-39 committee.</p>
+
+ <p class="headers">Running the Tests</p>
+ <p class="content">Click the “Run” tab at the top of this page for instructions and follow the instructions to run the tests.</p>
+
+ <a href='javascript:void(window.open("http://www.ecma-international.org/memento/TC39.htm"));'></a>
+
+ </div>
+
+ <div class="content-dev">
+ <p class="headers">Development</p>
+ <p class="content">Test262 is being developed by the members of Ecma TC39. Ecma's intellectual property policies, permit only Ecma
+ members to directly contribute code to the project. However, a <a href='javascript:void(window.open("http://mail.mozilla.org/pipermail/test262-discuss/"));'>public mailing list</a> is used to coordinate development of Test262. If you wish to participate in the discussion please <a href='javascript:void(window.open("http://mail.mozilla.org/listinfo/test262-discuss"));'>subscribe</a>. Bug reports and suggestions should be sent to the mailing list.
+ </p>
+ <p class="content">
+ Ecma members can find detailed instructions on Test262 development procedures at the <a href='javascript:void(window.open("http://wiki.ecmascript.org/doku.php?id=test262:test262"));'>Test262 Wiki</a>.
+ </p>
+ </div>
+
+ <div class="content-tests">
+ <!-- This is the Main Content Container -->
+ <p class="content">Please click on the Run All button to run all the tests. Once you start the test you may pause the test anytime by clicking on the Pause button. You can click on the Results tab once the test is completed or after pausing the test. The Reset button is for restarting the test run. You may run individual tests by clicking the Run button next to the tests listed below. If you wish to run several chapters in sequence, but not the entire test suite, click the Select button for the chapters you wish to run and then click the Run Selected button.</p>
+
+ <!-- This is the Progress Bar Holder -->
+ <div class="progressBarHolder">
+ <div id="progressbar"></div>
+ <div class="progressBarButtons">
+ <!-- Loading: Run All, Run Selected -->
+ <!-- Loaded: Run All, Run Selected -->
+ <!-- Running: Pause -->
+ <!-- Paused: Resume, Reset -->
+ <img src="images/runall.png" alt="Run All" title="Run all tests." id="btnRunAll" />
+ <img src="images/runselected.png" alt="Run Selected Tests" title="Run the tests selected below." id="btnRunSelected" />
+ <img src="images/pause.png" alt="Pause" title="Pause the running tests." id="btnPause" />
+ <img src="images/resume.png" alt="Resume" title="Resume the running tests." id="btnResume" />
+ <img src="images/reset.png" alt="Reset" title="Reset testing status." id="btnReset" />
+ </div>
+ <div style="clear: both;"></div>
+ </div>
+ <p class="hide">
+ Timer Value(ms) : <input id="txtTimerValue" value="50" /> <input id="btnSetTimerValue" value="Set Timer Value" type="button"/>
+ </p>
+
+ <!-- This is the Results Text Holder -->
+ <div class="resultsHeader">
+ Tests to run: <strong><span class="teststorun-counter" id="testsToRun"></span></strong>&nbsp;<span class="separator">|</span>
+ Total tests ran: <strong><span class="total-counter" id="totalCounter"></span></strong> <span class="separator">|</span>
+ Pass: <span class="pass" id="Pass"></span> <span class="separator">|</span>
+ Fail: <span class="fail" id="Fail"></span> <span class="separator">|</span>
+ Failed to load: <span class="fail" id="failedToLoadCounter1"></span>
+ <p><span id="nextActivity"></span></p>
+ </div>
+
+ <!-- Test Chapter selector -->
+ <div id="chapterSelector">
+ <table width="100%" border="0" cellspacing="0" cellpadding="2"></table>
+ </div>
+
+ <!-- This is the Table -->
+ <div class="resultsTableHolder" id="tableLoggerParent">
+ <table width="100%" border="0" cellspacing="0" cellpadding="0" class="table-logger" id="tableLogger"></table>
+ </div>
+ <div>
+ Test suite version: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ </div>
+
+ <div class="content-results">
+ <div class="crumbContainer">
+ <div class="crumbs"></div>
+ <div style="float:right;"><a class="setBlue hide" id="backlinkDiv" href="#">&lt;&lt; back</a></div>
+ <div style="clear : both;"></div>
+ </div>
+ <div class="resultsHeader"> <strong>Total tests: <span class="totalCases"></span></strong><br />
+ Passed: <span class="passedCases"></span> <span class="separator">|</span> Failed: <span class="failedCases"></span> <span class="separator">|</span>
+ Failed to load: <strong><span id="failedToLoadCounter"></span></strong>
+ </div>
+ <!-- This is the Table -->
+ <div class="resultsTableHolder">
+ <table width="100%" cellspacing="0" cellpadding="0" border="0" class="results-data-table"> </table>
+ <div id="resultMessage">Test results will be displayed after the tests are executed using the Run page.</div>
+ </div>
+ <div>
+ Test suite version: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ <div class="downloadLinks">
+ <p><a class="anchor-download-xml" id="ancGenXMLReport"><strong>Download results as XML</strong></a></p> <!--| <strong><a href="scripts/testcases.zip">Download Source</a></strong></p>-->
+ </div>
+ <div id="legend" class="hide">
+ <label class="reportGreen">Green:</label>&nbsp;100%&nbsp;
+ <label class="reportLightGreen">Green:</label>&nbsp;75% to 99.9%&nbsp;
+ <label class="reportYellow">Yellow:</label>&nbsp;50% to 75% &nbsp;
+ <label class="reportRed">Red:</label>&nbsp;less than 50%
+ </div>
+ </div>
+ </div>
+ </div>
+ <!-- This is the Footer -->
+ <div class="footer">
+ <!--<div class="Links"> <a href="">Privacy</a> | <a href="">Terms of Use</a> </div>-->
+ <div class="copyright"> &copy; <a href='javascript:void(window.open("http://www.ecma-international.org"));'>Ecma International</a> </div>
+ </div>
+ <iframe id="scriptLoader" class="hide"></iframe>
+</body>
+</html>
diff --git a/deps/v8/third_party/test262-harness/src/templates/runner.intl402.html b/deps/v8/third_party/test262-harness/src/templates/runner.intl402.html
new file mode 100644
index 0000000000..349c637954
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/templates/runner.intl402.html
@@ -0,0 +1,192 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html dir="ltr" xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="Content-Type" />
+<script type="text/javascript" src="scripts/lib/jquery-1.4.2.min.js"></script>
+<script type="text/javascript" src="scripts/sections.js"></script>
+<script type="text/javascript">
+ //Globals
+ var STANDARD = "ECMA-402";
+ var TEST_LIST_PATH = "json/testcaseslist.json";
+</script>
+<script type="text/javascript" src="scripts/sth.js"></script>
+<script type="text/javascript" src="harness/sta.js"></script>
+<script type="text/javascript" src="scripts/lib/jqueryprogressbar.js"></script>
+<script type="text/javascript" src="scripts/helper.js"></script>
+<script type="text/javascript" src="scripts/lib/jquery.base64.js"></script>
+<script type="text/javascript">
+ //To support all the browsers
+ $(window).resize(ResizeLoadIndicator);
+ $(window).load(ResizeLoadIndicator);
+ function ResizeLoadIndicator() {
+ $(".indicatorContainer .disabledBackground").css({ height: ($(window).height() - 20) + "px" });
+ }
+
+ $(".indicatorContainer").click(function(e) {
+ if (!e) { var e = window.event; }
+ e.cancelBubble = true;
+ if (e.stopPropagation) { e.stopPropagation(); }
+ });
+</script>
+
+<title>ECMAScript Internationalization – test402</title>
+<link href="styles/style.css" media="screen" rel="stylesheet" title="CSS" type="text/css" />
+</head>
+<body>
+ <div class="indicatorContainer">
+ <!--Blank div to disable back portion when indicator is shown-->
+ <div class="disabledBackground"></div>
+ <div id="loadingIndicator">
+ <div>
+ <img src="./images/spinner.gif" alt="Loading..." />
+ <span>Loading...</span>
+ </div>
+ </div>
+ </div>
+
+ <div class="wrapper">
+ <!-- This Container holds the main header -->
+ <div class="mainHeader">
+ <p><span id="ECMAscript">ECMA</span><span id="ecmaSCRIPT">Script</span> <strong>Internationalization</strong> test402
+ <a id="ecmascriptbacklink" href='javascript:void(window.open("http://www.ecmascript.org/"));'>ECMAScript.org</a></p>
+ </div>
+ <!-- This Container holds the Navigation -->
+ <div class="navBar">
+ <ul>
+ <li><a href="#" class="selected nav-link" id="home">Home</a></li>
+ <li><a href="#" class="nav-link" id="run">Run</a></li>
+ <li><a href="#" class="nav-link test-report-link" id="results">Results</a></li>
+ <li><a href="#" class="nav-link" id="development">Development</a></li>
+ </ul>
+ </div>
+ <div class="content-container" id="contentContainer">
+ <!-- This is the Main Content Container -->
+ <div class="content-home">
+ <p class="headers">What is test402?</p>
+ <p class="content">test402 is a test suite intended to check agreement between JavaScript implementations and the ECMAScript Internationalization API Specification.
+ The test suite contains many individual tests, each of which tests some specific requirements of the ECMAScript Internationalization API Specification.</p>
+ <p class="headers">What is the ECMAScript Internationalization API?</p>
+ <p class="content">The ECMAScript Internationalization API is a complement to the ECMAScript Language Specification, 5.1 edition.
+ It enables internationalization of JavaScript applications by providing collation (string comparison), number formatting, and date and time formatting, and lets applications choose the language and tailor the functionality to their needs.
+ The ECMAScript Internationalization API Specification 1.0 was approved as an official Ecma standard by the Ecma General Assembly in December 2012.
+ The ECMAScript Internationalization 1.0 standard is available in
+ <a href='javascript:void(window.open("http://www.ecma-international.org/ecma-402/1.0/ECMA-402.pdf"));'>PDF</a>,
+ <a href='javascript:void(window.open("http://www.ecma-international.org/ecma-402/1.0/"));'>HTML</a>, and
+ <a href='javascript:void(window.open("http://www.ecma-international.org/ecma-402/1.0/ECMA-402.epub"));'>EPUB</a>
+ versions from the Ecma International web site.</p>
+ <p class="headers">What is ECMAScript?</p>
+ <p class="content">"ECMAScript" is the name under which the language more commonly known as "JavaScript" is standardized. Development of the ECMAScript standard is the responsibility of <a href='javascript:void(window.open("http://www.ecma-international.org/memento/TC39.htm"));'>Technical Committee 39 (TC39)</a> of <a href='javascript:void(window.open("http://www.ecma-international.org/"));'>Ecma International</a>.
+ The ECMAScript Language Specification standard is officially known as ECMA-262.
+ ECMAScript 5.1 (or just ES5.1) is short hand for the "ECMA-262, 5.1 Edition ECMAScript Language Specification" the official name of the current edition of the standard.
+ ECMAScript 5.1 was approved as an official Ecma standard by the Ecma General Assembly in June 2011.
+ The ECMAScript 5.1 standard is available in <a href='javascript:void(window.open("http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf"));'>PDF</a> and <a href='javascript:void(window.open("http://ecma-international.org/ecma-262/5.1/"));'>HTML</a> versions from the Ecma International web site.</p>
+ <p class="headers">Who creates and maintains test402?</p>
+ <p class="content">Development of test402 is a project of Ecma TC39.
+ The testing framework and individual tests are created by member organizations of TC39 and contributed to Ecma for use in test402.
+ For more information about how test402 is developed and maintained click the “Development” tab at the top of this page.</p>
+ <p class="headers">What is the status of test402?</p>
+ <p class="content"><strong>test402 is very very very incomplete.
+ It is still undergoing active development.</strong></p>
+ <p class="headers">Where can I find out more?</p>
+ <p class="content">Please visit our <a href='javascript:void(window.open("http://wiki.ecmascript.org/doku.php?id=test262:faq"));'>Frequently Asked Questions</a> section on the <a href='javascript:void(window.open("http://wiki.ecmascript.org/doku.php?id="));'>ECMAScript Wiki</a>.</p>
+
+ <p class="headers">Running the Tests</p>
+ <p class="content">Click the “Run” tab at the top of this page for instructions and follow the instructions to run the tests.</p>
+
+ <a href='javascript:void(window.open("http://www.ecma-international.org/memento/TC39.htm"));'></a>
+
+ </div>
+
+ <div class="content-dev">
+ <p class="headers">Development</p>
+ <p class="content">Test402 is being developed by the members of Ecma TC39. Ecma's intellectual property policies permit only Ecma
+ members to directly contribute code to the project. However, a <a href='javascript:void(window.open("http://mail.mozilla.org/pipermail/test262-discuss/"));'>public mailing list</a> is used to coordinate development of test402 and its sibling test262. If you wish to participate in the discussion please <a href='javascript:void(window.open("http://mail.mozilla.org/listinfo/test262-discuss"));'>subscribe</a>. Bug reports and suggestions should be sent to the mailing list.
+ </p>
+ </div>
+
+ <div class="content-tests">
+ <!-- This is the Main Content Container -->
+ <p class="content">Please click on the Run All button to run all the tests. Once you start the test you may pause the test anytime by clicking on the Pause button. You can click on the Results tab once the test is completed or after pausing the test. The Reset button is for restarting the test run. You may run individual tests by clicking the Run button next to the tests listed below. If you wish to run several chapters in sequence, but not the entire test suite, click the Select button for the chapters you wish to run and then click the Run Selected button.</p>
+
+ <!-- This is the Progress Bar Holder -->
+ <div class="progressBarHolder">
+ <div id="progressbar"></div>
+ <div class="progressBarButtons">
+ <!-- Loading: Run All, Run Selected -->
+ <!-- Loaded: Run All, Run Selected -->
+ <!-- Running: Pause -->
+ <!-- Paused: Resume, Reset -->
+ <img src="images/runall.png" alt="Run All" title="Run all tests." id="btnRunAll" />
+ <img src="images/runselected.png" alt="Run Selected Tests" title="Run the tests selected below." id="btnRunSelected" />
+ <img src="images/pause.png" alt="Pause" title="Pause the running tests." id="btnPause" />
+ <img src="images/resume.png" alt="Resume" title="Resume the running tests." id="btnResume" />
+ <img src="images/reset.png" alt="Reset" title="Reset testing status." id="btnReset" />
+ </div>
+ <div style="clear: both;"></div>
+ </div>
+ <p class="hide">
+ Timer Value(ms) : <input id="txtTimerValue" value="50" /> <input id="btnSetTimerValue" value="Set Timer Value" type="button"/>
+ </p>
+
+ <!-- This is the Results Text Holder -->
+ <div class="resultsHeader">
+ Tests to run: <strong><span class="teststorun-counter" id="testsToRun"></span></strong>&nbsp;<span class="separator">|</span>
+ Total tests ran: <strong><span class="total-counter" id="totalCounter"></span></strong> <span class="separator">|</span>
+ Pass: <span class="pass" id="Pass"></span> <span class="separator">|</span>
+ Fail: <span class="fail" id="Fail"></span> <span class="separator">|</span>
+ Failed to load: <span class="fail" id="failedToLoadCounter1"></span>
+ <p><span id="nextActivity"></span></p>
+ </div>
+
+ <!-- Test Chapter selector -->
+ <div id="chapterSelector">
+ <table width="100%" border="0" cellspacing="0" cellpadding="2"></table>
+ </div>
+
+ <!-- This is the Table -->
+ <div class="resultsTableHolder" id="tableLoggerParent">
+ <table width="100%" border="0" cellspacing="0" cellpadding="0" class="table-logger" id="tableLogger"></table>
+ </div>
+ <div>
+ Test suite version: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ </div>
+
+ <div class="content-results">
+ <div class="crumbContainer">
+ <div class="crumbs"></div>
+ <div style="float:right;"><a class="setBlue hide" id="backlinkDiv" href="#">&lt;&lt; back</a></div>
+ <div style="clear : both;"></div>
+ </div>
+ <div class="resultsHeader"> <strong>Total tests: <span class="totalCases"></span></strong><br />
+ Passed: <span class="passedCases"></span> <span class="separator">|</span> Failed: <span class="failedCases"></span> <span class="separator">|</span>
+ Failed to load: <strong><span id="failedToLoadCounter"></span></strong>
+ </div>
+ <!-- This is the Table -->
+ <div class="resultsTableHolder">
+ <table width="100%" cellspacing="0" cellpadding="0" border="0" class="results-data-table"> </table>
+ <div id="resultMessage">Test results will be displayed after the tests are executed using the Run page.</div>
+ </div>
+ <div>
+ Test suite version: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ <div class="downloadLinks">
+ <p><a class="anchor-download-xml" id="ancGenXMLReport"><strong>Download results as XML</strong></a></p> <!--| <strong><a href="scripts/testcases.zip">Download Source</a></strong></p>-->
+ </div>
+ <div id="legend" class="hide">
+ <label class="reportGreen">Green:</label>&nbsp;100%&nbsp;
+ <label class="reportLightGreen">Green:</label>&nbsp;75% to 99.9%&nbsp;
+ <label class="reportYellow">Yellow:</label>&nbsp;50% to 75% &nbsp;
+ <label class="reportRed">Red:</label>&nbsp;less than 50%
+ </div>
+ </div>
+ </div>
+ </div>
+ <!-- This is the Footer -->
+ <div class="footer">
+ <!--<div class="Links"> <a href="">Privacy</a> | <a href="">Terms of Use</a> </div>-->
+ <div class="copyright"> &copy; <a href='javascript:void(window.open("http://www.ecma-international.org"));'>Ecma International</a> </div>
+ </div>
+ <iframe id="scriptLoader" class="hide"></iframe>
+</body>
+</html>
diff --git a/deps/v8/third_party/test262-harness/src/templates/runner.test262.html b/deps/v8/third_party/test262-harness/src/templates/runner.test262.html
new file mode 100644
index 0000000000..6eb493f1fd
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/templates/runner.test262.html
@@ -0,0 +1,203 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html dir="ltr" xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="Content-Type" />
+<script type="text/javascript" src="scripts/lib/jquery-1.4.2.min.js"></script>
+<script type="text/javascript" src="scripts/sections.js"></script>
+<script type="text/javascript">
+ //Globals
+ var STANDARD = "ECMA-262";
+ var TEST_LIST_PATH = "json/testcaseslist.json";
+</script>
+<script type="text/javascript" src="scripts/sth.js"></script>
+<script type="text/javascript" src="harness/sta.js"></script>
+<script type="text/javascript" src="scripts/lib/jqueryprogressbar.js"></script>
+<script type="text/javascript" src="scripts/helper.js"></script>
+<script type="text/javascript" src="scripts/lib/jquery.base64.js"></script>
+<script type="text/javascript">
+ //To support all the browsers
+ $(window).resize(ResizeLoadIndicator);
+ $(window).load(ResizeLoadIndicator);
+ function ResizeLoadIndicator() {
+ $(".indicatorContainer .disabledBackground").css({ height: ($(window).height() - 20) + "px" });
+ }
+
+ $(".indicatorContainer").click(function(e) {
+ if (!e) { var e = window.event; }
+ e.cancelBubble = true;
+ if (e.stopPropagation) { e.stopPropagation(); }
+ });
+</script>
+
+<title>ECMAScript Language – test262</title>
+<link href="styles/style.css" media="screen" rel="stylesheet" title="CSS" type="text/css" />
+</head>
+<body>
+ <div class="indicatorContainer">
+ <!--Blank div to disable back portion when indicator is shown-->
+ <div class="disabledBackground"></div>
+ <div id="loadingIndicator">
+ <div>
+ <img src="./images/spinner.gif" alt="Loading..." />
+ <span>Loading...</span>
+ </div>
+ </div>
+ </div>
+
+ <div class="wrapper">
+ <!-- This Container holds the main header -->
+ <div class="mainHeader">
+ <p><span id="ECMAscript">ECMA</span><span id="ecmaSCRIPT">Script</span> <strong>Language</strong> test262
+ <a id="ecmascriptbacklink" href='javascript:void(window.open("http://www.ecmascript.org/"));'>ECMAScript.org</a></p>
+ </div>
+ <!-- This Container holds the Navigation -->
+ <div class="navBar">
+ <ul>
+ <li><a href="#" class="selected nav-link" id="home">Home</a></li>
+ <li><a href="#" class="nav-link" id="run">Run</a></li>
+ <li><a href="#" class="nav-link test-report-link" id="results">Results</a></li>
+ <li><a href="#" class="nav-link" id="development">Development</a></li>
+ </ul>
+ </div>
+ <div class="content-container" id="contentContainer">
+ <!-- This is the Main Content Container -->
+ <div class="content-home">
+ <p class="headers">What is test262?</p>
+ <p class="content">test262 is a test suite intended to check agreement between JavaScript implementations and ECMA-262, the ECMAScript Language Specification (currently 5.1 Edition).
+ The test suite contains thousands of individual tests, each of which tests some specific requirements of the ECMAScript Language Specification.</p>
+ <p class="headers">What is ECMAScript?</p>
+ <p class="content">"ECMAScript" is the name under which the language more commonly known as "JavaScript" is standardized. Development of the ECMAScript standard is the responsibility of <a href='javascript:void(window.open("http://www.ecma-international.org/memento/TC39.htm"));'>Technical Committee 39 (TC39)</a> of <a href='javascript:void(window.open("http://www.ecma-international.org/"));'>Ecma International</a>.
+ The ECMAScript Language Specification standard is officially known as ECMA-262.
+ ECMAScript 5.1 (or just ES5.1) is short hand for the "ECMA-262, 5.1 Edition ECMAScript Language Specification" the official name of the current edition of the standard.
+ ECMAScript 5.1 was approved as an official Ecma standard by the Ecma General Assembly in June 2011.
+ The ECMAScript 5.1 standard is available in <a href='javascript:void(window.open("http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf"));'>PDF</a> and <a href='javascript:void(window.open("http://ecma-international.org/ecma-262/5.1/"));'>HTML</a> versions from the Ecma International web site.</p>
+ <p class="headers">Who creates and maintains test262?</p>
+ <p class="content">
+ Development of test262 is a project of Ecma TC39. The
+ testing framework and individual tests are created by
+ developers all over the world and contributed to Ecma for
+ use in test262. For more information about how test262 is
+ developed and maintained, click the
+ &#8220;Development&#8221; tab at the top of this page.
+ </p>
+ <p class="headers">What is the status of test262?</p>
+ <p class="content"><strong>test262 is not yet complete. It is still undergoing active development.</strong> Some portions of the ES5 specification have very complete test coverage while other portions of the specification have only partial test coverage. Some tests may be invalid or may yield false positive or false negative results. A perfect passing score on test262 does not guarantee that a JavaScript implementation perfectly supports ES5. Because tests are being actively added and modified, tests results from different days or times may not be directly comparable. Click the “Development” tab at the top of this page for instructions for reporting test262 bugs.</p>
+ <p class="headers">Where can I find out more?</p>
+ <p class="content">Please visit our <a href='javascript:void(window.open("http://wiki.ecmascript.org/doku.php?id=test262:faq"));'>Frequently Asked Questions</a> section on the <a href='javascript:void(window.open("http://wiki.ecmascript.org/doku.php?id="));'>ECMAScript Wiki</a>.</p>
+
+ <p class="headers">Running the Tests</p>
+ <p class="content">Click the “Run” tab at the top of this page for instructions and follow the instructions to run the tests.</p>
+
+ <a href='javascript:void(window.open("http://www.ecma-international.org/memento/TC39.htm"));'></a>
+
+ </div>
+
+ <div class="content-dev">
+ <p class="headers">Development</p>
+ <p class="content">
+ Test262 is being developed as an open source project and
+ the maintainers are accepting patches from the community.
+ The project is maintained using <a
+ href='javascript:void(window.open("https://git-scm.com/"));'>the
+ git version control system</a> and is <a
+ href='javascript:void(window.open("https://github.com/tc39/test262"));'>currently
+ hosted on GitHub.com</a>. Bug reports and patches may be
+ submitted to the GitHub repository.
+ </p>
+
+ <p class="content">
+ A <a
+ href='javascript:void(window.open("http://mail.mozilla.org/pipermail/test262-discuss/"));'>public
+ mailing list</a> is used to coordinate development of
+ test262. If you wish to participate in the discussion,
+ please <a
+ href='javascript:void(window.open("http://mail.mozilla.org/listinfo/test262-discuss"));'>subscribe</a>.
+ </p>
+ </div>
+
+ <div class="content-tests">
+ <!-- This is the Main Content Container -->
+ <p class="content">Please click on the Run All button to run all the tests. Once you start the test you may pause the test anytime by clicking on the Pause button. You can click on the Results tab once the test is completed or after pausing the test. The Reset button is for restarting the test run. You may run individual tests by clicking the Run button next to the tests listed below. If you wish to run several chapters in sequence, but not the entire test suite, click the Select button for the chapters you wish to run and then click the Run Selected button.</p>
+
+ <!-- This is the Progress Bar Holder -->
+ <div class="progressBarHolder">
+ <div id="progressbar"></div>
+ <div class="progressBarButtons">
+ <!-- Loading: Run All, Run Selected -->
+ <!-- Loaded: Run All, Run Selected -->
+ <!-- Running: Pause -->
+ <!-- Paused: Resume, Reset -->
+ <img src="images/runall.png" alt="Run All" title="Run all tests." id="btnRunAll" />
+ <img src="images/runselected.png" alt="Run Selected Tests" title="Run the tests selected below." id="btnRunSelected" />
+ <img src="images/pause.png" alt="Pause" title="Pause the running tests." id="btnPause" />
+ <img src="images/resume.png" alt="Resume" title="Resume the running tests." id="btnResume" />
+ <img src="images/reset.png" alt="Reset" title="Reset testing status." id="btnReset" />
+ </div>
+ <div style="clear: both;"></div>
+ </div>
+ <p class="hide">
+ Timer Value(ms) : <input id="txtTimerValue" value="50" /> <input id="btnSetTimerValue" value="Set Timer Value" type="button"/>
+ </p>
+
+ <!-- This is the Results Text Holder -->
+ <div class="resultsHeader">
+ Tests To run: <strong><span class="teststorun-counter" id="testsToRun"></span></strong>&nbsp;<span class="separator">|</span>
+ Total tests ran: <strong><span class="total-counter" id="totalCounter"></span></strong> <span class="separator">|</span>
+ Pass: <span class="pass" id="Pass"></span> <span class="separator">|</span>
+ Fail: <span class="fail" id="Fail"></span> <span class="separator">|</span>
+ Failed to load: <span class="fail" id="failedToLoadCounter1"></span>
+ <p><span id="nextActivity"></span></p>
+ </div>
+
+ <!-- Test Chapter selector -->
+ <div id="chapterSelector">
+ <table width="100%" border="0" cellspacing="0" cellpadding="2"></table>
+ </div>
+
+ <!-- This is the Table -->
+ <div class="resultsTableHolder" id="tableLoggerParent">
+ <table width="100%" border="0" cellspacing="0" cellpadding="0" class="table-logger" id="tableLogger"></table>
+ </div>
+ <div>
+ Test suite version: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ </div>
+
+ <div class="content-results">
+ <div class="crumbContainer">
+ <div class="crumbs"></div>
+ <div style="float:right;"><a class="setBlue hide" id="backlinkDiv" href="#">&lt;&lt; back</a></div>
+ <div style="clear : both;"></div>
+ </div>
+ <div class="resultsHeader"> <strong>Total tests: <span class="totalCases"></span></strong><br />
+ Passed: <span class="passedCases"></span> <span class="separator">|</span> Failed: <span class="failedCases"></span> <span class="separator">|</span>
+ Failed to load: <strong><span id="failedToLoadCounter"></span></strong>
+ </div>
+ <!-- This is the Table -->
+ <div class="resultsTableHolder">
+ <table width="100%" cellspacing="0" cellpadding="0" border="0" class="results-data-table"> </table>
+ <div id="resultMessage">Test results will be displayed after the tests are executed using the Run page.</div>
+ </div>
+ <div>
+ Test suite version.: <span class="targetTestSuiteVersion"></span>&nbsp;<span class="separator">|</span>&nbsp;Test suite date: <span class="targetTestSuiteDate"></span>
+ </div>
+ <div class="downloadLinks">
+ <p><a class="anchor-download-xml" id="ancGenXMLReport"><strong>Download results as XML</strong></a></p> <!--| <strong><a href="scripts/testcases.zip">Download Source</a></strong></p>-->
+ </div>
+ <div id="legend" class="hide">
+ <label class="reportGreen">Green:</label>&nbsp;100%&nbsp;
+ <label class="reportLightGreen">Green:</label>&nbsp;75% to 99.9%&nbsp;
+ <label class="reportYellow">Yellow:</label>&nbsp;50% to 75% &nbsp;
+ <label class="reportRed">Red:</label>&nbsp;less than 50%
+ </div>
+ </div>
+ </div>
+ </div>
+ <!-- This is the Footer -->
+ <div class="footer">
+ <!--<div class="Links"> <a href="">Privacy</a> | <a href="">Terms of Use</a> </div>-->
+ <div class="copyright"> &copy; <a href='javascript:void(window.open("http://www.ecma-international.org"));'>Ecma International</a> </div>
+ </div>
+ <iframe id="scriptLoader" class="hide"></iframe>
+</body>
+</html>
diff --git a/deps/v8/third_party/test262-harness/src/test262.py b/deps/v8/third_party/test262-harness/src/test262.py
new file mode 100755
index 0000000000..c92e5bf0cf
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/src/test262.py
@@ -0,0 +1,664 @@
+#!/usr/bin/env python
+# Copyright 2009 the Sputnik authors. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+# This is derived from sputnik.py, the Sputnik console test runner,
+# with elements from packager.py, which is separately
+# copyrighted. TODO: Refactor so there is less duplication between
+# test262.py and packager.py.
+
+
+import logging
+import optparse
+import os
+from os import path
+import platform
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import xml.dom.minidom
+import datetime
+import shutil
+import json
+import stat
+import xml.etree.ElementTree as xmlj
+import unicodedata
+from collections import Counter
+
+
+from parseTestRecord import parseTestRecord, stripHeader
+
+from _packagerConfig import *
+
+class Test262Error(Exception):
+ def __init__(self, message):
+ self.message = message
+
+def ReportError(s):
+ raise Test262Error(s)
+
+
+
+if not os.path.exists(EXCLUDED_FILENAME):
+ print "Cannot generate (JSON) test262 tests without a file," + \
+ " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
+ sys.exit(1)
+EXCLUDE_LIST = xml.dom.minidom.parse(EXCLUDED_FILENAME)
+EXCLUDE_REASON = EXCLUDE_LIST.getElementsByTagName("reason")
+EXCLUDE_LIST = EXCLUDE_LIST.getElementsByTagName("test")
+EXCLUDE_LIST = [x.getAttribute("id") for x in EXCLUDE_LIST]
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("--command", default=None, help="The command-line to run")
+ result.add_option("--tests", default=path.abspath('.'),
+ help="Path to the tests")
+ result.add_option("--cat", default=False, action="store_true",
+ help="Print packaged test code that would be run")
+ result.add_option("--summary", default=False, action="store_true",
+ help="Print summary after running tests")
+ result.add_option("--full-summary", default=False, action="store_true",
+ help="Print summary and test output after running tests")
+ result.add_option("--strict_only", default=False, action="store_true",
+ help="Test only strict mode")
+ result.add_option("--non_strict_only", default=False, action="store_true",
+ help="Test only non-strict mode")
+ result.add_option("--unmarked_default", default="both",
+ help="default mode for tests of unspecified strictness")
+ result.add_option("--logname", help="Filename to save stdout to")
+ result.add_option("--junitname", help="Filename to save test results in JUnit XML format")
+ result.add_option("--loglevel", default="warning",
+ help="sets log level to debug, info, warning, error, or critical")
+ result.add_option("--print-handle", default="print", help="Command to print from console")
+ result.add_option("--list-includes", default=False, action="store_true",
+ help="List includes required by tests")
+ return result
+
+
+def ValidateOptions(options):
+ if not options.command:
+ ReportError("A --command must be specified.")
+ if not path.exists(options.tests):
+ ReportError("Couldn't find test path '%s'" % options.tests)
+
+
+placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")
+
+
+def IsWindows():
+ p = platform.system()
+ return (p == 'Windows') or (p == 'Microsoft')
+
+
+class TempFile(object):
+
+ def __init__(self, suffix="", prefix="tmp", text=False):
+ self.suffix = suffix
+ self.prefix = prefix
+ self.text = text
+ self.fd = None
+ self.name = None
+ self.is_closed = False
+ self.Open()
+
+ def Open(self):
+ (self.fd, self.name) = tempfile.mkstemp(
+ suffix = self.suffix,
+ prefix = self.prefix,
+ text = self.text)
+
+ def Write(self, str):
+ os.write(self.fd, str)
+
+ def Read(self):
+ f = file(self.name)
+ result = f.read()
+ f.close()
+ return result
+
+ def Close(self):
+ if not self.is_closed:
+ self.is_closed = True
+ os.close(self.fd)
+
+ def Dispose(self):
+ try:
+ self.Close()
+ os.unlink(self.name)
+ except OSError, e:
+ logging.error("Error disposing temp file: %s", str(e))
+
+
+class TestResult(object):
+
+ def __init__(self, exit_code, stdout, stderr, case):
+ self.exit_code = exit_code
+ self.stdout = stdout
+ self.stderr = stderr
+ self.case = case
+
+ def ReportOutcome(self, long_format):
+ name = self.case.GetName()
+ mode = self.case.GetMode()
+ if self.HasUnexpectedOutcome():
+ if self.case.IsNegative():
+ print "=== %s was expected to fail in %s, but didn't ===" % (name, mode)
+ print "--- expected error: %s ---\n" % self.case.GetNegativeType()
+ else:
+ if long_format:
+ print "=== %s failed in %s ===" % (name, mode)
+ else:
+ print "%s in %s: " % (name, mode)
+ self.WriteOutput(sys.stdout)
+ if long_format:
+ print "==="
+ elif self.case.IsNegative():
+ print "%s failed in %s as expected" % (name, mode)
+ else:
+ print "%s passed in %s" % (name, mode)
+
+ def WriteOutput(self, target):
+ out = self.stdout.strip()
+ if len(out) > 0:
+ target.write("--- output --- \n %s" % out)
+ err = self.stderr.strip()
+ if len(err) > 0:
+ target.write("--- errors --- \n %s" % err)
+
+ # This is a way to make the output from the "whitespace" tests into valid XML
+ def SafeFormat(self, msg):
+ try:
+ msg = msg.encode(encoding='ascii', errors='strict')
+ msg = msg.replace('\u000Bx', '?')
+ msg = msg.replace('\u000Cx', '?')
+ except:
+ return 'Output contained invalid characters'
+
+ def XmlAssemble(self, result):
+ test_name = self.case.GetName()
+ test_mode = self.case.GetMode()
+ testCaseElement = xmlj.Element("testcase")
+ testpath = self.TestPathManipulation(test_name)
+ testCaseElement.attrib["classname"] = "%s.%s" % (testpath[0] , testpath[1])
+ testCaseElement.attrib["name"] = "%s %s" % (testpath[2].replace('.','_') , test_mode)
+ if self.HasUnexpectedOutcome():
+ failureElement = xmlj.Element("failure")
+ out = self.stdout.strip().decode('utf-8')
+ err = self.stderr.strip().decode('utf-8')
+ if len(out) > 0:
+ failureElement.text = self.SafeFormat(out)
+ if len(err) > 0:
+ failureElement.text = self.SafeFormat(err)
+ testCaseElement.append(failureElement)
+ return testCaseElement
+
+ def TestPathManipulation(self, test_name):
+ testdirlist = test_name.split('/')
+ testcase = testdirlist.pop()
+ testclass = testdirlist.pop()
+ testclass = testclass.replace('.','_')
+ if len(testdirlist) >= 1:
+ testpackage = testdirlist.pop(0)
+ else:
+ testpackage = testclass
+ return(testpackage,testclass,testcase)
+
+ def HasFailed(self):
+ return self.exit_code != 0
+
+ def AsyncHasFailed(self):
+ return 'Test262:AsyncTestComplete' not in self.stdout
+
+ def HasUnexpectedOutcome(self):
+ if self.case.IsAsyncTest():
+ return self.AsyncHasFailed() or self.HasFailed()
+ elif self.case.IsNegative():
+ return not (self.HasFailed() and self.case.NegativeMatch(self.GetErrorOutput()))
+ else:
+ return self.HasFailed()
+
+ def GetErrorOutput(self):
+ if len(self.stderr) != 0:
+ return self.stderr
+ return self.stdout
+
+
+class TestCase(object):
+
+ def __init__(self, suite, name, full_path, strict_mode):
+ self.suite = suite
+ self.name = name
+ self.full_path = full_path
+ self.strict_mode = strict_mode
+ f = open(self.full_path)
+ self.contents = f.read()
+ f.close()
+ testRecord = parseTestRecord(self.contents, name)
+ self.test = testRecord["test"]
+ del testRecord["test"]
+ del testRecord["header"]
+ testRecord.pop("commentary", None) # do not throw if missing
+ self.testRecord = testRecord;
+
+ self.validate()
+
+ def NegativeMatch(self, stderr):
+ neg = re.compile(self.GetNegativeType())
+ return re.search(neg, stderr)
+
+ def GetNegative(self):
+ if not self.IsNegative():
+ return None
+ return self.testRecord["negative"]
+
+ def GetNegativeType(self):
+ negative = self.GetNegative()
+ return negative and negative["type"]
+
+ def GetNegativePhase(self):
+ negative = self.GetNegative()
+ return negative and negative["phase"]
+
+ def GetName(self):
+ return path.join(*self.name)
+
+ def GetMode(self):
+ if self.strict_mode:
+ return "strict mode"
+ else:
+ return "non-strict mode"
+
+ def GetPath(self):
+ return self.name
+
+ def IsNegative(self):
+ return 'negative' in self.testRecord
+
+ def IsOnlyStrict(self):
+ return 'onlyStrict' in self.testRecord
+
+ def IsNoStrict(self):
+ return 'noStrict' in self.testRecord or self.IsRaw()
+
+ def IsRaw(self):
+ return 'raw' in self.testRecord
+
+ def IsAsyncTest(self):
+ return 'async' in self.testRecord
+
+ def GetIncludeList(self):
+ if self.testRecord.get('includes'):
+ return self.testRecord['includes']
+ return []
+
+ def GetAdditionalIncludes(self):
+ return '\n'.join([self.suite.GetInclude(include) for include in self.GetIncludeList()])
+
+ def GetSource(self):
+ if self.IsRaw():
+ return self.test
+
+ source = self.suite.GetInclude("sta.js") + \
+ self.suite.GetInclude("cth.js") + \
+ self.suite.GetInclude("assert.js")
+
+ if self.IsAsyncTest():
+ source = source + \
+ self.suite.GetInclude("timer.js") + \
+ self.suite.GetInclude("doneprintHandle.js").replace('print', self.suite.print_handle)
+
+ source = source + \
+ self.GetAdditionalIncludes() + \
+ self.test + '\n'
+
+ if self.GetNegativePhase() == "early":
+ source = ("throw 'Expected an early error, but code was executed.';\n" +
+ source)
+
+ if self.strict_mode:
+ source = '"use strict";\nvar strict_mode = true;\n' + source
+ else:
+ # add comment line so line numbers match in both strict and non-strict version
+ source = '//"no strict";\nvar strict_mode = false;\n' + source
+
+ return source
+
+ def InstantiateTemplate(self, template, params):
+ def GetParameter(match):
+ key = match.group(1)
+ return params.get(key, match.group(0))
+ return placeHolderPattern.sub(GetParameter, template)
+
+ def Execute(self, command):
+ if IsWindows():
+ args = '%s' % command
+ else:
+ args = command.split(" ")
+ stdout = TempFile(prefix="test262-out-")
+ stderr = TempFile(prefix="test262-err-")
+ try:
+ logging.info("exec: %s", str(args))
+ process = subprocess.Popen(
+ args,
+ shell = IsWindows(),
+ stdout = stdout.fd,
+ stderr = stderr.fd
+ )
+ code = process.wait()
+ out = stdout.Read()
+ err = stderr.Read()
+ finally:
+ stdout.Dispose()
+ stderr.Dispose()
+ return (code, out, err)
+
+ def RunTestIn(self, command_template, tmp):
+ tmp.Write(self.GetSource())
+ tmp.Close()
+ command = self.InstantiateTemplate(command_template, {
+ 'path': tmp.name
+ })
+ (code, out, err) = self.Execute(command)
+ return TestResult(code, out, err, self)
+
+ def Run(self, command_template):
+ tmp = TempFile(suffix=".js", prefix="test262-", text=True)
+ try:
+ result = self.RunTestIn(command_template, tmp)
+ finally:
+ tmp.Dispose()
+ return result
+
+ def Print(self):
+ print self.GetSource()
+
+ def validate(self):
+ flags = self.testRecord.get("flags")
+ phase = self.GetNegativePhase()
+
+ if phase not in [None, "early", "runtime"]:
+ raise TypeError("Invalid value for negative phase: " + phase)
+
+ if not flags:
+ return
+
+ if 'raw' in flags:
+ if 'noStrict' in flags:
+ raise TypeError("The `raw` flag implies the `noStrict` flag")
+ elif 'onlyStrict' in flags:
+ raise TypeError(
+ "The `raw` flag is incompatible with the `onlyStrict` flag")
+ elif len(self.GetIncludeList()) > 0:
+ raise TypeError(
+ "The `raw` flag is incompatible with the `includes` tag")
+
+class ProgressIndicator(object):
+
+ def __init__(self, count):
+ self.count = count
+ self.succeeded = 0
+ self.failed = 0
+ self.failed_tests = []
+
+ def HasRun(self, result):
+ result.ReportOutcome(True)
+ if result.HasUnexpectedOutcome():
+ self.failed += 1
+ self.failed_tests.append(result)
+ else:
+ self.succeeded += 1
+
+
+def MakePlural(n):
+ if (n == 1):
+ return (n, "")
+ else:
+ return (n, "s")
+
+def PercentFormat(partial, total):
+ return "%i test%s (%.1f%%)" % (MakePlural(partial) +
+ ((100.0 * partial)/total,))
+
+
+class TestSuite(object):
+
+ def __init__(self, root, strict_only, non_strict_only, unmarked_default, print_handle):
+ # TODO: derive from packagerConfig.py
+ self.test_root = path.join(root, 'test')
+ self.lib_root = path.join(root, 'harness')
+ self.strict_only = strict_only
+ self.non_strict_only = non_strict_only
+ self.unmarked_default = unmarked_default
+ self.print_handle = print_handle
+ self.include_cache = { }
+
+
+ def Validate(self):
+ if not path.exists(self.test_root):
+ ReportError("No test repository found")
+ if not path.exists(self.lib_root):
+ ReportError("No test library found")
+
+ def IsHidden(self, path):
+ return path.startswith('.') or path == 'CVS'
+
+ def IsTestCase(self, path):
+ return path.endswith('.js')
+
+ def ShouldRun(self, rel_path, tests):
+ if len(tests) == 0:
+ return True
+ for test in tests:
+ if test in rel_path:
+ return True
+ return False
+
+ def GetInclude(self, name):
+ if not name in self.include_cache:
+ static = path.join(self.lib_root, name)
+ if path.exists(static):
+ f = open(static)
+ contents = stripHeader(f.read())
+ contents = re.sub(r'\r\n', '\n', contents)
+ self.include_cache[name] = contents + "\n"
+ f.close()
+ else:
+ ReportError("Can't find: " + static)
+ return self.include_cache[name]
+
+ def EnumerateTests(self, tests):
+ logging.info("Listing tests in %s", self.test_root)
+ cases = []
+ for root, dirs, files in os.walk(self.test_root):
+ for f in [x for x in dirs if self.IsHidden(x)]:
+ dirs.remove(f)
+ dirs.sort()
+ for f in sorted(files):
+ if self.IsTestCase(f):
+ full_path = path.join(root, f)
+ if full_path.startswith(self.test_root):
+ rel_path = full_path[len(self.test_root)+1:]
+ else:
+ logging.warning("Unexpected path %s", full_path)
+ rel_path = full_path
+ if self.ShouldRun(rel_path, tests):
+ basename = path.basename(full_path)[:-3]
+ name = rel_path.split(path.sep)[:-1] + [basename]
+ if EXCLUDE_LIST.count(basename) >= 1:
+ print 'Excluded: ' + basename
+ else:
+ if not self.non_strict_only:
+ strict_case = TestCase(self, name, full_path, True)
+ if not strict_case.IsNoStrict():
+ if strict_case.IsOnlyStrict() or \
+ self.unmarked_default in ['both', 'strict']:
+ cases.append(strict_case)
+ if not self.strict_only:
+ non_strict_case = TestCase(self, name, full_path, False)
+ if not non_strict_case.IsOnlyStrict():
+ if non_strict_case.IsNoStrict() or \
+ self.unmarked_default in ['both', 'non_strict']:
+ cases.append(non_strict_case)
+ logging.info("Done listing tests")
+ return cases
+
+
+ def PrintSummary(self, progress, logfile):
+
+ def write(s):
+ if logfile:
+ self.logf.write(s + "\n")
+ print s
+
+ print
+ write("=== Summary ===");
+ count = progress.count
+ succeeded = progress.succeeded
+ failed = progress.failed
+ write(" - Ran %i test%s" % MakePlural(count))
+ if progress.failed == 0:
+ write(" - All tests succeeded")
+ else:
+ write(" - Passed " + PercentFormat(succeeded, count))
+ write(" - Failed " + PercentFormat(failed, count))
+ positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
+ negative = [c for c in progress.failed_tests if c.case.IsNegative()]
+ if len(positive) > 0:
+ print
+ write("Failed Tests")
+ for result in positive:
+ write(" %s in %s" % (result.case.GetName(), result.case.GetMode()))
+ if len(negative) > 0:
+ print
+ write("Expected to fail but passed ---")
+ for result in negative:
+ write(" %s in %s" % (result.case.GetName(), result.case.GetMode()))
+
+ def PrintFailureOutput(self, progress, logfile):
+ for result in progress.failed_tests:
+ if logfile:
+ self.WriteLog(result)
+ print
+ result.ReportOutcome(False)
+
+ def Run(self, command_template, tests, print_summary, full_summary, logname, junitfile):
+ if not "{{path}}" in command_template:
+ command_template += " {{path}}"
+ cases = self.EnumerateTests(tests)
+ if len(cases) == 0:
+ ReportError("No tests to run")
+ progress = ProgressIndicator(len(cases))
+ if logname:
+ self.logf = open(logname, "w")
+ if junitfile:
+ self.outfile = open(junitfile, "w")
+ TestSuitesElement = xmlj.Element("testsuites")
+ TestSuiteElement = xmlj.Element("testsuite")
+ TestSuitesElement.append(TestSuiteElement)
+ TestSuiteElement.attrib["name "] = "test262"
+ for x in range(len(EXCLUDE_LIST)):
+ if self.ShouldRun (unicode(EXCLUDE_LIST[x].encode('utf-8','ignore')), tests):
+ SkipCaseElement = xmlj.Element("testcase")
+ SkipCaseElement.attrib["classname"] = unicode(EXCLUDE_LIST[x]).encode('utf-8','ignore')
+ SkipCaseElement.attrib["name"] = unicode(EXCLUDE_LIST[x]).encode('utf-8','ignore')
+ SkipElement = xmlj.Element("skipped")
+ SkipElement.attrib["message"] = unicode(EXCLUDE_REASON[x].firstChild.nodeValue)
+ SkipCaseElement.append(SkipElement)
+ TestSuiteElement.append(SkipCaseElement)
+
+ for case in cases:
+ result = case.Run(command_template)
+ if junitfile:
+ TestCaseElement = result.XmlAssemble(result)
+ TestSuiteElement.append(TestCaseElement)
+ if case == cases[len(cases)-1]:
+ xmlj.ElementTree(TestSuitesElement).write(junitfile, "UTF-8")
+ if logname:
+ self.WriteLog(result)
+ progress.HasRun(result)
+
+ if print_summary:
+ self.PrintSummary(progress, logname)
+ if full_summary:
+ self.PrintFailureOutput(progress, logname)
+ else:
+ print
+ print "Use --full-summary to see output from failed tests"
+ print
+ return progress.failed
+
+ def WriteLog(self, result):
+ name = result.case.GetName()
+ mode = result.case.GetMode()
+ if result.HasUnexpectedOutcome():
+ if result.case.IsNegative():
+ self.logf.write("=== %s was expected to fail in %s, but didn't === \n" % (name, mode))
+ self.logf.write("--- expected error: %s ---\n" % result.case.GetNegativeType())
+ result.WriteOutput(self.logf)
+ else:
+ self.logf.write("=== %s failed in %s === \n" % (name, mode))
+ result.WriteOutput(self.logf)
+ self.logf.write("===\n")
+ elif result.case.IsNegative():
+ self.logf.write("%s failed in %s as expected \n" % (name, mode))
+ else:
+ self.logf.write("%s passed in %s \n" % (name, mode))
+
+ def Print(self, tests):
+ cases = self.EnumerateTests(tests)
+ if len(cases) > 0:
+ cases[0].Print()
+
+ def ListIncludes(self, tests):
+ cases = self.EnumerateTests(tests)
+ includes_dict = Counter()
+ for case in cases:
+ includes = case.GetIncludeList()
+ includes_dict.update(includes)
+
+ print includes_dict
+
+
+def Main():
+ code = 0
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ ValidateOptions(options)
+ test_suite = TestSuite(options.tests,
+ options.strict_only,
+ options.non_strict_only,
+ options.unmarked_default,
+ options.print_handle)
+ test_suite.Validate()
+ if options.loglevel == 'debug':
+ logging.basicConfig(level=logging.DEBUG)
+ elif options.loglevel == 'info':
+ logging.basicConfig(level=logging.INFO)
+ elif options.loglevel == 'warning':
+ logging.basicConfig(level=logging.WARNING)
+ elif options.loglevel == 'error':
+ logging.basicConfig(level=logging.ERROR)
+ elif options.loglevel == 'critical':
+ logging.basicConfig(level=logging.CRITICAL)
+ if options.cat:
+ test_suite.Print(args)
+ elif options.list_includes:
+ test_suite.ListIncludes(args)
+ else:
+ code = test_suite.Run(options.command, args,
+ options.summary or options.full_summary,
+ options.full_summary,
+ options.logname,
+ options.junitname)
+ return code
+
+if __name__ == '__main__':
+ try:
+ code = Main()
+ sys.exit(code)
+ except Test262Error, e:
+ print "Error: %s" % e.message
+ sys.exit(1)
diff --git a/deps/v8/third_party/test262-harness/test/README.md b/deps/v8/third_party/test262-harness/test/README.md
new file mode 100644
index 0000000000..6084f93b8e
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/README.md
@@ -0,0 +1,11 @@
+# Unit tests for python packaging tools
+
+This directory holds tests for the python code, not tests of EMCAScript
+
+## Running tests
+
+````
+$ cd tools/packaging/test
+$ for x in test*.py; do python $x; done
+````
+
diff --git a/deps/v8/third_party/test262-harness/test/fixtures/negative.js b/deps/v8/third_party/test262-harness/test/fixtures/negative.js
new file mode 100644
index 0000000000..f772b2e8bd
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/fixtures/negative.js
@@ -0,0 +1,11 @@
+// fake copyright comment
+/*---
+info: >
+ Sample test info
+description: Sample test description
+negative:
+ phase: early
+ type: SyntaxError
+---*/
+
+???
diff --git a/deps/v8/third_party/test262-harness/test/fixtures/test262-old-headers.js b/deps/v8/third_party/test262-harness/test/fixtures/test262-old-headers.js
new file mode 100644
index 0000000000..ff41177c5d
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/fixtures/test262-old-headers.js
@@ -0,0 +1,19 @@
+// Copyright 2009 the Sputnik authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.
+
+/**
+ * The production Block { } in strict code can't contain function
+ * declaration;
+ *
+ * @path bestPractice/Sbp_A1_T1.js
+ * @description Trying to declare function at the Block statement
+ * @onlyStrict
+ * @negative SyntaxError
+ * @bestPractice http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls
+ */
+
+"use strict";
+{
+ function __func(){}
+}
+
diff --git a/deps/v8/third_party/test262-harness/test/fixtures/test262-yaml-headers.js b/deps/v8/third_party/test262-harness/test/fixtures/test262-yaml-headers.js
new file mode 100644
index 0000000000..e897237bcb
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/fixtures/test262-yaml-headers.js
@@ -0,0 +1,18 @@
+// Copyright 2009 the Sputnik authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.
+
+/*---
+info: >
+ The production Block { } in strict code can't contain function
+ declaration;
+description: Trying to declare function at the Block statement
+negative: SyntaxError
+bestPractice: "http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls"
+flags: [onlyStrict]
+---*/
+
+"use strict";
+{
+ function __func(){}
+}
+
diff --git a/deps/v8/third_party/test262-harness/test/test_common.py b/deps/v8/third_party/test262-harness/test/test_common.py
new file mode 100644
index 0000000000..4c86b0a334
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/test_common.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Copyright 2014 by Sam Mikes. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+import unittest
+
+import os
+
+# add parent dir to search path
+import sys
+sys.path.append("src")
+
+
+from _common import *
+
+def slurpFile(name):
+ with open('test/' + name) as f:
+ contents = f.read()
+ return contents
+
+
+class TestOldParsing(unittest.TestCase):
+
+ def test_test(self):
+ pass
+
+ def test_overview(self):
+ name = 'fixtures/test262-old-headers.js'
+ contents = slurpFile(name)
+ record = convertDocString(contents)
+
+ self.assertEqual("""The production Block { } in strict code can't contain function
+declaration;""", record['commentary'])
+
+ self.assertEqual("bestPractice/Sbp_A1_T1.js", record['path'])
+ self.assertEqual("Trying to declare function at the Block statement",
+ record['description'])
+ self.assertEqual("", record['onlyStrict'])
+ self.assertEqual("SyntaxError", record['negative'])
+ self.assertEqual("http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls",
+ record['bestPractice'])
+
+
+class TestYAMLParsing(unittest.TestCase):
+
+ def test_overview(self):
+ name = 'fixtures/test262-yaml-headers.js'
+ contents = slurpFile(name)
+ record = convertDocString(contents)
+
+ self.assertEqual("The production Block { } in strict code can't contain function declaration;\n", record['commentary'])
+
+ self.assertEqual("Trying to declare function at the Block statement",
+ record['description'])
+ self.assertEqual(['onlyStrict'], record['flags'])
+ self.assertEqual("", record['onlyStrict'])
+ self.assertEqual("SyntaxError", record['negative'])
+ self.assertEqual("http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls",
+ record['bestPractice'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/third_party/test262-harness/test/test_monkeyYaml.py b/deps/v8/third_party/test262-harness/test/test_monkeyYaml.py
new file mode 100644
index 0000000000..428e45b6a0
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/test_monkeyYaml.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+# Copyright 2014 by Sam Mikes. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+import unittest
+
+import os
+import yaml
+import imp
+
+# add parent dir to search path
+import sys
+sys.path.append("src")
+
+import _monkeyYaml as monkeyYaml
+
+class TestMonkeyYAMLParsing(unittest.TestCase):
+
+ def test_empty(self):
+ self.assertEqual(monkeyYaml.load(""), yaml.load(""))
+
+ def test_newline(self):
+ self.assertEqual(monkeyYaml.load("\n"), yaml.load("\n"))
+
+ def test_oneline(self):
+ y = "foo: bar"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_twolines(self):
+ y = "foo: bar\nbaz_bletch : blith:er"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_multiLine(self):
+ y = "foo: >\n bar\nbaz: 3"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_es5id(self):
+ y = "es5id: 15.2.3.6-4-102"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_Multiline_1(self):
+ lines = [" foo"]
+ value = ">"
+ y = "\n".join([value] + lines)
+ (lines, value) = monkeyYaml.myMultiline(lines, value)
+ self.assertEqual(lines, [])
+ self.assertEqual(value, yaml.load(y))
+
+ def test_Multiline_2(self):
+ lines = [" foo", " bar"]
+ y = "\n".join([">"] + lines)
+ (lines, value) = monkeyYaml.myMultiline(lines)
+ self.assertEqual(lines, [])
+ self.assertEqual(value, yaml.load(y))
+
+ def test_Multiline_3(self):
+ lines = [" foo", " bar"]
+ y = "\n".join([">"] + lines)
+ (lines, value) = monkeyYaml.myMultiline(lines)
+ self.assertEqual(lines, [])
+ self.assertEqual(value, yaml.load(y))
+
+ def test_Multiline_4(self):
+ lines = [" foo", " bar", " other: 42"]
+ (lines, value) = monkeyYaml.myMultiline(lines)
+ self.assertEqual(lines, [" other: 42"])
+ self.assertEqual(value, "foo bar")
+
+ def test_myLeading(self):
+ self.assertEqual(2, monkeyYaml.myLeadingSpaces(" foo"))
+ self.assertEqual(2, monkeyYaml.myLeadingSpaces(" "))
+ self.assertEqual(0, monkeyYaml.myLeadingSpaces("\t "))
+
+ def test_includes_flow(self):
+ y = "includes: [a.js,b.js, c_with_wings.js]\n"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_myFlowList_1(self):
+ y = "[a.js,b.js, c_with_wings.js, 3, 4.12]"
+ self.assertEqual(monkeyYaml.myFlowList(y), ['a.js', 'b.js', 'c_with_wings.js', 3, 4.12])
+
+ def test_multiline_list_1(self):
+ y = "foo:\n - bar\n - baz"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_multiline_list2(self):
+ self.assertEqual(monkeyYaml.myRemoveListHeader(2, " - foo"), "foo")
+
+ def test_multiline_list3(self):
+ (lines, value) = monkeyYaml.myMultilineList([" - foo", " - bar", "baz: bletch"], "")
+ self.assertEqual(lines, ["baz: bletch"])
+ self.assertEqual(value, ["foo", "bar"])
+
+ def test_multiline_list_carriage_return(self):
+ y = "foo:\r\n - bar\r\n - baz"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_oneline_indented(self):
+ y = " foo: bar\n baz: baf\n"
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+
+ def test_indentation_215(self):
+ self.maxDiff = None
+ y = """
+ description: >
+ The method should exist on the Array prototype, and it should be writable
+ and configurable, but not enumerable.
+ includes: [propertyHelper.js]
+ es6id: 22.1.3.13
+ """
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_indentation_215_2(self):
+ self.maxDiff = None
+ y = """
+ description: >
+ The method should exist
+ includes: [propertyHelper.js]
+ es6id: 22.1.3.13
+ """
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_line_folding(self):
+ self.maxDiff = None
+ y = """
+description: aaa
+ bbb
+es6id: 19.1.2.1
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_line_folding_2(self):
+ self.maxDiff = None
+ y = """
+description: ccc
+
+ ddd
+
+es6id: 19.1.2.1
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_line_folding_3(self):
+ self.maxDiff = None
+ y = """
+description: eee
+
+
+ fff
+es6id: 19.1.2.1
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_line_folding_4(self):
+ self.maxDiff = None
+ y = """
+description: ggg
+
+ hhh
+ iii
+
+ jjj
+es6id: 19.1.2.1
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_no_folding(self):
+ y = """
+description: |
+ This is text that, naively parsed, would appear
+
+ to: have
+ nested: data
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_value_multiline(self):
+ y = """
+description:
+ This is a multi-line value
+
+ whose trailing newline should be stripped
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_nested_1(self):
+ y = """
+es61d: 19.1.2.1
+negative:
+ stage: early
+ type: ReferenceError
+description: foo
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+ def test_nested_2(self):
+ y = """
+es61d: 19.1.2.1
+first:
+ second_a:
+ third: 1
+ second_b: 3
+description: foo
+"""
+ self.assertEqual(monkeyYaml.load(y), yaml.load(y))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/third_party/test262-harness/test/test_parseTestRecord.py b/deps/v8/third_party/test262-harness/test/test_parseTestRecord.py
new file mode 100644
index 0000000000..36576b57b1
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/test_parseTestRecord.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+
+# Copyright 2014 by Sam Mikes. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+import unittest
+
+import os
+import yaml
+
+# add parent dir to search path
+import sys
+sys.path.append("src")
+
+from parseTestRecord import *
+
+def slurpFile(name):
+ with open('test/' + name) as f:
+ contents = f.read()
+ return contents
+
+
+class TestOldParsing(unittest.TestCase):
+
+ def test_test(self):
+ self.assertTrue(True)
+
+ def test_overview(self):
+ name = 'fixtures/test262-old-headers.js'
+ contents = slurpFile(name)
+ record = parseTestRecord(contents, name)
+
+ self.assertEqual("""// Copyright 2009 the Sputnik authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.""",
+ record['header'])
+ self.assertEqual("""The production Block { } in strict code can't contain function
+declaration;""", record['commentary'])
+
+ self.assertEqual("bestPractice/Sbp_A1_T1.js", record['path'])
+ self.assertEqual("Trying to declare function at the Block statement",
+ record['description'])
+ self.assertEqual("", record['onlyStrict'])
+ self.assertEqual("SyntaxError", record['negative'])
+ self.assertEqual("http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls",
+ record['bestPractice'])
+
+ self.assertEqual(""""use strict";
+{
+ function __func(){}
+}
+
+""", record['test'])
+
+ @unittest.expectedFailure
+ def test_nomatch(self):
+ with self.assertRaisesRegexp(Exception, "unrecognized"):
+ parseTestRecord("#!/usr/bin/env python", "random.py")
+
+ def test_duplicate(self):
+ with self.assertRaisesRegexp(Exception, "duplicate: foo"):
+ parseTestRecord("""
+// Copyright
+
+/**
+ * @foo bar
+ * @foo bar
+ */
+
+1;
+"""
+ , "name")
+
+ def test_malformed(self):
+ with self.assertRaisesRegexp(Exception, 'Malformed "@" attribute: name'):
+ parseTestRecord("""
+// Copyright
+
+/**
+ * @ baz
+ * @foo bar
+ */
+
+1;
+"""
+ , "name")
+
+ def test_stripStars(self):
+ self.assertEqual("", stripStars(""))
+ self.assertEqual("foo", stripStars("\n* foo"))
+ self.assertEqual("@foo bar", stripStars("\n* @foo bar"))
+ self.assertEqual("@foo bar", stripStars("\n *@foo bar"))
+
+
+class TestYAMLParsing(unittest.TestCase):
+ def test_test(self):
+ self.assertTrue(True)
+
+ def test_split(self):
+ name = 'fixtures/test262-yaml-headers.js'
+ contents = slurpFile(name)
+ self.assertTrue('---' in contents)
+ match = matchParts(contents, name)
+ self.assertEqual("""---
+info: >
+ The production Block { } in strict code can't contain function
+ declaration;
+description: Trying to declare function at the Block statement
+negative: SyntaxError
+bestPractice: "http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls"
+flags: [onlyStrict]
+---""", match.group(2))
+
+ def test_yamlParse(self):
+ text = """
+info: >
+ The production Block { } in strict code can't contain function
+ declaration;
+description: Trying to declare function at the Block statement
+negative: SyntaxError
+bestPractice: "http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls"
+flags: [onlyStrict]"""
+ parsed = yaml.load(text)
+
+ self.assertEqual("Trying to declare function at the Block statement",
+ parsed['description'])
+ self.assertEqual("SyntaxError", parsed['negative'])
+ self.assertEqual('http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls', parsed['bestPractice'])
+ self.assertEqual(["onlyStrict"], parsed['flags'])
+ self.assertEqual("The production Block { } in strict code can't contain function declaration;\n", parsed['info'])
+
+ def test_hasYAML(self):
+ self.assertTrue(hasYAML("---\n some: yaml\n\n---"))
+ self.assertFalse(hasYAML("\n* Test description\n *\n * @foo bar\n* @noStrict\n"))
+
+ def test_fixturehasYAML(self):
+ name = 'fixtures/test262-yaml-headers.js'
+ contents = slurpFile(name)
+ self.assertTrue('---' in contents)
+ match = matchParts(contents, name)
+ self.assertTrue(hasYAML(match.group(2)))
+
+ def test_missingKeys(self):
+ result = {}
+ yamlAttrParser(result, """---
+ info: some info (note no flags or includes)
+---""", "")
+ self.assertEqual("some info (note no flags or includes)", result['commentary'])
+
+ def test_overview(self):
+ name = 'fixtures/test262-yaml-headers.js'
+ contents = slurpFile(name)
+ record = parseTestRecord(contents, name)
+
+ self.assertEqual("""// Copyright 2009 the Sputnik authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.""",
+ record['header'])
+ self.assertEqual("The production Block { } in strict code can't contain function declaration;\n", record['commentary'])
+
+ self.assertEqual("Trying to declare function at the Block statement",
+ record['description'])
+ self.assertEqual(['onlyStrict'], record['flags'])
+ self.assertEqual("", record['onlyStrict'])
+ self.assertEqual("SyntaxError", record['negative'])
+ self.assertEqual("http://wiki.ecmascript.org/doku.php?id=conventions:no_non_standard_strict_decls",
+ record['bestPractice'])
+
+ self.assertEqual(""""use strict";
+{
+ function __func(){}
+}
+
+""", record['test'])
+
+ def test_negative(self):
+ name = 'fixtures/negative.js'
+ contents = slurpFile(name)
+ record = parseTestRecord(contents, name)
+
+ self.assertEqual('early', record['negative']['phase'])
+ self.assertEqual('SyntaxError', record['negative']['type'])
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/third_party/test262-harness/test/test_test262.py b/deps/v8/third_party/test262-harness/test/test_test262.py
new file mode 100644
index 0000000000..8cf41d79f0
--- /dev/null
+++ b/deps/v8/third_party/test262-harness/test/test_test262.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+# Copyright 2014 by Sam Mikes. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+import unittest
+
+import sys
+import os
+import cStringIO
+from functools import wraps
+
+sys.path.append("src")
+
+import test262
+
+class TestTest262(unittest.TestCase):
+
+ def test_that_tests_run(self):
+ self.assertEqual(1 + 2, 3)
+
+class MockTest(object):
+
+ def __init__(self, name, negative):
+ self.name = name
+ self.negative = negative if negative else False
+ self.strict_mode = False
+
+ def GetName(self):
+ return self.name
+
+ def IsNegative(self):
+ return self.negative
+
+ def GetMode(self):
+ if self.strict_mode:
+ return "strict mode"
+
+ return "non-strict mode"
+
+class MockResult(object):
+
+ def __init__(self, case):
+ self.case = case
+
+
+
+class TestTestSuite(unittest.TestCase):
+
+ def test_that_tests_run(self):
+ self.assertEqual(1 + 2, 3)
+
+ def test_create_test_suite(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+ self.assertNotEqual(test_suite, None)
+
+ def test_summary(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+
+ progress = test262.ProgressIndicator(100)
+ progress.succeeded = 98
+ progress.failed = 2
+
+ result = mute(True)(test_suite.PrintSummary)(progress, None)
+ self.assertEqual("""
+=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+""", result)
+
+ def test_summary_logfile(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+
+ progress = test262.ProgressIndicator(100)
+ progress.succeeded = 98
+ progress.failed = 2
+
+ fake_log = cStringIO.StringIO()
+ test_suite.logf = fake_log
+
+ result = mute(True)(test_suite.PrintSummary)(progress, True)
+
+ expected_out = """
+=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+"""
+
+ expected_log = """=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+"""
+ self.assertEqual(expected_out, result)
+ self.assertEqual(expected_log, fake_log.getvalue())
+
+
+ def test_summary_withfails(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+
+ progress = test262.ProgressIndicator(100)
+ progress.succeeded = 98
+ progress.failed = 2
+ progress.failed_tests = [
+ MockResult(MockTest("foo", False)),
+ MockResult(MockTest("bar", True))
+ ]
+
+ result = mute(True)(test_suite.PrintSummary)(progress, None)
+ self.assertEqual("""
+=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+
+Failed Tests
+ foo in non-strict mode
+
+Expected to fail but passed ---
+ bar in non-strict mode
+""", result)
+
+
+ def test_summary_withfails_andlog(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+
+ progress = test262.ProgressIndicator(100)
+ progress.succeeded = 98
+ progress.failed = 2
+ progress.failed_tests = [
+ MockResult(MockTest("foo", False)),
+ MockResult(MockTest("bar", True))
+ ]
+
+ fake_log = cStringIO.StringIO()
+ test_suite.logf = fake_log
+
+ expected_out = """
+=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+
+Failed Tests
+ foo in non-strict mode
+
+Expected to fail but passed ---
+ bar in non-strict mode
+"""
+ expected_log = """=== Summary ===
+ - Ran 100 tests
+ - Passed 98 tests (98.0%)
+ - Failed 2 tests (2.0%)
+Failed Tests
+ foo in non-strict mode
+Expected to fail but passed ---
+ bar in non-strict mode
+"""
+
+ result = mute(True)(test_suite.PrintSummary)(progress, True)
+ self.assertEqual(expected_out, result)
+ self.assertEqual(expected_log, fake_log.getvalue())
+
+
+ def test_summary_success_logfile(self):
+ test_suite = test262.TestSuite(".",
+ False,
+ False,
+ False,
+ None)
+
+ progress = test262.ProgressIndicator(100)
+ progress.succeeded = 100
+ progress.failed = 0
+
+ fake_log = cStringIO.StringIO()
+ test_suite.logf = fake_log
+
+ result = mute(True)(test_suite.PrintSummary)(progress, True)
+
+ expected_out = """
+=== Summary ===
+ - Ran 100 tests
+ - All tests succeeded
+"""
+
+ expected_log = """=== Summary ===
+ - Ran 100 tests
+ - All tests succeeded
+"""
+ self.assertEqual(expected_out, result)
+ self.assertEqual(expected_log, fake_log.getvalue())
+
+
+ def test_percent_format(self):
+ self.assertEqual(test262.PercentFormat(1, 100), "1 test (1.0%)")
+ self.assertEqual(test262.PercentFormat(0, 100), "0 tests (0.0%)")
+ self.assertEqual(test262.PercentFormat(99, 100), "99 tests (99.0%)")
+
+
+# module level utility functions
+# copied from https://stackoverflow.com/questions/2828953/silence-the-stdout-of-a-function-in-python-without-trashing-sys-stdout-and-resto
+
+
+def mute(returns_output=False):
+ """
+ Decorate a function that prints to stdout, intercepting the output.
+ If "returns_output" is True, the function will return a generator
+ yielding the printed lines instead of the return values.
+
+ The decorator litterally hijack sys.stdout during each function
+ execution for ALL THE THREADS, so be careful with what you apply it to
+ and in which context.
+
+ >>> def numbers():
+ print "42"
+ print "1984"
+ ...
+ >>> numbers()
+ 42
+ 1984
+ >>> mute()(numbers)()
+ >>> list(mute(True)(numbers)())
+ ['42', '1984']
+
+ """
+
+ def decorator(func):
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+
+ saved_stdout = sys.stdout
+ sys.stdout = cStringIO.StringIO()
+
+ try:
+ out = func(*args, **kwargs)
+ if returns_output:
+ out = sys.stdout.getvalue()
+ finally:
+ sys.stdout = saved_stdout
+
+ return out
+
+ return wrapper
+
+ return decorator
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/deps/v8/third_party/zlib/google/BUILD.gn b/deps/v8/third_party/zlib/google/BUILD.gn
index c29e892780..e996b167db 100644
--- a/deps/v8/third_party/zlib/google/BUILD.gn
+++ b/deps/v8/third_party/zlib/google/BUILD.gn
@@ -7,6 +7,7 @@ import("//build_overrides/build.gni")
if (build_with_chromium) {
static_library("zip") {
sources = [
+ "redact.h",
"zip.cc",
"zip.h",
"zip_internal.cc",
@@ -18,6 +19,7 @@ if (build_with_chromium) {
]
deps = [
"//base",
+ "//base:i18n",
"//third_party/zlib:minizip",
]
}
diff --git a/deps/v8/third_party/zlib/google/compression_utils_unittest.cc b/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
index 415b9ab922..76572e5a47 100644
--- a/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
+++ b/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
@@ -7,9 +7,9 @@
#include <stddef.h>
#include <stdint.h>
+#include <iterator>
#include <string>
-#include "base/cxx17_backports.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace compression {
@@ -33,24 +33,24 @@ const uint8_t kCompressedData[] = {
} // namespace
TEST(CompressionUtilsTest, GzipCompression) {
- std::string data(reinterpret_cast<const char*>(kData), base::size(kData));
+ std::string data(reinterpret_cast<const char*>(kData), std::size(kData));
std::string compressed_data;
EXPECT_TRUE(GzipCompress(data, &compressed_data));
std::string golden_compressed_data(
reinterpret_cast<const char*>(kCompressedData),
- base::size(kCompressedData));
+ std::size(kCompressedData));
EXPECT_EQ(golden_compressed_data, compressed_data);
}
TEST(CompressionUtilsTest, GzipUncompression) {
std::string compressed_data(reinterpret_cast<const char*>(kCompressedData),
- base::size(kCompressedData));
+ std::size(kCompressedData));
std::string uncompressed_data;
EXPECT_TRUE(GzipUncompress(compressed_data, &uncompressed_data));
std::string golden_data(reinterpret_cast<const char*>(kData),
- base::size(kData));
+ std::size(kData));
EXPECT_EQ(golden_data, uncompressed_data);
}
@@ -59,7 +59,7 @@ TEST(CompressionUtilsTest, GzipUncompressionFromSpanToString) {
EXPECT_TRUE(GzipUncompress(kCompressedData, &uncompressed_data));
std::string golden_data(reinterpret_cast<const char*>(kData),
- base::size(kData));
+ std::size(kData));
EXPECT_EQ(golden_data, uncompressed_data);
}
@@ -84,10 +84,10 @@ TEST(CompressionUtilsTest, LargeInput) {
TEST(CompressionUtilsTest, InPlace) {
const std::string original_data(reinterpret_cast<const char*>(kData),
- base::size(kData));
+ std::size(kData));
const std::string golden_compressed_data(
reinterpret_cast<const char*>(kCompressedData),
- base::size(kCompressedData));
+ std::size(kCompressedData));
std::string data(original_data);
EXPECT_TRUE(GzipCompress(data, &data));
diff --git a/deps/v8/third_party/zlib/google/redact.h b/deps/v8/third_party/zlib/google/redact.h
new file mode 100644
index 0000000000..ea7da16a52
--- /dev/null
+++ b/deps/v8/third_party/zlib/google/redact.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_
+#define THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_
+
+#include <ostream>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+
+namespace zip {
+
+// Redacts file paths in log messages.
+// Example:
+// LOG(ERROR) << "Cannot open " << Redact(path);
+class Redact {
+ public:
+ explicit Redact(const base::FilePath& path) : path_(path) {}
+
+ friend std::ostream& operator<<(std::ostream& out, const Redact&& r) {
+ return LOG_IS_ON(INFO) ? out << "'" << r.path_ << "'" : out << "(redacted)";
+ }
+
+ private:
+ const base::FilePath& path_;
+};
+
+} // namespace zip
+
+#endif // THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_
diff --git a/deps/v8/third_party/zlib/google/zip.cc b/deps/v8/third_party/zlib/google/zip.cc
index 7c46718808..a52f40690b 100644
--- a/deps/v8/third_party/zlib/google/zip.cc
+++ b/deps/v8/third_party/zlib/google/zip.cc
@@ -14,6 +14,7 @@
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
+#include "third_party/zlib/google/redact.h"
#include "third_party/zlib/google/zip_internal.h"
#include "third_party/zlib/google/zip_reader.h"
#include "third_party/zlib/google/zip_writer.h"
@@ -25,10 +26,6 @@ bool IsHiddenFile(const base::FilePath& file_path) {
return file_path.BaseName().value()[0] == '.';
}
-bool ExcludeNoFilesFilter(const base::FilePath& file_path) {
- return true;
-}
-
// Creates a directory at |extract_dir|/|entry_path|, including any parents.
bool CreateDirectory(const base::FilePath& extract_dir,
const base::FilePath& entry_path) {
@@ -59,12 +56,13 @@ class DirectFileAccessor : public FileAccessor {
const base::FilePath absolute_path = src_dir_.Append(path);
if (base::DirectoryExists(absolute_path)) {
files->emplace_back();
- LOG(ERROR) << "Cannot open '" << path << "': It is a directory";
+ LOG(ERROR) << "Cannot open " << Redact(path) << ": It is a directory";
} else {
- files->emplace_back(absolute_path,
- base::File::FLAG_OPEN | base::File::FLAG_READ);
- LOG_IF(ERROR, !files->back().IsValid())
- << "Cannot open '" << path << "'";
+ const base::File& file = files->emplace_back(
+ absolute_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+ LOG_IF(ERROR, !file.IsValid())
+ << "Cannot open " << Redact(path) << ": "
+ << base::File::ErrorToString(file.error_details());
}
}
@@ -97,7 +95,7 @@ class DirectFileAccessor : public FileAccessor {
base::File::Info file_info;
if (!base::GetFileInfo(src_dir_.Append(path), &file_info)) {
- LOG(ERROR) << "Cannot get info of '" << path << "'";
+ PLOG(ERROR) << "Cannot get info of " << Redact(path);
return false;
}
@@ -125,7 +123,7 @@ bool Zip(const ZipParams& params) {
std::unique_ptr<internal::ZipWriter> zip_writer;
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
if (params.dest_fd != base::kInvalidPlatformFile) {
DCHECK(params.dest_file.empty());
zip_writer =
@@ -169,79 +167,73 @@ bool Zip(const ZipParams& params) {
return zip_writer->Close();
}
-bool Unzip(const base::FilePath& src_file, const base::FilePath& dest_dir) {
- return UnzipWithFilterCallback(
- src_file, dest_dir, base::BindRepeating(&ExcludeNoFilesFilter), true);
-}
-
-bool UnzipWithFilterCallback(const base::FilePath& src_file,
- const base::FilePath& dest_dir,
- FilterCallback filter_cb,
- bool log_skipped_files) {
+bool Unzip(const base::FilePath& src_file,
+ const base::FilePath& dest_dir,
+ UnzipOptions options) {
base::File file(src_file, base::File::FLAG_OPEN | base::File::FLAG_READ);
if (!file.IsValid()) {
- DLOG(WARNING) << "Cannot open '" << src_file << "'";
+ LOG(ERROR) << "Cannot open " << Redact(src_file) << ": "
+ << base::File::ErrorToString(file.error_details());
return false;
}
- return UnzipWithFilterAndWriters(
- file.GetPlatformFile(),
- base::BindRepeating(&CreateFilePathWriterDelegate, dest_dir),
- base::BindRepeating(&CreateDirectory, dest_dir), std::move(filter_cb),
- log_skipped_files);
+ return Unzip(file.GetPlatformFile(),
+ base::BindRepeating(&CreateFilePathWriterDelegate, dest_dir),
+ base::BindRepeating(&CreateDirectory, dest_dir),
+ std::move(options));
}
-bool UnzipWithFilterAndWriters(const base::PlatformFile& src_file,
- WriterFactory writer_factory,
- DirectoryCreator directory_creator,
- FilterCallback filter_cb,
- bool log_skipped_files) {
+bool Unzip(const base::PlatformFile& src_file,
+ WriterFactory writer_factory,
+ DirectoryCreator directory_creator,
+ UnzipOptions options) {
ZipReader reader;
+ reader.SetEncoding(std::move(options.encoding));
+ reader.SetPassword(std::move(options.password));
+
if (!reader.OpenFromPlatformFile(src_file)) {
- DLOG(WARNING) << "Cannot open '" << src_file << "'";
+ LOG(ERROR) << "Cannot open ZIP from file handle " << src_file;
return false;
}
- while (reader.HasMore()) {
- if (!reader.OpenCurrentEntryInZip()) {
- DLOG(WARNING) << "Failed to open the current file in zip";
+
+ while (const ZipReader::Entry* const entry = reader.Next()) {
+ if (entry->is_unsafe) {
+ LOG(ERROR) << "Found unsafe entry " << Redact(entry->path) << " in ZIP";
return false;
}
- const base::FilePath& entry_path = reader.current_entry_info()->file_path();
- if (reader.current_entry_info()->is_unsafe()) {
- DLOG(WARNING) << "Found an unsafe file in zip " << entry_path;
- return false;
+
+ if (options.filter && !options.filter.Run(entry->path)) {
+ VLOG(1) << "Skipped ZIP entry " << Redact(entry->path);
+ continue;
}
- if (filter_cb.Run(entry_path)) {
- if (reader.current_entry_info()->is_directory()) {
- if (!directory_creator.Run(entry_path))
- return false;
- } else {
- std::unique_ptr<WriterDelegate> writer = writer_factory.Run(entry_path);
- if (!reader.ExtractCurrentEntry(writer.get(),
- std::numeric_limits<uint64_t>::max())) {
- DLOG(WARNING) << "Failed to extract " << entry_path;
- return false;
- }
- }
- } else if (log_skipped_files) {
- DLOG(WARNING) << "Skipped file " << entry_path;
+
+ if (entry->is_directory) {
+ // It's a directory.
+ if (!directory_creator.Run(entry->path))
+ return false;
+
+ continue;
}
- if (!reader.AdvanceToNextEntry()) {
- DLOG(WARNING) << "Failed to advance to the next file";
+ // It's a file.
+ std::unique_ptr<WriterDelegate> writer = writer_factory.Run(entry->path);
+ if (!writer || !reader.ExtractCurrentEntry(writer.get())) {
+ LOG(ERROR) << "Cannot extract file " << Redact(entry->path)
+ << " from ZIP";
return false;
}
}
- return true;
+
+ return reader.ok();
}
bool ZipWithFilterCallback(const base::FilePath& src_dir,
const base::FilePath& dest_file,
- FilterCallback filter_cb) {
+ FilterCallback filter) {
DCHECK(base::DirectoryExists(src_dir));
return Zip({.src_dir = src_dir,
.dest_file = dest_file,
- .filter_callback = std::move(filter_cb)});
+ .filter_callback = std::move(filter)});
}
bool Zip(const base::FilePath& src_dir,
@@ -252,7 +244,7 @@ bool Zip(const base::FilePath& src_dir,
.include_hidden_files = include_hidden_files});
}
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
bool ZipFiles(const base::FilePath& src_dir,
Paths src_relative_paths,
int dest_fd) {
@@ -261,6 +253,6 @@ bool ZipFiles(const base::FilePath& src_dir,
.dest_fd = dest_fd,
.src_files = src_relative_paths});
}
-#endif // defined(OS_POSIX)
+#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip.h b/deps/v8/third_party/zlib/google/zip.h
index ecaecb1dc9..0928bbd12a 100644
--- a/deps/v8/third_party/zlib/google/zip.h
+++ b/deps/v8/third_party/zlib/google/zip.h
@@ -99,7 +99,7 @@ struct ZipParams {
// Either dest_file or dest_fd should be set, but not both.
base::FilePath dest_file;
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Destination file passed a file descriptor.
// Either dest_file or dest_fd should be set, but not both.
int dest_fd = base::kInvalidPlatformFile;
@@ -159,7 +159,7 @@ bool Zip(const base::FilePath& src_dir,
const base::FilePath& dest_file,
bool include_hidden_files);
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Zips files listed in |src_relative_paths| to destination specified by file
// descriptor |dest_fd|, without taking ownership of |dest_fd|. The paths listed
// in |src_relative_paths| are relative to the |src_dir| and will be used as the
@@ -168,35 +168,39 @@ bool Zip(const base::FilePath& src_dir,
bool ZipFiles(const base::FilePath& src_dir,
Paths src_relative_paths,
int dest_fd);
-#endif // defined(OS_POSIX)
-
-// Unzip the contents of zip_file into dest_dir.
-// For each file in zip_file, include it only if the callback |filter_cb|
-// returns true. Otherwise omit it.
-// If |log_skipped_files| is true, files skipped during extraction are printed
-// to debug log.
-bool UnzipWithFilterCallback(const base::FilePath& zip_file,
- const base::FilePath& dest_dir,
- FilterCallback filter_cb,
- bool log_skipped_files);
-
-// Unzip the contents of zip_file, using the writers provided by writer_factory.
-// For each file in zip_file, include it only if the callback |filter_cb|
-// returns true. Otherwise omit it.
-// If |log_skipped_files| is true, files skipped during extraction are printed
-// to debug log.
+#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// Options of the Unzip function, with valid default values.
+struct UnzipOptions {
+ // Encoding of entry paths in the ZIP archive. By default, paths are assumed
+ // to be in UTF-8.
+ std::string encoding;
+
+ // Only extract the entries for which |filter_cb| returns true. By default,
+ // everything gets extracted.
+ FilterCallback filter;
+
+ // Password to decrypt the encrypted files.
+ std::string password;
+};
+
typedef base::RepeatingCallback<std::unique_ptr<WriterDelegate>(
const base::FilePath&)>
WriterFactory;
+
typedef base::RepeatingCallback<bool(const base::FilePath&)> DirectoryCreator;
-bool UnzipWithFilterAndWriters(const base::PlatformFile& zip_file,
- WriterFactory writer_factory,
- DirectoryCreator directory_creator,
- FilterCallback filter_cb,
- bool log_skipped_files);
-
-// Unzip the contents of zip_file into dest_dir.
-bool Unzip(const base::FilePath& zip_file, const base::FilePath& dest_dir);
+
+// Unzips the contents of |zip_file|, using the writers provided by
+// |writer_factory|.
+bool Unzip(const base::PlatformFile& zip_file,
+ WriterFactory writer_factory,
+ DirectoryCreator directory_creator,
+ UnzipOptions options = {});
+
+// Unzips the contents of |zip_file| into |dest_dir|.
+bool Unzip(const base::FilePath& zip_file,
+ const base::FilePath& dest_dir,
+ UnzipOptions options = {});
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_internal.cc b/deps/v8/third_party/zlib/google/zip_internal.cc
index 00e9eefe6c..1adf2e6d0e 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.cc
+++ b/deps/v8/third_party/zlib/google/zip_internal.cc
@@ -342,7 +342,7 @@ zipFile OpenForZipping(const std::string& file_name_utf8, int append_flag) {
zip_func_ptrs);
}
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
zipFile OpenFdForZipping(int zip_fd, int append_flag) {
zlib_filefunc64_def zip_funcs;
FillFdOpenFileFunc(&zip_funcs, zip_fd);
diff --git a/deps/v8/third_party/zlib/google/zip_internal.h b/deps/v8/third_party/zlib/google/zip_internal.h
index c7feba692b..92833fa170 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.h
+++ b/deps/v8/third_party/zlib/google/zip_internal.h
@@ -54,7 +54,7 @@ unzFile PrepareMemoryForUnzipping(const std::string& data);
// Windows. |append_flag| will be passed to zipOpen2().
zipFile OpenForZipping(const std::string& file_name_utf8, int append_flag);
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Opens the file referred to by |zip_fd| for zipping. |append_flag| will be
// passed to zipOpen2().
zipFile OpenFdForZipping(int zip_fd, int append_flag);
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 53fa13fd99..33bf788374 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -4,15 +4,22 @@
#include "third_party/zlib/google/zip_reader.h"
+#include <algorithm>
#include <utility>
#include "base/bind.h"
+#include "base/check.h"
#include "base/files/file.h"
+#include "base/i18n/icu_string_conversions.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/strcat.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "build/build_config.h"
+#include "third_party/zlib/google/redact.h"
#include "third_party/zlib/google/zip_internal.h"
#if defined(USE_SYSTEM_MINIZIP)
@@ -24,116 +31,73 @@
#endif // defined(OS_WIN)
#endif // defined(USE_SYSTEM_MINIZIP)
-namespace zip {
+#if defined(OS_POSIX)
+#include <sys/stat.h>
+#endif
+namespace zip {
namespace {
-// StringWriterDelegate --------------------------------------------------------
+enum UnzipError : int;
+
+std::ostream& operator<<(std::ostream& out, UnzipError error) {
+#define SWITCH_ERR(X) \
+ case X: \
+ return out << #X;
+ switch (error) {
+ SWITCH_ERR(UNZ_OK);
+ SWITCH_ERR(UNZ_END_OF_LIST_OF_FILE);
+ SWITCH_ERR(UNZ_ERRNO);
+ SWITCH_ERR(UNZ_PARAMERROR);
+ SWITCH_ERR(UNZ_BADZIPFILE);
+ SWITCH_ERR(UNZ_INTERNALERROR);
+ SWITCH_ERR(UNZ_CRCERROR);
+ default:
+ return out << "UNZ" << static_cast<int>(error);
+ }
+#undef SWITCH_ERR
+}
-// A writer delegate that writes no more than |max_read_bytes| to a given
-// std::string.
+// A writer delegate that writes to a given string.
class StringWriterDelegate : public WriterDelegate {
public:
- StringWriterDelegate(size_t max_read_bytes, std::string* output);
-
- StringWriterDelegate(const StringWriterDelegate&) = delete;
- StringWriterDelegate& operator=(const StringWriterDelegate&) = delete;
-
- ~StringWriterDelegate() override;
+ explicit StringWriterDelegate(std::string* output) : output_(output) {}
// WriterDelegate methods:
-
- // Returns true.
- bool PrepareOutput() override;
-
- // Appends |num_bytes| bytes from |data| to the output string. Returns false
- // if |num_bytes| will cause the string to exceed |max_read_bytes|.
- bool WriteBytes(const char* data, int num_bytes) override;
-
- void SetTimeModified(const base::Time& time) override;
+ bool WriteBytes(const char* data, int num_bytes) override {
+ output_->append(data, num_bytes);
+ return true;
+ }
private:
- size_t max_read_bytes_;
- std::string* output_;
+ std::string* const output_;
};
-StringWriterDelegate::StringWriterDelegate(size_t max_read_bytes,
- std::string* output)
- : max_read_bytes_(max_read_bytes),
- output_(output) {
-}
-
-StringWriterDelegate::~StringWriterDelegate() {
-}
-
-bool StringWriterDelegate::PrepareOutput() {
- return true;
-}
-
-bool StringWriterDelegate::WriteBytes(const char* data, int num_bytes) {
- if (output_->size() + num_bytes > max_read_bytes_)
- return false;
- output_->append(data, num_bytes);
- return true;
-}
-
-void StringWriterDelegate::SetTimeModified(const base::Time& time) {
- // Do nothing.
+#if defined(OS_POSIX)
+void SetPosixFilePermissions(int fd, int mode) {
+ base::stat_wrapper_t sb;
+ if (base::File::Fstat(fd, &sb)) {
+ return;
+ }
+ mode_t new_mode = sb.st_mode;
+ // Transfer the executable bit only if the file is readable.
+ if ((sb.st_mode & S_IRUSR) == S_IRUSR && (mode & S_IXUSR) == S_IXUSR) {
+ new_mode |= S_IXUSR;
+ }
+ if ((sb.st_mode & S_IRGRP) == S_IRGRP && (mode & S_IXGRP) == S_IXGRP) {
+ new_mode |= S_IXGRP;
+ }
+ if ((sb.st_mode & S_IROTH) == S_IROTH && (mode & S_IXOTH) == S_IXOTH) {
+ new_mode |= S_IXOTH;
+ }
+ if (new_mode != sb.st_mode) {
+ fchmod(fd, new_mode);
+ }
}
+#endif
} // namespace
-// TODO(satorux): The implementation assumes that file names in zip files
-// are encoded in UTF-8. This is true for zip files created by Zip()
-// function in zip.h, but not true for user-supplied random zip files.
-ZipReader::EntryInfo::EntryInfo(const std::string& file_name_in_zip,
- const unz_file_info& raw_file_info)
- : file_path_(base::FilePath::FromUTF8Unsafe(file_name_in_zip)),
- is_directory_(false),
- is_unsafe_(false),
- is_encrypted_(false) {
- original_size_ = raw_file_info.uncompressed_size;
-
- // Directory entries in zip files end with "/".
- is_directory_ = base::EndsWith(file_name_in_zip, "/",
- base::CompareCase::INSENSITIVE_ASCII);
-
- // Check the file name here for directory traversal issues.
- is_unsafe_ = file_path_.ReferencesParent();
-
- // We also consider that the file name is unsafe, if it's invalid UTF-8.
- std::u16string file_name_utf16;
- if (!base::UTF8ToUTF16(file_name_in_zip.data(), file_name_in_zip.size(),
- &file_name_utf16)) {
- is_unsafe_ = true;
- }
-
- // We also consider that the file name is unsafe, if it's absolute.
- // On Windows, IsAbsolute() returns false for paths starting with "/".
- if (file_path_.IsAbsolute() ||
- base::StartsWith(file_name_in_zip, "/",
- base::CompareCase::INSENSITIVE_ASCII))
- is_unsafe_ = true;
-
- // Whether the file is encrypted is bit 0 of the flag.
- is_encrypted_ = raw_file_info.flag & 1;
-
- // Construct the last modified time. The timezone info is not present in
- // zip files, so we construct the time as local time.
- base::Time::Exploded exploded_time = {}; // Zero-clear.
- exploded_time.year = raw_file_info.tmu_date.tm_year;
- // The month in zip file is 0-based, whereas ours is 1-based.
- exploded_time.month = raw_file_info.tmu_date.tm_mon + 1;
- exploded_time.day_of_month = raw_file_info.tmu_date.tm_mday;
- exploded_time.hour = raw_file_info.tmu_date.tm_hour;
- exploded_time.minute = raw_file_info.tmu_date.tm_min;
- exploded_time.second = raw_file_info.tmu_date.tm_sec;
- exploded_time.millisecond = 0;
-
- if (!base::Time::FromUTCExploded(exploded_time, &last_modified_))
- last_modified_ = base::Time::UnixEpoch();
-}
-
ZipReader::ZipReader() {
Reset();
}
@@ -142,13 +106,14 @@ ZipReader::~ZipReader() {
Close();
}
-bool ZipReader::Open(const base::FilePath& zip_file_path) {
+bool ZipReader::Open(const base::FilePath& zip_path) {
DCHECK(!zip_file_);
// Use of "Unsafe" function does not look good, but there is no way to do
// this safely on Linux. See file_util.h for details.
- zip_file_ = internal::OpenForUnzipping(zip_file_path.AsUTF8Unsafe());
+ zip_file_ = internal::OpenForUnzipping(zip_path.AsUTF8Unsafe());
if (!zip_file_) {
+ LOG(ERROR) << "Cannot open ZIP archive " << Redact(zip_path);
return false;
}
@@ -164,6 +129,7 @@ bool ZipReader::OpenFromPlatformFile(base::PlatformFile zip_fd) {
zip_file_ = internal::OpenHandleForUnzipping(zip_fd);
#endif
if (!zip_file_) {
+ LOG(ERROR) << "Cannot open ZIP from file handle " << zip_fd;
return false;
}
@@ -179,107 +145,183 @@ bool ZipReader::OpenFromString(const std::string& data) {
void ZipReader::Close() {
if (zip_file_) {
- unzClose(zip_file_);
+ if (const int err = unzClose(zip_file_); err != UNZ_OK) {
+ LOG(ERROR) << "Error while closing ZIP archive: " << UnzipError(err);
+ }
}
Reset();
}
-bool ZipReader::HasMore() {
- return !reached_end_;
-}
-
-bool ZipReader::AdvanceToNextEntry() {
+const ZipReader::Entry* ZipReader::Next() {
DCHECK(zip_file_);
- // Should not go further if we already reached the end.
if (reached_end_)
- return false;
+ return nullptr;
- unz_file_pos position = {};
- if (unzGetFilePos(zip_file_, &position) != UNZ_OK)
- return false;
- const int current_entry_index = position.num_of_file;
- // If we are currently at the last entry, then the next position is the
- // end of the zip file, so mark that we reached the end.
- if (current_entry_index + 1 == num_entries_) {
- reached_end_ = true;
- } else {
- DCHECK_LT(current_entry_index + 1, num_entries_);
- if (unzGoToNextFile(zip_file_) != UNZ_OK) {
- return false;
+ DCHECK(ok_);
+
+ // Move to the next entry if we're not trying to open the first entry.
+ if (next_index_ > 0) {
+ if (const int err = unzGoToNextFile(zip_file_); err != UNZ_OK) {
+ reached_end_ = true;
+ if (err != UNZ_END_OF_LIST_OF_FILE) {
+ LOG(ERROR) << "Cannot go to next entry in ZIP: " << UnzipError(err);
+ ok_ = false;
+ }
+ return nullptr;
}
}
- current_entry_info_.reset();
- return true;
+
+ next_index_++;
+
+ if (!OpenEntry()) {
+ reached_end_ = true;
+ ok_ = false;
+ return nullptr;
+ }
+
+ return &entry_;
}
-bool ZipReader::OpenCurrentEntryInZip() {
+bool ZipReader::OpenEntry() {
DCHECK(zip_file_);
- unz_file_info raw_file_info = {};
- char raw_file_name_in_zip[internal::kZipMaxPath] = {};
- const int result = unzGetCurrentFileInfo(zip_file_,
- &raw_file_info,
- raw_file_name_in_zip,
- sizeof(raw_file_name_in_zip) - 1,
- NULL, // extraField.
- 0, // extraFieldBufferSize.
- NULL, // szComment.
- 0); // commentBufferSize.
- if (result != UNZ_OK)
+ // Get entry info.
+ unz_file_info64 info = {};
+ char path_in_zip[internal::kZipMaxPath] = {};
+ if (const int err = unzGetCurrentFileInfo64(zip_file_, &info, path_in_zip,
+ sizeof(path_in_zip) - 1, nullptr,
+ 0, nullptr, 0);
+ err != UNZ_OK) {
+ LOG(ERROR) << "Cannot get entry from ZIP: " << UnzipError(err);
return false;
- if (raw_file_name_in_zip[0] == '\0')
+ }
+
+ entry_.path_in_original_encoding = path_in_zip;
+
+ // Convert path from original encoding to Unicode.
+ std::u16string path_in_utf16;
+ const char* const encoding = encoding_.empty() ? "UTF-8" : encoding_.c_str();
+ if (!base::CodepageToUTF16(entry_.path_in_original_encoding, encoding,
+ base::OnStringConversionError::SUBSTITUTE,
+ &path_in_utf16)) {
+ LOG(ERROR) << "Cannot convert path from encoding " << encoding;
return false;
- current_entry_info_.reset(
- new EntryInfo(raw_file_name_in_zip, raw_file_info));
+ }
+
+ entry_.path = base::FilePath::FromUTF16Unsafe(path_in_utf16);
+ entry_.original_size = info.uncompressed_size;
+
+ // Directory entries in ZIP have a path ending with "/".
+ entry_.is_directory = base::EndsWith(path_in_utf16, u"/");
+
+ // Check the entry path for directory traversal issues. We consider entry
+ // paths unsafe if they are absolute or if they contain "..". On Windows,
+ // IsAbsolute() returns false for paths starting with "/".
+ entry_.is_unsafe = entry_.path.ReferencesParent() ||
+ entry_.path.IsAbsolute() ||
+ base::StartsWith(path_in_utf16, u"/");
+
+ // The file content of this entry is encrypted if flag bit 0 is set.
+ entry_.is_encrypted = info.flag & 1;
+
+ // Construct the last modified time. The timezone info is not present in ZIP
+ // archives, so we construct the time as UTC.
+ base::Time::Exploded exploded_time = {};
+ exploded_time.year = info.tmu_date.tm_year;
+ exploded_time.month = info.tmu_date.tm_mon + 1; // 0-based vs 1-based
+ exploded_time.day_of_month = info.tmu_date.tm_mday;
+ exploded_time.hour = info.tmu_date.tm_hour;
+ exploded_time.minute = info.tmu_date.tm_min;
+ exploded_time.second = info.tmu_date.tm_sec;
+ exploded_time.millisecond = 0;
+
+ if (!base::Time::FromUTCExploded(exploded_time, &entry_.last_modified))
+ entry_.last_modified = base::Time::UnixEpoch();
+
+#if defined(OS_POSIX)
+ entry_.posix_mode = (info.external_fa >> 16L) & (S_IRWXU | S_IRWXG | S_IRWXO);
+#else
+ entry_.posix_mode = 0;
+#endif
+
return true;
}
bool ZipReader::ExtractCurrentEntry(WriterDelegate* delegate,
uint64_t num_bytes_to_extract) const {
DCHECK(zip_file_);
-
- const int open_result = unzOpenCurrentFile(zip_file_);
- if (open_result != UNZ_OK)
+ DCHECK_LT(0, next_index_);
+ DCHECK(ok_);
+ DCHECK(!reached_end_);
+
+ // Use password only for encrypted files. For non-encrypted files, no password
+ // is needed, and must be nullptr.
+ const char* const password =
+ entry_.is_encrypted ? password_.c_str() : nullptr;
+ if (const int err = unzOpenCurrentFilePassword(zip_file_, password);
+ err != UNZ_OK) {
+ LOG(ERROR) << "Cannot open file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(err);
return false;
+ }
+ DCHECK(delegate);
if (!delegate->PrepareOutput())
return false;
- std::unique_ptr<char[]> buf(new char[internal::kZipBufSize]);
uint64_t remaining_capacity = num_bytes_to_extract;
bool entire_file_extracted = false;
while (remaining_capacity > 0) {
+ char buf[internal::kZipBufSize];
const int num_bytes_read =
- unzReadCurrentFile(zip_file_, buf.get(), internal::kZipBufSize);
+ unzReadCurrentFile(zip_file_, buf, internal::kZipBufSize);
if (num_bytes_read == 0) {
entire_file_extracted = true;
break;
- } else if (num_bytes_read < 0) {
- // If num_bytes_read < 0, then it's a specific UNZ_* error code.
+ }
+
+ if (num_bytes_read < 0) {
+ LOG(ERROR) << "Cannot read file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(num_bytes_read);
break;
- } else if (num_bytes_read > 0) {
- uint64_t num_bytes_to_write = std::min<uint64_t>(
- remaining_capacity, base::checked_cast<uint64_t>(num_bytes_read));
- if (!delegate->WriteBytes(buf.get(), num_bytes_to_write))
- break;
- if (remaining_capacity == base::checked_cast<uint64_t>(num_bytes_read)) {
- // Ensures function returns true if the entire file has been read.
- entire_file_extracted =
- (unzReadCurrentFile(zip_file_, buf.get(), 1) == 0);
- }
- CHECK_GE(remaining_capacity, num_bytes_to_write);
- remaining_capacity -= num_bytes_to_write;
}
+
+ DCHECK_LT(0, num_bytes_read);
+ CHECK_LE(num_bytes_read, internal::kZipBufSize);
+
+ uint64_t num_bytes_to_write = std::min<uint64_t>(
+ remaining_capacity, base::checked_cast<uint64_t>(num_bytes_read));
+ if (!delegate->WriteBytes(buf, num_bytes_to_write))
+ break;
+
+ if (remaining_capacity == base::checked_cast<uint64_t>(num_bytes_read)) {
+ // Ensures function returns true if the entire file has been read.
+ const int n = unzReadCurrentFile(zip_file_, buf, 1);
+ entire_file_extracted = (n == 0);
+ LOG_IF(ERROR, n < 0) << "Cannot read file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(n);
+ }
+
+ CHECK_GE(remaining_capacity, num_bytes_to_write);
+ remaining_capacity -= num_bytes_to_write;
}
- unzCloseCurrentFile(zip_file_);
+ if (const int err = unzCloseCurrentFile(zip_file_); err != UNZ_OK) {
+ LOG(ERROR) << "Cannot extract file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(err);
+ entire_file_extracted = false;
+ }
- if (entire_file_extracted &&
- current_entry_info()->last_modified() != base::Time::UnixEpoch()) {
- delegate->SetTimeModified(current_entry_info()->last_modified());
+ if (entire_file_extracted) {
+ delegate->SetPosixFilePermissions(entry_.posix_mode);
+ if (entry_.last_modified != base::Time::UnixEpoch()) {
+ delegate->SetTimeModified(entry_.last_modified);
+ }
+ } else {
+ delegate->OnError();
}
return entire_file_extracted;
@@ -289,25 +331,33 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
const base::FilePath& output_file_path,
SuccessCallback success_callback,
FailureCallback failure_callback,
- const ProgressCallback& progress_callback) {
+ ProgressCallback progress_callback) {
DCHECK(zip_file_);
- DCHECK(current_entry_info_.get());
+ DCHECK_LT(0, next_index_);
+ DCHECK(ok_);
+ DCHECK(!reached_end_);
// If this is a directory, just create it and return.
- if (current_entry_info()->is_directory()) {
+ if (entry_.is_directory) {
if (base::CreateDirectory(output_file_path)) {
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(success_callback));
} else {
- DVLOG(1) << "Unzip failed: unable to create directory.";
+ LOG(ERROR) << "Cannot create directory " << Redact(output_file_path);
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(failure_callback));
}
return;
}
- if (unzOpenCurrentFile(zip_file_) != UNZ_OK) {
- DVLOG(1) << "Unzip failed: unable to open current zip entry.";
+ // Use password only for encrypted files. For non-encrypted files, no password
+ // is needed, and must be nullptr.
+ const char* const password =
+ entry_.is_encrypted ? password_.c_str() : nullptr;
+ if (const int err = unzOpenCurrentFilePassword(zip_file_, password);
+ err != UNZ_OK) {
+ LOG(ERROR) << "Cannot open file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(err);
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
@@ -315,7 +365,7 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
base::FilePath output_dir_path = output_file_path.DirName();
if (!base::CreateDirectory(output_dir_path)) {
- DVLOG(1) << "Unzip failed: unable to create containing directory.";
+ LOG(ERROR) << "Cannot create directory " << Redact(output_dir_path);
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
@@ -325,8 +375,7 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
base::File output_file(output_file_path, flags);
if (!output_file.IsValid()) {
- DVLOG(1) << "Unzip failed: unable to create platform file at "
- << output_file_path.value();
+ LOG(ERROR) << "Cannot create file " << Redact(output_file_path);
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
@@ -336,7 +385,7 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
FROM_HERE,
base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
std::move(output_file), std::move(success_callback),
- std::move(failure_callback), progress_callback,
+ std::move(failure_callback), std::move(progress_callback),
0 /* initial offset */));
}
@@ -344,120 +393,121 @@ bool ZipReader::ExtractCurrentEntryToString(uint64_t max_read_bytes,
std::string* output) const {
DCHECK(output);
DCHECK(zip_file_);
+ DCHECK_LT(0, next_index_);
+ DCHECK(ok_);
+ DCHECK(!reached_end_);
- if (max_read_bytes == 0) {
- output->clear();
+ output->clear();
+
+ if (max_read_bytes == 0)
return true;
- }
- if (current_entry_info()->is_directory()) {
- output->clear();
+ if (entry_.is_directory)
return true;
- }
- // The original_size() is the best hint for the real size, so it saves
- // doing reallocations for the common case when the uncompressed size is
- // correct. However, we need to assume that the uncompressed size could be
- // incorrect therefore this function needs to read as much data as possible.
- std::string contents;
- contents.reserve(
- static_cast<size_t>(std::min(base::checked_cast<int64_t>(max_read_bytes),
- current_entry_info()->original_size())));
-
- StringWriterDelegate writer(max_read_bytes, &contents);
- if (!ExtractCurrentEntry(&writer, max_read_bytes)) {
- if (contents.length() < max_read_bytes) {
- // There was an error in extracting entry. If ExtractCurrentEntry()
- // returns false, the entire file was not read - in which case
- // contents.length() should equal |max_read_bytes| unless an error
- // occurred which caused extraction to be aborted.
- output->clear();
- } else {
- // |num_bytes| is less than the length of current entry.
- output->swap(contents);
- }
- return false;
- }
- output->swap(contents);
- return true;
+ // The original_size is the best hint for the real size, so it saves doing
+ // reallocations for the common case when the uncompressed size is correct.
+ // However, we need to assume that the uncompressed size could be incorrect
+ // therefore this function needs to read as much data as possible.
+ output->reserve(base::checked_cast<size_t>(std::min<uint64_t>(
+ max_read_bytes, base::checked_cast<uint64_t>(entry_.original_size))));
+
+ StringWriterDelegate writer(output);
+ return ExtractCurrentEntry(&writer, max_read_bytes);
}
bool ZipReader::OpenInternal() {
DCHECK(zip_file_);
unz_global_info zip_info = {}; // Zero-clear.
- if (unzGetGlobalInfo(zip_file_, &zip_info) != UNZ_OK) {
+ if (const int err = unzGetGlobalInfo(zip_file_, &zip_info); err != UNZ_OK) {
+ LOG(ERROR) << "Cannot get ZIP info: " << UnzipError(err);
return false;
}
- num_entries_ = zip_info.number_entry;
- if (num_entries_ < 0)
- return false;
- // We are already at the end if the zip file is empty.
- reached_end_ = (num_entries_ == 0);
+ num_entries_ = zip_info.number_entry;
+ reached_end_ = (num_entries_ <= 0);
+ ok_ = true;
return true;
}
void ZipReader::Reset() {
- zip_file_ = NULL;
+ zip_file_ = nullptr;
num_entries_ = 0;
- reached_end_ = false;
- current_entry_info_.reset();
+ next_index_ = 0;
+ reached_end_ = true;
+ ok_ = false;
+ entry_ = {};
}
void ZipReader::ExtractChunk(base::File output_file,
SuccessCallback success_callback,
FailureCallback failure_callback,
- const ProgressCallback& progress_callback,
- const int64_t offset) {
+ ProgressCallback progress_callback,
+ int64_t offset) {
char buffer[internal::kZipBufSize];
- const int num_bytes_read = unzReadCurrentFile(zip_file_,
- buffer,
- internal::kZipBufSize);
+ const int num_bytes_read =
+ unzReadCurrentFile(zip_file_, buffer, internal::kZipBufSize);
if (num_bytes_read == 0) {
- unzCloseCurrentFile(zip_file_);
- std::move(success_callback).Run();
- } else if (num_bytes_read < 0) {
- DVLOG(1) << "Unzip failed: error while reading zipfile "
- << "(" << num_bytes_read << ")";
- std::move(failure_callback).Run();
- } else {
- if (num_bytes_read != output_file.Write(offset, buffer, num_bytes_read)) {
- DVLOG(1) << "Unzip failed: unable to write all bytes to target.";
+ if (const int err = unzCloseCurrentFile(zip_file_); err != UNZ_OK) {
+ LOG(ERROR) << "Cannot extract file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(err);
std::move(failure_callback).Run();
return;
}
- int64_t current_progress = offset + num_bytes_read;
+ std::move(success_callback).Run();
+ return;
+ }
- progress_callback.Run(current_progress);
+ if (num_bytes_read < 0) {
+ LOG(ERROR) << "Cannot read file " << Redact(entry_.path)
+ << " from ZIP: " << UnzipError(num_bytes_read);
+ std::move(failure_callback).Run();
+ return;
+ }
- base::SequencedTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
- std::move(output_file), std::move(success_callback),
- std::move(failure_callback), progress_callback,
- current_progress));
+ if (num_bytes_read != output_file.Write(offset, buffer, num_bytes_read)) {
+ LOG(ERROR) << "Cannot write " << num_bytes_read
+ << " bytes to file at offset " << offset;
+ std::move(failure_callback).Run();
+ return;
}
+
+ offset += num_bytes_read;
+ progress_callback.Run(offset);
+
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
+ std::move(output_file), std::move(success_callback),
+ std::move(failure_callback), std::move(progress_callback),
+ offset));
}
// FileWriterDelegate ----------------------------------------------------------
-FileWriterDelegate::FileWriterDelegate(base::File* file) : file_(file) {}
-
-FileWriterDelegate::FileWriterDelegate(std::unique_ptr<base::File> file)
- : file_(file.get()), owned_file_(std::move(file)) {}
+FileWriterDelegate::FileWriterDelegate(base::File* file) : file_(file) {
+ DCHECK(file_);
+}
-FileWriterDelegate::~FileWriterDelegate() {
- if (!file_->SetLength(file_length_)) {
- DVPLOG(1) << "Failed updating length of written file";
- }
+FileWriterDelegate::FileWriterDelegate(base::File owned_file)
+ : owned_file_(std::move(owned_file)) {
+ DCHECK_EQ(file_, &owned_file_);
}
+FileWriterDelegate::~FileWriterDelegate() {}
+
bool FileWriterDelegate::PrepareOutput() {
- return file_->Seek(base::File::FROM_BEGIN, 0) >= 0;
+ DCHECK(file_);
+ const bool ok = file_->IsValid();
+ if (ok) {
+ DCHECK_EQ(file_->GetLength(), 0)
+ << " The output file should be initially empty";
+ }
+ return ok;
}
bool FileWriterDelegate::WriteBytes(const char* data, int num_bytes) {
@@ -471,11 +521,22 @@ void FileWriterDelegate::SetTimeModified(const base::Time& time) {
file_->SetTimes(base::Time::Now(), time);
}
+void FileWriterDelegate::SetPosixFilePermissions(int mode) {
+#if defined(OS_POSIX)
+ zip::SetPosixFilePermissions(file_->GetPlatformFile(), mode);
+#endif
+}
+
+void FileWriterDelegate::OnError() {
+ file_length_ = 0;
+ file_->SetLength(0);
+}
+
// FilePathWriterDelegate ------------------------------------------------------
-FilePathWriterDelegate::FilePathWriterDelegate(
- const base::FilePath& output_file_path)
- : output_file_path_(output_file_path) {}
+FilePathWriterDelegate::FilePathWriterDelegate(base::FilePath output_file_path)
+ : FileWriterDelegate(base::File()),
+ output_file_path_(std::move(output_file_path)) {}
FilePathWriterDelegate::~FilePathWriterDelegate() {}
@@ -485,18 +546,19 @@ bool FilePathWriterDelegate::PrepareOutput() {
if (!base::CreateDirectory(output_file_path_.DirName()))
return false;
- file_.Initialize(output_file_path_,
- base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
- return file_.IsValid();
+ owned_file_.Initialize(output_file_path_, base::File::FLAG_CREATE_ALWAYS |
+ base::File::FLAG_WRITE);
+ return FileWriterDelegate::PrepareOutput();
}
-bool FilePathWriterDelegate::WriteBytes(const char* data, int num_bytes) {
- return num_bytes == file_.WriteAtCurrentPos(data, num_bytes);
-}
+void FilePathWriterDelegate::OnError() {
+ FileWriterDelegate::OnError();
+ owned_file_.Close();
-void FilePathWriterDelegate::SetTimeModified(const base::Time& time) {
- file_.Close();
- base::TouchFile(output_file_path_, base::Time::Now(), time);
+ if (!base::DeleteFile(output_file_path_)) {
+ LOG(ERROR) << "Cannot delete partially extracted file "
+ << Redact(output_file_path_);
+ }
}
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_reader.h b/deps/v8/third_party/zlib/google/zip_reader.h
index e1ca7aa417..6ca9cd9788 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.h
+++ b/deps/v8/third_party/zlib/google/zip_reader.h
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <limits>
#include <memory>
#include <string>
@@ -15,6 +16,7 @@
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/memory/weak_ptr.h"
+#include "base/numerics/safe_conversions.h"
#include "base/time/time.h"
#if defined(USE_SYSTEM_MINIZIP)
@@ -33,33 +35,47 @@ class WriterDelegate {
// Invoked once before any data is streamed out to pave the way (e.g., to open
// the output file). Return false on failure to cancel extraction.
- virtual bool PrepareOutput() = 0;
+ virtual bool PrepareOutput() { return true; }
// Invoked to write the next chunk of data. Return false on failure to cancel
// extraction.
- virtual bool WriteBytes(const char* data, int num_bytes) = 0;
+ virtual bool WriteBytes(const char* data, int num_bytes) { return true; }
// Sets the last-modified time of the data.
- virtual void SetTimeModified(const base::Time& time) = 0;
+ virtual void SetTimeModified(const base::Time& time) {}
+
+ // Called with the POSIX file permissions of the data; POSIX implementations
+ // may apply some of the permissions (for example, the executable bit) to the
+ // output file.
+ virtual void SetPosixFilePermissions(int mode) {}
+
+ // Called if an error occurred while extracting the file. The WriterDelegate
+ // can then remove and clean up the partially extracted data.
+ virtual void OnError() {}
};
-// This class is used for reading zip files. A typical use case of this
-// class is to scan entries in a zip file and extract them. The code will
-// look like:
+// This class is used for reading ZIP archives. A typical use case of this class
+// is to scan entries in a ZIP archive and extract them. The code will look
+// like:
//
// ZipReader reader;
-// reader.Open(zip_file_path);
-// while (reader.HasMore()) {
-// reader.OpenCurrentEntryInZip();
-// const base::FilePath& entry_path =
-// reader.current_entry_info()->file_path();
-// auto writer = CreateFilePathWriterDelegate(extract_dir, entry_path);
-// reader.ExtractCurrentEntry(writer, std::numeric_limits<uint64_t>::max());
-// reader.AdvanceToNextEntry();
+// if (!reader.Open(zip_path)) {
+// // Cannot open
+// return;
+// }
+//
+// while (const ZipReader::entry* entry = reader.Next()) {
+// auto writer = CreateFilePathWriterDelegate(extract_dir, entry->path);
+// if (!reader.ExtractCurrentEntry(writer)) {
+// // Cannot extract
+// return;
+// }
// }
//
-// For simplicity, error checking is omitted in the example code above. The
-// production code should check return values from all of these functions.
+// if (!reader.ok()) {
+// // Error while enumerating entries
+// return;
+// }
//
class ZipReader {
public:
@@ -71,54 +87,51 @@ class ZipReader {
// of bytes that have been processed so far.
using ProgressCallback = base::RepeatingCallback<void(int64_t)>;
- // This class represents information of an entry (file or directory) in
- // a zip file.
- class EntryInfo {
- public:
- EntryInfo(const std::string& filename_in_zip,
- const unz_file_info& raw_file_info);
+ // Information of an entry (file or directory) in a ZIP archive.
+ struct Entry {
+ // Path of this entry, in its original encoding as it is stored in the ZIP
+ // archive. The encoding is not specified here. It might or might not be
+ // UTF-8, and the caller needs to use other means to determine the encoding
+ // if it wants to interpret this path correctly.
+ std::string path_in_original_encoding;
+
+ // Path of the entry, converted to Unicode. This path is usually relative
+ // (eg "foo/bar.txt"), but it can also be absolute (eg "/foo/bar.txt") or
+ // parent-relative (eg "../foo/bar.txt"). See also |is_unsafe|.
+ base::FilePath path;
+
+ // Size of the original uncompressed file, or 0 if the entry is a directory.
+ // This value should not be trusted, because it is stored as metadata in the
+ // ZIP archive and can be different from the real uncompressed size.
+ int64_t original_size;
+
+ // Last modified time. If the timestamp stored in the ZIP archive is not
+ // valid, the Unix epoch will be returned.
+ //
+ // The timestamp stored in the ZIP archive uses the MS-DOS date and time
+ // format.
+ //
+ // http://msdn.microsoft.com/en-us/library/ms724247(v=vs.85).aspx
+ //
+ // As such the following limitations apply:
+ // * Only years from 1980 to 2107 can be represented.
+ // * The timestamp has a 2-second resolution.
+ // * There is no timezone information, so the time is interpreted as UTC.
+ base::Time last_modified;
- EntryInfo(const EntryInfo&) = delete;
- EntryInfo& operator=(const EntryInfo&) = delete;
+ // True if the entry is a directory.
+ // False if the entry is a file.
+ bool is_directory;
- // Returns the file path. The path is usually relative like
- // "foo/bar.txt", but if it's absolute, is_unsafe() returns true.
- const base::FilePath& file_path() const { return file_path_; }
+ // True if the entry path is considered unsafe, ie if it is absolute or if
+ // it contains "..".
+ bool is_unsafe;
- // Returns the size of the original file (i.e. after uncompressed).
- // Returns 0 if the entry is a directory.
- // Note: this value should not be trusted, because it is stored as metadata
- // in the zip archive and can be different from the real uncompressed size.
- int64_t original_size() const { return original_size_; }
+ // True if the file content is encrypted.
+ bool is_encrypted;
- // Returns the last modified time. If the time stored in the zip file was
- // not valid, the unix epoch will be returned.
- //
- // The time stored in the zip archive uses the MS-DOS date and time format.
- // http://msdn.microsoft.com/en-us/library/ms724247(v=vs.85).aspx
- // As such the following limitations apply:
- // * only years from 1980 to 2107 can be represented.
- // * the time stamp has a 2 second resolution.
- // * there's no timezone information, so the time is interpreted as local.
- base::Time last_modified() const { return last_modified_; }
-
- // Returns true if the entry is a directory.
- bool is_directory() const { return is_directory_; }
-
- // Returns true if the entry is unsafe, like having ".." or invalid
- // UTF-8 characters in its file name, or the file path is absolute.
- bool is_unsafe() const { return is_unsafe_; }
-
- // Returns true if the entry is encrypted.
- bool is_encrypted() const { return is_encrypted_; }
-
- private:
- const base::FilePath file_path_;
- int64_t original_size_;
- base::Time last_modified_;
- bool is_directory_;
- bool is_unsafe_;
- bool is_encrypted_;
+ // Entry POSIX permissions (POSIX systems only).
+ int posix_mode;
};
ZipReader();
@@ -128,11 +141,11 @@ class ZipReader {
~ZipReader();
- // Opens the zip file specified by |zip_file_path|. Returns true on
+ // Opens the ZIP archive specified by |zip_path|. Returns true on
// success.
- bool Open(const base::FilePath& zip_file_path);
+ bool Open(const base::FilePath& zip_path);
- // Opens the zip file referred to by the platform file |zip_fd|, without
+ // Opens the ZIP archive referred to by the platform file |zip_fd|, without
// taking ownership of |zip_fd|. Returns true on success.
bool OpenFromPlatformFile(base::PlatformFile zip_fd);
@@ -141,72 +154,94 @@ class ZipReader {
// string until it finishes extracting files.
bool OpenFromString(const std::string& data);
- // Closes the currently opened zip file. This function is called in the
+ // Closes the currently opened ZIP archive. This function is called in the
// destructor of the class, so you usually don't need to call this.
void Close();
- // Returns true if there is at least one entry to read. This function is
- // used to scan entries with AdvanceToNextEntry(), like:
- //
- // while (reader.HasMore()) {
- // // Do something with the current file here.
- // reader.AdvanceToNextEntry();
- // }
- bool HasMore();
+ // Sets the encoding of entry paths in the ZIP archive.
+ // By default, paths are assumed to be in UTF-8.
+ void SetEncoding(std::string encoding) { encoding_ = std::move(encoding); }
- // Advances the next entry. Returns true on success.
- bool AdvanceToNextEntry();
+ // Sets the decryption password that will be used to decrypt encrypted file in
+ // the ZIP archive.
+ void SetPassword(std::string password) { password_ = std::move(password); }
- // Opens the current entry in the zip file. On success, returns true and
- // updates the the current entry state (i.e. current_entry_info() is
- // updated). This function should be called before operations over the
- // current entry like ExtractCurrentEntryToFile().
+ // Gets the next entry. Returns null if there is no more entry, or if an error
+ // occurred while scanning entries. The returned Entry is owned by this
+ // ZipReader, and is valid until Next() is called again or until this
+ // ZipReader is closed.
+ //
+ // This function should be called before operations over the current entry
+ // like ExtractCurrentEntryToFile().
//
- // Note that there is no CloseCurrentEntryInZip(). The the current entry
- // state is reset automatically as needed.
- bool OpenCurrentEntryInZip();
+ // while (const ZipReader::Entry* entry = reader.Next()) {
+ // // Do something with the current entry here.
+ // ...
+ // }
+ //
+ // // Finished scanning entries.
+ // // Check if the scanning stopped because of an error.
+ // if (!reader.ok()) {
+ // // There was an error.
+ // ...
+ // }
+ const Entry* Next();
+
+ // Returns true if the enumeration of entries was successful, or false if it
+ // stopped because of an error.
+ bool ok() const { return ok_; }
// Extracts |num_bytes_to_extract| bytes of the current entry to |delegate|,
- // starting from the beginning of the entry. Return value specifies whether
- // the entire file was extracted.
+ // starting from the beginning of the entry.
+ //
+ // Returns true if the entire file was extracted without error.
+ //
+ // Precondition: Next() returned a non-null Entry.
bool ExtractCurrentEntry(WriterDelegate* delegate,
- uint64_t num_bytes_to_extract) const;
+ uint64_t num_bytes_to_extract =
+ std::numeric_limits<uint64_t>::max()) const;
- // Asynchronously extracts the current entry to the given output file path.
- // If the current entry is a directory it just creates the directory
- // synchronously instead. OpenCurrentEntryInZip() must be called beforehand.
- // success_callback will be called on success and failure_callback will be
- // called on failure. progress_callback will be called at least once.
+ // Asynchronously extracts the current entry to the given output file path. If
+ // the current entry is a directory it just creates the directory
+ // synchronously instead.
+ //
+ // |success_callback| will be called on success and |failure_callback| will be
+ // called on failure. |progress_callback| will be called at least once.
// Callbacks will be posted to the current MessageLoop in-order.
+ //
+ // Precondition: Next() returned a non-null Entry.
void ExtractCurrentEntryToFilePathAsync(
const base::FilePath& output_file_path,
SuccessCallback success_callback,
FailureCallback failure_callback,
- const ProgressCallback& progress_callback);
+ ProgressCallback progress_callback);
// Extracts the current entry into memory. If the current entry is a
- // directory, the |output| parameter is set to the empty string. If the
- // current entry is a file, the |output| parameter is filled with its
- // contents. OpenCurrentEntryInZip() must be called beforehand. Note: the
- // |output| parameter can be filled with a big amount of data, avoid passing
- // it around by value, but by reference or pointer. Note: the value returned
- // by EntryInfo::original_size() cannot be trusted, so the real size of the
- // uncompressed contents can be different. |max_read_bytes| limits the ammount
- // of memory used to carry the entry. Returns true if the entire content is
- // read. If the entry is bigger than |max_read_bytes|, returns false and
- // |output| is filled with |max_read_bytes| of data. If an error occurs,
- // returns false, and |output| is set to the empty string.
+ // directory, |*output| is set to the empty string. If the current entry is a
+ // file, |*output| is filled with its contents.
+ //
+ // The value in |Entry::original_size| cannot be trusted, so the real size of
+ // the uncompressed contents can be different. |max_read_bytes| limits the
+ // amount of memory used to carry the entry.
+ //
+ // Returns true if the entire content is read without error. If the content is
+ // bigger than |max_read_bytes|, this function returns false and |*output| is
+ // filled with |max_read_bytes| of data. If an error occurs, this function
+ // returns false and |*output| contains the content extracted so far, which
+ // might be garbage data.
+ //
+ // Precondition: Next() returned a non-null Entry.
bool ExtractCurrentEntryToString(uint64_t max_read_bytes,
std::string* output) const;
- // Returns the current entry info. Returns NULL if the current entry is
- // not yet opened. OpenCurrentEntryInZip() must be called beforehand.
- EntryInfo* current_entry_info() const {
- return current_entry_info_.get();
+ bool ExtractCurrentEntryToString(std::string* output) const {
+ return ExtractCurrentEntryToString(
+ base::checked_cast<uint64_t>(output->max_size()), output);
}
- // Returns the number of entries in the zip file.
- // Open() must be called beforehand.
+ // Returns the number of entries in the ZIP archive.
+ //
+ // Precondition: one of the Open() methods returned true.
int num_entries() const { return num_entries_; }
private:
@@ -216,23 +251,35 @@ class ZipReader {
// Resets the internal state.
void Reset();
+ // Opens the current entry in the ZIP archive. On success, returns true and
+ // updates the current entry state |entry_|.
+ //
+ // Note that there is no matching CloseEntry(). The current entry state is
+ // reset automatically as needed.
+ bool OpenEntry();
+
// Extracts a chunk of the file to the target. Will post a task for the next
// chunk and success/failure/progress callbacks as necessary.
void ExtractChunk(base::File target_file,
SuccessCallback success_callback,
FailureCallback failure_callback,
- const ProgressCallback& progress_callback,
+ ProgressCallback progress_callback,
const int64_t offset);
+ std::string encoding_;
+ std::string password_;
unzFile zip_file_;
int num_entries_;
+ int next_index_;
bool reached_end_;
- std::unique_ptr<EntryInfo> current_entry_info_;
+ bool ok_;
+ Entry entry_;
base::WeakPtrFactory<ZipReader> weak_ptr_factory_{this};
};
-// A writer delegate that writes to a given File.
+// A writer delegate that writes to a given File. This file is expected to be
+// initially empty.
class FileWriterDelegate : public WriterDelegate {
public:
// Constructs a FileWriterDelegate that manipulates |file|. The delegate will
@@ -241,17 +288,14 @@ class FileWriterDelegate : public WriterDelegate {
explicit FileWriterDelegate(base::File* file);
// Constructs a FileWriterDelegate that takes ownership of |file|.
- explicit FileWriterDelegate(std::unique_ptr<base::File> file);
+ explicit FileWriterDelegate(base::File owned_file);
FileWriterDelegate(const FileWriterDelegate&) = delete;
FileWriterDelegate& operator=(const FileWriterDelegate&) = delete;
- // Truncates the file to the number of bytes written.
~FileWriterDelegate() override;
- // WriterDelegate methods:
-
- // Seeks to the beginning of the file, returning false if the seek fails.
+ // Returns true if the file handle passed to the constructor is valid.
bool PrepareOutput() override;
// Writes |num_bytes| bytes of |data| to the file, returning false on error or
@@ -261,45 +305,45 @@ class FileWriterDelegate : public WriterDelegate {
// Sets the last-modified time of the data.
void SetTimeModified(const base::Time& time) override;
- // Return the actual size of the file.
- int64_t file_length() { return file_length_; }
+ // On POSIX systems, sets the file to be executable if the source file was
+ // executable.
+ void SetPosixFilePermissions(int mode) override;
- private:
- // The file the delegate modifies.
- base::File* file_;
+ // Empties the file to avoid leaving garbage data in it.
+ void OnError() override;
+
+ // Gets the number of bytes written into the file.
+ int64_t file_length() { return file_length_; }
+ protected:
// The delegate can optionally own the file it modifies, in which case
// owned_file_ is set and file_ is an alias for owned_file_.
- std::unique_ptr<base::File> owned_file_;
+ base::File owned_file_;
+
+ // The file the delegate modifies.
+ base::File* const file_ = &owned_file_;
int64_t file_length_ = 0;
};
// A writer delegate that writes a file at a given path.
-class FilePathWriterDelegate : public WriterDelegate {
+class FilePathWriterDelegate : public FileWriterDelegate {
public:
- explicit FilePathWriterDelegate(const base::FilePath& output_file_path);
+ explicit FilePathWriterDelegate(base::FilePath output_file_path);
FilePathWriterDelegate(const FilePathWriterDelegate&) = delete;
FilePathWriterDelegate& operator=(const FilePathWriterDelegate&) = delete;
~FilePathWriterDelegate() override;
- // WriterDelegate methods:
-
// Creates the output file and any necessary intermediate directories.
bool PrepareOutput() override;
- // Writes |num_bytes| bytes of |data| to the file, returning false if not all
- // bytes could be written.
- bool WriteBytes(const char* data, int num_bytes) override;
-
- // Sets the last-modified time of the data.
- void SetTimeModified(const base::Time& time) override;
+ // Deletes the file.
+ void OnError() override;
private:
- base::FilePath output_file_path_;
- base::File file_;
+ const base::FilePath output_file_path_;
};
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
index c1d654afe9..fc80637936 100644
--- a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
@@ -8,13 +8,14 @@
#include <stdint.h>
#include <string.h>
-#include <set>
+#include <iterator>
#include <string>
+#include <vector>
#include "base/bind.h"
#include "base/check.h"
-#include "base/cxx17_backports.h"
#include "base/files/file.h"
+#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/hash/md5.h"
@@ -23,15 +24,20 @@
#include "base/strings/string_piece.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/bind.h"
#include "base/test/task_environment.h"
#include "base/time/time.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
#include "third_party/zlib/google/zip_internal.h"
-using ::testing::Return;
using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Return;
+using ::testing::SizeIs;
namespace {
@@ -39,10 +45,7 @@ const static std::string kQuuxExpectedMD5 = "d1ae4ac8a17a0e09317113ab284b57a6";
class FileWrapper {
public:
- typedef enum {
- READ_ONLY,
- READ_WRITE
- } AccessMode;
+ typedef enum { READ_ONLY, READ_WRITE } AccessMode;
FileWrapper(const base::FilePath& path, AccessMode mode) {
int flags = base::File::FLAG_READ;
@@ -73,18 +76,13 @@ class MockUnzipListener : public base::SupportsWeakPtr<MockUnzipListener> {
: success_calls_(0),
failure_calls_(0),
progress_calls_(0),
- current_progress_(0) {
- }
+ current_progress_(0) {}
// Success callback for async functions.
- void OnUnzipSuccess() {
- success_calls_++;
- }
+ void OnUnzipSuccess() { success_calls_++; }
// Failure callback for async functions.
- void OnUnzipFailure() {
- failure_calls_++;
- }
+ void OnUnzipFailure() { failure_calls_++; }
// Progress callback for async functions.
void OnUnzipProgress(int64_t progress) {
@@ -111,184 +109,189 @@ class MockWriterDelegate : public zip::WriterDelegate {
MOCK_METHOD0(PrepareOutput, bool());
MOCK_METHOD2(WriteBytes, bool(const char*, int));
MOCK_METHOD1(SetTimeModified, void(const base::Time&));
+ MOCK_METHOD1(SetPosixFilePermissions, void(int));
+ MOCK_METHOD0(OnError, void());
};
bool ExtractCurrentEntryToFilePath(zip::ZipReader* reader,
base::FilePath path) {
zip::FilePathWriterDelegate writer(path);
- return reader->ExtractCurrentEntry(&writer,
- std::numeric_limits<uint64_t>::max());
+ return reader->ExtractCurrentEntry(&writer);
}
-bool LocateAndOpenEntry(zip::ZipReader* reader,
- const base::FilePath& path_in_zip) {
+const zip::ZipReader::Entry* LocateAndOpenEntry(
+ zip::ZipReader* const reader,
+ const base::FilePath& path_in_zip) {
+ DCHECK(reader);
+ EXPECT_TRUE(reader->ok());
+
// The underlying library can do O(1) access, but ZipReader does not expose
// that. O(N) access is acceptable for these tests.
- while (reader->HasMore()) {
- if (!reader->OpenCurrentEntryInZip())
- return false;
- if (reader->current_entry_info()->file_path() == path_in_zip)
- return true;
- reader->AdvanceToNextEntry();
+ while (const zip::ZipReader::Entry* const entry = reader->Next()) {
+ EXPECT_TRUE(reader->ok());
+ if (entry->path == path_in_zip)
+ return entry;
}
- return false;
+
+ EXPECT_TRUE(reader->ok());
+ return nullptr;
}
-} // namespace
+using Paths = std::vector<base::FilePath>;
+
+} // namespace
namespace zip {
// Make the test a PlatformTest to setup autorelease pools properly on Mac.
class ZipReaderTest : public PlatformTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
PlatformTest::SetUp();
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
test_dir_ = temp_dir_.GetPath();
-
- ASSERT_TRUE(GetTestDataDirectory(&test_data_dir_));
-
- test_zip_file_ = test_data_dir_.AppendASCII("test.zip");
- encrypted_zip_file_ = test_data_dir_.AppendASCII("test_encrypted.zip");
- evil_zip_file_ = test_data_dir_.AppendASCII("evil.zip");
- evil_via_invalid_utf8_zip_file_ = test_data_dir_.AppendASCII(
- "evil_via_invalid_utf8.zip");
- evil_via_absolute_file_name_zip_file_ = test_data_dir_.AppendASCII(
- "evil_via_absolute_file_name.zip");
-
- test_zip_contents_.insert(base::FilePath(FILE_PATH_LITERAL("foo/")));
- test_zip_contents_.insert(base::FilePath(FILE_PATH_LITERAL("foo/bar/")));
- test_zip_contents_.insert(
- base::FilePath(FILE_PATH_LITERAL("foo/bar/baz.txt")));
- test_zip_contents_.insert(
- base::FilePath(FILE_PATH_LITERAL("foo/bar/quux.txt")));
- test_zip_contents_.insert(
- base::FilePath(FILE_PATH_LITERAL("foo/bar.txt")));
- test_zip_contents_.insert(base::FilePath(FILE_PATH_LITERAL("foo.txt")));
- test_zip_contents_.insert(
- base::FilePath(FILE_PATH_LITERAL("foo/bar/.hidden")));
}
- virtual void TearDown() {
- PlatformTest::TearDown();
+ static base::FilePath GetTestDataDirectory() {
+ base::FilePath path;
+ CHECK(base::PathService::Get(base::DIR_SOURCE_ROOT, &path));
+ return path.AppendASCII("third_party")
+ .AppendASCII("zlib")
+ .AppendASCII("google")
+ .AppendASCII("test")
+ .AppendASCII("data");
}
- bool GetTestDataDirectory(base::FilePath* path) {
- bool success = base::PathService::Get(base::DIR_SOURCE_ROOT, path);
- EXPECT_TRUE(success);
- if (!success)
- return false;
- *path = path->AppendASCII("third_party");
- *path = path->AppendASCII("zlib");
- *path = path->AppendASCII("google");
- *path = path->AppendASCII("test");
- *path = path->AppendASCII("data");
- return true;
- }
+ static Paths GetPaths(const base::FilePath& zip_path,
+ base::StringPiece encoding = {}) {
+ Paths paths;
+
+ if (ZipReader reader; reader.Open(zip_path)) {
+ if (!encoding.empty())
+ reader.SetEncoding(std::string(encoding));
+
+ while (const ZipReader::Entry* const entry = reader.Next()) {
+ EXPECT_TRUE(reader.ok());
+ paths.push_back(entry->path);
+ }
+
+ EXPECT_TRUE(reader.ok());
+ }
- bool CompareFileAndMD5(const base::FilePath& path,
- const std::string expected_md5) {
- // Read the output file and compute the MD5.
- std::string output;
- if (!base::ReadFileToString(path, &output))
- return false;
- const std::string md5 = base::MD5String(output);
- return expected_md5 == md5;
+ return paths;
}
// The path to temporary directory used to contain the test operations.
base::FilePath test_dir_;
// The path to the test data directory where test.zip etc. are located.
- base::FilePath test_data_dir_;
+ const base::FilePath data_dir_ = GetTestDataDirectory();
// The path to test.zip in the test data directory.
- base::FilePath test_zip_file_;
- // The path to test_encrypted.zip in the test data directory.
- base::FilePath encrypted_zip_file_;
- // The path to evil.zip in the test data directory.
- base::FilePath evil_zip_file_;
- // The path to evil_via_invalid_utf8.zip in the test data directory.
- base::FilePath evil_via_invalid_utf8_zip_file_;
- // The path to evil_via_absolute_file_name.zip in the test data directory.
- base::FilePath evil_via_absolute_file_name_zip_file_;
- std::set<base::FilePath> test_zip_contents_;
-
+ const base::FilePath test_zip_file_ = data_dir_.AppendASCII("test.zip");
+ const Paths test_zip_contents_ = {
+ base::FilePath(FILE_PATH_LITERAL("foo/")),
+ base::FilePath(FILE_PATH_LITERAL("foo/bar/")),
+ base::FilePath(FILE_PATH_LITERAL("foo/bar/baz.txt")),
+ base::FilePath(FILE_PATH_LITERAL("foo/bar/quux.txt")),
+ base::FilePath(FILE_PATH_LITERAL("foo/bar.txt")),
+ base::FilePath(FILE_PATH_LITERAL("foo.txt")),
+ base::FilePath(FILE_PATH_LITERAL("foo/bar/.hidden")),
+ };
base::ScopedTempDir temp_dir_;
-
base::test::TaskEnvironment task_environment_;
};
TEST_F(ZipReaderTest, Open_ValidZipFile) {
ZipReader reader;
- ASSERT_TRUE(reader.Open(test_zip_file_));
+ EXPECT_TRUE(reader.Open(test_zip_file_));
+ EXPECT_TRUE(reader.ok());
}
TEST_F(ZipReaderTest, Open_ValidZipPlatformFile) {
ZipReader reader;
+ EXPECT_FALSE(reader.ok());
FileWrapper zip_fd_wrapper(test_zip_file_, FileWrapper::READ_ONLY);
- ASSERT_TRUE(reader.OpenFromPlatformFile(zip_fd_wrapper.platform_file()));
+ EXPECT_TRUE(reader.OpenFromPlatformFile(zip_fd_wrapper.platform_file()));
+ EXPECT_TRUE(reader.ok());
}
TEST_F(ZipReaderTest, Open_NonExistentFile) {
ZipReader reader;
- ASSERT_FALSE(reader.Open(test_data_dir_.AppendASCII("nonexistent.zip")));
+ EXPECT_FALSE(reader.ok());
+ EXPECT_FALSE(reader.Open(data_dir_.AppendASCII("nonexistent.zip")));
+ EXPECT_FALSE(reader.ok());
}
TEST_F(ZipReaderTest, Open_ExistentButNonZipFile) {
ZipReader reader;
- ASSERT_FALSE(reader.Open(test_data_dir_.AppendASCII("create_test_zip.sh")));
+ EXPECT_FALSE(reader.ok());
+ EXPECT_FALSE(reader.Open(data_dir_.AppendASCII("create_test_zip.sh")));
+ EXPECT_FALSE(reader.ok());
}
-// Iterate through the contents in the test zip file, and compare that the
-// contents collected from the zip reader matches the expected contents.
+TEST_F(ZipReaderTest, Open_EmptyFile) {
+ ZipReader reader;
+ EXPECT_FALSE(reader.ok());
+ EXPECT_FALSE(reader.Open(data_dir_.AppendASCII("empty.zip")));
+ EXPECT_FALSE(reader.ok());
+}
+
+// Iterate through the contents in the test ZIP archive, and compare that the
+// contents collected from the ZipReader matches the expected contents.
TEST_F(ZipReaderTest, Iteration) {
- std::set<base::FilePath> actual_contents;
+ Paths actual_contents;
ZipReader reader;
- ASSERT_TRUE(reader.Open(test_zip_file_));
- while (reader.HasMore()) {
- ASSERT_TRUE(reader.OpenCurrentEntryInZip());
- actual_contents.insert(reader.current_entry_info()->file_path());
- ASSERT_TRUE(reader.AdvanceToNextEntry());
+ EXPECT_FALSE(reader.ok());
+ EXPECT_TRUE(reader.Open(test_zip_file_));
+ EXPECT_TRUE(reader.ok());
+ while (const ZipReader::Entry* const entry = reader.Next()) {
+ EXPECT_TRUE(reader.ok());
+ actual_contents.push_back(entry->path);
}
- EXPECT_FALSE(reader.AdvanceToNextEntry()); // Shouldn't go further.
- EXPECT_EQ(test_zip_contents_.size(),
- static_cast<size_t>(reader.num_entries()));
- EXPECT_EQ(test_zip_contents_.size(), actual_contents.size());
- EXPECT_EQ(test_zip_contents_, actual_contents);
+
+ EXPECT_TRUE(reader.ok());
+ EXPECT_FALSE(reader.Next()); // Shouldn't go further.
+ EXPECT_TRUE(reader.ok());
+
+ EXPECT_THAT(actual_contents, SizeIs(reader.num_entries()));
+ EXPECT_THAT(actual_contents, ElementsAreArray(test_zip_contents_));
}
-// Open the test zip file from a file descriptor, iterate through its contents,
-// and compare that they match the expected contents.
+// Open the test ZIP archive from a file descriptor, iterate through its
+// contents, and compare that they match the expected contents.
TEST_F(ZipReaderTest, PlatformFileIteration) {
- std::set<base::FilePath> actual_contents;
+ Paths actual_contents;
ZipReader reader;
FileWrapper zip_fd_wrapper(test_zip_file_, FileWrapper::READ_ONLY);
- ASSERT_TRUE(reader.OpenFromPlatformFile(zip_fd_wrapper.platform_file()));
- while (reader.HasMore()) {
- ASSERT_TRUE(reader.OpenCurrentEntryInZip());
- actual_contents.insert(reader.current_entry_info()->file_path());
- ASSERT_TRUE(reader.AdvanceToNextEntry());
+ EXPECT_TRUE(reader.OpenFromPlatformFile(zip_fd_wrapper.platform_file()));
+ EXPECT_TRUE(reader.ok());
+ while (const ZipReader::Entry* const entry = reader.Next()) {
+ EXPECT_TRUE(reader.ok());
+ actual_contents.push_back(entry->path);
}
- EXPECT_FALSE(reader.AdvanceToNextEntry()); // Shouldn't go further.
- EXPECT_EQ(test_zip_contents_.size(),
- static_cast<size_t>(reader.num_entries()));
- EXPECT_EQ(test_zip_contents_.size(), actual_contents.size());
- EXPECT_EQ(test_zip_contents_, actual_contents);
+
+ EXPECT_TRUE(reader.ok());
+ EXPECT_FALSE(reader.Next()); // Shouldn't go further.
+ EXPECT_TRUE(reader.ok());
+
+ EXPECT_THAT(actual_contents, SizeIs(reader.num_entries()));
+ EXPECT_THAT(actual_contents, ElementsAreArray(test_zip_contents_));
}
-TEST_F(ZipReaderTest, current_entry_info_RegularFile) {
+TEST_F(ZipReaderTest, RegularFile) {
ZipReader reader;
ASSERT_TRUE(reader.Open(test_zip_file_));
base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/quux.txt"));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ZipReader::EntryInfo* current_entry_info = reader.current_entry_info();
- EXPECT_EQ(target_path, current_entry_info->file_path());
- EXPECT_EQ(13527, current_entry_info->original_size());
+ const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
+ ASSERT_TRUE(entry);
+
+ EXPECT_EQ(target_path, entry->path);
+ EXPECT_EQ(13527, entry->original_size);
// The expected time stamp: 2009-05-29 06:22:20
base::Time::Exploded exploded = {}; // Zero-clear.
- current_entry_info->last_modified().UTCExplode(&exploded);
+ entry->last_modified.UTCExplode(&exploded);
EXPECT_EQ(2009, exploded.year);
EXPECT_EQ(5, exploded.month);
EXPECT_EQ(29, exploded.day_of_month);
@@ -297,67 +300,108 @@ TEST_F(ZipReaderTest, current_entry_info_RegularFile) {
EXPECT_EQ(20, exploded.second);
EXPECT_EQ(0, exploded.millisecond);
- EXPECT_FALSE(current_entry_info->is_unsafe());
- EXPECT_FALSE(current_entry_info->is_directory());
+ EXPECT_FALSE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_directory);
}
-TEST_F(ZipReaderTest, current_entry_info_DotDotFile) {
+TEST_F(ZipReaderTest, DotDotFile) {
ZipReader reader;
- ASSERT_TRUE(reader.Open(evil_zip_file_));
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("evil.zip")));
base::FilePath target_path(FILE_PATH_LITERAL(
"../levilevilevilevilevilevilevilevilevilevilevilevil"));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ZipReader::EntryInfo* current_entry_info = reader.current_entry_info();
- EXPECT_EQ(target_path, current_entry_info->file_path());
-
+ const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(target_path, entry->path);
// This file is unsafe because of ".." in the file name.
- EXPECT_TRUE(current_entry_info->is_unsafe());
- EXPECT_FALSE(current_entry_info->is_directory());
+ EXPECT_TRUE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_directory);
}
-TEST_F(ZipReaderTest, current_entry_info_InvalidUTF8File) {
+TEST_F(ZipReaderTest, InvalidUTF8File) {
ZipReader reader;
- ASSERT_TRUE(reader.Open(evil_via_invalid_utf8_zip_file_));
- // The evil file is the 2nd file in the zip file.
- // We cannot locate by the file name ".\x80.\\evil.txt",
- // as FilePath may internally convert the string.
- ASSERT_TRUE(reader.AdvanceToNextEntry());
- ASSERT_TRUE(reader.OpenCurrentEntryInZip());
- ZipReader::EntryInfo* current_entry_info = reader.current_entry_info();
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("evil_via_invalid_utf8.zip")));
+ base::FilePath target_path = base::FilePath::FromUTF8Unsafe(".�.\\evil.txt");
+ const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(target_path, entry->path);
+ EXPECT_FALSE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_directory);
+}
+
+// By default, file paths in ZIPs are interpreted as UTF-8. But in this test,
+// the ZIP archive contains file paths that are actually encoded in Shift JIS.
+// The SJIS-encoded paths are thus wrongly interpreted as UTF-8, resulting in
+// garbled paths. Invalid UTF-8 sequences are safely converted to the
+// replacement character �.
+TEST_F(ZipReaderTest, EncodingSjisAsUtf8) {
+ EXPECT_THAT(
+ GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip")),
+ ElementsAre(
+ base::FilePath::FromUTF8Unsafe("�V�����t�H���_/SJIS_835C_�\\.txt"),
+ base::FilePath::FromUTF8Unsafe(
+ "�V�����t�H���_/�V�����e�L�X�g �h�L�������g.txt")));
+}
+
+// In this test, SJIS-encoded paths are interpreted as Code Page 1252. This
+// results in garbled paths. Note the presence of C1 control codes U+0090 and
+// U+0081 in the garbled paths.
+TEST_F(ZipReaderTest, EncodingSjisAs1252) {
+ EXPECT_THAT(
+ GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip"), "windows-1252"),
+ ElementsAre(base::FilePath::FromUTF8Unsafe(
+ "\u0090V‚µ‚¢ƒtƒHƒ‹ƒ_/SJIS_835C_ƒ\\.txt"),
+ base::FilePath::FromUTF8Unsafe(
+ "\u0090V‚µ‚¢ƒtƒHƒ‹ƒ_/\u0090V‚µ‚¢ƒeƒLƒXƒg "
+ "ƒhƒLƒ…ƒ\u0081ƒ“ƒg.txt")));
+}
+
+// In this test, SJIS-encoded paths are interpreted as Code Page 866. This
+// results in garbled paths.
+TEST_F(ZipReaderTest, EncodingSjisAsIbm866) {
+ EXPECT_THAT(
+ GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip"), "IBM866"),
+ ElementsAre(
+ base::FilePath::FromUTF8Unsafe("РVВ╡ВвГtГHГЛГ_/SJIS_835C_Г\\.txt"),
+ base::FilePath::FromUTF8Unsafe(
+ "РVВ╡ВвГtГHГЛГ_/РVВ╡ВвГeГLГXГg ГhГLГЕГБГУГg.txt")));
+}
- // This file is unsafe because of invalid UTF-8 in the file name.
- EXPECT_TRUE(current_entry_info->is_unsafe());
- EXPECT_FALSE(current_entry_info->is_directory());
+// Tests that SJIS-encoded paths are correctly converted to Unicode.
+TEST_F(ZipReaderTest, EncodingSjis) {
+ EXPECT_THAT(
+ GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip"), "Shift_JIS"),
+ ElementsAre(
+ base::FilePath::FromUTF8Unsafe("新しいフォルダ/SJIS_835C_ソ.txt"),
+ base::FilePath::FromUTF8Unsafe(
+ "新しいフォルダ/新しいテキスト ドキュメント.txt")));
}
-TEST_F(ZipReaderTest, current_entry_info_AbsoluteFile) {
+TEST_F(ZipReaderTest, AbsoluteFile) {
ZipReader reader;
- ASSERT_TRUE(reader.Open(evil_via_absolute_file_name_zip_file_));
+ ASSERT_TRUE(
+ reader.Open(data_dir_.AppendASCII("evil_via_absolute_file_name.zip")));
base::FilePath target_path(FILE_PATH_LITERAL("/evil.txt"));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ZipReader::EntryInfo* current_entry_info = reader.current_entry_info();
- EXPECT_EQ(target_path, current_entry_info->file_path());
-
+ const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(target_path, entry->path);
// This file is unsafe because of the absolute file name.
- EXPECT_TRUE(current_entry_info->is_unsafe());
- EXPECT_FALSE(current_entry_info->is_directory());
+ EXPECT_TRUE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_directory);
}
-TEST_F(ZipReaderTest, current_entry_info_Directory) {
+TEST_F(ZipReaderTest, Directory) {
ZipReader reader;
ASSERT_TRUE(reader.Open(test_zip_file_));
base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/"));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ZipReader::EntryInfo* current_entry_info = reader.current_entry_info();
-
- EXPECT_EQ(base::FilePath(FILE_PATH_LITERAL("foo/bar/")),
- current_entry_info->file_path());
+ const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(target_path, entry->path);
// The directory size should be zero.
- EXPECT_EQ(0, current_entry_info->original_size());
+ EXPECT_EQ(0, entry->original_size);
// The expected time stamp: 2009-05-31 15:49:52
base::Time::Exploded exploded = {}; // Zero-clear.
- current_entry_info->last_modified().UTCExplode(&exploded);
+ entry->last_modified.UTCExplode(&exploded);
EXPECT_EQ(2009, exploded.year);
EXPECT_EQ(5, exploded.month);
EXPECT_EQ(31, exploded.day_of_month);
@@ -366,22 +410,91 @@ TEST_F(ZipReaderTest, current_entry_info_Directory) {
EXPECT_EQ(52, exploded.second);
EXPECT_EQ(0, exploded.millisecond);
- EXPECT_FALSE(current_entry_info->is_unsafe());
- EXPECT_TRUE(current_entry_info->is_directory());
+ EXPECT_FALSE(entry->is_unsafe);
+ EXPECT_TRUE(entry->is_directory);
}
-TEST_F(ZipReaderTest, current_entry_info_EncryptedFile) {
+TEST_F(ZipReaderTest, EncryptedFile_WrongPassword) {
ZipReader reader;
- base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/quux.txt"));
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Different Encryptions.zip")));
+ reader.SetPassword("wrong password");
- ASSERT_TRUE(reader.Open(encrypted_zip_file_));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- EXPECT_TRUE(reader.current_entry_info()->is_encrypted());
- reader.Close();
+ {
+ const ZipReader::Entry* entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(base::FilePath::FromASCII("ClearText.txt"), entry->path);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_FALSE(entry->is_encrypted);
+ std::string contents = "dummy";
+ EXPECT_TRUE(reader.ExtractCurrentEntryToString(&contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+ }
- ASSERT_TRUE(reader.Open(test_zip_file_));
- ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- EXPECT_FALSE(reader.current_entry_info()->is_encrypted());
+ for (const base::StringPiece path : {
+ "Encrypted AES-128.txt",
+ "Encrypted AES-192.txt",
+ "Encrypted AES-256.txt",
+ "Encrypted ZipCrypto.txt",
+ }) {
+ const ZipReader::Entry* entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(base::FilePath::FromASCII(path), entry->path);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_TRUE(entry->is_encrypted);
+ std::string contents = "dummy";
+ EXPECT_FALSE(reader.ExtractCurrentEntryToString(&contents));
+ }
+
+ EXPECT_FALSE(reader.Next());
+ EXPECT_TRUE(reader.ok());
+}
+
+TEST_F(ZipReaderTest, EncryptedFile_RightPassword) {
+ ZipReader reader;
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Different Encryptions.zip")));
+ reader.SetPassword("password");
+
+ {
+ const ZipReader::Entry* entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(base::FilePath::FromASCII("ClearText.txt"), entry->path);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_FALSE(entry->is_encrypted);
+ std::string contents = "dummy";
+ EXPECT_TRUE(reader.ExtractCurrentEntryToString(&contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+ }
+
+ // TODO(crbug.com/1296838) Support AES encryption.
+ for (const base::StringPiece path : {
+ "Encrypted AES-128.txt",
+ "Encrypted AES-192.txt",
+ "Encrypted AES-256.txt",
+ }) {
+ const ZipReader::Entry* entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(base::FilePath::FromASCII(path), entry->path);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_TRUE(entry->is_encrypted);
+ std::string contents = "dummy";
+ EXPECT_FALSE(reader.ExtractCurrentEntryToString(&contents));
+ EXPECT_EQ("", contents);
+ }
+
+ {
+ const ZipReader::Entry* entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(base::FilePath::FromASCII("Encrypted ZipCrypto.txt"),
+ entry->path);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_TRUE(entry->is_encrypted);
+ std::string contents = "dummy";
+ EXPECT_TRUE(reader.ExtractCurrentEntryToString(&contents));
+ EXPECT_EQ("This is encrypted with ZipCrypto.\n", contents);
+ }
+
+ EXPECT_FALSE(reader.Next());
+ EXPECT_TRUE(reader.ok());
}
// Verifies that the ZipReader class can extract a file from a zip archive
@@ -404,7 +517,7 @@ TEST_F(ZipReaderTest, OpenFromString) {
"\x50\x75\x78\x0b\x00\x01\x04\x8e\xf0\x00\x00\x04\x88\x13\x00\x00"
"\x50\x4b\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00\x4e\x00\x00\x00"
"\x52\x00\x00\x00\x00\x00";
- std::string data(kTestData, base::size(kTestData));
+ std::string data(kTestData, std::size(kTestData));
ZipReader reader;
ASSERT_TRUE(reader.OpenFromString(data));
base::FilePath target_path(FILE_PATH_LITERAL("test.txt"));
@@ -413,8 +526,8 @@ TEST_F(ZipReaderTest, OpenFromString) {
test_dir_.AppendASCII("test.txt")));
std::string actual;
- ASSERT_TRUE(base::ReadFileToString(
- test_dir_.AppendASCII("test.txt"), &actual));
+ ASSERT_TRUE(
+ base::ReadFileToString(test_dir_.AppendASCII("test.txt"), &actual));
EXPECT_EQ(std::string("This is a test.\n"), actual);
}
@@ -445,8 +558,8 @@ TEST_F(ZipReaderTest, ExtractToFileAsync_RegularFile) {
EXPECT_LE(1, listener.progress_calls());
std::string output;
- ASSERT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("quux.txt"),
- &output));
+ ASSERT_TRUE(
+ base::ReadFileToString(test_dir_.AppendASCII("quux.txt"), &output));
const std::string md5 = base::MD5String(output);
EXPECT_EQ(kQuuxExpectedMD5, md5);
@@ -456,6 +569,103 @@ TEST_F(ZipReaderTest, ExtractToFileAsync_RegularFile) {
EXPECT_EQ(file_size, listener.current_progress());
}
+TEST_F(ZipReaderTest, ExtractToFileAsync_Encrypted_NoPassword) {
+ MockUnzipListener listener;
+
+ ZipReader reader;
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Different Encryptions.zip")));
+ ASSERT_TRUE(LocateAndOpenEntry(
+ &reader, base::FilePath::FromASCII("Encrypted ZipCrypto.txt")));
+ const base::FilePath target_path = test_dir_.AppendASCII("extracted");
+ reader.ExtractCurrentEntryToFilePathAsync(
+ target_path,
+ base::BindOnce(&MockUnzipListener::OnUnzipSuccess, listener.AsWeakPtr()),
+ base::BindOnce(&MockUnzipListener::OnUnzipFailure, listener.AsWeakPtr()),
+ base::BindRepeating(&MockUnzipListener::OnUnzipProgress,
+ listener.AsWeakPtr()));
+
+ EXPECT_EQ(0, listener.success_calls());
+ EXPECT_EQ(0, listener.failure_calls());
+ EXPECT_EQ(0, listener.progress_calls());
+
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(0, listener.success_calls());
+ EXPECT_EQ(1, listener.failure_calls());
+ EXPECT_LE(1, listener.progress_calls());
+
+ // The extracted file contains rubbish data.
+ // We probably shouldn't even look at it.
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(target_path, &contents));
+ EXPECT_NE("", contents);
+ EXPECT_EQ(contents.size(), listener.current_progress());
+}
+
+TEST_F(ZipReaderTest, ExtractToFileAsync_Encrypted_RightPassword) {
+ MockUnzipListener listener;
+
+ ZipReader reader;
+ reader.SetPassword("password");
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Different Encryptions.zip")));
+ ASSERT_TRUE(LocateAndOpenEntry(
+ &reader, base::FilePath::FromASCII("Encrypted ZipCrypto.txt")));
+ const base::FilePath target_path = test_dir_.AppendASCII("extracted");
+ reader.ExtractCurrentEntryToFilePathAsync(
+ target_path,
+ base::BindOnce(&MockUnzipListener::OnUnzipSuccess, listener.AsWeakPtr()),
+ base::BindOnce(&MockUnzipListener::OnUnzipFailure, listener.AsWeakPtr()),
+ base::BindRepeating(&MockUnzipListener::OnUnzipProgress,
+ listener.AsWeakPtr()));
+
+ EXPECT_EQ(0, listener.success_calls());
+ EXPECT_EQ(0, listener.failure_calls());
+ EXPECT_EQ(0, listener.progress_calls());
+
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(1, listener.success_calls());
+ EXPECT_EQ(0, listener.failure_calls());
+ EXPECT_LE(1, listener.progress_calls());
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(target_path, &contents));
+ EXPECT_EQ("This is encrypted with ZipCrypto.\n", contents);
+ EXPECT_EQ(contents.size(), listener.current_progress());
+}
+
+TEST_F(ZipReaderTest, ExtractToFileAsync_WrongCrc) {
+ MockUnzipListener listener;
+
+ ZipReader reader;
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Wrong CRC.zip")));
+ ASSERT_TRUE(
+ LocateAndOpenEntry(&reader, base::FilePath::FromASCII("Corrupted.txt")));
+ const base::FilePath target_path = test_dir_.AppendASCII("extracted");
+ reader.ExtractCurrentEntryToFilePathAsync(
+ target_path,
+ base::BindOnce(&MockUnzipListener::OnUnzipSuccess, listener.AsWeakPtr()),
+ base::BindOnce(&MockUnzipListener::OnUnzipFailure, listener.AsWeakPtr()),
+ base::BindRepeating(&MockUnzipListener::OnUnzipProgress,
+ listener.AsWeakPtr()));
+
+ EXPECT_EQ(0, listener.success_calls());
+ EXPECT_EQ(0, listener.failure_calls());
+ EXPECT_EQ(0, listener.progress_calls());
+
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(0, listener.success_calls());
+ EXPECT_EQ(1, listener.failure_calls());
+ EXPECT_LE(1, listener.progress_calls());
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(target_path, &contents));
+ EXPECT_EQ("This file has been changed after its CRC was computed.\n",
+ contents);
+ EXPECT_EQ(contents.size(), listener.current_progress());
+}
+
// Verifies that the asynchronous extraction to a file works.
TEST_F(ZipReaderTest, ExtractToFileAsync_Directory) {
MockUnzipListener listener;
@@ -490,7 +700,7 @@ TEST_F(ZipReaderTest, ExtractCurrentEntryToString) {
// sizes from 0 to 7 bytes respectively, being the contents of each file a
// substring of "0123456" starting at '0'.
base::FilePath test_zip_file =
- test_data_dir_.AppendASCII("test_mismatch_size.zip");
+ data_dir_.AppendASCII("test_mismatch_size.zip");
ZipReader reader;
std::string contents;
@@ -515,7 +725,7 @@ TEST_F(ZipReaderTest, ExtractCurrentEntryToString) {
}
// More than necessary byte read limit: must pass.
- EXPECT_TRUE(reader.ExtractCurrentEntryToString(16, &contents));
+ EXPECT_TRUE(reader.ExtractCurrentEntryToString(&contents));
EXPECT_EQ(std::string(base::StringPiece("0123456", i)), contents);
}
reader.Close();
@@ -526,7 +736,7 @@ TEST_F(ZipReaderTest, ExtractPartOfCurrentEntry) {
// sizes from 0 to 7 bytes respectively, being the contents of each file a
// substring of "0123456" starting at '0'.
base::FilePath test_zip_file =
- test_data_dir_.AppendASCII("test_mismatch_size.zip");
+ data_dir_.AppendASCII("test_mismatch_size.zip");
ZipReader reader;
std::string contents;
@@ -564,6 +774,37 @@ TEST_F(ZipReaderTest, ExtractPartOfCurrentEntry) {
reader.Close();
}
+TEST_F(ZipReaderTest, ExtractPosixPermissions) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ ZipReader reader;
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("test_posix_permissions.zip")));
+ for (auto entry : {"0.txt", "1.txt", "2.txt", "3.txt"}) {
+ ASSERT_TRUE(LocateAndOpenEntry(&reader, base::FilePath::FromASCII(entry)));
+ FilePathWriterDelegate delegate(temp_dir.GetPath().AppendASCII(entry));
+ ASSERT_TRUE(reader.ExtractCurrentEntry(&delegate));
+ }
+ reader.Close();
+
+#if defined(OS_POSIX)
+ // This assumes a umask of at least 0400.
+ int mode = 0;
+ EXPECT_TRUE(base::GetPosixFilePermissions(
+ temp_dir.GetPath().AppendASCII("0.txt"), &mode));
+ EXPECT_EQ(mode & 0700, 0700);
+ EXPECT_TRUE(base::GetPosixFilePermissions(
+ temp_dir.GetPath().AppendASCII("1.txt"), &mode));
+ EXPECT_EQ(mode & 0700, 0600);
+ EXPECT_TRUE(base::GetPosixFilePermissions(
+ temp_dir.GetPath().AppendASCII("2.txt"), &mode));
+ EXPECT_EQ(mode & 0700, 0700);
+ EXPECT_TRUE(base::GetPosixFilePermissions(
+ temp_dir.GetPath().AppendASCII("3.txt"), &mode));
+ EXPECT_EQ(mode & 0700, 0600);
+#endif
+}
+
// This test exposes http://crbug.com/430959, at least on OS X
TEST_F(ZipReaderTest, DISABLED_LeakDetectionTest) {
for (int i = 0; i < 100000; ++i) {
@@ -578,45 +819,40 @@ TEST_F(ZipReaderTest, DISABLED_LeakDetectionTest) {
TEST_F(ZipReaderTest, ExtractCurrentEntryPrepareFailure) {
testing::StrictMock<MockWriterDelegate> mock_writer;
- EXPECT_CALL(mock_writer, PrepareOutput())
- .WillOnce(Return(false));
+ EXPECT_CALL(mock_writer, PrepareOutput()).WillOnce(Return(false));
base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/quux.txt"));
ZipReader reader;
ASSERT_TRUE(reader.Open(test_zip_file_));
ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ASSERT_FALSE(reader.ExtractCurrentEntry(
- &mock_writer, std::numeric_limits<uint64_t>::max()));
+ ASSERT_FALSE(reader.ExtractCurrentEntry(&mock_writer));
}
-// Test that when WriterDelegate::WriteBytes returns false, no other methods on
-// the delegate are called and the extraction fails.
+// Test that when WriterDelegate::WriteBytes returns false, only the OnError
+// method on the delegate is called and the extraction fails.
TEST_F(ZipReaderTest, ExtractCurrentEntryWriteBytesFailure) {
testing::StrictMock<MockWriterDelegate> mock_writer;
- EXPECT_CALL(mock_writer, PrepareOutput())
- .WillOnce(Return(true));
- EXPECT_CALL(mock_writer, WriteBytes(_, _))
- .WillOnce(Return(false));
+ EXPECT_CALL(mock_writer, PrepareOutput()).WillOnce(Return(true));
+ EXPECT_CALL(mock_writer, WriteBytes(_, _)).WillOnce(Return(false));
+ EXPECT_CALL(mock_writer, OnError());
base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/quux.txt"));
ZipReader reader;
ASSERT_TRUE(reader.Open(test_zip_file_));
ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ASSERT_FALSE(reader.ExtractCurrentEntry(
- &mock_writer, std::numeric_limits<uint64_t>::max()));
+ ASSERT_FALSE(reader.ExtractCurrentEntry(&mock_writer));
}
// Test that extraction succeeds when the writer delegate reports all is well.
TEST_F(ZipReaderTest, ExtractCurrentEntrySuccess) {
testing::StrictMock<MockWriterDelegate> mock_writer;
- EXPECT_CALL(mock_writer, PrepareOutput())
- .WillOnce(Return(true));
- EXPECT_CALL(mock_writer, WriteBytes(_, _))
- .WillRepeatedly(Return(true));
+ EXPECT_CALL(mock_writer, PrepareOutput()).WillOnce(Return(true));
+ EXPECT_CALL(mock_writer, WriteBytes(_, _)).WillRepeatedly(Return(true));
+ EXPECT_CALL(mock_writer, SetPosixFilePermissions(_));
EXPECT_CALL(mock_writer, SetTimeModified(_));
base::FilePath target_path(FILE_PATH_LITERAL("foo/bar/quux.txt"));
@@ -624,8 +860,38 @@ TEST_F(ZipReaderTest, ExtractCurrentEntrySuccess) {
ASSERT_TRUE(reader.Open(test_zip_file_));
ASSERT_TRUE(LocateAndOpenEntry(&reader, target_path));
- ASSERT_TRUE(reader.ExtractCurrentEntry(&mock_writer,
- std::numeric_limits<uint64_t>::max()));
+ ASSERT_TRUE(reader.ExtractCurrentEntry(&mock_writer));
+}
+
+TEST_F(ZipReaderTest, WrongCrc) {
+ ZipReader reader;
+ ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("Wrong CRC.zip")));
+
+ const ZipReader::Entry* const entry =
+ LocateAndOpenEntry(&reader, base::FilePath::FromASCII("Corrupted.txt"));
+ ASSERT_TRUE(entry);
+
+ std::string contents = "dummy";
+ EXPECT_FALSE(reader.ExtractCurrentEntryToString(&contents));
+ EXPECT_EQ("This file has been changed after its CRC was computed.\n",
+ contents);
+
+ contents = "dummy";
+ EXPECT_FALSE(
+ reader.ExtractCurrentEntryToString(entry->original_size + 1, &contents));
+ EXPECT_EQ("This file has been changed after its CRC was computed.\n",
+ contents);
+
+ contents = "dummy";
+ EXPECT_FALSE(
+ reader.ExtractCurrentEntryToString(entry->original_size, &contents));
+ EXPECT_EQ("This file has been changed after its CRC was computed.\n",
+ contents);
+
+ contents = "dummy";
+ EXPECT_FALSE(
+ reader.ExtractCurrentEntryToString(entry->original_size - 1, &contents));
+ EXPECT_EQ("This file has been changed after its CRC was computed.", contents);
}
class FileWriterDelegateTest : public ::testing::Test {
@@ -639,34 +905,39 @@ class FileWriterDelegateTest : public ::testing::Test {
ASSERT_TRUE(file_.IsValid());
}
- // Writes data to the file, leaving the current position at the end of the
- // write.
- void PopulateFile() {
- static const char kSomeData[] = "this sure is some data.";
- static const size_t kSomeDataLen = sizeof(kSomeData) - 1;
- ASSERT_NE(-1LL, file_.Write(0LL, kSomeData, kSomeDataLen));
- }
-
base::FilePath temp_file_path_;
base::File file_;
};
-TEST_F(FileWriterDelegateTest, WriteToStartAndTruncate) {
- // Write stuff and advance.
- PopulateFile();
+TEST_F(FileWriterDelegateTest, WriteToEnd) {
+ const std::string payload = "This is the actualy payload data.\n";
- // This should rewind, write, then truncate.
- static const char kSomeData[] = "short";
- static const int kSomeDataLen = sizeof(kSomeData) - 1;
{
FileWriterDelegate writer(&file_);
+ EXPECT_EQ(0, writer.file_length());
ASSERT_TRUE(writer.PrepareOutput());
- ASSERT_TRUE(writer.WriteBytes(kSomeData, kSomeDataLen));
+ ASSERT_TRUE(writer.WriteBytes(payload.data(), payload.size()));
+ EXPECT_EQ(payload.size(), writer.file_length());
}
- ASSERT_EQ(kSomeDataLen, file_.GetLength());
- char buf[kSomeDataLen] = {};
- ASSERT_EQ(kSomeDataLen, file_.Read(0LL, buf, kSomeDataLen));
- ASSERT_EQ(std::string(kSomeData), std::string(buf, kSomeDataLen));
+
+ EXPECT_EQ(payload.size(), file_.GetLength());
+}
+
+TEST_F(FileWriterDelegateTest, EmptyOnError) {
+ const std::string payload = "This is the actualy payload data.\n";
+
+ {
+ FileWriterDelegate writer(&file_);
+ EXPECT_EQ(0, writer.file_length());
+ ASSERT_TRUE(writer.PrepareOutput());
+ ASSERT_TRUE(writer.WriteBytes(payload.data(), payload.size()));
+ EXPECT_EQ(payload.size(), writer.file_length());
+ EXPECT_EQ(payload.size(), file_.GetLength());
+ writer.OnError();
+ EXPECT_EQ(0, writer.file_length());
+ }
+
+ EXPECT_EQ(0, file_.GetLength());
}
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_unittest.cc b/deps/v8/third_party/zlib/google/zip_unittest.cc
index 944930ffc8..ab86e88343 100644
--- a/deps/v8/third_party/zlib/google/zip_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_unittest.cc
@@ -5,6 +5,8 @@
#include <stddef.h>
#include <stdint.h>
+#include <iomanip>
+#include <limits>
#include <map>
#include <set>
#include <string>
@@ -21,6 +23,7 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/test/bind.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
@@ -47,6 +50,46 @@ bool CreateFile(const std::string& content,
return file->IsValid();
}
+// A WriterDelegate that logs progress once per second.
+class ProgressWriterDelegate : public zip::WriterDelegate {
+ public:
+ explicit ProgressWriterDelegate(int64_t expected_size)
+ : expected_size_(expected_size) {
+ CHECK_GT(expected_size_, 0);
+ }
+
+ bool WriteBytes(const char* data, int num_bytes) override {
+ received_bytes_ += num_bytes;
+ LogProgressIfNecessary();
+ return true;
+ }
+
+ void SetTimeModified(const base::Time& time) override { LogProgress(); }
+
+ int64_t received_bytes() const { return received_bytes_; }
+
+ private:
+ void LogProgressIfNecessary() {
+ const base::TimeTicks now = base::TimeTicks::Now();
+ if (next_progress_report_time_ > now)
+ return;
+
+ next_progress_report_time_ = now + progress_period_;
+ LogProgress();
+ }
+
+ void LogProgress() const {
+ LOG(INFO) << "Unzipping... " << std::setw(3)
+ << (100 * received_bytes_ / expected_size_) << "%";
+ }
+
+ const base::TimeDelta progress_period_ = base::Seconds(1);
+ base::TimeTicks next_progress_report_time_ =
+ base::TimeTicks::Now() + progress_period_;
+ const uint64_t expected_size_;
+ int64_t received_bytes_ = 0;
+};
+
// A virtual file system containing:
// /test
// /test/foo.txt
@@ -192,32 +235,28 @@ class ZipTest : public PlatformTest {
virtual void TearDown() { PlatformTest::TearDown(); }
- bool GetTestDataDirectory(base::FilePath* path) {
- bool success = base::PathService::Get(base::DIR_SOURCE_ROOT, path);
+ static base::FilePath GetDataDirectory() {
+ base::FilePath path;
+ bool success = base::PathService::Get(base::DIR_SOURCE_ROOT, &path);
EXPECT_TRUE(success);
- if (!success)
- return false;
- for (const base::StringPiece s :
- {"third_party", "zlib", "google", "test", "data"}) {
- *path = path->AppendASCII(s);
- }
- return true;
+ return std::move(path)
+ .AppendASCII("third_party")
+ .AppendASCII("zlib")
+ .AppendASCII("google")
+ .AppendASCII("test")
+ .AppendASCII("data");
}
void TestUnzipFile(const base::FilePath::StringType& filename,
bool expect_hidden_files) {
- base::FilePath test_dir;
- ASSERT_TRUE(GetTestDataDirectory(&test_dir));
- TestUnzipFile(test_dir.Append(filename), expect_hidden_files);
+ TestUnzipFile(GetDataDirectory().Append(filename), expect_hidden_files);
}
void TestUnzipFile(const base::FilePath& path, bool expect_hidden_files) {
- ASSERT_TRUE(base::PathExists(path)) << "no file " << path.value();
+ ASSERT_TRUE(base::PathExists(path)) << "no file " << path;
ASSERT_TRUE(zip::Unzip(path, test_dir_));
- base::FilePath original_dir;
- ASSERT_TRUE(GetTestDataDirectory(&original_dir));
- original_dir = original_dir.AppendASCII("test");
+ base::FilePath original_dir = GetDataDirectory().AppendASCII("test");
base::FileEnumerator files(
test_dir_, true,
@@ -226,7 +265,7 @@ class ZipTest : public PlatformTest {
size_t count = 0;
while (!unzipped_entry_path.empty()) {
EXPECT_EQ(zip_contents_.count(unzipped_entry_path), 1U)
- << "Couldn't find " << unzipped_entry_path.value();
+ << "Couldn't find " << unzipped_entry_path;
count++;
if (base::PathExists(unzipped_entry_path) &&
@@ -329,9 +368,7 @@ TEST_F(ZipTest, UnzipUncompressed) {
}
TEST_F(ZipTest, UnzipEvil) {
- base::FilePath path;
- ASSERT_TRUE(GetTestDataDirectory(&path));
- path = path.AppendASCII("evil.zip");
+ base::FilePath path = GetDataDirectory().AppendASCII("evil.zip");
// Unzip the zip file into a sub directory of test_dir_ so evil.zip
// won't create a persistent file outside test_dir_ in case of a
// failure.
@@ -344,28 +381,23 @@ TEST_F(ZipTest, UnzipEvil) {
}
TEST_F(ZipTest, UnzipEvil2) {
- base::FilePath path;
- ASSERT_TRUE(GetTestDataDirectory(&path));
- // The zip file contains an evil file with invalid UTF-8 in its file
- // name.
- path = path.AppendASCII("evil_via_invalid_utf8.zip");
+ // The ZIP file contains a file with invalid UTF-8 in its file name.
+ base::FilePath path =
+ GetDataDirectory().AppendASCII("evil_via_invalid_utf8.zip");
// See the comment at UnzipEvil() for why we do this.
base::FilePath output_dir = test_dir_.AppendASCII("out");
- // This should fail as it contains an evil file.
- ASSERT_FALSE(zip::Unzip(path, output_dir));
- base::FilePath evil_file = output_dir;
- evil_file = evil_file.AppendASCII("../evil.txt");
- ASSERT_FALSE(base::PathExists(evil_file));
+ ASSERT_TRUE(zip::Unzip(path, output_dir));
+ ASSERT_TRUE(base::PathExists(
+ output_dir.Append(base::FilePath::FromUTF8Unsafe(".�.\\evil.txt"))));
+ ASSERT_FALSE(base::PathExists(output_dir.AppendASCII("../evil.txt")));
}
TEST_F(ZipTest, UnzipWithFilter) {
auto filter = base::BindRepeating([](const base::FilePath& path) {
return path.BaseName().MaybeAsASCII() == "foo.txt";
});
- base::FilePath path;
- ASSERT_TRUE(GetTestDataDirectory(&path));
- ASSERT_TRUE(zip::UnzipWithFilterCallback(path.AppendASCII("test.zip"),
- test_dir_, filter, false));
+ ASSERT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII("test.zip"), test_dir_,
+ {.filter = std::move(filter)}));
// Only foo.txt should have been extracted. The following paths should not
// be extracted:
// foo/
@@ -394,9 +426,75 @@ TEST_F(ZipTest, UnzipWithFilter) {
ASSERT_EQ(0, extracted_count);
}
+TEST_F(ZipTest, UnzipEncryptedWithRightPassword) {
+ // TODO(crbug.com/1296838) Also check the AES-encrypted files.
+ auto filter = base::BindRepeating([](const base::FilePath& path) {
+ return !base::StartsWith(path.MaybeAsASCII(), "Encrypted AES");
+ });
+
+ ASSERT_TRUE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Different Encryptions.zip"), test_dir_,
+ {.filter = std::move(filter), .password = "password"}));
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("ClearText.txt"),
+ &contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+
+ ASSERT_TRUE(base::ReadFileToString(
+ test_dir_.AppendASCII("Encrypted ZipCrypto.txt"), &contents));
+ EXPECT_EQ("This is encrypted with ZipCrypto.\n", contents);
+}
+
+TEST_F(ZipTest, UnzipEncryptedWithWrongPassword) {
+ // TODO(crbug.com/1296838) Also check the AES-encrypted files.
+ auto filter = base::BindRepeating([](const base::FilePath& path) {
+ return !base::StartsWith(path.MaybeAsASCII(), "Encrypted AES");
+ });
+
+ ASSERT_FALSE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Different Encryptions.zip"), test_dir_,
+ {.filter = std::move(filter), .password = "wrong"}));
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("ClearText.txt"),
+ &contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+
+ // No rubbish file should be left behind.
+ EXPECT_FALSE(
+ base::PathExists(test_dir_.AppendASCII("Encrypted ZipCrypto.txt")));
+}
+
+TEST_F(ZipTest, UnzipEncryptedWithNoPassword) {
+ // TODO(crbug.com/1296838) Also check the AES-encrypted files.
+ auto filter = base::BindRepeating([](const base::FilePath& path) {
+ return !base::StartsWith(path.MaybeAsASCII(), "Encrypted AES");
+ });
+
+ ASSERT_FALSE(
+ zip::Unzip(GetDataDirectory().AppendASCII("Different Encryptions.zip"),
+ test_dir_, {.filter = std::move(filter)}));
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("ClearText.txt"),
+ &contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+
+ // No rubbish file should be left behind.
+ EXPECT_FALSE(
+ base::PathExists(test_dir_.AppendASCII("Encrypted ZipCrypto.txt")));
+}
+
+TEST_F(ZipTest, UnzipWrongCrc) {
+ ASSERT_FALSE(
+ zip::Unzip(GetDataDirectory().AppendASCII("Wrong CRC.zip"), test_dir_));
+
+ // No rubbish file should be left behind.
+ EXPECT_FALSE(base::PathExists(test_dir_.AppendASCII("Corrupted.txt")));
+}
+
TEST_F(ZipTest, UnzipWithDelegates) {
- auto filter =
- base::BindRepeating([](const base::FilePath& path) { return true; });
auto dir_creator = base::BindRepeating(
[](const base::FilePath& extract_dir, const base::FilePath& entry_path) {
return base::CreateDirectory(extract_dir.Append(entry_path));
@@ -409,12 +507,10 @@ TEST_F(ZipTest, UnzipWithDelegates) {
extract_dir.Append(entry_path));
},
test_dir_);
- base::FilePath path;
- ASSERT_TRUE(GetTestDataDirectory(&path));
- base::File file(path.AppendASCII("test.zip"),
+
+ base::File file(GetDataDirectory().AppendASCII("test.zip"),
base::File::Flags::FLAG_OPEN | base::File::Flags::FLAG_READ);
- ASSERT_TRUE(zip::UnzipWithFilterAndWriters(file.GetPlatformFile(), writer,
- dir_creator, filter, false));
+ ASSERT_TRUE(zip::Unzip(file.GetPlatformFile(), writer, dir_creator));
base::FilePath dir = test_dir_;
base::FilePath dir_foo = dir.AppendASCII("foo");
base::FilePath dir_foo_bar = dir_foo.AppendASCII("bar");
@@ -427,10 +523,64 @@ TEST_F(ZipTest, UnzipWithDelegates) {
ASSERT_TRUE(base::PathExists(dir_foo_bar.AppendASCII("quux.txt")));
}
+// Tests that a ZIP archive containing SJIS-encoded file names can be correctly
+// extracted if the encoding is specified.
+TEST_F(ZipTest, UnzipSjis) {
+ ASSERT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII("SJIS Bug 846195.zip"),
+ test_dir_, {.encoding = "Shift_JIS"}));
+
+ const base::FilePath dir =
+ test_dir_.Append(base::FilePath::FromUTF8Unsafe("新しいフォルダ"));
+ EXPECT_TRUE(base::DirectoryExists(dir));
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(
+ dir.Append(base::FilePath::FromUTF8Unsafe("SJIS_835C_ソ.txt")),
+ &contents));
+ EXPECT_EQ(
+ "This file's name contains 0x5c (backslash) as the 2nd byte of Japanese "
+ "characater \"\x83\x5c\" when encoded in Shift JIS.",
+ contents);
+
+ ASSERT_TRUE(base::ReadFileToString(dir.Append(base::FilePath::FromUTF8Unsafe(
+ "新しいテキスト ドキュメント.txt")),
+ &contents));
+ EXPECT_EQ("This file name is coded in Shift JIS in the archive.", contents);
+}
+
+// Tests that a ZIP archive containing SJIS-encoded file names can be extracted
+// even if the encoding is not specified. In this case, file names are
+// interpreted as UTF-8, which leads to garbled names where invalid UTF-8
+// sequences are replaced with the character �. Nevertheless, the files are
+// safely extracted and readable.
+TEST_F(ZipTest, UnzipSjisAsUtf8) {
+ ASSERT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII("SJIS Bug 846195.zip"),
+ test_dir_));
+
+ EXPECT_FALSE(base::DirectoryExists(
+ test_dir_.Append(base::FilePath::FromUTF8Unsafe("新しいフォルダ"))));
+
+ const base::FilePath dir =
+ test_dir_.Append(base::FilePath::FromUTF8Unsafe("�V�����t�H���_"));
+ EXPECT_TRUE(base::DirectoryExists(dir));
+
+ std::string contents;
+ ASSERT_TRUE(base::ReadFileToString(
+ dir.Append(base::FilePath::FromUTF8Unsafe("SJIS_835C_�\\.txt")),
+ &contents));
+ EXPECT_EQ(
+ "This file's name contains 0x5c (backslash) as the 2nd byte of Japanese "
+ "characater \"\x83\x5c\" when encoded in Shift JIS.",
+ contents);
+
+ ASSERT_TRUE(base::ReadFileToString(dir.Append(base::FilePath::FromUTF8Unsafe(
+ "�V�����e�L�X�g �h�L�������g.txt")),
+ &contents));
+ EXPECT_EQ("This file name is coded in Shift JIS in the archive.", contents);
+}
+
TEST_F(ZipTest, Zip) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -441,9 +591,7 @@ TEST_F(ZipTest, Zip) {
}
TEST_F(ZipTest, ZipIgnoreHidden) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -454,9 +602,7 @@ TEST_F(ZipTest, ZipIgnoreHidden) {
}
TEST_F(ZipTest, ZipNonASCIIDir) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -492,11 +638,9 @@ TEST_F(ZipTest, ZipTimeStamp) {
TestTimeStamp("02 Jan 2038 23:59:58", VALID_YEAR);
}
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
TEST_F(ZipTest, ZipFiles) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -513,25 +657,20 @@ TEST_F(ZipTest, ZipFiles) {
EXPECT_TRUE(reader.Open(zip_name));
EXPECT_EQ(zip_file_list_.size(), static_cast<size_t>(reader.num_entries()));
for (size_t i = 0; i < zip_file_list_.size(); ++i) {
- EXPECT_TRUE(reader.HasMore());
- EXPECT_TRUE(reader.OpenCurrentEntryInZip());
- const zip::ZipReader::EntryInfo* entry_info = reader.current_entry_info();
- EXPECT_EQ(entry_info->file_path(), zip_file_list_[i]);
- reader.AdvanceToNextEntry();
+ const zip::ZipReader::Entry* const entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(entry->path, zip_file_list_[i]);
}
}
-#endif // defined(OS_POSIX)
+#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
TEST_F(ZipTest, UnzipFilesWithIncorrectSize) {
- base::FilePath test_data_folder;
- ASSERT_TRUE(GetTestDataDirectory(&test_data_folder));
-
// test_mismatch_size.zip contains files with names from 0.txt to 7.txt with
// sizes from 0 to 7 bytes respectively, but the metadata in the zip file says
// the uncompressed size is 3 bytes. The ZipReader and minizip code needs to
// be clever enough to get all the data out.
base::FilePath test_zip_file =
- test_data_folder.AppendASCII("test_mismatch_size.zip");
+ GetDataDirectory().AppendASCII("test_mismatch_size.zip");
base::ScopedTempDir scoped_temp_dir;
ASSERT_TRUE(scoped_temp_dir.CreateUniqueTempDir());
@@ -578,9 +717,7 @@ TEST_F(ZipTest, ZipWithFileAccessor) {
// Tests progress reporting while zipping files.
TEST_F(ZipTest, ZipProgress) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -617,9 +754,7 @@ TEST_F(ZipTest, ZipProgress) {
// Tests throttling of progress reporting while zipping files.
TEST_F(ZipTest, ZipProgressPeriod) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -658,9 +793,7 @@ TEST_F(ZipTest, ZipProgressPeriod) {
// Tests cancellation while zipping files.
TEST_F(ZipTest, ZipCancel) {
- base::FilePath src_dir;
- ASSERT_TRUE(GetTestDataDirectory(&src_dir));
- src_dir = src_dir.AppendASCII("test");
+ base::FilePath src_dir = GetDataDirectory().AppendASCII("test");
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -783,8 +916,28 @@ TEST_F(ZipTest, NestedZip) {
// Tests that there is no 2GB or 4GB limits. Tests that big files can be zipped
// (crbug.com/1207737) and that big ZIP files can be created
-// (crbug.com/1221447).
+// (crbug.com/1221447). Tests that the big ZIP can be opened, that its entries
+// are correctly enumerated (crbug.com/1298347), and that the big file can be
+// extracted.
+//
+// Because this test is dealing with big files, it tends to take a lot of disk
+// space and time (crbug.com/1299736). Therefore, it only gets run on a few bots
+// (ChromeOS and Windows).
+//
+// This test is too slow with TSAN.
+// OS Fuchsia does not seem to support large files.
+// Some 32-bit Android waterfall and CQ try bots are running out of space when
+// performing this test (android-asan, android-11-x86-rel,
+// android-marshmallow-x86-rel-non-cq).
+// Some Mac, Linux and Debug (dbg) bots tend to time out when performing this
+// test (crbug.com/1299736, crbug.com/1300448).
+#if defined(THREAD_SANITIZER) || BUILDFLAG(IS_FUCHSIA) || \
+ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+ BUILDFLAG(IS_CHROMEOS_LACROS) || !defined(NDEBUG)
TEST_F(ZipTest, DISABLED_BigFile) {
+#else
+TEST_F(ZipTest, BigFile) {
+#endif
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -795,15 +948,26 @@ TEST_F(ZipTest, DISABLED_BigFile) {
// purpose of this test, it doesn't really matter.
const int64_t src_size = 5'000'000'000;
+ const base::FilePath src_file = src_dir.AppendASCII("src.zip");
+ LOG(INFO) << "Creating big file " << src_file;
{
- base::File f(src_dir.AppendASCII("src.zip"),
- base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ base::File f(src_file, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
ASSERT_TRUE(f.SetLength(src_size));
}
// Zip the dummy ZIP file.
const base::FilePath dest_file = temp_dir.GetPath().AppendASCII("dest.zip");
- EXPECT_TRUE(zip::Zip({.src_dir = src_dir, .dest_file = dest_file}));
+ LOG(INFO) << "Zipping big file into " << dest_file;
+ zip::ProgressCallback progress_callback =
+ base::BindLambdaForTesting([&](const zip::Progress& progress) {
+ LOG(INFO) << "Zipping... " << std::setw(3)
+ << (100 * progress.bytes / src_size) << "%";
+ return true;
+ });
+ EXPECT_TRUE(zip::Zip({.src_dir = src_dir,
+ .dest_file = dest_file,
+ .progress_callback = std::move(progress_callback),
+ .progress_period = base::Seconds(1)}));
// Since the dummy source (inner) ZIP file should simply be stored in the
// destination (outer) ZIP file, the destination file should be bigger than
@@ -812,6 +976,25 @@ TEST_F(ZipTest, DISABLED_BigFile) {
ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size));
EXPECT_GT(dest_file_size, src_size + 100);
EXPECT_LT(dest_file_size, src_size + 300);
+
+ LOG(INFO) << "Reading big ZIP " << dest_file;
+ zip::ZipReader reader;
+ ASSERT_TRUE(reader.Open(dest_file));
+
+ const zip::ZipReader::Entry* const entry = reader.Next();
+ ASSERT_TRUE(entry);
+ EXPECT_EQ(FP("src.zip"), entry->path);
+ EXPECT_EQ(src_size, entry->original_size);
+ EXPECT_FALSE(entry->is_directory);
+ EXPECT_FALSE(entry->is_encrypted);
+
+ ProgressWriterDelegate writer(src_size);
+ EXPECT_TRUE(reader.ExtractCurrentEntry(&writer,
+ std::numeric_limits<uint64_t>::max()));
+ EXPECT_EQ(src_size, writer.received_bytes());
+
+ EXPECT_FALSE(reader.Next());
+ EXPECT_TRUE(reader.ok());
}
} // namespace
diff --git a/deps/v8/third_party/zlib/google/zip_writer.cc b/deps/v8/third_party/zlib/google/zip_writer.cc
index 201f1997b5..e3f677fe32 100644
--- a/deps/v8/third_party/zlib/google/zip_writer.cc
+++ b/deps/v8/third_party/zlib/google/zip_writer.cc
@@ -10,23 +10,12 @@
#include "base/logging.h"
#include "base/strings/strcat.h"
#include "base/strings/string_util.h"
+#include "third_party/zlib/google/redact.h"
#include "third_party/zlib/google/zip_internal.h"
namespace zip {
namespace internal {
-class Redact {
- public:
- explicit Redact(const base::FilePath& path) : path_(path) {}
-
- friend std::ostream& operator<<(std::ostream& out, const Redact&& r) {
- return LOG_IS_ON(INFO) ? out << "'" << r.path_ << "'" : out << "(redacted)";
- }
-
- private:
- const base::FilePath& path_;
-};
-
bool ZipWriter::ShouldContinue() {
if (!progress_callback_)
return true;
@@ -134,7 +123,7 @@ bool ZipWriter::AddDirectoryEntry(const base::FilePath& path) {
return AddDirectoryContents(path);
}
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// static
std::unique_ptr<ZipWriter> ZipWriter::CreateWithFd(
int zip_file_fd,
diff --git a/deps/v8/third_party/zlib/google/zip_writer.h b/deps/v8/third_party/zlib/google/zip_writer.h
index fcc9627500..aa3c965d91 100644
--- a/deps/v8/third_party/zlib/google/zip_writer.h
+++ b/deps/v8/third_party/zlib/google/zip_writer.h
@@ -36,7 +36,7 @@ class ZipWriter {
// Creates a writer that will write a ZIP file to |zip_file_fd| or |zip_file|
// and which entries are relative to |file_accessor|'s source directory.
// All file reads are performed using |file_accessor|.
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
static std::unique_ptr<ZipWriter> CreateWithFd(int zip_file_fd,
FileAccessor* file_accessor);
#endif
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 2f8197dd36..e168a05d4e 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -11,7 +11,7 @@ group("gn_all") {
data_deps = [
":v8_check_static_initializers",
"debug_helper:v8_debug_helper",
- "gcmole:v8_run_gcmole",
+ "gcmole:v8_gcmole_files",
"jsfunfuzz:v8_jsfunfuzz",
]
diff --git a/deps/v8/tools/PRESUBMIT.py b/deps/v8/tools/PRESUBMIT.py
index c883782ecf..b9fa8238e6 100644
--- a/deps/v8/tools/PRESUBMIT.py
+++ b/deps/v8/tools/PRESUBMIT.py
@@ -2,6 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
+
def CheckChangeOnCommit(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, 'unittests', files_to_check=[r'.+_test\.py$'])
diff --git a/deps/v8/tools/callstats-from-telemetry.sh b/deps/v8/tools/callstats-from-telemetry.sh
index cea471cde8..3572b7e6ba 100755
--- a/deps/v8/tools/callstats-from-telemetry.sh
+++ b/deps/v8/tools/callstats-from-telemetry.sh
@@ -46,7 +46,7 @@ fi
OUT=out.json
if [[ -e $OUT ]]; then
echo "# Creating backup for $OUT"
- cp --backup=numbered $OUT $OUT.bak
+ cp $OUT $OUT.bak
fi
echo "# Writing to $OUT"
@@ -54,11 +54,14 @@ echo "# Writing to $OUT"
function convert {
NAME=$1
JSON=$2
- du -sh $JSON;
- echo "Converting NAME=$NAME";
- echo "," >> $OUT;
- echo "\"$NAME\": " >> $OUT;
- jq '[.traceEvents[].args | select(."runtime-call-stats" != null) | ."runtime-call-stats"]' $JSON >> $OUT;
+ # Check if any json file exists:
+ if ls $JSON 1> /dev/null 2>&1; then
+ du -sh $JSON;
+ echo "Converting NAME=$NAME";
+ echo "," >> $OUT;
+ echo "\"$NAME\": " >> $OUT;
+ jq '[.traceEvents[].args | select(."runtime-call-stats" != null) | ."runtime-call-stats"]' $JSON >> $OUT;
+ fi
}
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index d00f48d8b5..2a797680bc 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1543,23 +1543,27 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
// Instead of the default multi-page JSON:
// {"Version 1": { "Page 1": ..., ...}, "Version 2": {...}, ...}
// In this case insert a single "Default" version as top-level entry.
- let firstProperty = (object) => {
+ const firstProperty = (object) => {
for (let key in object) return object[key];
};
- let maybePage = firstProperty(json);
- let maybeMetrics = firstProperty(maybePage);
- let tempName = name ? name : new Date().toISOString();
- tempName = window.prompt('Enter a name for the loaded file:', tempName);
- if ('count' in maybeMetrics && 'duration' in maybeMetrics) {
+ const maybeMetrics = firstProperty(json);
+ const maybeMetric = firstProperty(maybeMetrics);
+ const tempName = name ? name : new Date().toISOString();
+ const getFileName =
+ () => window.prompt('Enter a name for the loaded file:', tempName);
+ if ('count' in maybeMetric && 'duration' in maybeMetric) {
return {
- [tempName]: json
+ [getFileName()]: json
}
}
// Legacy fallback where the metrics are encoded as arrays:
// { PAGE: [[metric_name, ...], [...], ]}
- if (Array.isArray(maybeMetrics)) {
+ // Also, make sure we don't have the versioned array-style:
+ // { VERSION: { PAGE: [[metric_name, ...], [...], ]}, ...}
+ const innerArray = firstProperty(maybeMetrics);
+ if (Array.isArray(maybeMetric) && !Array.isArray(innerArray)) {
return {
- [tempName]: json
+ [getFileName()]: json
}
}
return json
@@ -2705,4 +2709,4 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
</div>
</body>
-</html> \ No newline at end of file
+</html>
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index f756757a9a..1b76d0c166 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -74,7 +74,7 @@ def start_replay_server(args, sites, discard_output=True):
with open(os.devnull, 'w') as null:
server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
else:
- server = subprocess.Popen(cmd_args)
+ server = subprocess.Popen(cmd_args)
print("RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid))
print("=" * 80)
return {'process': server, 'injection': injection}
@@ -320,7 +320,7 @@ def do_run_replay_server(args):
try:
replay_server['process'].wait()
finally:
- stop_replay_server(replay_server)
+ stop_replay_server(replay_server)
# Calculate statistics.
@@ -493,12 +493,18 @@ def print_stats(S, args):
print_entry("Total", S["Total"])
+def extract_domain(filename):
+ # Extract domain name: domain#123.txt or domain_123.txt
+ match = re.match(r'^(.*?)[^a-zA-Z]?[0-9]+?.txt', filename)
+ domain = match.group(1)
+ return domain
+
+
def do_stats(args):
domains = {}
for path in args.logfiles:
filename = os.path.basename(path)
- m = re.match(r'^([^#]+)(#.*)?$', filename)
- domain = m.group(1)
+ domain = extract_domain(filename)
if domain not in domains: domains[domain] = {}
read_stats(path, domains[domain], args)
if args.aggregate:
@@ -558,8 +564,7 @@ def _read_logs(args):
if version not in versions: versions[version] = {}
for filename in files:
if filename.endswith(".txt"):
- m = re.match(r'^([^#]+)(#.*)?\.txt$', filename)
- domain = m.group(1)
+ domain = extract_domain(filename)
if domain not in versions[version]: versions[version][domain] = {}
read_stats(os.path.join(root, filename),
versions[version][domain], args)
diff --git a/deps/v8/tools/clusterfuzz/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/PRESUBMIT.py
deleted file mode 100644
index a98bb24413..0000000000
--- a/deps/v8/tools/clusterfuzz/PRESUBMIT.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2018 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-def CheckChangeOnCommit(input_api, output_api):
- tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, '.', files_to_check=['v8_foozzie_test.py$'])
- return input_api.RunTests(tests)
diff --git a/deps/v8/tools/clusterfuzz/BUILD.gn b/deps/v8/tools/clusterfuzz/foozzie/BUILD.gn
index 54e4fded96..90113e5aa4 100644
--- a/deps/v8/tools/clusterfuzz/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/foozzie/BUILD.gn
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("../../gni/v8.gni")
+import("../../../gni/v8.gni")
if (v8_correctness_fuzzer) {
copy("v8_correctness_fuzzer_resources") {
diff --git a/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py
new file mode 100644
index 0000000000..59bf2ee557
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
+
+def _RunTests(input_api, output_api):
+ return input_api.RunTests(input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', files_to_check=['v8_foozzie_test.py$']))
+
+def _CommonChecks(input_api, output_api):
+ """Checks common to both upload and commit."""
+ checks = [
+ _RunTests,
+ ]
+
+ return sum([check(input_api, output_api) for check in checks], [])
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/clusterfuzz/testdata/baseline/d8.py b/deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/d8.py
index cd729b9cf7..cd729b9cf7 100644
--- a/deps/v8/tools/clusterfuzz/testdata/baseline/d8.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/d8.py
diff --git a/deps/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json b/deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/v8_build_config.json
index ea27b1ccd7..ea27b1ccd7 100644
--- a/deps/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/baseline/v8_build_config.json
diff --git a/deps/v8/tools/clusterfuzz/testdata/build1/d8.py b/deps/v8/tools/clusterfuzz/foozzie/testdata/build1/d8.py
index 824b222485..824b222485 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build1/d8.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build1/d8.py
diff --git a/deps/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json b/deps/v8/tools/clusterfuzz/foozzie/testdata/build1/v8_build_config.json
index ea27b1ccd7..ea27b1ccd7 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build1/v8_build_config.json
diff --git a/deps/v8/tools/clusterfuzz/testdata/build2/d8.py b/deps/v8/tools/clusterfuzz/foozzie/testdata/build2/d8.py
index 0b19a3fc90..0b19a3fc90 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build2/d8.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build2/d8.py
diff --git a/deps/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json b/deps/v8/tools/clusterfuzz/foozzie/testdata/build2/v8_build_config.json
index ea27b1ccd7..ea27b1ccd7 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build2/v8_build_config.json
diff --git a/deps/v8/tools/clusterfuzz/testdata/build3/d8.py b/deps/v8/tools/clusterfuzz/foozzie/testdata/build3/d8.py
index a48a591d1e..a48a591d1e 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build3/d8.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build3/d8.py
diff --git a/deps/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json b/deps/v8/tools/clusterfuzz/foozzie/testdata/build3/v8_build_config.json
index 2a9917a433..2a9917a433 100644
--- a/deps/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/build3/v8_build_config.json
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt
index 91c06467ea..3d8715d583 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up --flag1 --flag2=0
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up --flag1 --flag2=0
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output_arch.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt
index 68af8a5dcf..5d32d9bab8 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output_arch.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --bad-flag
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --bad-flag
#
# Difference:
+ bad behavior
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output_second.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt
index 75da7f5a99..67ce872e4b 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output_second.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with ia32,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
# Flags of ia32,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --very-bad-flag
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --very-bad-flag
#
# Difference:
+ very bad behavior
diff --git a/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js b/deps/v8/tools/clusterfuzz/foozzie/testdata/fuzz-123.js
index fbde5736d4..fbde5736d4 100644
--- a/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/fuzz-123.js
diff --git a/deps/v8/tools/clusterfuzz/testdata/smoke_test_output.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt
index c2c1378ec8..618c210420 100644
--- a/deps/v8/tools/clusterfuzz/testdata/smoke_test_output.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-opt --no-sparkplug --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/toolchain/BUILD.gn b/deps/v8/tools/clusterfuzz/foozzie/toolchain/BUILD.gn
index ddcb4e1ad2..ddcb4e1ad2 100644
--- a/deps/v8/tools/clusterfuzz/toolchain/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/foozzie/toolchain/BUILD.gn
diff --git a/deps/v8/tools/clusterfuzz/v8_commands.py b/deps/v8/tools/clusterfuzz/foozzie/v8_commands.py
index f03161c2c4..c6c85f1ac2 100644
--- a/deps/v8/tools/clusterfuzz/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_commands.py
@@ -16,16 +16,16 @@ PYTHON3 = sys.version_info >= (3, 0)
# List of default flags passed to each d8 run.
DEFAULT_FLAGS = [
- '--correctness-fuzzer-suppressions',
- '--expose-gc',
- '--fuzzing',
- '--allow-natives-for-differential-fuzzing',
- '--invoke-weak-callbacks',
- '--omit-quit',
- '--es-staging',
- '--wasm-staging',
- '--no-wasm-async-compilation',
- '--suppress-asm-messages',
+ '--correctness-fuzzer-suppressions',
+ '--expose-gc',
+ '--fuzzing',
+ '--allow-natives-for-differential-fuzzing',
+ '--invoke-weak-callbacks',
+ '--omit-quit',
+ '--harmony',
+ '--wasm-staging',
+ '--no-wasm-async-compilation',
+ '--suppress-asm-messages',
]
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
index 656bc89ed3..656bc89ed3 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_harness_adjust.js
index b81f8dd952..b81f8dd952 100644
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_harness_adjust.js
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py
index a8ba74364b..a8ba74364b 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_config.py
index 99439a9d66..99439a9d66 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_config.py
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json
index 8c6baa2496..8c6baa2496 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json
index 7aefe69267..c9c107e0e8 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json
@@ -27,8 +27,9 @@
[0.1, "--no-enable-popcnt"],
[0.25, "--no-lazy-feedback-allocation"],
[0.1, "--no-lazy-feedback-allocation --interrupt-budget=100"],
- [0.05, "--budget-for-feedback-vector-allocation=0"],
+ [0.05, "--interrupt-budget-for-feedback-allocation=0"],
[0.1, "--no-wasm-generic-wrapper"],
[0.1, "--turbo-force-mid-tier-regalloc"],
- [0.0001, "--simulate-errors"]
+ [0.0001, "--simulate-errors"],
+ [0.25, "--compact-maps"]
]
diff --git a/deps/v8/tools/clusterfuzz/v8_mock.js b/deps/v8/tools/clusterfuzz/foozzie/v8_mock.js
index 41f1901d5d..41f1901d5d 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_mock.js
diff --git a/deps/v8/tools/clusterfuzz/v8_mock_archs.js b/deps/v8/tools/clusterfuzz/foozzie/v8_mock_archs.js
index 3482e8c4c6..3482e8c4c6 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock_archs.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_mock_archs.js
diff --git a/deps/v8/tools/clusterfuzz/v8_mock_webassembly.js b/deps/v8/tools/clusterfuzz/foozzie/v8_mock_webassembly.js
index 594e6e7004..594e6e7004 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock_webassembly.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_mock_webassembly.js
diff --git a/deps/v8/tools/clusterfuzz/v8_smoke_tests.js b/deps/v8/tools/clusterfuzz/foozzie/v8_smoke_tests.js
index 2c5fab338d..2c5fab338d 100644
--- a/deps/v8/tools/clusterfuzz/v8_smoke_tests.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_smoke_tests.js
diff --git a/deps/v8/tools/clusterfuzz/v8_suppressions.js b/deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.js
index d73ce04d45..d73ce04d45 100644
--- a/deps/v8/tools/clusterfuzz/v8_suppressions.js
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.js
diff --git a/deps/v8/tools/clusterfuzz/v8_suppressions.py b/deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.py
index 18f9de7ac1..18f9de7ac1 100644
--- a/deps/v8/tools/clusterfuzz/v8_suppressions.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_suppressions.py
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
index 4a571d5dd0..6aa3094179 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
@@ -97,14 +97,14 @@ const DISALLOWED_FLAGS = [
// stabilized yet and would cause too much noise when enabled.
/^--experimental-.*/,
- // Disallowed due to noise. We explicitly add --es-staging to job
+ // Disallowed due to noise. We explicitly add --harmony to job
// definitions, and all of these features are staged before launch.
/^--harmony-.*/,
// Disallowed because they are passed explicitly on the command line.
'--allow-natives-syntax',
'--debug-code',
- '--es-staging',
+ '--harmony',
'--wasm-staging',
'--expose-gc',
'--expose_gc',
diff --git a/deps/v8/tools/clusterfuzz/trials/BUILD.gn b/deps/v8/tools/clusterfuzz/trials/BUILD.gn
new file mode 100644
index 0000000000..d3c8bf6aab
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/trials/BUILD.gn
@@ -0,0 +1,8 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+copy("v8_clusterfuzz_resources") {
+ sources = [ "clusterfuzz_trials_config.json" ]
+ outputs = [ "$root_out_dir/{{source_file_part}}" ]
+}
diff --git a/deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py
new file mode 100644
index 0000000000..16a64abccc
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py
@@ -0,0 +1,57 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
+
+def _CheckTrialsConfig(input_api, output_api):
+ def FilterFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ files_to_check=(r'.+clusterfuzz_trials_config\.json',))
+
+ results = []
+ for f in input_api.AffectedFiles(
+ file_filter=FilterFile, include_deletes=False):
+ with open(f.AbsoluteLocalPath()) as j:
+ try:
+ trials = json.load(j)
+ for trial in trials:
+ if not all(
+ k in trial for k in ('app_args', 'app_name', 'probability')):
+ results.append('trial {} is not configured correctly'.format(trial))
+ if trial['app_name'] != 'd8':
+ results.append('trial {} has an incorrect app_name'.format(trial))
+ if not isinstance(trial['probability'], float):
+ results.append('trial {} probability is not a float'.format(trial))
+ if not (0 <= trial['probability'] <= 1):
+ results.append(
+ 'trial {} has invalid probability value'.format(trial))
+ if not isinstance(trial['app_args'], str) or not trial['app_args']:
+ results.append(
+ 'trial {} should have a non-empty string for app_args'.format(
+ trial))
+ except Exception as e:
+ results.append(
+ 'JSON validation failed for %s. Error:\n%s' % (f.LocalPath(), e))
+
+ return [output_api.PresubmitError(r) for r in results]
+
+def _CommonChecks(input_api, output_api):
+ """Checks common to both upload and commit."""
+ checks = [
+ _CheckTrialsConfig,
+ ]
+
+ return sum([check(input_api, output_api) for check in checks], [])
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json b/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json
new file mode 100644
index 0000000000..682972d5b2
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json
@@ -0,0 +1,38 @@
+[
+ {"app_args": "--assert-types", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--interrupt-budget-for-feedback-vector-allocation=0", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--compact-maps", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--force-slow-path", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--future", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--interrupt-budget=1000", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--jitless", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--random-gc-interval=2000", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--noanalyze-environment-liveness", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-avx", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-bmi1", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-bmi2", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-fma3", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-lzcnt", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-popcnt", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-sahf", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-sse3", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-sse4_1", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-sse4_2", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-enable-ssse3", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-lazy-feedback-allocation", "app_name": "d8", "probability": 0.35},
+ {"app_args": "--no-regexp-tier-up", "app_name": "d8", "probability": 0.2},
+ {"app_args": "--no-untrusted-code-mitigations", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-use-ic", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--no-wasm-generic-wrapper", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--regexp-interpret-all", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--simulate-errors", "app_name": "d8", "probability": 0.001},
+ {"app_args": "--stress-compaction-random", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--stress-concurrent-inlining", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--stress-concurrent-inlining-attach-code", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--stress-flush-code", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--stress-marking=100", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--stress-scavenge=100", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--turbo-instruction-scheduling", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--turbo-stress-instruction-scheduling", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--wasm-code-gc --stress-wasm-code-gc", "app_name": "d8", "probability": 0.1}
+]
diff --git a/deps/v8/tools/codemap.mjs b/deps/v8/tools/codemap.mjs
index 55327b6982..8d1e00c9e9 100644
--- a/deps/v8/tools/codemap.mjs
+++ b/deps/v8/tools/codemap.mjs
@@ -28,6 +28,15 @@
import { SplayTree } from "./splaytree.mjs";
/**
+* The number of alignment bits in a page address.
+*/
+const kPageAlignment = 12;
+/**
+* Page size in bytes.
+*/
+const kPageSize = 1 << kPageAlignment;
+
+/**
* Constructs a mapper that maps addresses into code entries.
*
* @constructor
@@ -56,19 +65,7 @@ export class CodeMap {
/**
* Map of memory pages occupied with static code.
*/
- pages_ = [];
-
-
- /**
- * The number of alignment bits in a page address.
- */
- static PAGE_ALIGNMENT = 12;
-
-
- /**
- * Page size in bytes.
- */
- static PAGE_SIZE = 1 << CodeMap.PAGE_ALIGNMENT;
+ pages_ = new Set();
/**
@@ -130,9 +127,8 @@ export class CodeMap {
* @private
*/
markPages_(start, end) {
- for (let addr = start; addr <= end;
- addr += CodeMap.PAGE_SIZE) {
- this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
+ for (let addr = start; addr <= end; addr += kPageSize) {
+ this.pages_.add((addr / kPageSize) | 0);
}
}
@@ -144,7 +140,7 @@ export class CodeMap {
let addr = end - 1;
while (addr >= start) {
const node = tree.findGreatestLessThan(addr);
- if (!node) break;
+ if (node === null) break;
const start2 = node.key, end2 = start2 + node.value.size;
if (start2 < end && start < end2) to_delete.push(start2);
addr = start2 - 1;
@@ -164,7 +160,7 @@ export class CodeMap {
*/
findInTree_(tree, addr) {
const node = tree.findGreatestLessThan(addr);
- return node && this.isAddressBelongsTo_(addr, node) ? node : null;
+ return node !== null && this.isAddressBelongsTo_(addr, node) ? node : null;
}
/**
@@ -175,22 +171,23 @@ export class CodeMap {
* @param {number} addr Address.
*/
findAddress(addr) {
- const pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
- if (pageAddr in this.pages_) {
+ const pageAddr = (addr / kPageSize) | 0;
+ if (this.pages_.has(pageAddr)) {
// Static code entries can contain "holes" of unnamed code.
// In this case, the whole library is assigned to this address.
let result = this.findInTree_(this.statics_, addr);
- if (!result) {
+ if (result === null) {
result = this.findInTree_(this.libraries_, addr);
- if (!result) return null;
+ if (result === null) return null;
}
return {entry: result.value, offset: addr - result.key};
}
- const min = this.dynamics_.findMin();
const max = this.dynamics_.findMax();
- if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
+ if (max === null) return null;
+ const min = this.dynamics_.findMin();
+ if (addr >= min.key && addr < (max.key + max.value.size)) {
const dynaEntry = this.findInTree_(this.dynamics_, addr);
- if (dynaEntry == null) return null;
+ if (dynaEntry === null) return null;
// Dedupe entry name.
const entry = dynaEntry.value;
if (!entry.nameUpdated_) {
@@ -210,7 +207,7 @@ export class CodeMap {
*/
findEntry(addr) {
const result = this.findAddress(addr);
- return result ? result.entry : null;
+ return result !== null ? result.entry : null;
}
/**
@@ -220,7 +217,7 @@ export class CodeMap {
*/
findDynamicEntryByStartAddress(addr) {
const node = this.dynamics_.find(addr);
- return node ? node.value : null;
+ return node !== null ? node.value : null;
}
/**
diff --git a/deps/v8/tools/compare_torque_output.py b/deps/v8/tools/compare_torque_output.py
index 50e93a7538..4ef01217dc 100644
--- a/deps/v8/tools/compare_torque_output.py
+++ b/deps/v8/tools/compare_torque_output.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/csvparser.mjs b/deps/v8/tools/csvparser.mjs
index c43ee4c4fc..273bf89776 100644
--- a/deps/v8/tools/csvparser.mjs
+++ b/deps/v8/tools/csvparser.mjs
@@ -38,13 +38,11 @@ export class CsvParser {
escapeField(string) {
let nextPos = string.indexOf("\\");
if (nextPos === -1) return string;
-
let result = string.substring(0, nextPos);
// Escape sequences of the form \x00 and \u0000;
- let endPos = string.length;
let pos = 0;
while (nextPos !== -1) {
- let escapeIdentifier = string.charAt(nextPos + 1);
+ const escapeIdentifier = string.charAt(nextPos + 1);
pos = nextPos + 2;
if (escapeIdentifier === 'n') {
result += '\n';
@@ -61,7 +59,7 @@ export class CsvParser {
nextPos = pos + 4;
}
// Convert the selected escape sequence to a single character.
- let escapeChars = string.substring(pos, nextPos);
+ const escapeChars = string.substring(pos, nextPos);
if (escapeChars === '2C') {
result += ',';
} else {
@@ -75,6 +73,7 @@ export class CsvParser {
// If there are no more escape sequences consume the rest of the string.
if (nextPos === -1) {
result += string.substr(pos);
+ break;
} else if (pos !== nextPos) {
result += string.substring(pos, nextPos);
}
diff --git a/deps/v8/tools/debug_helper/debug-macro-shims.h b/deps/v8/tools/debug_helper/debug-macro-shims.h
index 948482810b..02deb3d766 100644
--- a/deps/v8/tools/debug_helper/debug-macro-shims.h
+++ b/deps/v8/tools/debug_helper/debug-macro-shims.h
@@ -8,6 +8,7 @@
#ifndef V8_TORQUE_DEBUG_MACRO_SHIMS_H_
#define V8_TORQUE_DEBUG_MACRO_SHIMS_H_
+#include "src/numbers/integer-literal.h"
#include "src/objects/smi.h"
#include "tools/debug_helper/debug-helper-internal.h"
@@ -66,6 +67,14 @@ inline Value<intptr_t> IntPtrMul(d::MemoryAccessor accessor, intptr_t a,
intptr_t b) {
return {d::MemoryAccessResult::kOk, a * b};
}
+inline Value<bool> IntPtrLessThan(d::MemoryAccessor accessor, intptr_t a,
+ intptr_t b) {
+ return {d::MemoryAccessResult::kOk, a < b};
+}
+inline Value<bool> IntPtrLessThanOrEqual(d::MemoryAccessor accessor, intptr_t a,
+ intptr_t b) {
+ return {d::MemoryAccessResult::kOk, a <= b};
+}
inline Value<intptr_t> Signed(d::MemoryAccessor accessor, uintptr_t u) {
return {d::MemoryAccessResult::kOk, static_cast<intptr_t>(u)};
}
@@ -73,6 +82,9 @@ inline Value<int32_t> SmiUntag(d::MemoryAccessor accessor, uintptr_t s_t) {
Smi s(s_t);
return {d::MemoryAccessResult::kOk, s.value()};
}
+inline Value<uintptr_t> SmiFromInt32(d::MemoryAccessor accessor, int32_t i) {
+ return {d::MemoryAccessResult::kOk, Smi::FromInt(i).ptr()};
+}
inline Value<bool> UintPtrLessThan(d::MemoryAccessor accessor, uintptr_t a,
uintptr_t b) {
return {d::MemoryAccessResult::kOk, a < b};
@@ -93,6 +105,19 @@ inline Value<bool> Word32NotEqual(d::MemoryAccessor accessor, uint32_t a,
uint32_t b) {
return {d::MemoryAccessResult::kOk, a != b};
}
+// This is used in a nested call where we cannot pass Value<int32_t>.
+inline int31_t ConstexprIntegerLiteralToInt31(d::MemoryAccessor accessor,
+ const IntegerLiteral& i) {
+ return i.To<int32_t>();
+}
+inline int32_t ConstexprIntegerLiteralToInt32(d::MemoryAccessor accessor,
+ const IntegerLiteral& i) {
+ return i.To<int32_t>();
+}
+inline intptr_t ConstexprIntegerLiteralToIntptr(d::MemoryAccessor accessor,
+ const IntegerLiteral& i) {
+ return i.To<intptr_t>();
+}
} // namespace CodeStubAssembler
} // namespace TorqueDebugMacroShims
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
index 6eb7f3743c..0a7907b020 100644
--- a/deps/v8/tools/debug_helper/gen-heap-constants.py
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -31,7 +31,7 @@ def iterate_objects(target_space, camel_space_name):
if space == target_space:
result.append((offset, name))
for (space, offset), name in v8heapconst.KNOWN_OBJECTS.items():
- if space == target_space:
+ if space == target_space and (space, offset) not in v8heapconst.KNOWN_MAPS:
result.append((offset, name))
out = out + '\nstd::string FindKnownObjectIn' + camel_space_name \
+ '(uintptr_t offset) {\n switch (offset) {\n'
@@ -40,8 +40,9 @@ def iterate_objects(target_space, camel_space_name):
out = out + ' default: return "";\n }\n}\n'
iterate_objects('map_space', 'MapSpace')
-iterate_objects('read_only_space', 'ReadOnlySpace')
iterate_objects('old_space', 'OldSpace')
+iterate_objects('read_only_space', 'ReadOnlySpace')
+
def iterate_maps(target_space, camel_space_name):
global out
@@ -54,6 +55,7 @@ def iterate_maps(target_space, camel_space_name):
out = out + ' default: return -1;\n }\n}\n'
iterate_maps('map_space', 'MapSpace')
+iterate_maps('old_space', 'OldSpace')
iterate_maps('read_only_space', 'ReadOnlySpace')
out = out + '\nvoid FillInUnknownHeapAddresses(' + \
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index 10ef48cbba..43a67941ac 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -11,7 +11,7 @@
#include "src/execution/frames.h"
#include "src/execution/isolate-utils.h"
#include "src/objects/string-inl.h"
-#include "src/security/external-pointer.h"
+#include "src/sandbox/external-pointer.h"
#include "src/strings/unicode-inl.h"
#include "torque-generated/class-debug-readers.h"
#include "torque-generated/debug-macros.h"
@@ -350,7 +350,7 @@ class ReadStringVisitor : public TqObjectVisitor {
ExternalPointer_t resource_data =
GetOrFinish(object->GetResourceDataValue(accessor_));
#ifdef V8_COMPRESS_POINTERS
- Isolate* isolate = GetIsolateForHeapSandbox(
+ Isolate* isolate = GetIsolateForSandbox(
HeapObject::unchecked_cast(Object(heap_addresses_.any_heap_pointer)));
uintptr_t data_address = static_cast<uintptr_t>(DecodeExternalPointer(
isolate, resource_data, kExternalStringResourceDataTag));
diff --git a/deps/v8/tools/debug_helper/heap-constants.cc b/deps/v8/tools/debug_helper/heap-constants.cc
index f62dd9b697..412308eb7f 100644
--- a/deps/v8/tools/debug_helper/heap-constants.cc
+++ b/deps/v8/tools/debug_helper/heap-constants.cc
@@ -61,6 +61,10 @@ KnownInstanceType FindKnownMapInstanceTypes(
return KnownInstanceType(
FindKnownMapInstanceTypeInMapSpace(offset_in_page));
}
+ if (containing_page == heap_addresses.old_space_first_page) {
+ return KnownInstanceType(
+ FindKnownMapInstanceTypeInOldSpace(offset_in_page));
+ }
if (containing_page == heap_addresses.read_only_space_first_page) {
return KnownInstanceType(
FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page));
@@ -74,6 +78,12 @@ KnownInstanceType FindKnownMapInstanceTypes(
result.types.push_back(static_cast<i::InstanceType>(sub_result));
}
}
+ if (heap_addresses.old_space_first_page == 0) {
+ int sub_result = FindKnownMapInstanceTypeInOldSpace(offset_in_page);
+ if (sub_result >= 0) {
+ result.types.push_back(static_cast<i::InstanceType>(sub_result));
+ }
+ }
if (heap_addresses.read_only_space_first_page == 0) {
int sub_result = FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page);
if (sub_result >= 0) {
diff --git a/deps/v8/tools/debug_helper/heap-constants.h b/deps/v8/tools/debug_helper/heap-constants.h
index 89620479ec..9486a18d05 100644
--- a/deps/v8/tools/debug_helper/heap-constants.h
+++ b/deps/v8/tools/debug_helper/heap-constants.h
@@ -34,6 +34,7 @@ void FillInUnknownHeapAddresses(d::HeapAddresses* heap_addresses,
// Returns the instance type for the known Map, given its offset within the
// first page of the space, or empty string on failure.
int FindKnownMapInstanceTypeInMapSpace(uintptr_t offset);
+int FindKnownMapInstanceTypeInOldSpace(uintptr_t offset);
int FindKnownMapInstanceTypeInReadOnlySpace(uintptr_t offset);
// ===== End of generated functions. ===========================================
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 613065d5b1..d16b4dd737 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -21,9 +21,6 @@ Flags are passed unchanged to the test runner. They must start with -- and must
not contain spaces.
"""
# See HELP below for additional documentation.
-# Note on Python3 compatibility: gm.py itself is Python3 compatible, but
-# run-tests.py, which will be executed by the same binary, is not; hence
-# the hashbang line at the top of this file explicitly requires Python2.
from __future__ import print_function
import errno
@@ -43,7 +40,8 @@ BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64"]
+ "riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64",
+ "fuchsia_x64", "fuchsia_arm64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
@@ -291,7 +289,7 @@ class Config(object):
cpu = "x86"
if self.arch == "android_arm":
cpu = "arm"
- elif self.arch == "android_arm64":
+ elif self.arch == "android_arm64" or self.arch == "fuchsia_arm64":
cpu = "arm64"
elif self.arch == "arm64" and _GetMachine() in ("aarch64", "arm64"):
# arm64 build host:
@@ -310,7 +308,7 @@ class Config(object):
def GetV8TargetCpu(self):
if self.arch == "android_arm":
v8_cpu = "arm"
- elif self.arch == "android_arm64":
+ elif self.arch == "android_arm64" or self.arch == "fuchsia_arm64":
v8_cpu = "arm64"
elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"riscv64", "s390", "s390x", "loong64"):
@@ -322,6 +320,8 @@ class Config(object):
def GetTargetOS(self):
if self.arch in ("android_arm", "android_arm64"):
return ["target_os = \"android\""]
+ elif self.arch in ("fuchsia_x64", "fuchsia_arm64"):
+ return ["target_os = \"fuchsia\""]
return []
def GetSpecialCompiler(self):
diff --git a/deps/v8/tools/dumpcpp.mjs b/deps/v8/tools/dumpcpp.mjs
index 9459deda15..e92ee9ab5a 100644
--- a/deps/v8/tools/dumpcpp.mjs
+++ b/deps/v8/tools/dumpcpp.mjs
@@ -14,6 +14,7 @@ export class CppProcessor extends LogReader {
constructor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
super({}, timedRange, pairwiseTimedRange);
this.dispatchTable_ = {
+ __proto__: null,
'shared-library': {
parsers: [parseString, parseInt, parseInt, parseInt],
processor: this.processSharedLibrary }
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 558766487d..3d0841913f 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -4,11 +4,13 @@
import("../../gni/v8.gni")
-group("v8_run_gcmole") {
+group("v8_gcmole_files") {
testonly = true
-
+ data_deps = [
+ "../../:v8_dump_build_config",
+ "../../:v8_generated_cc_files",
+ ]
data = [
- "GCMOLE.gn",
"gcmole.py",
"gcmole-test.cc",
"gcmole-tools/",
@@ -36,8 +38,6 @@ group("v8_run_gcmole") {
"$target_gen_dir/../../torque-generated/",
]
- deps = [ "../../:run_torque" ]
-
if (v8_gcmole) {
# This assumes gcmole tools have been fetched by a hook
# into v8/tools/gcmole/gcmole_tools.
diff --git a/deps/v8/tools/gcmole/GCMOLE.gn b/deps/v8/tools/gcmole/GCMOLE.gn
deleted file mode 100644
index 62da0a084b..0000000000
--- a/deps/v8/tools/gcmole/GCMOLE.gn
+++ /dev/null
@@ -1,6 +0,0 @@
-action("gcmole") {
- sources = [
- ### gcmole(all) ###
- "tools/gcmole/gcmole-test.cc",
- ]
-}
diff --git a/deps/v8/tools/gcmole/Makefile b/deps/v8/tools/gcmole/Makefile
index e1bde684a6..fe295341da 100644
--- a/deps/v8/tools/gcmole/Makefile
+++ b/deps/v8/tools/gcmole/Makefile
@@ -32,11 +32,18 @@ LLVM_BUILD_INCLUDE:=$(BUILD_ROOT)/include
CLANG_SRC_INCLUDE:=$(CLANG_SRC_ROOT)/include
CLANG_BUILD_INCLUDE:=$(BUILD_ROOT)/tools/clang/include
+CXXFLAGS = -O3 -g3
+all: libgcmole.so
+Release: libgcmole.so
+
+Debug: CXXFLAGS = -O1 -DDEBUG -g
+Debug: libgcmole.so
+
libgcmole.so: gcmole.cc
$(CXX) -I$(LLVM_BUILD_INCLUDE) -I$(LLVM_SRC_INCLUDE) \
- -I$(CLANG_BUILD_INCLUDE) -I$(CLANG_SRC_INCLUDE) -I. -D_DEBUG \
+ -I$(CLANG_BUILD_INCLUDE) -I$(CLANG_SRC_INCLUDE) -I. ${CXXFLAGS} \
-D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS \
- -D__STDC_LIMIT_MACROS -O3 -fomit-frame-pointer -fno-exceptions \
+ -D__STDC_LIMIT_MACROS -fomit-frame-pointer -fno-exceptions \
-fno-rtti -fPIC -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \
-pedantic -Wno-long-long -Wall -W -Wno-unused-parameter \
-Wwrite-strings -static-libstdc++ -std=c++0x -shared -o libgcmole.so \
diff --git a/deps/v8/tools/gcmole/README b/deps/v8/tools/gcmole/README
index 1d2acd3b1a..15828fa435 100644
--- a/deps/v8/tools/gcmole/README
+++ b/deps/v8/tools/gcmole/README
@@ -109,7 +109,7 @@ script "bootstrap.sh" mentioned above).
TROUBLESHOOTING ---------------------------------------------------------------
-gcmole is tighly coupled with the AST structure that Clang produces. Therefore
+gcmole is tightly coupled with the AST structure that Clang produces. Therefore
when upgrading to a newer Clang version, it might start producing bogus output
or completely stop outputting warnings. In such occasion, one might start the
debugging process by checking weather a new AST node type is introduced which
diff --git a/deps/v8/tools/gcmole/bootstrap.sh b/deps/v8/tools/gcmole/bootstrap.sh
index e04ef5826c..f47ba6d213 100755
--- a/deps/v8/tools/gcmole/bootstrap.sh
+++ b/deps/v8/tools/gcmole/bootstrap.sh
@@ -35,6 +35,8 @@
LLVM_RELEASE=9.0.1
+BUILD_TYPE="Release"
+# BUILD_TYPE="Debug"
THIS_DIR="$(readlink -f "$(dirname "${0}")")"
LLVM_PROJECT_DIR="${THIS_DIR}/bootstrap/llvm"
BUILD_DIR="${THIS_DIR}/bootstrap/build"
@@ -99,29 +101,35 @@ if [ ! -e "${BUILD_DIR}" ]; then
fi
cd "${BUILD_DIR}"
cmake -GNinja -DCMAKE_CXX_FLAGS="-static-libstdc++" -DLLVM_ENABLE_TERMINFO=OFF \
- -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS=clang \
+ -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DLLVM_ENABLE_PROJECTS=clang \
-DLLVM_ENABLE_Z3_SOLVER=OFF "${LLVM_PROJECT_DIR}/llvm"
-MACOSX_DEPLOYMENT_TARGET=10.5 ninja -j"${NUM_JOBS}"
-
-# Strip the clang binary.
-STRIP_FLAGS=
-if [ "${OS}" = "Darwin" ]; then
- # See http://crbug.com/256342
- STRIP_FLAGS=-x
+MACOSX_DEPLOYMENT_TARGET=10.5 ninja -j"${NUM_JOBS}" clang
+
+if [[ "${BUILD_TYPE}" = "Release" ]]; then
+ # Strip the clang binary.
+ STRIP_FLAGS=
+ if [ "${OS}" = "Darwin" ]; then
+ # See http://crbug.com/256342
+ STRIP_FLAGS=-x
+ fi
+ strip ${STRIP_FLAGS} bin/clang
fi
-strip ${STRIP_FLAGS} bin/clang
cd -
# Build libgcmole.so
make -C "${THIS_DIR}" clean
make -C "${THIS_DIR}" LLVM_SRC_ROOT="${LLVM_PROJECT_DIR}/llvm" \
CLANG_SRC_ROOT="${LLVM_PROJECT_DIR}/clang" \
- BUILD_ROOT="${BUILD_DIR}" libgcmole.so
+ BUILD_ROOT="${BUILD_DIR}" $BUILD_TYPE
set +x
-echo
-echo You can now run gcmole using this command:
-echo
-echo CLANG_BIN=\"tools/gcmole/gcmole-tools/bin\" python tools/gcmole/gcmole.py
+echo '#########################################################################'
+echo 'Congratulations you compiled clang and libgcmole.so'
+echo
+echo '# You can now run gcmole:'
+echo 'tools/gcmole/gcmole.py \'
+echo ' --clang-bin-dir="tools/gcmole/bootstrap/build/bin" \'
+echo ' --clang-plugins-dir="tools/gcmole" \'
+echo ' --v8-target-cpu=$CPU'
echo
diff --git a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
index 6de7b957f3..70900be895 100644
--- a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
+++ b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -1 +1 @@
-b9c0f67013d70cabd7a53ca059304f8f51bd937b \ No newline at end of file
+270aaed40a5d903a4795a2b59c33991b2885374c \ No newline at end of file
diff --git a/deps/v8/tools/gcmole/gcmole.cc b/deps/v8/tools/gcmole/gcmole.cc
index 3dc4baa722..9881f4c5b1 100644
--- a/deps/v8/tools/gcmole/gcmole.cc
+++ b/deps/v8/tools/gcmole/gcmole.cc
@@ -48,6 +48,8 @@ namespace {
bool g_tracing_enabled = false;
bool g_dead_vars_analysis = false;
+bool g_verbose = false;
+bool g_print_gc_call_chain = false;
#define TRACE(str) \
do { \
@@ -80,16 +82,13 @@ typedef std::map<MangledName, MangledName> CalleesMap;
static bool GetMangledName(clang::MangleContext* ctx,
const clang::NamedDecl* decl,
MangledName* result) {
- if (!llvm::isa<clang::CXXConstructorDecl>(decl) &&
- !llvm::isa<clang::CXXDestructorDecl>(decl)) {
- llvm::SmallVector<char, 512> output;
- llvm::raw_svector_ostream out(output);
- ctx->mangleName(decl, out);
- *result = out.str().str();
- return true;
- }
-
- return false;
+ if (llvm::isa<clang::CXXConstructorDecl>(decl)) return false;
+ if (llvm::isa<clang::CXXDestructorDecl>(decl)) return false;
+ llvm::SmallVector<char, 512> output;
+ llvm::raw_svector_ostream out(output);
+ ctx->mangleName(decl, out);
+ *result = out.str().str();
+ return true;
}
@@ -217,8 +216,7 @@ struct Resolver {
class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
public:
- explicit CalleesPrinter(clang::MangleContext* ctx) : ctx_(ctx) {
- }
+ explicit CalleesPrinter(clang::MangleContext* ctx) : ctx_(ctx) {}
virtual bool VisitCallExpr(clang::CallExpr* expr) {
const clang::FunctionDecl* callee = expr->getDirectCallee();
@@ -236,17 +234,17 @@ class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
}
void AnalyzeFunction(const clang::FunctionDecl* f) {
+ if (!InV8Namespace(f)) return;
MangledName name;
- if (InV8Namespace(f) && GetMangledName(ctx_, f, &name)) {
- const std::string& function = f->getNameAsString();
- AddCallee(name, function);
-
- const clang::FunctionDecl* body = NULL;
- if (f->hasBody(body) && !Analyzed(name)) {
- EnterScope(name);
- TraverseStmt(body->getBody());
- LeaveScope();
- }
+ if (!GetMangledName(ctx_, f, &name)) return;
+ const std::string& function = f->getNameAsString();
+ AddCallee(name, function);
+
+ const clang::FunctionDecl* body = NULL;
+ if (f->hasBody(body) && !Analyzed(name)) {
+ EnterScope(name);
+ TraverseStmt(body->getBody());
+ LeaveScope();
}
}
@@ -303,17 +301,18 @@ class FunctionDeclarationFinder
: public clang::ASTConsumer,
public clang::RecursiveASTVisitor<FunctionDeclarationFinder> {
public:
- explicit FunctionDeclarationFinder(clang::DiagnosticsEngine& d,
- clang::SourceManager& sm,
- const std::vector<std::string>& args)
- : d_(d), sm_(sm) {}
+ explicit FunctionDeclarationFinder(
+ clang::DiagnosticsEngine& diagnostics_engine,
+ clang::SourceManager& source_manager,
+ const std::vector<std::string>& args)
+ : diagnostics_engine_(diagnostics_engine),
+ source_manager_(source_manager) {}
virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
- mangle_context_ = clang::ItaniumMangleContext::create(ctx, d_);
+ mangle_context_ =
+ clang::ItaniumMangleContext::create(ctx, diagnostics_engine_);
callees_printer_ = new CalleesPrinter(mangle_context_);
-
TraverseDecl(ctx.getTranslationUnitDecl());
-
callees_printer_->PrintCallGraph();
}
@@ -323,8 +322,8 @@ class FunctionDeclarationFinder
}
private:
- clang::DiagnosticsEngine& d_;
- clang::SourceManager& sm_;
+ clang::DiagnosticsEngine& diagnostics_engine_;
+ clang::SourceManager& source_manager_;
clang::MangleContext* mangle_context_;
CalleesPrinter* callees_printer_;
@@ -333,8 +332,39 @@ class FunctionDeclarationFinder
static bool gc_suspects_loaded = false;
static CalleesSet gc_suspects;
static CalleesSet gc_functions;
-static bool whitelist_loaded = false;
-static CalleesSet suspects_whitelist;
+
+static bool allowlist_loaded = false;
+static CalleesSet suspects_allowlist;
+
+static bool gc_causes_loaded = false;
+static std::map<MangledName, std::vector<MangledName>> gc_causes;
+
+static void LoadGCCauses() {
+ if (gc_causes_loaded) return;
+ std::ifstream fin("gccauses");
+ std::string mangled, function;
+ while (!fin.eof()) {
+ std::getline(fin, mangled, ',');
+ std::getline(fin, function);
+ if (mangled.empty()) break;
+ std::string parent = mangled;
+ // start,nested
+ std::getline(fin, mangled, ',');
+ assert(mangled.compare("start") == 0);
+ std::getline(fin, function);
+ assert(function.compare("nested") == 0);
+ while (true) {
+ std::getline(fin, mangled, ',');
+ std::getline(fin, function);
+ if (mangled.compare("end") == 0) {
+ assert(function.compare("nested") == 0);
+ break;
+ }
+ gc_causes[parent].push_back(mangled);
+ }
+ }
+ gc_causes_loaded = true;
+}
static void LoadGCSuspects() {
if (gc_suspects_loaded) return;
@@ -352,55 +382,51 @@ static void LoadGCSuspects() {
gc_suspects_loaded = true;
}
-static void LoadSuspectsWhitelist() {
- if (whitelist_loaded) return;
+static void LoadSuspectsAllowList() {
+ if (allowlist_loaded) return;
- std::ifstream fin("tools/gcmole/suspects.whitelist");
+ // TODO(cbruni): clean up once fully migrated
+ std::ifstream fin("tools/gcmole/suspects.allowlist");
+ if (!fin.is_open()) {
+ fin = std::ifstream("tools/gcmole/suspects.whitelist");
+ }
std::string s;
- while (fin >> s) suspects_whitelist.insert(s);
+ while (fin >> s) suspects_allowlist.insert(s);
- whitelist_loaded = true;
+ allowlist_loaded = true;
}
// Looks for exact match of the mangled name.
-static bool KnownToCauseGC(clang::MangleContext* ctx,
- const clang::FunctionDecl* decl) {
+static bool IsKnownToCauseGC(clang::MangleContext* ctx,
+ const clang::FunctionDecl* decl) {
LoadGCSuspects();
-
if (!InV8Namespace(decl)) return false;
-
- if (suspects_whitelist.find(decl->getNameAsString()) !=
- suspects_whitelist.end()) {
+ if (suspects_allowlist.find(decl->getNameAsString()) !=
+ suspects_allowlist.end()) {
return false;
}
-
MangledName name;
if (GetMangledName(ctx, decl, &name)) {
return gc_suspects.find(name) != gc_suspects.end();
}
-
return false;
}
// Looks for partial match of only the function name.
-static bool SuspectedToCauseGC(clang::MangleContext* ctx,
- const clang::FunctionDecl* decl) {
+static bool IsSuspectedToCauseGC(clang::MangleContext* ctx,
+ const clang::FunctionDecl* decl) {
LoadGCSuspects();
-
if (!InV8Namespace(decl)) return false;
-
- LoadSuspectsWhitelist();
- if (suspects_whitelist.find(decl->getNameAsString()) !=
- suspects_whitelist.end()) {
+ LoadSuspectsAllowList();
+ if (suspects_allowlist.find(decl->getNameAsString()) !=
+ suspects_allowlist.end()) {
return false;
}
-
if (gc_functions.find(decl->getNameAsString()) != gc_functions.end()) {
TRACE_LLVM_DECL("Suspected by ", decl);
return true;
}
-
return false;
}
@@ -449,10 +475,9 @@ class ExprEffect {
intptr_t effect_;
};
-
-const std::string BAD_EXPR_MSG("Possible problem with evaluation order.");
-const std::string DEAD_VAR_MSG("Possibly dead variable.");
-
+const std::string BAD_EXPR_MSG(
+ "Possible problem with evaluation order with interleaved GCs.");
+const std::string DEAD_VAR_MSG("Possibly stale variable due to GCs.");
class Environment {
public:
@@ -612,22 +637,16 @@ class CallProps {
ExprEffect ComputeCumulativeEffect(bool result_is_raw) {
ExprEffect out = ExprEffect::NoneWithEnv(env_);
- if (gc_.any()) {
- out.setGC();
- }
+ if (gc_.any()) out.setGC();
if (raw_use_.any()) out.setRawUse();
if (result_is_raw) out.setRawDef();
return out;
}
bool IsSafe() {
- if (!gc_.any()) {
- return true;
- }
+ if (!gc_.any()) return true;
std::bitset<kMaxNumberOfArguments> raw = (raw_def_ | raw_use_);
- if (!raw.any()) {
- return true;
- }
+ if (!raw.any()) return true;
bool result = gc_.count() == 1 && !((raw ^ gc_).any());
return result;
}
@@ -950,13 +969,10 @@ class FunctionAnalyzer {
ExprEffect Parallel(clang::Expr* parent, int n, clang::Expr** exprs,
const Environment& env) {
CallProps props;
-
for (int i = 0; i < n; ++i) {
props.SetEffect(i, VisitExpr(exprs[i], env));
}
-
if (!props.IsSafe()) ReportUnsafe(parent, BAD_EXPR_MSG);
-
return props.ComputeCumulativeEffect(
RepresentsRawPointerType(parent->getType()));
}
@@ -984,27 +1000,24 @@ class FunctionAnalyzer {
const clang::QualType& var_type,
const std::string& var_name,
const Environment& env) {
- if (RepresentsRawPointerType(var_type)) {
- // We currently care only about our internal pointer types and not about
- // raw C++ pointers, because normally special care is taken when storing
- // raw pointers to the managed heap. Furthermore, checking for raw
- // pointers produces too many false positives in the dead variable
- // analysis.
- if (IsInternalPointerType(var_type) && !env.IsAlive(var_name) &&
- !HasActiveGuard() && g_dead_vars_analysis) {
- ReportUnsafe(parent, DEAD_VAR_MSG);
- }
- return ExprEffect::RawUse();
- }
- return ExprEffect::None();
+ if (!g_dead_vars_analysis) return ExprEffect::None();
+ if (!RepresentsRawPointerType(var_type)) return ExprEffect::None();
+ // We currently care only about our internal pointer types and not about
+ // raw C++ pointers, because normally special care is taken when storing
+ // raw pointers to the managed heap. Furthermore, checking for raw
+ // pointers produces too many false positives in the dead variable
+ // analysis.
+ if (!IsInternalPointerType(var_type)) return ExprEffect::None();
+ if (env.IsAlive(var_name)) return ExprEffect::None();
+ if (HasActiveGuard()) return ExprEffect::None();
+ ReportUnsafe(parent, DEAD_VAR_MSG);
+ return ExprEffect::RawUse();
}
ExprEffect Use(const clang::Expr* parent,
const clang::ValueDecl* var,
const Environment& env) {
- if (IsExternalVMState(var)) {
- return ExprEffect::GC();
- }
+ if (IsExternalVMState(var)) return ExprEffect::GC();
return Use(parent, var->getType(), var->getNameAsString(), env);
}
@@ -1062,43 +1075,40 @@ class FunctionAnalyzer {
RepresentsRawPointerType(call->getType()));
clang::FunctionDecl* callee = call->getDirectCallee();
- if (callee != NULL) {
- if (KnownToCauseGC(ctx_, callee)) {
+ if (callee == NULL) return out;
+
+ if (IsKnownToCauseGC(ctx_, callee)) {
+ out.setGC();
+ scopes_.back().SetGCCauseLocation(
+ clang::FullSourceLoc(call->getExprLoc(), sm_), callee);
+ }
+
+ // Support for virtual methods that might be GC suspects.
+ if (memcall == NULL) return out;
+ clang::CXXMethodDecl* method =
+ llvm::dyn_cast_or_null<clang::CXXMethodDecl>(callee);
+ if (method == NULL) return out;
+ if (!method->isVirtual()) return out;
+
+ clang::CXXMethodDecl* target = method->getDevirtualizedMethod(
+ memcall->getImplicitObjectArgument(), false);
+ if (target != NULL) {
+ if (IsKnownToCauseGC(ctx_, target)) {
out.setGC();
scopes_.back().SetGCCauseLocation(
- clang::FullSourceLoc(call->getExprLoc(), sm_));
+ clang::FullSourceLoc(call->getExprLoc(), sm_), target);
}
-
- // Support for virtual methods that might be GC suspects.
- clang::CXXMethodDecl* method =
- llvm::dyn_cast_or_null<clang::CXXMethodDecl>(callee);
- if (method != NULL && method->isVirtual()) {
- clang::CXXMemberCallExpr* memcall =
- llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
- if (memcall != NULL) {
- clang::CXXMethodDecl* target = method->getDevirtualizedMethod(
- memcall->getImplicitObjectArgument(), false);
- if (target != NULL) {
- if (KnownToCauseGC(ctx_, target)) {
- out.setGC();
- scopes_.back().SetGCCauseLocation(
- clang::FullSourceLoc(call->getExprLoc(), sm_));
- }
- } else {
- // According to the documentation, {getDevirtualizedMethod} might
- // return NULL, in which case we still want to use the partial
- // match of the {method}'s name against the GC suspects in order
- // to increase coverage.
- if (SuspectedToCauseGC(ctx_, method)) {
- out.setGC();
- scopes_.back().SetGCCauseLocation(
- clang::FullSourceLoc(call->getExprLoc(), sm_));
- }
- }
- }
+ } else {
+ // According to the documentation, {getDevirtualizedMethod} might
+ // return NULL, in which case we still want to use the partial
+ // match of the {method}'s name against the GC suspects in order
+ // to increase coverage.
+ if (IsSuspectedToCauseGC(ctx_, method)) {
+ out.setGC();
+ scopes_.back().SetGCCauseLocation(
+ clang::FullSourceLoc(call->getExprLoc(), sm_), method);
}
}
-
return out;
}
@@ -1185,11 +1195,9 @@ class FunctionAnalyzer {
}
bool changed() {
- if (changed_) {
- changed_ = false;
- return true;
- }
- return false;
+ if (!changed_) return false;
+ changed_ = false;
+ return true;
}
const Environment& in() {
@@ -1455,7 +1463,7 @@ class FunctionAnalyzer {
}
bool HasActiveGuard() {
- for (auto s : scopes_) {
+ for (const auto s : scopes_) {
if (s.IsBeforeGCCause()) return true;
}
return false;
@@ -1466,6 +1474,36 @@ class FunctionAnalyzer {
d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
d_.getCustomDiagID(clang::DiagnosticsEngine::Warning, "%0"))
<< msg;
+ if (scopes_.empty()) return;
+ GCScope scope = scopes_[0];
+ if (!scope.gccause_location.isValid()) return;
+ d_.Report(scope.gccause_location,
+ d_.getCustomDiagID(clang::DiagnosticsEngine::Note,
+ "Call might cause unexpected GC."));
+ clang::FunctionDecl* gccause_decl = scope.gccause_decl;
+ d_.Report(
+ clang::FullSourceLoc(gccause_decl->getBeginLoc(), sm_),
+ d_.getCustomDiagID(clang::DiagnosticsEngine::Note, "GC call here."));
+
+ if (!g_print_gc_call_chain) return;
+ // TODO(cbruni, v8::10009): print call-chain to gc with proper source
+ // positions.
+ LoadGCCauses();
+ MangledName name;
+ if (!GetMangledName(ctx_, gccause_decl, &name)) return;
+ std::cout << "Potential GC call chain:\n";
+ std::set<MangledName> stack;
+ while (true) {
+ if (!stack.insert(name).second) break;
+ std::cout << "\t" << name << "\n";
+ auto next = gc_causes.find(name);
+ if (next == gc_causes.end()) break;
+ std::vector<MangledName> calls = next->second;
+ for (MangledName call : calls) {
+ name = call;
+ if (stack.find(call) != stack.end()) break;
+ }
+ }
}
@@ -1484,10 +1522,11 @@ class FunctionAnalyzer {
struct GCScope {
clang::FullSourceLoc guard_location;
clang::FullSourceLoc gccause_location;
+ clang::FunctionDecl* gccause_decl;
// We're only interested in guards that are declared before any further GC
// causing calls (see TestGuardedDeadVarAnalysisMidFunction for example).
- bool IsBeforeGCCause() {
+ bool IsBeforeGCCause() const {
if (!guard_location.isValid()) return false;
if (!gccause_location.isValid()) return true;
return guard_location.isBeforeInTranslationUnitThan(gccause_location);
@@ -1495,9 +1534,11 @@ class FunctionAnalyzer {
// After we set the first GC cause in the scope, we don't need the later
// ones.
- void SetGCCauseLocation(clang::FullSourceLoc gccause_location_) {
+ void SetGCCauseLocation(clang::FullSourceLoc gccause_location_,
+ clang::FunctionDecl* decl) {
if (gccause_location.isValid()) return;
gccause_location = gccause_location_;
+ gccause_decl = decl;
}
};
std::vector<GCScope> scopes_;
@@ -1513,9 +1554,8 @@ class ProblemsFinder : public clang::ASTConsumer,
if (args[i] == "--dead-vars") {
g_dead_vars_analysis = true;
}
- if (args[i] == "--verbose") {
- g_tracing_enabled = true;
- }
+ if (args[i] == "--verbose-trace") g_tracing_enabled = true;
+ if (args[i] == "--verbose") g_verbose = true;
}
}
@@ -1571,7 +1611,7 @@ class ProblemsFinder : public clang::ASTConsumer,
clang::ItaniumMangleContext::create(ctx, d_), object_decl,
maybe_object_decl, smi_decl, no_gc_mole_decl, d_, sm_);
TraverseDecl(ctx.getTranslationUnitDecl());
- } else {
+ } else if (g_verbose) {
if (object_decl == NULL) {
llvm::errs() << "Failed to resolve v8::internal::Object\n";
}
@@ -1609,7 +1649,6 @@ class ProblemsFinder : public clang::ASTConsumer,
FunctionAnalyzer* function_analyzer_;
};
-
template<typename ConsumerType>
class Action : public clang::PluginASTAction {
protected:
diff --git a/deps/v8/tools/gcmole/gcmole.py b/deps/v8/tools/gcmole/gcmole.py
index 3df0788ade..a77c57355d 100644..100755
--- a/deps/v8/tools/gcmole/gcmole.py
+++ b/deps/v8/tools/gcmole/gcmole.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,55 +9,132 @@
# for py2/py3 compatibility
from __future__ import print_function
+from multiprocessing import cpu_count
+
import collections
import difflib
-from multiprocessing import cpu_count
+import json
+import optparse
import os
import re
import subprocess
import sys
import threading
+
if sys.version_info.major > 2:
+ from pathlib import Path
import queue
else:
import Queue as queue
+ default_open = open
+
+ def open(path, *args, **kwargs):
+ return default_open(str(path), *args, **kwargs)
+
+ class Path(object):
+
+ def __init__(self, path, *args):
+ if args:
+ self._path = os.path.join(str(path), *args)
+ else:
+ self._path = str(path)
+
+ def __div__(self, other):
+ return Path(self._path, str(other))
+
+ def __str__(self):
+ return self._path
-ArchCfg = collections.namedtuple("ArchCfg",
- ["triple", "arch_define", "arch_options"])
+ def resolve(self):
+ return Path(os.path.abspath(self._path))
+
+ @property
+ def parent(self):
+ return Path(os.path.dirname(self._path))
+
+ @property
+ def parents(self):
+ current = self
+ parents = []
+ while current._path != "" and current._path != "/":
+ current = current.parent
+ parents.append(current)
+ return parents
+
+ def is_file(self):
+ return os.path.isfile(self._path)
+
+ def is_dir(self):
+ return os.path.isdir(self._path)
+
+ def exists(self):
+ return os.path.exists(self._path)
+
+ def mkdir(self, parents=False, exist_ok=False):
+ if parents and not self.parent.exists():
+ self.parent.mkdir(parents=True, exist_ok=True)
+ if exist_ok and self.exists():
+ return
+ os.mkdir(self._path)
+
+
+ArchCfg = collections.namedtuple(
+ "ArchCfg", ["name", "cpu", "triple", "arch_define", "arch_options"])
+
+# TODO(cbruni): use gn desc by default for platform-specific settings
+OPTIONS_64BIT = [
+ "-DV8_COMPRESS_POINTERS",
+ "-DV8_COMPRESS_POINTERS_IN_SHARED_CAGE",
+ "-DV8_EXTERNAL_CODE_SPACE",
+ "-DV8_SHORT_BUILTIN_CALLS",
+ "-DV8_SHARED_RO_HEAP",
+]
ARCHITECTURES = {
"ia32":
ArchCfg(
+ name="ia32",
+ cpu="x86",
triple="i586-unknown-linux",
arch_define="V8_TARGET_ARCH_IA32",
arch_options=["-m32"],
),
"arm":
ArchCfg(
+ name="arm",
+ cpu="arm",
triple="i586-unknown-linux",
arch_define="V8_TARGET_ARCH_ARM",
arch_options=["-m32"],
),
+ # TODO(cbruni): Use detailed settings:
+ # arch_options = OPTIONS_64BIT + [ "-DV8_WIN64_UNWINDING_INFO" ]
"x64":
ArchCfg(
+ name="x64",
+ cpu="x64",
triple="x86_64-unknown-linux",
arch_define="V8_TARGET_ARCH_X64",
arch_options=[]),
"arm64":
ArchCfg(
+ name="arm64",
+ cpu="arm64",
triple="x86_64-unknown-linux",
arch_define="V8_TARGET_ARCH_ARM64",
arch_options=[],
),
}
+ARCHITECTURES['x86'] = ARCHITECTURES['ia32']
-def log(format, *args):
- print(format.format(*args))
+def log(format, *args, **kwargs):
+ mark = ("#", "=", "-", ".")[kwargs.get("level", 0)]
+ print(mark * 2, str(format).format(*list(map(str, args))))
-def fatal(format, *args):
- log(format, *args)
+def fatal(format):
+ log(format)
sys.exit(1)
@@ -65,26 +142,27 @@ def fatal(format, *args):
# Clang invocation
-def MakeClangCommandLine(plugin, plugin_args, arch_cfg, clang_bin_dir,
- clang_plugins_dir):
+def make_clang_command_line(plugin, plugin_args, options):
+ arch_cfg = ARCHITECTURES[options.v8_target_cpu]
prefixed_plugin_args = []
if plugin_args:
for arg in plugin_args:
prefixed_plugin_args += [
"-Xclang",
- "-plugin-arg-{}".format(plugin),
+ "-plugin-arg-" + plugin,
"-Xclang",
arg,
]
-
+ log("Using generated files in {}", options.v8_build_dir / 'gen')
+ icu_src_dir = options.v8_root_dir / 'third_party/icu/source'
return ([
- os.path.join(clang_bin_dir, "clang++"),
- "-std=c++14",
+ options.clang_bin_dir / "clang++",
+ "-std=c++17",
"-c",
"-Xclang",
"-load",
"-Xclang",
- os.path.join(clang_plugins_dir, "libgcmole.so"),
+ options.clang_plugins_dir / "libgcmole.so",
"-Xclang",
"-plugin",
"-Xclang",
@@ -95,41 +173,44 @@ def MakeClangCommandLine(plugin, plugin_args, arch_cfg, clang_bin_dir,
"-Xclang",
arch_cfg.triple,
"-fno-exceptions",
+ "-Wno-everything",
"-D",
arch_cfg.arch_define,
"-DENABLE_DEBUGGER_SUPPORT",
- "-DV8_INTL_SUPPORT",
"-DV8_ENABLE_WEBASSEMBLY",
- "-I./",
- "-Iinclude/",
- "-Iout/build/gen",
- "-Ithird_party/icu/source/common",
- "-Ithird_party/icu/source/i18n",
+ "-DV8_GC_MOLE",
+ "-DV8_INTL_SUPPORT",
+ "-I{}".format(options.v8_root_dir),
+ "-I{}".format(options.v8_root_dir / 'include'),
+ "-I{}".format(options.v8_build_dir / 'gen'),
+ "-I{}".format(icu_src_dir / 'common'),
+ "-I{}".format(icu_src_dir / 'i18n'),
] + arch_cfg.arch_options)
-def InvokeClangPluginForFile(filename, cmd_line, verbose):
+def invoke_clang_plugin_for_file(filename, cmd_line, verbose):
+ args = cmd_line + [filename]
+ args = list(map(str, args))
if verbose:
- print("popen ", " ".join(cmd_line + [filename]))
- p = subprocess.Popen(
- cmd_line + [filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ print("popen ", " ".join(args))
+ p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
- return p.returncode, stdout, stderr
+ return p.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
-def InvokeClangPluginForFilesInQueue(i, input_queue, output_queue, cancel_event,
- cmd_line, verbose):
+def invoke_clang_plugin_for_files_in_queue(i, input_queue, output_queue,
+ cancel_event, cmd_line, verbose):
success = False
try:
while not cancel_event.is_set():
filename = input_queue.get_nowait()
- ret, stdout, stderr = InvokeClangPluginForFile(filename, cmd_line,
- verbose)
- output_queue.put_nowait((filename, ret, stdout.decode('utf-8'), stderr.decode('utf-8')))
+ ret, stdout, stderr = invoke_clang_plugin_for_file(
+ filename, cmd_line, verbose)
+ output_queue.put_nowait((filename, ret, stdout, stderr))
if ret != 0:
break
except KeyboardInterrupt:
- log("-- [{}] Interrupting", i)
+ log("[{}] Interrupting", i, level=1)
except queue.Empty:
success = True
finally:
@@ -138,30 +219,21 @@ def InvokeClangPluginForFilesInQueue(i, input_queue, output_queue, cancel_event,
output_queue.put_nowait(success)
-def InvokeClangPluginForEachFile(
- filenames,
- plugin,
- plugin_args,
- arch_cfg,
- flags,
- clang_bin_dir,
- clang_plugins_dir,
-):
- cmd_line = MakeClangCommandLine(plugin, plugin_args, arch_cfg, clang_bin_dir,
- clang_plugins_dir)
- verbose = flags["verbose"]
- if flags["sequential"]:
- log("** Sequential execution.")
+def invoke_clang_plugin_for_each_file(filenames, plugin, plugin_args, options):
+ cmd_line = make_clang_command_line(plugin, plugin_args, options)
+ verbose = options.verbose
+ if options.sequential:
+ log("Sequential execution.")
for filename in filenames:
- log("-- {}", filename)
- returncode, stdout, stderr = InvokeClangPluginForFile(
+ log(filename, level=1)
+ returncode, stdout, stderr = invoke_clang_plugin_for_file(
filename, cmd_line, verbose)
if returncode != 0:
sys.stderr.write(stderr)
sys.exit(returncode)
yield filename, stdout, stderr
else:
- log("** Parallel execution.")
+ log("Parallel execution.")
cpus = cpu_count()
input_queue = queue.Queue()
output_queue = queue.Queue()
@@ -175,7 +247,7 @@ def InvokeClangPluginForEachFile(
for i in range(min(len(filenames), cpus)):
threads.append(
threading.Thread(
- target=InvokeClangPluginForFilesInQueue,
+ target=invoke_clang_plugin_for_files_in_queue,
args=(i, input_queue, output_queue, cancel_event, cmd_line,
verbose)))
@@ -192,7 +264,7 @@ def InvokeClangPluginForEachFile(
else:
break
filename, returncode, stdout, stderr = output
- log("-- {}", filename)
+ log(filename, level=2)
if returncode != 0:
sys.stderr.write(stderr)
sys.exit(returncode)
@@ -207,31 +279,30 @@ def InvokeClangPluginForEachFile(
# -----------------------------------------------------------------------------
-def ParseGNFile(for_test):
- result = {}
+def parse_gn_file(options, for_test):
if for_test:
- gn_files = [("tools/gcmole/GCMOLE.gn", re.compile('"([^"]*?\.cc)"'), "")]
- else:
- gn_files = [
- ("BUILD.gn", re.compile('"([^"]*?\.cc)"'), ""),
- ("test/cctest/BUILD.gn", re.compile('"(test-[^"]*?\.cc)"'),
- "test/cctest/"),
- ]
-
+ return {"all": [options.v8_root_dir / "tools/gcmole/gcmole-test.cc"]}
+ result = {}
+ gn_files = [
+ ("BUILD.gn", re.compile('"([^"]*?\.cc)"'), ""),
+ ("test/cctest/BUILD.gn", re.compile('"(test-[^"]*?\.cc)"'),
+ Path("test/cctest/")),
+ ]
for filename, pattern, prefix in gn_files:
- with open(filename) as gn_file:
+ path = options.v8_root_dir / filename
+ with open(path) as gn_file:
gn = gn_file.read()
for condition, sources in re.findall("### gcmole\((.*?)\) ###(.*?)\]", gn,
re.MULTILINE | re.DOTALL):
if condition not in result:
result[condition] = []
for file in pattern.findall(sources):
- result[condition].append(prefix + file)
+ result[condition].append(options.v8_root_dir / prefix / file)
return result
-def EvaluateCondition(cond, props):
+def evaluate_condition(cond, props):
if cond == "all":
return True
@@ -245,34 +316,19 @@ def EvaluateCondition(cond, props):
return props[p] == v
-def BuildFileList(sources, props):
- ret = []
- for condition, files in sources.items():
- if EvaluateCondition(condition, props):
- ret += files
- return ret
-
-
-gn_sources = ParseGNFile(for_test=False)
-gn_test_sources = ParseGNFile(for_test=True)
-
-
-def FilesForArch(arch):
- return BuildFileList(gn_sources, {
+def build_file_list(options, for_test):
+ sources = parse_gn_file(options, for_test)
+ props = {
"os": "linux",
- "arch": arch,
+ "arch": options.v8_target_cpu,
"mode": "debug",
"simulator": ""
- })
-
-
-def FilesForTest(arch):
- return BuildFileList(gn_test_sources, {
- "os": "linux",
- "arch": arch,
- "mode": "debug",
- "simulator": ""
- })
+ }
+ ret = []
+ for condition, files in list(sources.items()):
+ if evaluate_condition(condition, props):
+ ret += files
+ return ret
# -----------------------------------------------------------------------------
@@ -308,19 +364,19 @@ ALLOWLIST = [
GC_PATTERN = ",.*Collect.*Garbage"
SAFEPOINT_PATTERN = ",SafepointSlowPath"
-ALLOWLIST_PATTERN = "|".join("(?:%s)" % p for p in ALLOWLIST)
+ALLOWLIST_PATTERN = "|".join("(?:{})".format(p) for p in ALLOWLIST)
-def MergeRegexp(pattern_dict):
- return re.compile("|".join(
- "(?P<%s>%s)" % (key, value) for (key, value) in pattern_dict.items()))
+def merge_regexp(pattern_dict):
+ return re.compile("|".join("(?P<{}>{})".format(key, value)
+ for (key, value) in list(pattern_dict.items())))
-IS_SPECIAL_WITHOUT_ALLOW_LIST = MergeRegexp({
+IS_SPECIAL_WITHOUT_ALLOW_LIST = merge_regexp({
"gc": GC_PATTERN,
"safepoint": SAFEPOINT_PATTERN
})
-IS_SPECIAL_WITH_ALLOW_LIST = MergeRegexp({
+IS_SPECIAL_WITH_ALLOW_LIST = merge_regexp({
"gc": GC_PATTERN,
"safepoint": SAFEPOINT_PATTERN,
"allow": ALLOWLIST_PATTERN
@@ -329,133 +385,139 @@ IS_SPECIAL_WITH_ALLOW_LIST = MergeRegexp({
class GCSuspectsCollector:
- def __init__(self, flags):
+ def __init__(self, options):
self.gc = {}
- self.gc_caused = collections.defaultdict(lambda: [])
+ self.gc_caused = collections.defaultdict(lambda: set())
self.funcs = {}
self.current_caller = None
- self.allowlist = flags["allowlist"]
+ self.allowlist = options.allowlist
self.is_special = IS_SPECIAL_WITH_ALLOW_LIST if self.allowlist else IS_SPECIAL_WITHOUT_ALLOW_LIST
- def AddCause(self, name, cause):
- self.gc_caused[name].append(cause)
+ def add_cause(self, name, cause):
+ self.gc_caused[name].add(cause)
- def Parse(self, lines):
+ def parse(self, lines):
for funcname in lines:
if not funcname:
continue
if funcname[0] != "\t":
- self.Resolve(funcname)
+ self.resolve(funcname)
self.current_caller = funcname
else:
name = funcname[1:]
- callers_for_name = self.Resolve(name)
+ callers_for_name = self.resolve(name)
callers_for_name.add(self.current_caller)
- def Resolve(self, name):
+ def resolve(self, name):
if name not in self.funcs:
self.funcs[name] = set()
m = self.is_special.search(name)
if m:
if m.group("gc"):
self.gc[name] = True
- self.AddCause(name, "<GC>")
+ self.add_cause(name, "<GC>")
elif m.group("safepoint"):
self.gc[name] = True
- self.AddCause(name, "<Safepoint>")
+ self.add_cause(name, "<Safepoint>")
elif m.group("allow"):
self.gc[name] = False
return self.funcs[name]
- def Propagate(self):
- log("** Propagating GC information")
+ def propagate(self):
+ log("Propagating GC information")
def mark(funcname, callers):
for caller in callers:
if caller not in self.gc:
self.gc[caller] = True
mark(caller, self.funcs[caller])
+ self.add_cause(caller, funcname)
- self.AddCause(caller, funcname)
-
- for funcname, callers in self.funcs.items():
+ for funcname, callers in list(self.funcs.items()):
if self.gc.get(funcname, False):
mark(funcname, callers)
-def GenerateGCSuspects(arch, files, arch_cfg, flags, clang_bin_dir,
- clang_plugins_dir):
+def generate_gc_suspects(files, options):
# Reset the global state.
- collector = GCSuspectsCollector(flags)
-
- log("** Building GC Suspects for {}", arch)
- for filename, stdout, stderr in InvokeClangPluginForEachFile(
- files, "dump-callees", [], arch_cfg, flags, clang_bin_dir,
- clang_plugins_dir):
- collector.Parse(stdout.splitlines())
-
- collector.Propagate()
-
- with open("gcsuspects", "w") as out:
- for name, value in collector.gc.items():
+ collector = GCSuspectsCollector(options)
+
+ log("Building GC Suspects for {}", options.v8_target_cpu)
+ for _, stdout, _ in invoke_clang_plugin_for_each_file(files, "dump-callees",
+ [], options):
+ collector.parse(stdout.splitlines())
+ collector.propagate()
+ # TODO(cbruni): remove once gcmole.cc is migrated
+ write_gcmole_results(collector, options, options.v8_root_dir)
+ write_gcmole_results(collector, options, options.out_dir)
+
+
+def write_gcmole_results(collector, options, dst):
+ # gcsuspects contains a list("mangled_full_name,name") of all functions that
+ # could cause a gc (directly or indirectly).
+ #
+ # EXAMPLE
+ # _ZN2v88internal4Heap16CreateApiObjectsEv,CreateApiObjects
+ # _ZN2v88internal4Heap17CreateInitialMapsEv,CreateInitialMaps
+ # ...
+ with open(dst / "gcsuspects", "w") as out:
+ for name, value in list(collector.gc.items()):
if value:
out.write(name + "\n")
-
- with open("gccauses", "w") as out:
- out.write("GC = {\n")
- for name, causes in collector.gc_caused.items():
- out.write(" '{}': [\n".format(name))
+ # gccauses contains a map["mangled_full_name,name"] => list(inner gcsuspects)
+ # Where the inner gcsuspects are functions directly called in the outer
+ # function that can cause a gc. The format is encoded for simplified
+ # deserialization in gcmole.cc.
+ #
+ # EXAMPLE:
+ # _ZN2v88internal4Heap17CreateHeapObjectsEv,CreateHeapObjects
+ # start,nested
+ # _ZN2v88internal4Heap16CreateApiObjectsEv,CreateApiObjects
+ # _ZN2v88internal4Heap17CreateInitialMapsEv,CreateInitialMaps
+ # ...
+ # end,nested
+ # ...
+ with open(dst / "gccauses", "w") as out:
+ for name, causes in list(collector.gc_caused.items()):
+ out.write("{}\n".format(name))
+ out.write("start,nested\n")
for cause in causes:
- out.write(" '{}',\n".format(cause))
- out.write(" ],\n")
- out.write("}\n")
-
- log("** GCSuspects generated for {}", arch)
+ out.write("{}\n".format(cause))
+ out.write("end,nested\n")
+ log("GCSuspects and gccauses generated for {} in '{}'", options.v8_target_cpu,
+ dst)
# ------------------------------------------------------------------------------
# Analysis
-def CheckCorrectnessForArch(arch, for_test, flags, clang_bin_dir,
- clang_plugins_dir):
- if for_test:
- files = FilesForTest(arch)
- else:
- files = FilesForArch(arch)
- arch_cfg = ARCHITECTURES[arch]
+def check_correctness_for_arch(options, for_test):
+ files = build_file_list(options, for_test)
- if not flags["reuse_gcsuspects"]:
- GenerateGCSuspects(arch, files, arch_cfg, flags, clang_bin_dir,
- clang_plugins_dir)
+ if not options.reuse_gcsuspects:
+ generate_gc_suspects(files, options)
else:
- log("** Reusing GCSuspects for {}", arch)
+ log("Reusing GCSuspects for {}", options.v8_target_cpu)
processed_files = 0
errors_found = False
output = ""
- log(
- "** Searching for evaluation order problems{} for {}",
- " and dead variables" if flags["dead_vars"] else "",
- arch,
- )
+ log("Searching for evaluation order problems " +
+ (' and dead variables' if options.dead_vars else '') + "for" +
+ options.v8_target_cpu)
plugin_args = []
- if flags["dead_vars"]:
+ if options.dead_vars:
plugin_args.append("--dead-vars")
- if flags["verbose_trace"]:
+ if options.verbose:
plugin_args.append("--verbose")
- for filename, stdout, stderr in InvokeClangPluginForEachFile(
- files,
- "find-problems",
- plugin_args,
- arch_cfg,
- flags,
- clang_bin_dir,
- clang_plugins_dir,
- ):
+ if options.verbose_trace:
+ plugin_args.append("--verbose-trace")
+ for _, _, stderr in invoke_clang_plugin_for_each_file(files, "find-problems",
+ plugin_args, options):
processed_files = processed_files + 1
if not errors_found:
errors_found = re.search("^[^:]+:\d+:\d+: (warning|error)", stderr,
@@ -465,112 +527,281 @@ def CheckCorrectnessForArch(arch, for_test, flags, clang_bin_dir,
else:
sys.stdout.write(stderr)
- log(
- "** Done processing {} files. {}",
- processed_files,
- "Errors found" if errors_found else "No errors found",
- )
+ log("Done processing {} files.", processed_files)
+ log("Errors found" if errors_found else "No errors found")
return errors_found, output
-def TestRun(flags, clang_bin_dir, clang_plugins_dir):
- log("** Test Run")
- errors_found, output = CheckCorrectnessForArch("x64", True, flags,
- clang_bin_dir,
- clang_plugins_dir)
+def test_run(options):
+ if not options.test_run:
+ return True
+ log("Test Run")
+ errors_found, output = check_correctness_for_arch(options, True)
if not errors_found:
- log("** Test file should produce errors, but none were found. Output:")
- log(output)
+ log("Test file should produce errors, but none were found. Output:")
+ print(output)
return False
- filename = "tools/gcmole/test-expectations.txt"
- with open(filename) as exp_file:
+ new_file = options.out_dir / "test-expectations-gen.txt"
+ with open(new_file, "w") as f:
+ f.write(output)
+ log("Wrote test-results: {}", new_file)
+
+ expected_file = options.v8_root_dir / "tools/gcmole/test-expectations.txt"
+ with open(expected_file) as exp_file:
expectations = exp_file.read()
if output != expectations:
- log("** Output mismatch from running tests. Please run them manually.")
-
+ diff_file = options.out_dir / "test_output.diff"
+ print("#" * 79)
+ log("Output mismatch from running tests.")
+ log("Please run gcmole manually with --test-run --verbose.")
+ log("Expected: " + expected_file)
+ log("New: " + new_file)
+ log("*Diff:* " + diff_file)
+ print("#" * 79)
for line in difflib.unified_diff(
expectations.splitlines(),
output.splitlines(),
- fromfile=filename,
- tofile="output",
+ fromfile=str(new_file),
+ tofile=str(diff_file),
lineterm="",
):
- log("{}", line)
+ print(line)
- log("------")
- log("--- Full output ---")
- log(output)
- log("------")
+ print("#" * 79)
+ log("Full output")
+ log("Expected: " + expected_file)
+ log("Diff: " + diff_file)
+ log("*New:* " + new_file)
+ print("#" * 79)
+ print(output)
+ print("#" * 79)
return False
- log("** Tests ran successfully")
+ log("Tests ran successfully")
return True
+# =============================================================================
+def relative_parents(path, level=0):
+ return Path(os.path.relpath(str(path.resolve().parents[level])))
+
+
def main(args):
- DIR = os.path.dirname(args[0])
-
- clang_bin_dir = os.getenv("CLANG_BIN")
- clang_plugins_dir = os.getenv("CLANG_PLUGINS")
-
- if not clang_bin_dir or clang_bin_dir == "":
- fatal("CLANG_BIN not set")
-
- if not clang_plugins_dir or clang_plugins_dir == "":
- clang_plugins_dir = DIR
-
- flags = {
- #: not build gcsuspects file and reuse previously generated one.
- "reuse_gcsuspects": False,
- #:n't use parallel python runner.
- "sequential": False,
- # Print commands to console before executing them.
- "verbose": True,
- # Perform dead variable analysis.
- "dead_vars": True,
- # Enable verbose tracing from the plugin itself.
- "verbose_trace": False,
- # When building gcsuspects allowlist certain functions as if they can be
- # causing GC. Currently used to reduce number of false positives in dead
- # variables analysis. See TODO for ALLOWLIST
- "allowlist": True,
- }
- pos_args = []
-
- flag_regexp = re.compile("^--(no[-_]?)?([\w\-_]+)$")
- for arg in args[1:]:
- m = flag_regexp.match(arg)
- if m:
- no, flag = m.groups()
- flag = flag.replace("-", "_")
- if flag in flags:
- flags[flag] = no is None
- else:
- fatal("Unknown flag: {}", flag)
+ # Print arguments for better debugging on the bots
+ # Get a clean parent path relative to PWD
+ gcmole_dir = relative_parents(Path(args[0]))
+
+ parser = optparse.OptionParser()
+ archs = list(ARCHITECTURES.keys())
+ parser.add_option(
+ "--v8-target-cpu",
+ type="choice",
+ choices=archs,
+ help="Tested CPU architecture. Choices: {}".format(archs),
+ metavar="CPU")
+ default_clang_bin_dir = gcmole_dir / 'gcmole-tools/bin'
+ parser.add_option(
+ "--clang-bin-dir",
+ metavar="DIR",
+ help="Build dir of the custom clang version for gcmole." + \
+ "Default: env['CLANG_DIR'] or '{}'".format(default_clang_bin_dir))
+ parser.add_option(
+ "--clang-plugins-dir",
+ metavar="DIR",
+ help="Containing dir for libgcmole.so."
+ "Default: env['CLANG_PLUGINS'] or '{}'".format(gcmole_dir))
+ default_root_dir = relative_parents(gcmole_dir, 1)
+ parser.add_option(
+ "--v8-root-dir",
+ metavar="DIR",
+ default=default_root_dir,
+ help="V8 checkout directory. Default: '{}'".format(default_root_dir))
+ parser.add_option(
+ "--v8-build-dir",
+ metavar="BUILD_DIR",
+ help="GN build dir for v8. Default: 'out/CPU.Release'. "
+ "Config must match cpu specified by --v8-target-cpu")
+ parser.add_option(
+ "--out-dir",
+ metavar="DIR",
+ help="Output location for the gcsuspect and gcauses file."
+ "Default: BUILD_DIR/gen/tools/gcmole")
+ parser.add_option(
+ "--is-bot",
+ action="store_true",
+ default=False,
+ help="Flag for setting build bot specific settings.")
+
+ group = optparse.OptionGroup(parser, "GCMOLE options")
+ group.add_option(
+ "--reuse-gcsuspects",
+ action="store_true",
+ default=False,
+ help="Don't build gcsuspects file and reuse previously generated one.")
+ group.add_option(
+ "--sequential",
+ action="store_true",
+ default=False,
+ help="Don't use parallel python runner.")
+ group.add_option(
+ "--verbose",
+ action="store_true",
+ default=False,
+ help="Print commands to console before executing them.")
+ group.add_option(
+ "--no-dead-vars",
+ action="store_false",
+ dest="dead_vars",
+ default=True,
+ help="Don't perform dead variable analysis.")
+ group.add_option(
+ "--verbose-trace",
+ action="store_true",
+ default=False,
+ help="Enable verbose tracing from the plugin itself."
+ "This can be useful to debug finding dead variable.")
+ group.add_option(
+ "--no-allowlist",
+ action="store_true",
+ default=True,
+ dest="allowlist",
+ help="When building gcsuspects allowlist certain functions as if they can be "
+ "causing GC. Currently used to reduce number of false positives in dead "
+ "variables analysis. See TODO for ALLOWLIST in gcmole.py")
+ group.add_option(
+ "--test-run",
+ action="store_true",
+ default=False,
+ help="Test gcmole on tools/gcmole/gcmole-test.cc")
+ parser.add_option_group(group)
+
+ (options, args) = parser.parse_args()
+
+ if not options.v8_target_cpu:
+ # Backwards compatibility
+ if len(args[0]) > 0 and args[0] in archs:
+ options.v8_target_cpu = args[0]
+ log("Using --v8-target-cpu={}", options.v8_target_cpu)
else:
- pos_args.append(arg)
+ parser.error("Missing --v8-target-cpu option")
- archs = pos_args if len(pos_args) > 0 else ["ia32", "arm", "x64", "arm64"]
+ options.is_bot = False
+ verify_and_convert_dirs(parser, options, gcmole_dir, default_clang_bin_dir)
+ verify_clang_plugin(parser, options)
+ prepare_gcmole_files(options)
+ verify_build_config(parser, options)
any_errors_found = False
- if not TestRun(flags, clang_bin_dir, clang_plugins_dir):
+ if not test_run(options):
any_errors_found = True
else:
- for arch in archs:
- if not ARCHITECTURES[arch]:
- fatal("Unknown arch: {}", arch)
-
- errors_found, output = CheckCorrectnessForArch(arch, False, flags,
- clang_bin_dir,
- clang_plugins_dir)
- any_errors_found = any_errors_found or errors_found
+ errors_found, output = check_correctness_for_arch(options, False)
+ any_errors_found = any_errors_found or errors_found
sys.exit(1 if any_errors_found else 0)
+def verify_and_convert_dirs(parser, options, gcmole_dir, default_clang_bin_dir):
+ # Verify options for setting directors and convert the input strings to Path
+ # objects.
+ options.v8_root_dir = Path(options.v8_root_dir)
+
+ if not options.clang_bin_dir:
+ if os.getenv("CLANG_BIN"):
+ options.clang_bin_dir = Path(os.getenv("CLANG_BIN"))
+ options.is_bot = True
+ else:
+ options.clang_bin_dir = default_clang_bin_dir
+ if not (options.clang_bin_dir / 'clang++').exists():
+ options.clang_bin_dir = Path(gcmole_dir,
+ "tools/gcmole/bootstrap/build/bin")
+ log("Using --clang-bin-dir={}", options.clang_bin_dir)
+ else:
+ options.clang_bin_dir = Path(options.clang_bin_dir)
+
+ if not options.clang_plugins_dir:
+ if os.getenv("CLANG_PLUGINS"):
+ options.clang_plugins_dir = Path(os.getenv("CLANG_PLUGINS"))
+ else:
+ options.clang_plugins_dir = gcmole_dir.resolve()
+ log("Using --clang-plugins-dir={}", options.clang_plugins_dir)
+ else:
+ options.clang_plugins_dir = Path(options.clang_plugins_dir)
+
+ if not options.v8_build_dir:
+ config = ARCHITECTURES[options.v8_target_cpu]
+ options.v8_build_dir = options.v8_root_dir / ('out/%s.release' %
+ config.name)
+ # Fallback for build bots.
+ if not options.v8_build_dir.exists() and os.getenv("CLANG_BIN"):
+ options.v8_build_dir = options.v8_root_dir / 'out/build'
+ log("Using --v8-build-dir={}", options.v8_build_dir)
+ else:
+ options.v8_build_dir = Path(options.v8_build_dir)
+
+ if not options.out_dir:
+ options.out_dir = options.v8_build_dir / 'gen/tools/gcmole'
+ if options.v8_build_dir.exists():
+ options.out_dir.mkdir(parents=True, exist_ok=True)
+ else:
+ options.out_dir = Path(options.out_dir)
+
+ for flag in [
+ "--v8-root-dir", "--v8-build-dir", "--clang-bin-dir",
+ "--clang-plugins-dir", "--out-dir"
+ ]:
+ dir = getattr(options, parser.get_option(flag).dest)
+ if not dir.is_dir():
+ parser.error("{}='{}' does not exist!".format(flag, dir))
+
+
+def verify_clang_plugin(parser, options):
+ libgcmole_path = options.clang_plugins_dir / "libgcmole.so"
+ if not libgcmole_path.is_file():
+ parser.error("'{}' does not exist. Please build gcmole first.".format(
+ libgcmole_path))
+ clang_path = options.clang_bin_dir / "clang++"
+ if not clang_path.is_file():
+ parser.error(
+ "'{}' does not exist. Please build gcmole first.".format(clang_path))
+
+
+def prepare_gcmole_files(options):
+ cmd = [
+ "ninja", "-C", options.v8_build_dir, "v8_gcmole_files",
+ "v8_dump_build_config"
+ ]
+ cmd = list(map(str, cmd))
+ log("Preparing files: {}", " ".join(cmd))
+ try:
+ subprocess.check_call(cmd)
+ except:
+ # Ignore ninja task errors on the bots
+ if options.is_bot:
+ log("Ninja command failed, ignoring errors.")
+ else:
+ raise
+
+
+def verify_build_config(parser, options):
+ if options.is_bot:
+ #TODO(cbruni): Fix, currently not supported on the bots
+ return
+ config_file = options.v8_build_dir / 'v8_build_config.json'
+ with open(config_file) as f:
+ config = json.load(f)
+ found_cpu = None
+ for key in ('v8_target_cpu', 'target_cpu', 'current_cpu'):
+ found_cpu = config.get('v8_target_cpu')
+ if found_cpu == options.v8_target_cpu:
+ return
+ parser.error("Build dir '{}' config doesn't match request cpu. {}: {}".format(
+ options.v8_build_dir, options.v8_target_cpu, found_cpu))
+
+
if __name__ == "__main__":
main(sys.argv)
diff --git a/deps/v8/tools/gcmole/package.sh b/deps/v8/tools/gcmole/package.sh
index bbddd5b772..ceeffc2a29 100755
--- a/deps/v8/tools/gcmole/package.sh
+++ b/deps/v8/tools/gcmole/package.sh
@@ -14,6 +14,7 @@ PACKAGE_DIR="${THIS_DIR}/gcmole-tools"
PACKAGE_FILE="${THIS_DIR}/gcmole-tools.tar.gz"
PACKAGE_SUM="${THIS_DIR}/gcmole-tools.tar.gz.sha1"
BUILD_DIR="${THIS_DIR}/bootstrap/build"
+V8_ROOT_DIR= `realpath "${THIS_DIR}/../.."`
# Echo all commands
set -x
@@ -72,5 +73,8 @@ echo "sudo chroot \$CHROOT_DIR bash -c 'PATH=/docs/depot_tools:\$PATH; /docs/v8/
echo
echo You can now run gcmole using this command:
echo
-echo CLANG_BIN=\"tools/gcmole/gcmole-tools/bin\" python tools/gcmole/gcmole.py
+echo 'tools/gcmole/gcmole.py \'
+echo ' --clang-bin-dir="tools/gcmole/gcmole-tools/bin" \'
+echo ' --clang-plugins-dir="tools/gcmole/gcmole-tools" \'
+echo ' --v8-target-cpu=$CPU'
echo
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 4e26346afa..3db2e42278 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -161,16 +161,17 @@ end
set disable-randomization off
# Install a handler whenever the debugger stops due to a signal. It walks up the
-# stack looking for V8_Dcheck and moves the frame to the one above it so it's
-# immediately at the line of code that triggered the DCHECK.
+# stack looking for V8_Dcheck / V8_Fatal / OS::DebugBreak frame and moves the
+# frame to the one above it so it's immediately at the line of code that
+# triggered the stop condition.
python
-def dcheck_stop_handler(event):
+def v8_stop_handler(event):
frame = gdb.selected_frame()
select_frame = None
message = None
count = 0
- # limit stack scanning since they're usually shallow and otherwise stack
- # overflows can be very slow.
+ # Limit stack scanning since the frames we look for are near the top anyway,
+ # and otherwise stack overflows can be very slow.
while frame is not None and count < 7:
count += 1
# If we are in a frame created by gdb (e.g. for `(gdb) call foo()`), gdb
@@ -186,6 +187,8 @@ def dcheck_stop_handler(event):
break
if frame.name() is not None and frame.name().startswith('V8_Fatal'):
select_frame = frame.older()
+ if frame.name() == 'v8::base::OS::DebugBreak':
+ select_frame = frame.older()
frame = frame.older()
if select_frame is not None:
@@ -194,7 +197,7 @@ def dcheck_stop_handler(event):
if message:
print('DCHECK error: {}'.format(message))
-gdb.events.stop.connect(dcheck_stop_handler)
+gdb.events.stop.connect(v8_stop_handler)
end
# Code imported from chromium/src/tools/gdb/gdbinit
diff --git a/deps/v8/tools/gen-keywords-gen-h.py b/deps/v8/tools/gen-keywords-gen-h.py
index 02750dc109..97c91ee289 100755
--- a/deps/v8/tools/gen-keywords-gen-h.py
+++ b/deps/v8/tools/gen-keywords-gen-h.py
@@ -83,7 +83,8 @@ def trim_and_dcheck_char_table(out):
def use_isinrange(out):
# Our IsInRange method is more efficient than checking for min/max length
return checked_sub(r'if \(len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH\)',
- r'if (IsInRange(len, MIN_WORD_LENGTH, MAX_WORD_LENGTH))',
+ r'if (base::IsInRange(len, MIN_WORD_LENGTH, '
+ + r'MAX_WORD_LENGTH))',
out)
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 298af332d9..9fae92417e 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -337,7 +337,7 @@ header = '''
*/
#include "src/init/v8.h"
-#include "src/codegen/register-arch.h"
+#include "src/codegen/register.h"
#include "src/execution/frames.h"
#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
#include "src/objects/contexts.h"
@@ -359,7 +359,7 @@ STACK_FRAME_TYPE_LIST(FRAME_CONST)
#undef FRAME_CONST
-''' % sys.argv[0];
+''' % sys.argv[0]
footer = '''
}
@@ -440,12 +440,12 @@ def load_objects_from_file(objfilename, checktypes):
continue;
if (in_torque_insttype and (not line or line.isspace())):
- in_torque_insttype = False
- continue
+ in_torque_insttype = False
+ continue
if (in_torque_fulldef and (not line or line.isspace())):
- in_torque_fulldef = False
- continue
+ in_torque_fulldef = False
+ continue
pre = line.strip()
line = re.sub('// .*', '', line.strip());
@@ -497,7 +497,7 @@ def load_objects_from_file(objfilename, checktypes):
for entry in entries:
entry = entry.strip()
if not entry:
- continue
+ continue
start = entry.find('(');
end = entry.find(')', start);
rest = entry[start + 1: end];
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 42c118c9d5..2d6f218e11 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -37,14 +37,17 @@ AUTO_EXCLUDE = [
'src/trap-handler/trap-handler-simulator.h',
]
AUTO_EXCLUDE_PATTERNS = [
- 'src/base/atomicops_internals_.*',
- # TODO(petermarshall): Enable once Perfetto is built by default.
- 'src/libplatform/tracing/perfetto*',
+ 'src/base/atomicops_internals_.*',
+ # TODO(petermarshall): Enable once Perfetto is built by default.
+ 'src/libplatform/tracing/perfetto*',
+ # TODO(v8:7700): Enable once Maglev is built by default.
+ 'src/maglev/.*',
] + [
- # platform-specific headers
- '\\b{}\\b'.format(p) for p in
- ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
- 'ppc', 'riscv64', 'loong64')]
+ # platform-specific headers
+ '\\b{}\\b'.format(p)
+ for p in ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64',
+ 's390', 'ppc', 'riscv64', 'loong64')
+]
args = None
def parse_args():
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 368580f0c3..1d73b43ace 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -168,7 +168,7 @@ def FullDump(reader, heap):
print("%s - %s" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size)))
print(start + size + 1);
- for i in range(0, size, reader.PointerSize()):
+ for i in range(0, size, reader.MachinePointerSize()):
slot = start + i
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
@@ -710,7 +710,7 @@ class MinidumpReader(object):
def _FindObjdump(self, options):
if options.objdump:
- objdump_bin = options.objdump
+ objdump_bin = options.objdump
else:
objdump_bin = self._FindThirdPartyObjdump()
if not objdump_bin or not os.path.exists(objdump_bin):
@@ -722,29 +722,29 @@ class MinidumpReader(object):
disasm.OBJDUMP_BIN = objdump_bin
def _FindThirdPartyObjdump(self):
- # Try to find the platform specific objdump
- third_party_dir = os.path.join(
- os.path.dirname(os.path.dirname(__file__)), 'third_party')
- objdumps = []
- for root, dirs, files in os.walk(third_party_dir):
- for file in files:
- if file.endswith("objdump"):
- objdumps.append(os.path.join(root, file))
- if self.arch == MD_CPU_ARCHITECTURE_ARM:
- platform_filter = 'arm-linux'
- elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
- platform_filter = 'aarch64'
- else:
- # use default otherwise
- return None
- print(("# Looking for platform specific (%s) objdump in "
- "third_party directory.") % platform_filter)
- objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
- if len(objdumps) == 0:
- print("# Could not find platform specific objdump in third_party.")
- print("# Make sure you installed the correct SDK.")
- return None
- return objdumps[0]
+ # Try to find the platform specific objdump
+ third_party_dir = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)), 'third_party')
+ objdumps = []
+ for root, dirs, files in os.walk(third_party_dir):
+ for file in files:
+ if file.endswith("objdump"):
+ objdumps.append(os.path.join(root, file))
+ if self.arch == MD_CPU_ARCHITECTURE_ARM:
+ platform_filter = 'arm-linux'
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ platform_filter = 'aarch64'
+ else:
+ # use default otherwise
+ return None
+ print(("# Looking for platform specific (%s) objdump in "
+ "third_party directory.") % platform_filter)
+ objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
+ if len(objdumps) == 0:
+ print("# Could not find platform specific objdump in third_party.")
+ print("# Make sure you installed the correct SDK.")
+ return None
+ return objdumps[0]
def ContextDescriptor(self):
if self.arch == MD_CPU_ARCHITECTURE_X86:
@@ -765,7 +765,7 @@ class MinidumpReader(object):
return self.FindLocation(address) is not None
def IsAlignedAddress(self, address):
- return (address % self.PointerSize()) == 0
+ return (address % self.MachinePointerSize()) == 0
def IsExceptionStackAddress(self, address):
if not self.IsAlignedAddress(address): return False
@@ -804,11 +804,29 @@ class MinidumpReader(object):
return (self.arch == MD_CPU_ARCHITECTURE_ARM64 or
self.arch == MD_CPU_ARCHITECTURE_AMD64)
+ def IsPointerCompressed(self):
+ # Assume all 64-bit builds are pointer compressed.
+ return self.Is64()
+
+ def Is32BitTagged(self):
+ return not self.Is64() or self.IsPointerCompressed()
+
+ def ReadTagged(self, address):
+ if self.Is32BitTagged():
+ return self.ReadU32(address)
+ return self.ReadU64(address)
+
def ReadUIntPtr(self, address):
if self.Is64():
return self.ReadU64(address)
return self.ReadU32(address)
+ def ReadSized(self, address, size):
+ if size == 8:
+ return self.ReadU64(address)
+ assert (size == 4)
+ return self.ReadU32(address)
+
def ReadBytes(self, address, size):
location = self.FindLocation(address)
return self.minidump[location:location + size]
@@ -819,8 +837,10 @@ class MinidumpReader(object):
return ctypes.c_uint32.from_buffer(self.minidump, location).value
def ReadAsciiPtr(self, address):
- ascii_content = [c if c >= '\x20' and c < '\x7f' else '.'
- for c in self.ReadBytes(address, self.PointerSize())]
+ ascii_content = [
+ c if c >= '\x20' and c < '\x7f' else '.'
+ for c in self.ReadBytes(address, self.MachinePointerSize())
+ ]
return ''.join(ascii_content)
def ReadAsciiString(self, address):
@@ -908,7 +928,7 @@ class MinidumpReader(object):
def FindWord(self, word, alignment=0):
def search_inside_region(reader, start, size, location):
location = (location + alignment) & ~alignment
- for i in range(size - self.PointerSize()):
+ for i in range(size - self.MachinePointerSize()):
loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
@@ -920,7 +940,7 @@ class MinidumpReader(object):
aligned_res = []
unaligned_res = []
def search_inside_region(reader, start, size, location):
- for i in range(size - self.PointerSize()):
+ for i in range(size - self.MachinePointerSize()):
loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
@@ -1023,11 +1043,21 @@ class MinidumpReader(object):
return "%016x" % value
return "%08x" % value
- def PointerSize(self):
+ def FormatTagged(self, value):
+ if self.Is64() and not self.IsPointerCompressed():
+ return "%016x" % value
+ return "%08x" % value
+
+ def MachinePointerSize(self):
if self.Is64():
return 8
return 4
+ def TaggedPointerSize(self):
+ if self.IsPointerCompressed():
+ return 4
+ return self.MachinePointerSize()
+
def Register(self, name):
return self.exception_context.__getattribute__(name)
@@ -1173,11 +1203,11 @@ class HeapObject(object):
instance_type)
def ObjectField(self, offset):
- field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
+ field_value = self.heap.reader.ReadTagged(self.address + offset)
return self.heap.FindObjectOrSmi(field_value)
def SmiField(self, offset):
- field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
+ field_value = self.heap.reader.ReadTagged(self.address + offset)
if self.heap.IsSmi(field_value):
return self.heap.SmiUntag(field_value)
return None
@@ -1189,7 +1219,7 @@ class Map(HeapObject):
# Instance Sizes
def InstanceSizesOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
def InstanceSizeOffset(self):
return self.InstanceSizesOffset()
@@ -1224,28 +1254,29 @@ class Map(HeapObject):
return self.InstanceAttributesOffset() + self.heap.IntSize()
def PrototypeOffset(self):
- return self.BitField3Offset() + self.heap.PointerSize()
+ return self.BitField3Offset() + self.heap.TaggedPointerSize()
def ConstructorOrBackPointerOffset(self):
- return self.PrototypeOffset() + self.heap.PointerSize()
+ return self.PrototypeOffset() + self.heap.TaggedPointerSize()
def TransitionsOrPrototypeInfoOffset(self):
- return self.ConstructorOrBackPointerOffset() + self.heap.PointerSize()
+ return self.ConstructorOrBackPointerOffset() + self.heap.TaggedPointerSize()
def DescriptorsOffset(self):
- return self.TransitionsOrPrototypeInfoOffset() + self.heap.PointerSize()
+ return (self.TransitionsOrPrototypeInfoOffset() +
+ self.heap.TaggedPointerSize())
def CodeCacheOffset(self):
- return self.DescriptorsOffset() + self.heap.PointerSize()
+ return self.DescriptorsOffset() + self.heap.TaggedPointerSize()
def DependentCodeOffset(self):
- return self.CodeCacheOffset() + self.heap.PointerSize()
+ return self.CodeCacheOffset() + self.heap.TaggedPointerSize()
def ReadByte(self, offset):
return self.heap.reader.ReadU8(self.address + offset)
- def ReadWord(self, offset):
- return self.heap.reader.ReadUIntPtr(self.address + offset)
+ def ReadSlot(self, offset):
+ return self.heap.reader.ReadTagged(self.address + offset)
def Print(self, p):
p.Print("Map(%08x)" % (self.address))
@@ -1264,7 +1295,7 @@ class Map(HeapObject):
p.Print(" - kind: %s" % (self.Decode(3, 5, bitfield2)))
- bitfield3 = self.ReadWord(self.BitField3Offset())
+ bitfield3 = self.ReadSlot(self.BitField3Offset())
p.Print(
" - EnumLength: %d NumberOfOwnDescriptors: %d OwnsDescriptors: %s" % (
@@ -1299,7 +1330,7 @@ class Map(HeapObject):
class String(HeapObject):
def LengthOffset(self):
# First word after the map is the hash, the second is the length.
- return self.heap.PointerSize() * 2
+ return self.heap.TaggedPointerSize() * 2
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1317,7 +1348,7 @@ class String(HeapObject):
class SeqString(String):
def CharsOffset(self):
- return self.heap.PointerSize() * 3
+ return self.heap.TaggedPointerSize() * 3
def __init__(self, heap, map, address):
String.__init__(self, heap, map, address)
@@ -1360,10 +1391,10 @@ class ExternalString(String):
class ConsString(String):
def LeftOffset(self):
- return self.heap.PointerSize() * 3
+ return self.heap.TaggedPointerSize() * 3
def RightOffset(self):
- return self.heap.PointerSize() * 4
+ return self.heap.TaggedPointerSize() * 4
def __init__(self, heap, map, address):
String.__init__(self, heap, map, address)
@@ -1390,13 +1421,13 @@ class Oddball(HeapObject):
]
def ToStringOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
def ToNumberOffset(self):
- return self.ToStringOffset() + self.heap.PointerSize()
+ return self.ToStringOffset() + self.heap.TaggedPointerSize()
def KindOffset(self):
- return self.ToNumberOffset() + self.heap.PointerSize()
+ return self.ToNumberOffset() + self.heap.TaggedPointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1418,13 +1449,13 @@ class Oddball(HeapObject):
class FixedArray(HeapObject):
def LengthOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
def ElementsOffset(self):
- return self.heap.PointerSize() * 2
+ return self.heap.TaggedPointerSize() * 2
def MemberOffset(self, i):
- return self.ElementsOffset() + self.heap.PointerSize() * i
+ return self.ElementsOffset() + self.heap.TaggedPointerSize() * i
def Get(self, i):
return self.ObjectField(self.MemberOffset(i))
@@ -1561,10 +1592,10 @@ class TransitionArray(object):
class JSFunction(HeapObject):
def CodeEntryOffset(self):
- return 3 * self.heap.PointerSize()
+ return 3 * self.heap.TaggedPointerSize()
def SharedOffset(self):
- return 5 * self.heap.PointerSize()
+ return 5 * self.heap.TaggedPointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1611,19 +1642,19 @@ class JSFunction(HeapObject):
class SharedFunctionInfo(HeapObject):
def CodeOffset(self):
- return 2 * self.heap.PointerSize()
+ return 2 * self.heap.TaggedPointerSize()
def ScriptOffset(self):
- return 7 * self.heap.PointerSize()
+ return 7 * self.heap.TaggedPointerSize()
def InferredNameOffset(self):
- return 9 * self.heap.PointerSize()
+ return 9 * self.heap.TaggedPointerSize()
def EndPositionOffset(self):
- return 12 * self.heap.PointerSize() + 4 * self.heap.IntSize()
+ return 12 * self.heap.TaggedPointerSize() + 4 * self.heap.IntSize()
def StartPositionAndTypeOffset(self):
- return 12 * self.heap.PointerSize() + 5 * self.heap.IntSize()
+ return 12 * self.heap.TaggedPointerSize() + 5 * self.heap.IntSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1631,7 +1662,7 @@ class SharedFunctionInfo(HeapObject):
self.code = self.ObjectField(self.CodeOffset())
self.script = self.ObjectField(self.ScriptOffset())
self.inferred_name = self.ObjectField(self.InferredNameOffset())
- if heap.PointerSize() == 8:
+ if heap.TaggedPointerSize() == 8:
start_position_and_type = \
heap.reader.ReadU32(self.StartPositionAndTypeOffset())
self.start_position = start_position_and_type >> 2
@@ -1653,10 +1684,10 @@ class SharedFunctionInfo(HeapObject):
class Script(HeapObject):
def SourceOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
def NameOffset(self):
- return self.SourceOffset() + self.heap.PointerSize()
+ return self.SourceOffset() + self.heap.TaggedPointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1666,10 +1697,10 @@ class Script(HeapObject):
class CodeCache(HeapObject):
def DefaultCacheOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
def NormalTypeCacheOffset(self):
- return self.DefaultCacheOffset() + self.heap.PointerSize()
+ return self.DefaultCacheOffset() + self.heap.TaggedPointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1689,12 +1720,12 @@ class Code(HeapObject):
CODE_ALIGNMENT_MASK = (1 << 5) - 1
def InstructionSizeOffset(self):
- return self.heap.PointerSize()
+ return self.heap.TaggedPointerSize()
@staticmethod
def HeaderSize(heap):
- return (heap.PointerSize() + heap.IntSize() + \
- 4 * heap.PointerSize() + 3 * heap.IntSize() + \
+ return (heap.TaggedPointerSize() + heap.IntSize() + \
+ 4 * heap.TaggedPointerSize() + 3 * heap.IntSize() + \
Code.CODE_ALIGNMENT_MASK) & ~Code.CODE_ALIGNMENT_MASK
def __init__(self, heap, map, address):
@@ -1763,7 +1794,7 @@ class V8Heap(object):
if not self.IsTaggedObjectAddress(tagged_address): return None
address = tagged_address - 1
if not self.reader.IsValidAddress(address): return None
- map_tagged_address = self.reader.ReadUIntPtr(address)
+ map_tagged_address = self.reader.ReadTagged(address)
if tagged_address == map_tagged_address:
# Meta map?
meta_map = Map(self, None, address)
@@ -1796,11 +1827,17 @@ class V8Heap(object):
def IntSize(self):
return 4
- def PointerSize(self):
- return self.reader.PointerSize()
+ def MachinePointerSize(self):
+ return self.reader.MachinePointerSize()
+
+ def TaggedPointerSize(self):
+ return self.reader.TaggedPointerSize()
+
+ def IsPointerCompressed(self):
+ return self.reader.IsPointerCompressed()
def ObjectAlignmentMask(self):
- return self.PointerSize() - 1
+ return self.TaggedPointerSize() - 1
def IsTaggedObjectAddress(self, address):
return (address & self.ObjectAlignmentMask()) == 1
@@ -1829,13 +1866,14 @@ class V8Heap(object):
return (address & self.ObjectAlignmentMask()) == 1
def IsSmi(self, tagged_address):
- if self.reader.Is64():
+ if self.reader.Is64() and not self.reader.IsPointerCompressed():
return (tagged_address & 0xFFFFFFFF) == 0
return not self.IsTaggedAddress(tagged_address)
def SmiUntag(self, tagged_address):
- if self.reader.Is64(): return tagged_address >> 32
- return tagged_address >> 1
+ if self.reader.Is64() and not self.reader.IsPointerCompressed():
+ return tagged_address >> 32
+ return (tagged_address >> 1) & 0xFFFFFFFF
def AddressTypeMarker(self, address):
if not self.reader.IsValidAddress(address): return " "
@@ -1858,7 +1896,7 @@ class V8Heap(object):
if self.IsTaggedObjectAddress(address):
address -= 1
if not self.reader.IsValidAlignedAddress(address): return None
- offset = (address - slot) / self.PointerSize()
+ offset = (address - slot) / self.MachinePointerSize()
lower_limit = -32
upper_limit = 128
@@ -1873,12 +1911,12 @@ class V8Heap(object):
def FindObjectPointers(self, start=0, end=0):
objects = set()
def find_object_in_region(reader, start, size, location):
- for slot in range(start, start+size, self.reader.PointerSize()):
+ for slot in range(start, start + size, self.reader.TaggedPointerSize()):
if not self.reader.IsValidAddress(slot): break
# Collect only tagged pointers (object) to tagged pointers (map)
- tagged_address = self.reader.ReadUIntPtr(slot)
+ tagged_address = self.reader.ReadTagged(slot)
if not self.IsValidTaggedObjectAddress(tagged_address): continue
- map_address = self.reader.ReadUIntPtr(tagged_address - 1)
+ map_address = self.reader.ReadTagged(tagged_address - 1)
if not self.IsTaggedMapAddress(map_address): continue
objects.add(tagged_address)
@@ -1951,10 +1989,12 @@ class InspectionInfo(object):
exception_thread.stack.memory.data_size
frame_pointer = self.reader.ExceptionFP()
self.styles[frame_pointer] = "frame"
- for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom,
+ self.reader.MachinePointerSize()):
# stack address
self.styles[slot] = "sa"
- for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom,
+ self.reader.MachinePointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
# stack value
self.styles[maybe_address] = "sv"
@@ -2026,7 +2066,7 @@ class InspectionPadawan(object):
# Frame markers only occur directly after a frame pointer and only on the
# stack.
if not self.reader.IsExceptionStackAddress(slot): return False
- next_slot = slot + self.reader.PointerSize()
+ next_slot = slot + self.reader.MachinePointerSize()
if not self.reader.IsValidAddress(next_slot): return False
next_address = self.reader.ReadUIntPtr(next_slot)
return self.reader.IsExceptionStackAddress(next_address)
@@ -2058,7 +2098,7 @@ class InspectionPadawan(object):
if found_obj: return found_obj
address = tagged_address - 1
if self.reader.IsValidAddress(address):
- map_tagged_address = self.reader.ReadUIntPtr(address)
+ map_tagged_address = self.reader.ReadTagged(address)
map = self.SenseMap(map_tagged_address)
if map is None: return None
instance_type_name = INSTANCE_TYPES.get(map.instance_type)
@@ -2118,7 +2158,7 @@ class InspectionPadawan(object):
Returns the first address where the normal stack starts again.
"""
# Only look at the first 1k words on the stack
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
if start is None: start = self.reader.ExceptionSP()
if not self.reader.IsValidAddress(start): return start
end = start + ptr_size * 1024 * 4
@@ -2140,7 +2180,7 @@ class InspectionPadawan(object):
print_message)
def TryExtractStackTrace(self, slot, start, end, print_message):
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
end_marker = STACK_TRACE_MARKER + 1;
header_size = 10
@@ -2163,7 +2203,7 @@ class InspectionPadawan(object):
return stack_start
def FindPtr(self, expected_value, start, end):
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
for slot in range(start, end, ptr_size):
if not self.reader.IsValidAddress(slot): return None
value = self.reader.ReadUIntPtr(slot)
@@ -2171,7 +2211,7 @@ class InspectionPadawan(object):
return None
def TryExtractErrorMessage(self, slot, start, end, print_message):
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
end_marker = ERROR_MESSAGE_MARKER + 1;
header_size = 1
end_search = start + 1024 + (header_size * ptr_size);
@@ -2186,7 +2226,7 @@ class InspectionPadawan(object):
def TryExtractOldStyleStackTrace(self, message_slot, start, end,
print_message):
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
if message_slot == 0:
"""
On Mac we don't always get proper magic markers, so just try printing
@@ -2225,7 +2265,7 @@ class InspectionPadawan(object):
print(" Use `dsa` to print the message with annotated addresses.")
print("")
return
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
# Annotate all addresses in the dumped message
prog = re.compile("[0-9a-fA-F]{%s}" % ptr_size*2)
addresses = list(set(prog.findall(message)))
@@ -2252,7 +2292,7 @@ class InspectionPadawan(object):
def TryInferContext(self, address):
if self.context: return
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.MachinePointerSize()
possible_context = dict()
count = 0
while self.reader.IsExceptionStackAddress(address):
@@ -2287,7 +2327,7 @@ class InspectionPadawan(object):
in_oom_dump_area = False
is_stack = self.reader.IsExceptionStackAddress(start)
free_space_end = 0
- ptr_size = self.reader.PointerSize()
+ ptr_size = self.reader.TaggedPointerSize()
for slot in range(start, end, ptr_size):
if not self.reader.IsValidAddress(slot):
@@ -2309,7 +2349,7 @@ class InspectionPadawan(object):
if isinstance(heap_object, KnownMap) and \
heap_object.known_name == "FreeSpaceMap":
# The free-space length is is stored as a Smi in the next slot.
- length = self.reader.ReadUIntPtr(slot + ptr_size)
+ length = self.reader.ReadTagged(slot + ptr_size)
if self.heap.IsSmi(length):
length = self.heap.SmiUntag(length)
free_space_end = slot + length - ptr_size
@@ -2711,7 +2751,8 @@ class InspectionWebFormatter(object):
stack_bottom = exception_thread.stack.start + \
exception_thread.stack.memory.data_size
stack_map = {self.reader.ExceptionIP(): -1}
- for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom,
+ self.reader.MachinePointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
if not maybe_address in stack_map:
stack_map[maybe_address] = slot
@@ -2757,6 +2798,18 @@ class InspectionWebFormatter(object):
return ("<a %s href=s?%s&amp;val=%s>%s</a>" %
(style_class, self.encfilename, straddress, straddress))
+ def format_onheap_address(self, size, maybeaddress, uncompressed):
+ if maybeaddress is None:
+ return "not in dump"
+ else:
+ straddress = "0x" + self.reader.FormatTagged(maybeaddress)
+ struncompressed = "0x" + self.reader.FormatIntPtr(uncompressed)
+ style_class = ""
+ if not self.reader.IsValidAddress(maybeaddress):
+ style_class = "class=nd"
+ return ("<a %s href=s?%s&amp;val=%s>%s</a>" %
+ (style_class, self.encfilename, struncompressed, straddress))
+
def output_header(self, f):
f.write(WEB_HEADER %
{ "query_dump" : self.encfilename,
@@ -2779,7 +2832,8 @@ class InspectionWebFormatter(object):
stack_bottom = min(exception_thread.stack.start + \
exception_thread.stack.memory.data_size,
stack_top + self.MAX_CONTEXT_STACK)
- self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack")
+ self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack",
+ self.heap.MachinePointerSize())
f.write('</div>')
self.output_footer(f)
@@ -2900,7 +2954,11 @@ class InspectionWebFormatter(object):
return
region = self.reader.FindRegion(address)
if datakind == "address":
- self.output_words(f, region[0], region[0] + region[1], address, "Dump")
+ self.output_words(f, region[0], region[0] + region[1], address, "Dump",
+ self.heap.MachinePointerSize())
+ if datakind == "tagged":
+ self.output_words(f, region[0], region[0] + region[1], address,
+ "Tagged Dump", self.heap.TaggedPointerSize())
elif datakind == "ascii":
self.output_ascii(f, region[0], region[0] + region[1], address)
self.output_footer(f)
@@ -2909,14 +2967,13 @@ class InspectionWebFormatter(object):
f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
return
- def output_words(self, f, start_address, end_address,
- highlight_address, desc):
+ def output_words(self, f, start_address, end_address, highlight_address, desc,
+ size):
region = self.reader.FindRegion(highlight_address)
if region is None:
f.write("<h3>Address 0x%x not found in the dump.</h3>" %
(highlight_address))
return
- size = self.heap.PointerSize()
start_address = self.align_down(start_address, size)
low = self.align_down(region[0], size)
high = self.align_up(region[0] + region[1], size)
@@ -2943,6 +3000,7 @@ class InspectionWebFormatter(object):
slot = start_address + j
heap_object = ""
maybe_address = None
+ maybe_uncompressed_address = None
end_region = region[0] + region[1]
if slot < region[0] or slot + size > end_region:
straddress = "0x"
@@ -2954,10 +3012,20 @@ class InspectionWebFormatter(object):
for i in range(slot, region[0]):
straddress += "??"
else:
- maybe_address = self.reader.ReadUIntPtr(slot)
- straddress = self.format_address(maybe_address)
- if maybe_address:
- heap_object = self.format_object(maybe_address)
+ maybe_address = self.reader.ReadSized(slot, size)
+ if size == self.reader.MachinePointerSize():
+ maybe_uncompressed_address = maybe_address
+ else:
+ maybe_uncompressed_address = (slot & (0xFFFFFF << 32)) | (
+ maybe_address & 0xFFFFFF)
+
+ if size == self.reader.TaggedPointerSize():
+ straddress = self.format_onheap_address(size, maybe_address,
+ maybe_uncompressed_address)
+ if maybe_address:
+ heap_object = self.format_object(maybe_address)
+ else:
+ straddress = self.format_address(maybe_address)
address_fmt = "%s&nbsp;</td>"
if slot == highlight_address:
@@ -2974,12 +3042,12 @@ class InspectionWebFormatter(object):
f.write("</td>")
self.td_from_address(f, slot)
f.write(address_fmt % self.format_address(slot))
- self.td_from_address(f, maybe_address)
+ self.td_from_address(f, maybe_uncompressed_address)
f.write(":&nbsp;%s&nbsp;</td>" % straddress)
f.write("<td>")
- if maybe_address != None:
- self.output_comment_box(
- f, "sv-" + self.reader.FormatIntPtr(slot), maybe_address)
+ if maybe_uncompressed_address != None:
+ self.output_comment_box(f, "sv-" + self.reader.FormatIntPtr(slot),
+ maybe_uncompressed_address)
f.write("</td>")
f.write("<td>%s</td>" % (heap_object or ''))
f.write("</tr>")
@@ -3134,9 +3202,9 @@ class InspectionWebFormatter(object):
# Some disassemblers insert spaces between each byte,
# while some do not.
if code[2] == " ":
- op_offset = 3 * num_bytes - 1
+ op_offset = 3 * num_bytes - 1
else:
- op_offset = 2 * num_bytes
+ op_offset = 2 * num_bytes
# Compute the actual call target which the disassembler is too stupid
# to figure out (it adds the call offset to the disassembly offset rather
@@ -3230,7 +3298,12 @@ class InspectionWebFormatter(object):
straddress)
else:
# Print as words
- self.output_words(f, address - 8, address + 32, address, "Dump")
+ self.output_words(f, address - 8, address + 32, address, "Dump",
+ self.heap.MachinePointerSize())
+
+ if self.heap.IsPointerCompressed():
+ self.output_words(f, address - 8, address + 32, address,
+ "Tagged Dump", self.heap.TaggedPointerSize())
# Print as ASCII
f.write("<hr>")
@@ -3534,10 +3607,10 @@ class InspectionShell(cmd.Cmd):
self.dd_start = self.ParseAddressExpr(args[0])
self.dd_num = int(args[1], 16) if len(args) > 1 else 0x10
else:
- self.dd_start += self.dd_num * self.reader.PointerSize()
+ self.dd_start += self.dd_num * self.reader.MachinePointerSize()
if not self.reader.IsAlignedAddress(self.dd_start):
print("Warning: Dumping un-aligned memory, is this what you had in mind?")
- end = self.dd_start + self.reader.PointerSize() * self.dd_num
+ end = self.dd_start + self.reader.MachinePointerSize() * self.dd_num
self.padawan.InterpretMemory(self.dd_start, end)
def do_do(self, address):
@@ -3828,7 +3901,7 @@ def AnalyzeMinidump(options, minidump_name):
stack_top = reader.ExceptionSP()
stack_bottom = reader.StackBottom()
stack_map = {reader.ExceptionIP(): -1}
- for slot in range(stack_top, stack_bottom, reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom, reader.MachinePointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
if not maybe_address in stack_map:
stack_map[maybe_address] = slot
diff --git a/deps/v8/tools/heap-layout/heap-layout-viewer-template.html b/deps/v8/tools/heap-layout/heap-layout-viewer-template.html
new file mode 100644
index 0000000000..c50f5cc3d3
--- /dev/null
+++ b/deps/v8/tools/heap-layout/heap-layout-viewer-template.html
@@ -0,0 +1,14 @@
+<!-- Copyright 2021 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<style>
+ #chart {
+ width: 100%;
+ height: 600px;
+ }
+</style>
+<div id="container" style="display: none;">
+ <h2>V8 Heap Layout</h2>
+ <div id="chart"></div>
+</div> \ No newline at end of file
diff --git a/deps/v8/tools/heap-layout/heap-layout-viewer.mjs b/deps/v8/tools/heap-layout/heap-layout-viewer.mjs
new file mode 100644
index 0000000000..ada4a02311
--- /dev/null
+++ b/deps/v8/tools/heap-layout/heap-layout-viewer.mjs
@@ -0,0 +1,225 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {GB, MB} from '../js/helper.mjs';
+import {DOM} from '../js/web-api-helper.mjs';
+
+import {getColorFromSpaceName, kSpaceNames} from './space-categories.mjs';
+
+DOM.defineCustomElement('heap-layout-viewer',
+ (templateText) =>
+ class HeapLayoutViewer extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.innerHTML = templateText;
+ this.chart = echarts.init(this.$('#chart'), null, {
+ renderer: 'canvas',
+ });
+ window.addEventListener('resize', () => {
+ this.chart.resize();
+ });
+ this.currentIndex = 0;
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ this.drawChart(0);
+ }
+
+ getChartTitle(index) {
+ return this.data[index].header;
+ }
+
+ getSeriesData(pageinfos) {
+ let ret = [];
+ for (let pageinfo of pageinfos) {
+ ret.push({value: pageinfo});
+ }
+ return ret;
+ }
+
+ getChartSeries(index) {
+ const snapshot = this.data[index];
+ let series = [];
+ for (const [space_name, pageinfos] of Object.entries(snapshot.data)) {
+ let space_series = {
+ name: space_name,
+ type: 'custom',
+ renderItem(params, api) {
+ const addressBegin = api.value(1);
+ const addressEnd = api.value(2);
+ const allocated = api.value(3);
+ const start = api.coord([addressBegin, 0]);
+ const end = api.coord([addressEnd, 0]);
+
+ const allocatedRate = allocated / (addressEnd - addressBegin);
+ const unAllocatedRate = 1 - allocatedRate;
+
+ const standardH = api.size([0, 1])[1];
+ const standardY = start[1] - standardH / 2;
+
+ const allocatedY = standardY + standardH * unAllocatedRate;
+ const allocatedH = standardH * allocatedRate;
+
+ const unAllocatedY = standardY;
+ const unAllocatedH = standardH - allocatedH;
+
+ const allocatedShape = echarts.graphic.clipRectByRect(
+ {
+ x: start[0],
+ y: allocatedY,
+ width: end[0] - start[0],
+ height: allocatedH,
+ },
+ {
+ x: params.coordSys.x,
+ y: params.coordSys.y,
+ width: params.coordSys.width,
+ height: params.coordSys.height,
+ });
+
+ const unAllocatedShape = echarts.graphic.clipRectByRect(
+ {
+ x: start[0],
+ y: unAllocatedY,
+ width: end[0] - start[0],
+ height: unAllocatedH,
+ },
+ {
+ x: params.coordSys.x,
+ y: params.coordSys.y,
+ width: params.coordSys.width,
+ height: params.coordSys.height,
+ });
+
+ const ret = {
+ type: 'group',
+ children: [
+ {
+ type: 'rect',
+ shape: allocatedShape,
+ style: api.style(),
+ },
+ {
+ type: 'rect',
+ shape: unAllocatedShape,
+ style: {
+ fill: '#000000',
+ },
+ },
+ ],
+ };
+ return ret;
+ },
+ data: this.getSeriesData(pageinfos),
+ encode: {
+ x: [1, 2],
+ },
+ itemStyle: {
+ color: getColorFromSpaceName(space_name),
+ },
+ };
+ series.push(space_series);
+ }
+ return series;
+ }
+
+ drawChart(index) {
+ if (index >= this.data.length || index < 0) {
+ console.error('Invalid index:', index);
+ return;
+ }
+ const option = {
+ tooltip: {
+ formatter(params) {
+ const ret = params.marker + params.value[0] + '<br>' +
+ 'address:' + (params.value[1] / MB).toFixed(3) + 'MB' +
+ '<br>' +
+ 'size:' + ((params.value[2] - params.value[1]) / MB).toFixed(3) +
+ 'MB' +
+ '<br>' +
+ 'allocated:' + (params.value[3] / MB).toFixed(3) + 'MB' +
+ '<br>' +
+ 'wasted:' + params.value[4] + 'B';
+ return ret;
+ },
+ },
+ grid: {
+ bottom: 120,
+ top: 120,
+ },
+ dataZoom: [
+ {
+ type: 'slider',
+ filterMode: 'weakFilter',
+ showDataShadow: true,
+ labelFormatter: '',
+ },
+ {
+ type: 'inside',
+ filterMode: 'weakFilter',
+ },
+ ],
+ legend: {
+ show: true,
+ data: kSpaceNames,
+ top: '6%',
+ type: 'scroll',
+ },
+ title: {
+ text: this.getChartTitle(index),
+ left: 'center',
+ },
+ xAxis: {
+ name: 'Address offset in heap(MB)',
+ nameLocation: 'center',
+ nameTextStyle: {
+ fontSize: 25,
+ padding: [30, 0, 50, 0],
+ },
+ type: 'value',
+ min: 0,
+ max: 4 * GB,
+ axisLabel: {
+ rotate: 0,
+ formatter(value, index) {
+ value = value / MB;
+ value = value.toFixed(3);
+ return value;
+ },
+ },
+ },
+ yAxis: {
+ data: ['Page'],
+ },
+ series: this.getChartSeries(index),
+ };
+
+ this.show();
+ this.chart.resize();
+ this.chart.setOption(option);
+ this.currentIndex = index;
+ }
+});
diff --git a/deps/v8/tools/heap-layout/heap-size-trend-viewer-template.html b/deps/v8/tools/heap-layout/heap-size-trend-viewer-template.html
new file mode 100644
index 0000000000..cbf2bc711d
--- /dev/null
+++ b/deps/v8/tools/heap-layout/heap-size-trend-viewer-template.html
@@ -0,0 +1,14 @@
+<!-- Copyright 2021 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<style>
+ #chart {
+ width: 100%;
+ height: 400px;
+ }
+</style>
+<div id="container" style="display: none;">
+ <h2>V8 Heap Space Size Trend</h2>
+ <div id="chart"></div>
+</div> \ No newline at end of file
diff --git a/deps/v8/tools/heap-layout/heap-size-trend-viewer.mjs b/deps/v8/tools/heap-layout/heap-size-trend-viewer.mjs
new file mode 100644
index 0000000000..d7b8737d7f
--- /dev/null
+++ b/deps/v8/tools/heap-layout/heap-size-trend-viewer.mjs
@@ -0,0 +1,266 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {MB} from '../js/helper.mjs';
+import {DOM} from '../js/web-api-helper.mjs';
+
+import {getColorFromSpaceName, kSpaceNames} from './space-categories.mjs';
+
+class TrendLineHelper {
+ static re_gc_count = /(?<=(Before|After) GC:)\d+(?=,)/;
+ static re_allocated = /allocated/;
+ static re_space_name = /^[a-z_]+_space/;
+
+ static snapshotHeaderToXLabel(header) {
+ const gc_count = this.re_gc_count.exec(header)[0];
+ const alpha = header[0];
+ return alpha + gc_count;
+ }
+
+ static getLineSymbolFromTrendLineName(trend_line_name) {
+ const is_allocated_line = this.re_allocated.test(trend_line_name);
+ if (is_allocated_line) {
+ return 'emptyTriangle';
+ }
+ return 'emptyCircle';
+ }
+
+ static getSizeTrendLineName(space_name) {
+ return space_name + ' size';
+ }
+
+ static getAllocatedTrendSizeName(space_name) {
+ return space_name + ' allocated';
+ }
+
+ static getSpaceNameFromTrendLineName(trend_line_name) {
+ const space_name = this.re_space_name.exec(trend_line_name)[0];
+ return space_name;
+ }
+}
+
+DOM.defineCustomElement('heap-size-trend-viewer',
+ (templateText) =>
+ class HeapSizeTrendViewer extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.innerHTML = templateText;
+ this.chart = echarts.init(this.$('#chart'), null, {
+ renderer: 'canvas',
+ });
+ this.chart.getZr().on('click', 'series.line', (params) => {
+ const pointInPixel = [params.offsetX, params.offsetY];
+ const pointInGrid =
+ this.chart.convertFromPixel({seriesIndex: 0}, pointInPixel);
+ const xIndex = pointInGrid[0];
+ this.dispatchEvent(new CustomEvent('change', {
+ bubbles: true,
+ composed: true,
+ detail: xIndex,
+ }));
+ this.setXMarkLine(xIndex);
+ });
+ this.chartXAxisData = null;
+ this.chartSeriesData = null;
+ this.currentIndex = 0;
+ window.addEventListener('resize', () => {
+ this.chart.resize();
+ });
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ this.initTrendLineNames();
+ this.initXAxisDataAndSeries();
+ this.drawChart();
+ }
+
+ initTrendLineNames() {
+ this.trend_line_names = [];
+ for (const space_name of kSpaceNames) {
+ this.trend_line_names.push(
+ TrendLineHelper.getSizeTrendLineName(space_name));
+ this.trend_line_names.push(
+ TrendLineHelper.getAllocatedTrendSizeName(space_name));
+ }
+ }
+
+ // X axis represent the moment before or after nth GC : [B1,A1,...Bn,An].
+ initXAxisDataAndSeries() {
+ this.chartXAxisData = [];
+ this.chartSeriesData = [];
+ let trend_line_name_data_dict = {};
+
+ for (const trend_line_name of this.trend_line_names) {
+ trend_line_name_data_dict[trend_line_name] = [];
+ }
+
+ // Init x axis data and trend line series.
+ for (const snapshot of this.data) {
+ this.chartXAxisData.push(
+ TrendLineHelper.snapshotHeaderToXLabel(snapshot.header));
+ for (const [space_name, pageinfos] of Object.entries(snapshot.data)) {
+ const size_trend_line_name =
+ TrendLineHelper.getSizeTrendLineName(space_name);
+ const allocated_trend_line_name =
+ TrendLineHelper.getAllocatedTrendSizeName(space_name);
+ let size_sum = 0;
+ let allocated_sum = 0;
+ for (const pageinfo of pageinfos) {
+ size_sum += pageinfo[2] - pageinfo[1];
+ allocated_sum += pageinfo[3];
+ }
+ trend_line_name_data_dict[size_trend_line_name].push(size_sum);
+ trend_line_name_data_dict[allocated_trend_line_name].push(
+ allocated_sum);
+ }
+ }
+
+ // Init mark line series as the first series
+ const markline_series = {
+ name: 'mark-line',
+ type: 'line',
+
+ markLine: {
+ silent: true,
+ symbol: 'none',
+ label: {
+ show: false,
+ },
+ lineStyle: {
+ color: '#333',
+ },
+ data: [
+ {
+ xAxis: 0,
+ },
+ ],
+ },
+ };
+ this.chartSeriesData.push(markline_series);
+
+ for (const [trend_line_name, trend_line_data] of Object.entries(
+ trend_line_name_data_dict)) {
+ const color = getColorFromSpaceName(
+ TrendLineHelper.getSpaceNameFromTrendLineName(trend_line_name));
+ const trend_line_series = {
+ name: trend_line_name,
+ type: 'line',
+ data: trend_line_data,
+ lineStyle: {
+ color: color,
+ },
+ itemStyle: {
+ color: color,
+ },
+ symbol: TrendLineHelper.getLineSymbolFromTrendLineName(trend_line_name),
+ symbolSize: 8,
+ };
+ this.chartSeriesData.push(trend_line_series);
+ }
+ }
+
+ setXMarkLine(index) {
+ if (index < 0 || index >= this.data.length) {
+ console.error('Invalid index:', index);
+ return;
+ }
+ // Set the mark-line series
+ this.chartSeriesData[0].markLine.data[0].xAxis = index;
+ this.chart.setOption({
+ series: this.chartSeriesData,
+ });
+ this.currentIndex = index;
+ }
+
+ drawChart() {
+ const option = {
+ dataZoom: [
+ {
+ type: 'inside',
+ filterMode: 'weakFilter',
+ },
+ {
+ type: 'slider',
+ filterMode: 'weakFilter',
+ labelFormatter: '',
+ },
+ ],
+ title: {
+ text: 'Size Trend',
+ left: 'center',
+ },
+ tooltip: {
+ trigger: 'axis',
+ position(point, params, dom, rect, size) {
+ let ret_x = point[0] + 10;
+ if (point[0] > size.viewSize[0] * 0.7) {
+ ret_x = point[0] - dom.clientWidth - 10;
+ }
+ return [ret_x, '85%'];
+ },
+ formatter(params) {
+ const colorSpan = (color) =>
+ '<span style="display:inline-block;margin-right:1px;border-radius:5px;width:9px;height:9px;background-color:' +
+ color + '"></span>';
+ let result = '<p>' + params[0].axisValue + '</p>';
+ params.forEach((item) => {
+ const xx = '<p style="margin:0;">' + colorSpan(item.color) + ' ' +
+ item.seriesName + ': ' + (item.data / MB).toFixed(2) + 'MB' +
+ '</p>';
+ result += xx;
+ });
+
+ return result;
+ },
+ },
+ legend: {
+ data: this.trend_line_names,
+ top: '6%',
+ type: 'scroll',
+ },
+
+ xAxis: {
+ minInterval: 1,
+ type: 'category',
+ boundaryGap: false,
+ data: this.chartXAxisData,
+ },
+ yAxis: {
+ type: 'value',
+ axisLabel: {
+ formatter(value, index) {
+ return (value / MB).toFixed(3) + 'MB';
+ },
+ },
+ },
+
+ series: this.chartSeriesData,
+ };
+ this.show();
+ this.chart.resize();
+ this.chart.setOption(option);
+ }
+});
diff --git a/deps/v8/tools/heap-layout/index.css b/deps/v8/tools/heap-layout/index.css
new file mode 100644
index 0000000000..53fcf97def
--- /dev/null
+++ b/deps/v8/tools/heap-layout/index.css
@@ -0,0 +1,24 @@
+:root {
+ --surface-color: #ffffff;
+ --primary-color: #bb86fc;
+ --on-primary-color: #000000;
+ --error-color: #cf6679;
+ --file-reader-background-color: #ffffff80;
+ --file-reader-border-color: #000000;
+}
+
+body {
+ font-family: "Roboto", sans-serif;
+ margin-left: 5%;
+ margin-right: 5%;
+}
+
+.button-container {
+ text-align: center;
+ display: none;
+}
+
+button {
+ height: 50px;
+ width: 100px;
+}
diff --git a/deps/v8/tools/heap-layout/index.html b/deps/v8/tools/heap-layout/index.html
new file mode 100644
index 0000000000..0f33a73049
--- /dev/null
+++ b/deps/v8/tools/heap-layout/index.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<!-- Copyright 2021 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+ <meta charset="UTF-8">
+ <title>V8 Heap Layout</title>
+
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/echarts/5.2.2/echarts.min.js"></script>
+
+ <script type="module" src="heap-layout-viewer.mjs"></script>
+ <script type="module" src="heap-size-trend-viewer.mjs"></script>
+ <script type="module" src="trace-file-reader.mjs"></script>
+
+ <link rel="stylesheet" type="text/css" href="./index.css">
+
+ <script>
+ 'use strict';
+ function $(id) { return document.querySelector(id); }
+
+ function globalDataChanged(e) {
+ $('#heap-layout-viewer').data = e.detail;
+ $('#heap-size-trend-viewer').data = e.detail;
+ $('.button-container').style.display = 'block';
+ }
+
+ function selectSnapshotAtIndex(e) {
+ const index = e.detail;
+ $('#heap-layout-viewer').drawChart(index);
+ }
+
+
+ function OnPrevClick() {
+ const heap_size_trend_viewer = $('#heap-size-trend-viewer');
+ const heap_layout_viewer = $('#heap-layout-viewer');
+ heap_size_trend_viewer.setXMarkLine(heap_size_trend_viewer.currentIndex - 1);
+ heap_layout_viewer.drawChart(heap_layout_viewer.currentIndex - 1);
+ }
+
+ function OnNextClick() {
+ const heap_size_trend_viewer = $('#heap-size-trend-viewer');
+ const heap_layout_viewer = $('#heap-layout-viewer');
+ heap_size_trend_viewer.setXMarkLine(heap_size_trend_viewer.currentIndex + 1);
+ heap_layout_viewer.drawChart(heap_layout_viewer.currentIndex + 1);
+ }
+
+ </script>
+</head>
+
+<body>
+ <h1>V8 Heap Layout</h1>
+ <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+ <heap-size-trend-viewer id="heap-size-trend-viewer" onchange="selectSnapshotAtIndex(event)"></heap-size-trend-viewer>
+ <heap-layout-viewer id="heap-layout-viewer"></heap-layout-viewer>
+ <div class="button-container">
+ <button id="button_prev" type="button" onclick="OnPrevClick()">Prev</button>
+ <button id="button_next" type="button" onclick="OnNextClick()">Next</button>
+ </div>
+
+ <p>Heap layout is a HTML-based tool for visualizing V8-internal heap layout.</p>
+ <p>Visualize heap layout that have been gathered using</p>
+ <ul>
+ <li><code>--trace-gc-heap-layout</code> on V8</li>
+
+ </ul>
+
+</body>
+
+</html> \ No newline at end of file
diff --git a/deps/v8/tools/heap-layout/space-categories.mjs b/deps/v8/tools/heap-layout/space-categories.mjs
new file mode 100644
index 0000000000..95b52ba9ce
--- /dev/null
+++ b/deps/v8/tools/heap-layout/space-categories.mjs
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export const kSpaceNames = [
+ 'to_space',
+ 'from_space',
+ 'old_space',
+ 'map_space',
+ 'code_space',
+ 'large_object_space',
+ 'new_large_object_space',
+ 'code_large_object_space',
+ 'ro_space',
+];
+
+const kSpaceColors = [
+ '#5b8ff9',
+ '#5ad8a6',
+ '#5d7092',
+ '#f6bd16',
+ '#e8684a',
+ '#6dc8ec',
+ '#9270ca',
+ '#ff9d4d',
+ '#269a99',
+];
+
+export function getColorFromSpaceName(space_name) {
+ const index = kSpaceNames.indexOf(space_name);
+ return kSpaceColors[index];
+}
diff --git a/deps/v8/tools/heap-layout/trace-file-reader.mjs b/deps/v8/tools/heap-layout/trace-file-reader.mjs
new file mode 100644
index 0000000000..880acf9fad
--- /dev/null
+++ b/deps/v8/tools/heap-layout/trace-file-reader.mjs
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {calcOffsetInVMCage} from '../js/helper.mjs';
+import {DOM, FileReader,} from '../js/web-api-helper.mjs';
+
+import {kSpaceNames} from './space-categories.mjs';
+
+class TraceLogParseHelper {
+ static re_gc_header = /(Before|After) GC:\d/;
+ static re_page_info =
+ /\{owner:.+,address:.+,size:.+,allocated_bytes:.+,wasted_memory:.+\}/;
+ static re_owner = /(?<=owner:)[a-z_]+_space/;
+ static re_address = /(?<=address:)0x[a-f0-9]+(?=,)/;
+ static re_size = /(?<=size:)\d+(?=,)/;
+ static re_allocated_bytes = /(?<=allocated_bytes:)\d+(?=,)/;
+ static re_wasted_memory = /(?<=wasted_memory:)\d+(?=})/;
+
+ static matchGCHeader(content) {
+ return this.re_gc_header.test(content);
+ }
+
+ static matchPageInfo(content) {
+ return this.re_page_info.test(content);
+ }
+
+ static parsePageInfo(content) {
+ const owner = this.re_owner.exec(content)[0];
+ const address =
+ calcOffsetInVMCage(BigInt(this.re_address.exec(content)[0], 16));
+ const size = parseInt(this.re_size.exec(content)[0]);
+ const allocated_bytes = parseInt(this.re_allocated_bytes.exec(content)[0]);
+ const wasted_memory = parseInt(this.re_wasted_memory.exec(content)[0]);
+ const info = [
+ owner,
+ address,
+ address + size,
+ allocated_bytes,
+ wasted_memory,
+ ];
+ return info;
+ }
+
+ // Create a empty snapshot.
+ static createSnapShotData() {
+ let snapshot = {header: null, data: {}};
+ for (let space_name of kSpaceNames) {
+ snapshot.data[space_name] = [];
+ }
+ return snapshot;
+ }
+
+ static createModelFromV8TraceFile(contents) {
+ let snapshots = [];
+ let snapshot = this.createSnapShotData();
+
+ // Fill data info a snapshot, then push it into snapshots.
+ for (let content of contents) {
+ if (this.matchGCHeader(content)) {
+ if (snapshot.header != null) {
+ snapshots.push(snapshot);
+ }
+ snapshot = this.createSnapShotData();
+ snapshot.header = content;
+ continue;
+ }
+
+ if (this.matchPageInfo(content)) {
+ let pageinfo = this.parsePageInfo(content);
+ try {
+ snapshot.data[pageinfo[0]].push(pageinfo);
+ } catch (e) {
+ console.error(e);
+ }
+ }
+ }
+ // EOL, push the last.
+ if (snapshot.header != null) {
+ snapshots.push(snapshot);
+ }
+ return snapshots;
+ }
+}
+
+DOM.defineCustomElement('../js/log-file-reader', 'trace-file-reader',
+ (templateText) =>
+ class TraceFileReader extends FileReader {
+ constructor() {
+ super(templateText);
+ this.fullDataFromFile = '';
+ this.addEventListener('fileuploadchunk', (e) => this.handleLoadChunk(e));
+
+ this.addEventListener('fileuploadend', (e) => this.handleLoadEnd(e));
+ }
+
+ handleLoadChunk(event) {
+ this.fullDataFromFile += event.detail;
+ }
+
+ handleLoadEnd(event) {
+ let contents = this.fullDataFromFile.split('\n');
+ let snapshots = TraceLogParseHelper.createModelFromV8TraceFile(contents);
+ this.dispatchEvent(new CustomEvent('change', {
+ bubbles: true,
+ composed: true,
+ detail: snapshots,
+ }));
+ }
+});
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 2bd08fad02..e4e570c4b4 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -117,6 +117,7 @@ export const CATEGORIES = new Map([
'BOILERPLATE_PROPERTY_DICTIONARY_TYPE',
'BYTE_ARRAY_TYPE',
'CALL_HANDLER_INFO_TYPE',
+ 'CALL_SITE_INFO_TYPE',
'CELL_TYPE',
'CODE_STUBS_TABLE_TYPE',
'CONTEXT_EXTENSION_TYPE',
@@ -148,7 +149,6 @@ export const CATEGORIES = new Map([
'SCRIPT_SHARED_FUNCTION_INFOS_TYPE',
'SERIALIZED_OBJECTS_TYPE',
'SINGLE_CHARACTER_STRING_CACHE_TYPE',
- 'STACK_FRAME_INFO_TYPE',
'STRING_SPLIT_CACHE_TYPE',
'STRING_TABLE_TYPE',
'TRANSITION_ARRAY_TYPE',
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
index efb74af011..9f053a8730 100644
--- a/deps/v8/tools/heap-stats/index.html
+++ b/deps/v8/tools/heap-stats/index.html
@@ -80,23 +80,32 @@ function globalSelectionChangedA(e) {
<p>Visualize object statistics that have been gathered using</p>
<ul>
- <li><code>--trace-gc-object-stats</code> on V8</li>
+ <li>Use <code>--trace-gc-object-stats</code> for V8 and load the contents of stdout</li>
<li>
<a
href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
tracing infrastructure</a> collecting data for the category
- <code>v8.gc_stats</code>.
+ <code>disabled-by-default-v8.gc_stats</code> and directly load the
+ results.html or trace.json.gzip file.
</li>
</ul>
- <p>
- Note that you only get a data point on major GCs. You can enforce this by
- using the <code>--gc-global</code> flag.
- </p>
- <p>
- Note that the visualizer needs to run on a web server due to HTML imports
- requiring <a
- href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
- </p>
+
+ Additional information:
+ <ul>
+ <li>
+ You only get a data point on major GCs. You can enforce this by
+ using the <code>--gc-global</code> V8 flag.
+ </li>
+ <li>
+ For more frequent data points you can also the
+ <code>--gc-interval=$AFTER_N_ALLOCATIONS</code> V8.
+ </li>
+ <li>
+ The visualizer needs to run on a web server due to HTML imports
+ requiring <a
+ href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
+ </li>
+ <ul>
</body>
</html>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index e297723e6f..ef83b30db1 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -78,6 +78,27 @@ defineCustomElement('trace-file-reader', (templateText) =>
};
// Delay the loading a bit to allow for CSS animations to happen.
setTimeout(() => reader.readAsArrayBuffer(file), 0);
+ } else if (file.type == 'text/html') {
+ // try extracting the data from a results.html file
+ reader.onload = (e) => {
+ try {
+ let html = document.createElement('html');
+ html.innerHTML = e.target.result;
+ for (let dataScript of html.querySelectorAll('#viewer-data')) {
+ const base64 = dataScript.innerText.slice(1,-1);
+ const binary = globalThis.atob(base64);
+ const textResult = pako.inflate(binary, {to: 'string'});
+ this.processRawText(file, textResult);
+ }
+ this.section.className = 'success';
+ this.$('#fileReader').classList.add('done');
+ } catch (err) {
+ console.error(err);
+ this.section.className = 'failure';
+ }
+ };
+ // Delay the loading a bit to allow for CSS animations to happen.
+ setTimeout(() => reader.readAsText(file), 0);
} else {
reader.onload = (e) => {
try {
diff --git a/deps/v8/tools/index.html b/deps/v8/tools/index.html
index 53b22f170d..dfb0be7b34 100644
--- a/deps/v8/tools/index.html
+++ b/deps/v8/tools/index.html
@@ -81,6 +81,10 @@ dd, dt {
<dd>Visualize heap memory usage.</dd>
</div>
<div class="card">
+ <dt><a href="./heap-layout/index.html">Heap Layout</a></dt>
+ <dd>Visualize heap memory layout.</dd>
+ </div>
+ <div class="card">
<dt><a href="./parse-processor.html">Parse Processor</a></dt>
<dd>Analyse parse, compile and first-execution.</dd>
</div>
@@ -89,10 +93,6 @@ dd, dt {
<dd>Fancy sampling profile viewer.</dd>
</div>
<div class="card">
- <dt><a href="./tick-processor.html">Tick Processor</a></dt>
- <dd>Simple sampling profile viewer.</dd>
- </div>
- <div class="card">
<dt><a href="./turbolizer/index.html">Turbolizer</a></dt>
<dd>Visualise the sea of nodes graph generated by TurboFan.</dd>
</div>
diff --git a/deps/v8/tools/js/helper.mjs b/deps/v8/tools/js/helper.mjs
new file mode 100644
index 0000000000..04df6b5421
--- /dev/null
+++ b/deps/v8/tools/js/helper.mjs
@@ -0,0 +1,53 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export const KB = 1024;
+export const MB = KB * KB;
+export const GB = MB * KB;
+export const kMillis2Seconds = 1 / 1000;
+export const kMicro2Milli = 1 / 1000;
+
+export function formatBytes(bytes) {
+ const units = ['B', 'KiB', 'MiB', 'GiB'];
+ const divisor = 1024;
+ let index = 0;
+ while (index < units.length && bytes >= divisor) {
+ index++;
+ bytes /= divisor;
+ }
+ return bytes.toFixed(2) + units[index];
+}
+
+export function formatMicroSeconds(micro) {
+ return (micro * kMicro2Milli).toFixed(1) + 'ms';
+}
+
+export function formatDurationMicros(micros, secondsDigits = 3) {
+ return formatDurationMillis(micros * kMicro2Milli, secondsDigits);
+}
+
+export function formatDurationMillis(millis, secondsDigits = 3) {
+ if (millis < 1000) {
+ if (millis < 1) {
+ return (millis / kMicro2Milli).toFixed(1) + 'ns';
+ }
+ return millis.toFixed(2) + 'ms';
+ }
+ let seconds = millis / 1000;
+ const hours = Math.floor(seconds / 3600);
+ const minutes = Math.floor((seconds % 3600) / 60);
+ seconds = seconds % 60;
+ let buffer = '';
+ if (hours > 0) buffer += hours + 'h ';
+ if (hours > 0 || minutes > 0) buffer += minutes + 'm ';
+ buffer += seconds.toFixed(secondsDigits) + 's';
+ return buffer;
+}
+
+// Get the offset in the 4GB virtual memory cage.
+export function calcOffsetInVMCage(address) {
+ let mask = (1n << 32n) - 1n;
+ let ret = Number(address & mask);
+ return ret;
+}
diff --git a/deps/v8/tools/system-analyzer/view/log-file-reader-template.html b/deps/v8/tools/js/log-file-reader-template.html
index 68403300e3..f9e31eed8b 100644
--- a/deps/v8/tools/system-analyzer/view/log-file-reader-template.html
+++ b/deps/v8/tools/js/log-file-reader-template.html
@@ -1,9 +1,9 @@
-<!-- Copyright 2020 the V8 project authors. All rights reserved.
+<!-- Copyright 2021 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
<head>
- <link href="./index.css" rel="stylesheet">
+ <link href="./index.css" rel="stylesheet" />
</head>
<style>
#fileReader {
@@ -13,6 +13,8 @@ found in the LICENSE file. -->
cursor: pointer;
transition: all 0.5s ease-in-out;
background-color: var(--surface-color);
+ border: solid 1px var(--file-reader-border-color);
+ border-radius: 5px;
}
#fileReader:hover {
@@ -20,7 +22,7 @@ found in the LICENSE file. -->
color: var(--on-primary-color);
}
- .done #fileReader{
+ .done #fileReader {
display: none;
}
@@ -32,7 +34,7 @@ found in the LICENSE file. -->
cursor: wait;
}
- #fileReader>input {
+ #fileReader > input {
display: none;
}
@@ -79,11 +81,11 @@ found in the LICENSE file. -->
}
</style>
<div id="root">
- <div id="fileReader" class="panel" tabindex=1>
+ <div id="fileReader" class="panel" tabindex="1">
<span id="label">
Drag and drop a v8.log file into this area, or click to choose from disk.
</span>
- <input id="file" type="file" name="file">
+ <input id="file" type="file" name="file" />
</div>
<div id="loader">
<div id="spinner"></div>
diff --git a/deps/v8/tools/js/web-api-helper.mjs b/deps/v8/tools/js/web-api-helper.mjs
new file mode 100644
index 0000000000..15a23e1070
--- /dev/null
+++ b/deps/v8/tools/js/web-api-helper.mjs
@@ -0,0 +1,261 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export class V8CustomElement extends HTMLElement {
+ _updateTimeoutId;
+ _updateCallback = this.forceUpdate.bind(this);
+
+ constructor(templateText) {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.innerHTML = templateText;
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ querySelectorAll(query) {
+ return this.shadowRoot.querySelectorAll(query);
+ }
+
+ requestUpdate(useAnimation = false) {
+ if (useAnimation) {
+ window.cancelAnimationFrame(this._updateTimeoutId);
+ this._updateTimeoutId =
+ window.requestAnimationFrame(this._updateCallback);
+ } else {
+ // Use timeout tasks to asynchronously update the UI without blocking.
+ clearTimeout(this._updateTimeoutId);
+ const kDelayMs = 5;
+ this._updateTimeoutId = setTimeout(this._updateCallback, kDelayMs);
+ }
+ }
+
+ forceUpdate() {
+ this._update();
+ }
+
+ _update() {
+ throw Error('Subclass responsibility');
+ }
+}
+
+export class FileReader extends V8CustomElement {
+ constructor(templateText) {
+ super(templateText);
+ this.addEventListener('click', (e) => this.handleClick(e));
+ this.addEventListener('dragover', (e) => this.handleDragOver(e));
+ this.addEventListener('drop', (e) => this.handleChange(e));
+ this.$('#file').addEventListener('change', (e) => this.handleChange(e));
+ this.$('#fileReader')
+ .addEventListener('keydown', (e) => this.handleKeyEvent(e));
+ }
+
+ set error(message) {
+ this._updateLabel(message);
+ this.root.className = 'fail';
+ }
+
+ _updateLabel(text) {
+ this.$('#label').innerText = text;
+ }
+
+ handleKeyEvent(event) {
+ if (event.key == 'Enter') this.handleClick(event);
+ }
+
+ handleClick(event) {
+ this.$('#file').click();
+ }
+
+ handleChange(event) {
+ // Used for drop and file change.
+ event.preventDefault();
+ this.dispatchEvent(
+ new CustomEvent('fileuploadstart', {bubbles: true, composed: true}));
+ const host = event.dataTransfer ? event.dataTransfer : event.target;
+ this.readFile(host.files[0]);
+ }
+
+ handleDragOver(event) {
+ event.preventDefault();
+ }
+
+ connectedCallback() {
+ this.fileReader.focus();
+ }
+
+ get fileReader() {
+ return this.$('#fileReader');
+ }
+
+ get root() {
+ return this.$('#root');
+ }
+
+ readFile(file) {
+ if (!file) {
+ this.error = 'Failed to load file.';
+ return;
+ }
+ this.fileReader.blur();
+ this.root.className = 'loading';
+ // Delay the loading a bit to allow for CSS animations to happen.
+ window.requestAnimationFrame(() => this.asyncReadFile(file));
+ }
+
+ async asyncReadFile(file) {
+ const decoder = globalThis.TextDecoderStream;
+ if (decoder) {
+ await this._streamFile(file, decoder);
+ } else {
+ await this._readFullFile(file);
+ }
+ this._updateLabel(`Finished loading '${file.name}'.`);
+ this.dispatchEvent(
+ new CustomEvent('fileuploadend', {bubbles: true, composed: true}));
+ this.root.className = 'done';
+ }
+
+ async _readFullFile(file) {
+ const text = await file.text();
+ this._handleFileChunk(text);
+ }
+
+ async _streamFile(file, decoder) {
+ const stream = file.stream().pipeThrough(new decoder());
+ const reader = stream.getReader();
+ let chunk, readerDone;
+ do {
+ const readResult = await reader.read();
+ chunk = readResult.value;
+ readerDone = readResult.done;
+ if (chunk) this._handleFileChunk(chunk);
+ } while (!readerDone);
+ }
+
+ _handleFileChunk(chunk) {
+ this.dispatchEvent(new CustomEvent('fileuploadchunk', {
+ bubbles: true,
+ composed: true,
+ detail: chunk,
+ }));
+ }
+}
+
+export class DOM {
+ static element(type, options) {
+ const node = document.createElement(type);
+ if (options === undefined) return node;
+ if (typeof options === 'string') {
+ // Old behaviour: options = class string
+ node.className = options;
+ } else if (Array.isArray(options)) {
+ // Old behaviour: options = class array
+ DOM.addClasses(node, options);
+ } else {
+ // New behaviour: options = attribute dict
+ for (const [key, value] of Object.entries(options)) {
+ if (key == 'className') {
+ node.className = value;
+ } else if (key == 'classList') {
+ DOM.addClasses(node, value);
+ } else if (key == 'textContent') {
+ node.textContent = value;
+ } else if (key == 'children') {
+ for (const child of value) {
+ node.appendChild(child);
+ }
+ } else {
+ node.setAttribute(key, value);
+ }
+ }
+ }
+ return node;
+ }
+
+ static addClasses(node, classes) {
+ const classList = node.classList;
+ if (typeof classes === 'string') {
+ classList.add(classes);
+ } else {
+ for (let i = 0; i < classes.length; i++) {
+ classList.add(classes[i]);
+ }
+ }
+ return node;
+ }
+
+ static text(string) {
+ return document.createTextNode(string);
+ }
+
+ static button(label, clickHandler) {
+ const button = DOM.element('button');
+ button.innerText = label;
+ if (typeof clickHandler != 'function') {
+ throw new Error(
+ `DOM.button: Expected function but got clickHandler=${clickHandler}`);
+ }
+ button.onclick = clickHandler;
+ return button;
+ }
+
+ static div(options) {
+ return this.element('div', options);
+ }
+
+ static span(options) {
+ return this.element('span', options);
+ }
+
+ static table(options) {
+ return this.element('table', options);
+ }
+
+ static tbody(options) {
+ return this.element('tbody', options);
+ }
+
+ static td(textOrNode, className) {
+ const node = this.element('td');
+ if (typeof textOrNode === 'object') {
+ node.appendChild(textOrNode);
+ } else if (textOrNode) {
+ node.innerText = textOrNode;
+ }
+ if (className) node.className = className;
+ return node;
+ }
+
+ static tr(classes) {
+ return this.element('tr', classes);
+ }
+
+ static removeAllChildren(node) {
+ let range = document.createRange();
+ range.selectNodeContents(node);
+ range.deleteContents();
+ }
+
+ static defineCustomElement(
+ path, nameOrGenerator, maybeGenerator = undefined) {
+ let generator = nameOrGenerator;
+ let name = nameOrGenerator;
+ if (typeof nameOrGenerator == 'function') {
+ console.assert(maybeGenerator === undefined);
+ name = path.substring(path.lastIndexOf('/') + 1, path.length);
+ } else {
+ console.assert(typeof nameOrGenerator == 'string');
+ generator = maybeGenerator;
+ }
+ path = path + '-template.html';
+ fetch(path)
+ .then(stream => stream.text())
+ .then(
+ templateText =>
+ customElements.define(name, generator(templateText)));
+ }
+}
diff --git a/deps/v8/tools/logreader.mjs b/deps/v8/tools/logreader.mjs
index ecd7b573a2..26a6106a01 100644
--- a/deps/v8/tools/logreader.mjs
+++ b/deps/v8/tools/logreader.mjs
@@ -180,16 +180,6 @@ export class LogReader {
}
/**
- * Returns whether a particular dispatch must be skipped.
- *
- * @param {!Object} dispatch Dispatch record.
- * @return {boolean} True if dispatch must be skipped.
- */
- skipDispatch(dispatch) {
- return false;
- }
-
- /**
* Does a dispatch of a log record.
*
* @param {Array.<string>} fields Log record.
@@ -200,14 +190,12 @@ export class LogReader {
const command = fields[0];
const dispatch = this.dispatchTable_[command];
if (dispatch === undefined) return;
- if (dispatch === null || this.skipDispatch(dispatch)) {
- return;
- }
-
+ const parsers = dispatch.parsers;
+ const length = parsers.length;
// Parse fields.
const parsedFields = [];
- for (let i = 0; i < dispatch.parsers.length; ++i) {
- const parser = dispatch.parsers[i];
+ for (let i = 0; i < length; ++i) {
+ const parser = parsers[i];
if (parser === parseString) {
parsedFields.push(fields[1 + i]);
} else if (typeof parser == 'function') {
diff --git a/deps/v8/tools/mb/PRESUBMIT.py b/deps/v8/tools/mb/PRESUBMIT.py
index 6f5307c63e..bda4ef3f43 100644
--- a/deps/v8/tools/mb/PRESUBMIT.py
+++ b/deps/v8/tools/mb/PRESUBMIT.py
@@ -3,6 +3,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
def _CommonChecks(input_api, output_api):
results = []
@@ -12,8 +16,10 @@ def _CommonChecks(input_api, output_api):
results.extend(input_api.RunTests(pylint_checks))
# Run the MB unittests.
- results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
- input_api, output_api, '.', [ r'^.+_unittest\.py$']))
+ results.extend(
+ input_api.canned_checks.RunUnitTestsInDirectory(input_api, output_api,
+ '.',
+ [r'^.+_unittest\.py$']))
# Validate the format of the mb_config.pyl file.
cmd = [input_api.python_executable, 'mb.py', 'validate']
@@ -23,12 +29,10 @@ def _CommonChecks(input_api, output_api):
cmd=cmd, kwargs=kwargs,
message=output_api.PresubmitError)]))
+ is_mb_config = (lambda filepath: 'mb_config.pyl' in filepath.LocalPath())
results.extend(
input_api.canned_checks.CheckLongLines(
- input_api,
- output_api,
- maxlen=80,
- source_file_filter=lambda x: 'mb_config.pyl' in x.LocalPath()))
+ input_api, output_api, maxlen=80, source_file_filter=is_mb_config))
return results
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 408e2b566a..1ba74b747b 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -1150,6 +1150,8 @@ class MetaBuildWrapper(object):
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
+ out = out.decode('utf-8')
+ err = err.decode('utf-8')
else:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
env=env)
diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py
index ad5adf7d29..ea80653c86 100644
--- a/deps/v8/tools/predictable_wrapper.py
+++ b/deps/v8/tools/predictable_wrapper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -31,10 +31,16 @@ TIMEOUT = 120
# Predictable mode works only when run on the host os.
command.setup(utils.GuessOS(), None)
+def maybe_decode(message):
+ if not isinstance(message, str):
+ return message.decode()
+ return message
+
+
def main(args):
def allocation_str(stdout):
for line in reversed((stdout or '').splitlines()):
- if line.startswith('### Allocations = '):
+ if maybe_decode(line).startswith('### Allocations = '):
return line
return None
diff --git a/deps/v8/tools/process-wasm-compilation-times.py b/deps/v8/tools/process-wasm-compilation-times.py
index 5002fc00e2..37c5998657 100755
--- a/deps/v8/tools/process-wasm-compilation-times.py
+++ b/deps/v8/tools/process-wasm-compilation-times.py
@@ -70,13 +70,13 @@ class Function:
self.has_tf = True
# 0 1 2 3 4 5 6 7 8 9 10 11
# Compiled function #6 using TurboFan, took 0 ms and 14440 / 44656
- # 12 13 14 15 16 17
- # max/total bytes, codesize 24 name wasm-function#6
+ # 12 13 14 15 16 17 18 19
+ # max/total bytes; bodysize 12 codesize 24 name wasm-function#6
self.time_tf = int(words[6])
self.mem_tf_max = int(words[9])
self.mem_tf_total = int(words[11])
- self.size_tf = int(words[15])
- self.name = words[17]
+ self.size_tf = int(words[17])
+ self.name = words[19]
def AddLiftoffLine(self, words):
assert self.index == words[2], "wrong function"
@@ -109,7 +109,8 @@ if len(sys.argv) < 2 or sys.argv[1] in ("-h", "--help", "help"):
with open(sys.argv[1], "r") as f:
for line in f.readlines():
words = line.strip().split(" ")
- if words[0] != "Compiled": continue
+ if words[0] != "Compiled" or words[1] != "function":
+ continue
name = words[2]
RegisterName(name)
if name in funcs_dict:
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index 5f0b1667ec..c62ebcf177 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -261,6 +261,10 @@ class SourceInfo {
}
}
+const kProfileOperationMove = 0;
+const kProfileOperationDelete = 1;
+const kProfileOperationTick = 2;
+
/**
* Creates a profile object for processing profiling-related events
* and calculating function execution times.
@@ -271,9 +275,10 @@ export class Profile {
codeMap_ = new CodeMap();
topDownTree_ = new CallTree();
bottomUpTree_ = new CallTree();
- c_entries_ = {};
+ c_entries_ = {__proto__:null};
scripts_ = [];
urlToScript_ = new Map();
+ warnings = new Set();
serializeVMSymbols() {
let result = this.codeMap_.getAllStaticEntriesWithAddresses();
@@ -300,9 +305,9 @@ export class Profile {
* @enum {number}
*/
static Operation = {
- MOVE: 0,
- DELETE: 1,
- TICK: 2
+ MOVE: kProfileOperationMove,
+ DELETE: kProfileOperationDelete,
+ TICK: kProfileOperationTick
}
/**
@@ -314,7 +319,6 @@ export class Profile {
COMPILED: 0,
IGNITION: 1,
BASELINE: 2,
- TURBOPROP: 4,
TURBOFAN: 5,
}
@@ -346,8 +350,6 @@ export class Profile {
return this.CodeState.IGNITION;
case '^':
return this.CodeState.BASELINE;
- case '+':
- return this.CodeState.TURBOPROP;
case '*':
return this.CodeState.TURBOFAN;
}
@@ -361,8 +363,6 @@ export class Profile {
return "Unopt";
} else if (state === this.CodeState.BASELINE) {
return "Baseline";
- } else if (state === this.CodeState.TURBOPROP) {
- return "Turboprop";
} else if (state === this.CodeState.TURBOFAN) {
return "Opt";
}
@@ -459,7 +459,7 @@ export class Profile {
// As code and functions are in the same address space,
// it is safe to put them in a single code map.
let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func) {
+ if (func === null) {
func = new FunctionEntry(name);
this.codeMap_.addCode(funcAddr, func);
} else if (func.name !== name) {
@@ -467,7 +467,7 @@ export class Profile {
func.name = name;
}
let entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (entry) {
+ if (entry !== null) {
if (entry.size === size && entry.func === func) {
// Entry state has changed.
entry.state = state;
@@ -476,7 +476,7 @@ export class Profile {
entry = null;
}
}
- if (!entry) {
+ if (entry === null) {
entry = new DynamicFuncCodeEntry(size, type, func, state);
this.codeMap_.addCode(start, entry);
}
@@ -493,7 +493,7 @@ export class Profile {
try {
this.codeMap_.moveCode(from, to);
} catch (e) {
- this.handleUnknownCode(Profile.Operation.MOVE, from);
+ this.handleUnknownCode(kProfileOperationMove, from);
}
}
@@ -510,7 +510,7 @@ export class Profile {
try {
this.codeMap_.deleteCode(start);
} catch (e) {
- this.handleUnknownCode(Profile.Operation.DELETE, start);
+ this.handleUnknownCode(kProfileOperationDelete, start);
}
}
@@ -521,16 +521,16 @@ export class Profile {
inliningPositions, inlinedFunctions) {
const script = this.getOrCreateScript(scriptId);
const entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (!entry) return;
+ if (entry === null) return;
// Resolve the inlined functions list.
if (inlinedFunctions.length > 0) {
inlinedFunctions = inlinedFunctions.substring(1).split("S");
for (let i = 0; i < inlinedFunctions.length; i++) {
const funcAddr = parseInt(inlinedFunctions[i]);
const func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func || func.funcId === undefined) {
+ if (func === null || func.funcId === undefined) {
// TODO: fix
- console.warn(`Could not find function ${inlinedFunctions[i]}`);
+ this.warnings.add(`Could not find function ${inlinedFunctions[i]}`);
inlinedFunctions[i] = null;
} else {
inlinedFunctions[i] = func.funcId;
@@ -547,7 +547,9 @@ export class Profile {
addDisassemble(start, kind, disassemble) {
const entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (entry) this.getOrCreateSourceInfo(entry).setDisassemble(disassemble);
+ if (entry !== null) {
+ this.getOrCreateSourceInfo(entry).setDisassemble(disassemble);
+ }
return entry;
}
@@ -563,7 +565,7 @@ export class Profile {
getOrCreateScript(id) {
let script = this.scripts_[id];
- if (!script) {
+ if (script === undefined) {
script = new Script(id);
this.scripts_[id] = script;
}
@@ -623,7 +625,7 @@ export class Profile {
for (let i = 0; i < stack.length; ++i) {
const pc = stack[i];
const entry = this.codeMap_.findEntry(pc);
- if (entry) {
+ if (entry !== null) {
entryStack.push(entry);
const name = entry.getName();
if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
@@ -636,12 +638,13 @@ export class Profile {
nameStack.push(name);
}
} else {
- this.handleUnknownCode(Profile.Operation.TICK, pc, i);
+ this.handleUnknownCode(kProfileOperationTick, pc, i);
if (i === 0) nameStack.push("UNKNOWN");
entryStack.push(pc);
}
if (look_for_first_c_function && i > 0 &&
- (!entry || entry.type !== 'CPP') && last_seen_c_function !== '') {
+ (entry === null || entry.type !== 'CPP')
+ && last_seen_c_function !== '') {
if (this.c_entries_[last_seen_c_function] === undefined) {
this.c_entries_[last_seen_c_function] = 0;
}
@@ -716,7 +719,7 @@ export class Profile {
getFlatProfile(opt_label) {
const counters = new CallTree();
const rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
- const precs = {};
+ const precs = {__proto__:null};
precs[rootLabel] = 0;
const root = counters.findOrAddChild(rootLabel);
@@ -968,9 +971,7 @@ class CallTree {
* @param {Array<string>} path Call path.
*/
addPath(path) {
- if (path.length == 0) {
- return;
- }
+ if (path.length == 0) return;
let curr = this.root_;
for (let i = 0; i < path.length; ++i) {
curr = curr.findOrAddChild(path[i]);
@@ -1084,21 +1085,14 @@ class CallTree {
* @param {CallTreeNode} opt_parent Node parent.
*/
class CallTreeNode {
- /**
- * Node self weight (how many times this node was the last node in
- * a call path).
- * @type {number}
- */
- selfWeight = 0;
-
- /**
- * Node total weight (includes weights of all children).
- * @type {number}
- */
- totalWeight = 0;
- children = {};
constructor(label, opt_parent) {
+ // Node self weight (how many times this node was the last node in
+ // a call path).
+ this.selfWeight = 0;
+ // Node total weight (includes weights of all children).
+ this.totalWeight = 0;
+ this. children = { __proto__:null };
this.label = label;
this.parent = opt_parent;
}
@@ -1141,7 +1135,8 @@ class CallTreeNode {
* @param {string} label Child node label.
*/
findChild(label) {
- return this.children[label] || null;
+ const found = this.children[label];
+ return found === undefined ? null : found;
}
/**
@@ -1151,7 +1146,9 @@ class CallTreeNode {
* @param {string} label Child node label.
*/
findOrAddChild(label) {
- return this.findChild(label) || this.addChild(label);
+ const found = this.findChild(label)
+ if (found === null) return this.addChild(label);
+ return found;
}
/**
@@ -1171,7 +1168,7 @@ class CallTreeNode {
* @param {function(CallTreeNode)} f Visitor function.
*/
walkUpToRoot(f) {
- for (let curr = this; curr != null; curr = curr.parent) {
+ for (let curr = this; curr !== null; curr = curr.parent) {
f(curr);
}
}
diff --git a/deps/v8/tools/release/PRESUBMIT.py b/deps/v8/tools/release/PRESUBMIT.py
index a982b2e153..ed4557d98e 100644
--- a/deps/v8/tools/release/PRESUBMIT.py
+++ b/deps/v8/tools/release/PRESUBMIT.py
@@ -2,9 +2,15 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
+
def _CommonChecks(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, '.', files_to_check=['test_scripts.py$'])
+ input_api, output_api, '.', files_to_check=['test_scripts.py$'],
+ run_on_python2=False)
return input_api.RunTests(tests)
def CheckChangeOnUpload(input_api, output_api):
diff --git a/deps/v8/tools/release/auto_push.py b/deps/v8/tools/release/auto_push.py
index 4cb968787f..eb52e2a316 100755
--- a/deps/v8/tools/release/auto_push.py
+++ b/deps/v8/tools/release/auto_push.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,9 +26,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
import json
import os
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 76247b1fb3..5d44505c2a 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -3,9 +3,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
import os
import sys
@@ -80,7 +77,7 @@ class DetectRevisionToRoll(Step):
version = self.GetVersionTag(revision)
assert version, "Internal error. All recent releases should have a tag"
- if SortingKey(self["last_version"]) < SortingKey(version):
+ if LooseVersion(self["last_version"]) < LooseVersion(version):
self["roll"] = revision
break
else:
diff --git a/deps/v8/tools/release/auto_tag.py b/deps/v8/tools/release/auto_tag.py
deleted file mode 100755
index 7e77c313d8..0000000000
--- a/deps/v8/tools/release/auto_tag.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import argparse
-import sys
-
-from common_includes import *
-
-
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # TODO(machenbach): Remove after the git switch.
- if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
- print("This script is disabled until after the v8 git migration.")
- return True
-
- self.CommonPrepare()
- self.PrepareBranch()
- self.GitCheckout("main")
- self.vc.Pull()
-
-
-class GetTags(Step):
- MESSAGE = "Get all V8 tags."
-
- def RunStep(self):
- self.GitCreateBranch(self._config["BRANCHNAME"])
- self["tags"] = self.vc.GetTags()
-
-
-class GetOldestUntaggedVersion(Step):
- MESSAGE = "Check if there's a version on bleeding edge without a tag."
-
- def RunStep(self):
- tags = set(self["tags"])
- self["candidate"] = None
- self["candidate_version"] = None
- self["next"] = None
- self["next_version"] = None
-
- # Iterate backwards through all automatic version updates.
- for git_hash in self.GitLog(
- format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
-
- # Get the version.
- if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
- continue
-
- self.ReadAndPersistVersion()
- version = self.ArrayToVersion("")
-
- # Strip off trailing patch level (tags don't include tag level 0).
- if version.endswith(".0"):
- version = version[:-2]
-
- # Clean up checked-out version file.
- self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
-
- if version in tags:
- if self["candidate"]:
- # Revision "git_hash" is tagged already and "candidate" was the next
- # newer revision without a tag.
- break
- else:
- print("Stop as %s is the latest version and it has been tagged." %
- version)
- self.CommonCleanup()
- return True
- else:
- # This is the second oldest version without a tag.
- self["next"] = self["candidate"]
- self["next_version"] = self["candidate_version"]
-
- # This is the oldest version without a tag.
- self["candidate"] = git_hash
- self["candidate_version"] = version
-
- if not self["candidate"] or not self["candidate_version"]:
- print("Nothing found to tag.")
- self.CommonCleanup()
- return True
-
- print("Candidate for tagging is %s with version %s" %
- (self["candidate"], self["candidate_version"]))
-
-
-class GetLKGRs(Step):
- MESSAGE = "Get the last lkgrs."
-
- def RunStep(self):
- revision_url = "https://v8-status.appspot.com/revisions?format=json"
- status_json = self.ReadURL(revision_url, wait_plan=[5, 20])
- self["lkgrs"] = [entry["revision"]
- for entry in json.loads(status_json) if entry["status"]]
-
-
-class CalculateTagRevision(Step):
- MESSAGE = "Calculate the revision to tag."
-
- def LastLKGR(self, min_rev, max_rev):
- """Finds the newest lkgr between min_rev (inclusive) and max_rev
- (exclusive).
- """
- for lkgr in self["lkgrs"]:
- # LKGRs are reverse sorted.
- if int(min_rev) <= int(lkgr) and int(lkgr) < int(max_rev):
- return lkgr
- return None
-
- def RunStep(self):
- # Get the lkgr after the tag candidate and before the next tag candidate.
- candidate_svn = self.vc.GitSvn(self["candidate"])
- if self["next"]:
- next_svn = self.vc.GitSvn(self["next"])
- else:
- # Don't include the version change commit itself if there is no upper
- # limit yet.
- candidate_svn = str(int(candidate_svn) + 1)
- next_svn = sys.maxsize
- lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
-
- if not lkgr_svn:
- print("There is no lkgr since the candidate version yet.")
- self.CommonCleanup()
- return True
-
- # Let's check if the lkgr is at least three hours old.
- self["lkgr"] = self.vc.SvnGit(lkgr_svn)
- if not self["lkgr"]:
- print("Couldn't find git hash for lkgr %s" % lkgr_svn)
- self.CommonCleanup()
- return True
-
- lkgr_utc_time = int(self.GitLog(n=1, format="%at", git_hash=self["lkgr"]))
- current_utc_time = self._side_effect_handler.GetUTCStamp()
-
- if current_utc_time < lkgr_utc_time + 10800:
- print("Candidate lkgr %s is too recent for tagging." % lkgr_svn)
- self.CommonCleanup()
- return True
-
- print("Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]))
-
-
-class MakeTag(Step):
- MESSAGE = "Tag the version."
-
- def RunStep(self):
- if not self._options.dry_run:
- self.GitReset(self["lkgr"])
- # FIXME(machenbach): Make this work with the git repo.
- self.vc.Tag(self["candidate_version"],
- "svn/bleeding_edge",
- "This won't work!")
-
-
-class CleanUp(Step):
- MESSAGE = "Clean up."
-
- def RunStep(self):
- self.CommonCleanup()
-
-
-class AutoTag(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("--dry_run", help="Don't tag the new version.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.dry_run and not options.author:
- print("Specify your chromium.org email with -a")
- return False
- options.wait_for_lgtm = False
- options.force_readline_defaults = True
- options.force_upload = True
- return True
-
- def _Config(self):
- return {
- "BRANCHNAME": "auto-tag-v8",
- "PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
- }
-
- def _Steps(self):
- return [
- Preparation,
- GetTags,
- GetOldestUntaggedVersion,
- GetLKGRs,
- CalculateTagRevision,
- MakeTag,
- CleanUp,
- ]
-
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(AutoTag().Run())
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
deleted file mode 100755
index b1b7e084df..0000000000
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script to check for new clusterfuzz issues since the last rolled v8 revision.
-
-Returns a json list with test case IDs if any.
-
-Security considerations: The security key and request data must never be
-written to public logs. Public automated callers of this script should
-suppress stdout and stderr and only process contents of the results_file.
-"""
-
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import argparse
-import httplib
-import json
-import os
-import re
-import sys
-import urllib
-import urllib2
-
-
-# Constants to git repos.
-BASE_URL = "https://chromium.googlesource.com"
-DEPS_LOG = BASE_URL + "/chromium/src/+log/main/DEPS?format=JSON"
-
-# Constants for retrieving v8 rolls.
-CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
-V8_COMMIT_RE = re.compile(
- r"^Update V8 to version \d+\.\d+\.\d+ \(based on ([a-fA-F0-9]+)\)\..*")
-
-# Constants for the clusterfuzz backend.
-HOSTNAME = "backend-dot-cluster-fuzz.appspot.com"
-
-# Crash patterns.
-V8_INTERNAL_RE = re.compile(r"^v8::internal.*")
-ANY_RE = re.compile(r".*")
-
-# List of all api requests.
-BUG_SPECS = [
- {
- "args": {
- "job_type": "linux_asan_chrome_v8",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": V8_INTERNAL_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_ignition_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_v8_arm_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_ignition_v8_arm_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_v8_arm64_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
- {
- "args": {
- "job_type": "linux_asan_d8_v8_mipsel_dbg",
- "reproducible": "True",
- "open": "True",
- "bug_information": "",
- },
- "crash_state": ANY_RE,
- },
-]
-
-
-def GetRequest(url):
- url_fh = urllib2.urlopen(url, None, 60)
- try:
- return url_fh.read()
- finally:
- url_fh.close()
-
-
-def GetLatestV8InChromium():
- """Returns the commit position number of the latest v8 roll in chromium."""
-
- # Check currently rolled v8 revision.
- result = GetRequest(DEPS_LOG)
- if not result:
- return None
-
- # Strip security header and load json.
- commits = json.loads(result[5:])
-
- git_revision = None
- for commit in commits["log"]:
- # Get latest commit that matches the v8 roll pattern. Ignore cherry-picks.
- match = re.match(V8_COMMIT_RE, commit["message"])
- if match:
- git_revision = match.group(1)
- break
- else:
- return None
-
- # Get commit position number for v8 revision.
- result = GetRequest(CRREV % git_revision)
- if not result:
- return None
-
- commit = json.loads(result)
- assert commit["repo"] == "v8/v8"
- return commit["number"]
-
-
-def APIRequest(key, **params):
- """Send a request to the clusterfuzz api.
-
- Returns a json dict of the response.
- """
-
- params["api_key"] = key
- params = urllib.urlencode(params)
-
- headers = {"Content-type": "application/x-www-form-urlencoded"}
-
- try:
- conn = httplib.HTTPSConnection(HOSTNAME)
- conn.request("POST", "/_api/", params, headers)
-
- response = conn.getresponse()
-
- # Never leak "data" into public logs.
- data = response.read()
- except:
- raise Exception("ERROR: Connection problem.")
-
- try:
- return json.loads(data)
- except:
- raise Exception("ERROR: Could not read response. Is your key valid?")
-
- return None
-
-
-def Main():
- parser = argparse.ArgumentParser()
- parser.add_argument("-k", "--key-file", required=True,
- help="A file with the clusterfuzz api key.")
- parser.add_argument("-r", "--results-file",
- help="A file to write the results to.")
- options = parser.parse_args()
-
- # Get api key. The key's content must never be logged.
- assert options.key_file
- with open(options.key_file) as f:
- key = f.read().strip()
- assert key
-
- revision_number = GetLatestV8InChromium()
-
- results = []
- for spec in BUG_SPECS:
- args = dict(spec["args"])
- # Use incremented revision as we're interested in all revision greater than
- # what's currently rolled into chromium.
- if revision_number:
- args["revision_greater_or_equal"] = str(int(revision_number) + 1)
-
- # Never print issue details in public logs.
- issues = APIRequest(key, **args)
- assert issues is not None
- for issue in issues:
- if (re.match(spec["crash_state"], issue["crash_state"]) and
- not issue.get('has_bug_flag')):
- results.append(issue["id"])
-
- if options.results_file:
- with open(options.results_file, "w") as f:
- f.write(json.dumps(results))
- else:
- print(results)
-
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index b61a3e2e27..afbcc9f837 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,12 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
import datetime
-import httplib
+from distutils.version import LooseVersion
import glob
import imp
import json
@@ -43,11 +40,14 @@ import sys
import textwrap
import time
import urllib
-import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
+import http.client as httplib
+import urllib.request as urllib2
+
+
DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
@@ -92,16 +92,6 @@ def MSub(rexp, replacement, text):
return re.sub(rexp, replacement, text, flags=re.MULTILINE)
-def SortingKey(version):
- """Key for sorting version number strings: '3.11' > '3.2.1.1'"""
- version_keys = map(int, version.split("."))
- # Fill up to full version numbers to normalize comparison.
- while len(version_keys) < 4: # pragma: no cover
- version_keys.append(0)
- # Fill digits.
- return ".".join(map("{0:04d}".format, version_keys))
-
-
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True, cwd=None):
@@ -113,7 +103,7 @@ def Command(cmd, args="", prefix="", pipe=True, cwd=None):
sys.stdout.flush()
try:
if pipe:
- return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
+ return subprocess.check_output(cmd_line, shell=True, cwd=cwd).decode('utf-8')
else:
return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
except subprocess.CalledProcessError:
@@ -256,7 +246,7 @@ class GitInterface(VCInterface):
lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
self.step.GitRemotes())
# Remove 'branch-heads/' prefix.
- return map(lambda s: s[13:], branches)
+ return [b[13:] for b in branches]
def MainBranch(self):
return "main"
@@ -557,7 +547,7 @@ class Step(GitRecipesMixin):
int(time_now - max_age)).strip()
# Filter out revisions who's tag is off by one or more commits.
- return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
+ return list(filter(self.GetVersionTag, revisions.splitlines()))
def GetLatestVersion(self):
# Use cached version if available.
@@ -571,7 +561,7 @@ class Step(GitRecipesMixin):
only_version_tags = NormalizeVersionTags(all_tags)
version = sorted(only_version_tags,
- key=SortingKey, reverse=True)[0]
+ key=LooseVersion, reverse=True)[0]
self["latest_version"] = version
return version
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index d1a066f00b..37e9e8673c 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -1,19 +1,18 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
import os
import sys
import tempfile
-import urllib2
from common_includes import *
+import urllib.request as urllib2
+
+
class Preparation(Step):
MESSAGE = "Preparation."
@@ -48,7 +47,7 @@ class IncrementVersion(Step):
# Use the highest version from main or from tags to determine the new
# version.
authoritative_version = sorted(
- [main_version, latest_version], key=SortingKey)[1]
+ [main_version, latest_version], key=LooseVersion)[1]
self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index a90266aa71..865e13bf31 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
diff --git a/deps/v8/tools/release/list_deprecated.py b/deps/v8/tools/release/list_deprecated.py
index e25a5d713b..3549ecd427 100755
--- a/deps/v8/tools/release/list_deprecated.py
+++ b/deps/v8/tools/release/list_deprecated.py
@@ -30,13 +30,13 @@ class HeaderFile(object):
self.blame_list = self.get_blame_list()
@classmethod
- def get_api_header_files(clazz, options):
+ def get_api_header_files(cls, options):
files = subprocess.check_output(
['git', 'ls-tree', '--name-only', '-r', 'HEAD', options.include_dir],
encoding='UTF-8')
- files = filter(lambda l: l.endswith('.h'), files.splitlines())
+ files = map(Path, filter(lambda l: l.endswith('.h'), files.splitlines()))
with Pool(processes=24) as pool:
- return pool.map(HeaderFile, files)
+ return pool.map(cls, files)
def extract_version(self, hash):
if hash in VERSION_CACHE:
@@ -129,14 +129,22 @@ class HeaderFile(object):
content = line[start:pos].strip().replace('""', '')
deprecated.append((index + 1, commit_datetime, commit_hash, content))
index = index + 1
- if len(deprecated) == 0: return
for linenumber, commit_datetime, commit_hash, content in deprecated:
- commit_date = commit_datetime.date()
- file_position = (f"{self.path}:{linenumber}").ljust(40)
- v8_version = self.extract_version(commit_hash)
- print(f"{file_position} v{v8_version} {commit_date} {commit_hash[:8]}"
- f" {content}")
- return len(deprecated)
+ self.print_details(linenumber, commit_datetime, commit_hash, content)
+
+ def print_details(self, linenumber, commit_datetime, commit_hash, content):
+ commit_date = commit_datetime.date()
+ file_position = (f"{self.path}:{linenumber}").ljust(40)
+ v8_version = f"v{self.extract_version(commit_hash)}".rjust(5)
+ print(f"{file_position} {v8_version} {commit_date} {commit_hash[:8]}"
+ f" {content}")
+
+ def print_v8_version(self, options):
+ commit_hash, commit_datetime = subprocess.check_output(
+ ['git', 'log', '-1', '--format=%H%n%ct', self.path],
+ encoding='UTF-8').splitlines()
+ commit_datetime = datetime.fromtimestamp(int(commit_datetime))
+ self.print_details(11, commit_datetime, commit_hash, content="")
def parse_options(args):
@@ -163,10 +171,17 @@ def parse_options(args):
def main(args):
options = parse_options(args)
+
+ print("# CURRENT V8 VERSION:")
+ version = HeaderFile(Path(options.include_dir) / 'v8-version.h')
+ version.print_v8_version(options)
+
header_files = HeaderFile.get_api_header_files(options)
+ print("\n")
print("# V8_DEPRECATE_SOON:")
for header in header_files:
header.filter_and_print("V8_DEPRECATE_SOON", options)
+
print("\n")
print("# V8_DEPRECATED:")
for header in header_files:
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 08a36125f8..ca5b2ce674 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,9 +26,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
from collections import OrderedDict
import sys
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index d25f95e397..d1abe56edd 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,9 +26,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
from collections import OrderedDict
import sys
diff --git a/deps/v8/tools/release/script_test.py b/deps/v8/tools/release/script_test.py
index 0f345b7fa8..a0899911c6 100755
--- a/deps/v8/tools/release/script_test.py
+++ b/deps/v8/tools/release/script_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -29,9 +29,6 @@
# Wraps test execution with a coverage analysis. To get the best speed, the
# native python coverage version >= 3.7.1 should be installed.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import coverage
import os
import unittest
diff --git a/deps/v8/tools/release/search_related_commits.py b/deps/v8/tools/release/search_related_commits.py
deleted file mode 100755
index 48e6ae2592..0000000000
--- a/deps/v8/tools/release/search_related_commits.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import argparse
-import operator
-import os
-import re
-from sets import Set
-from subprocess import Popen, PIPE
-import sys
-
-def search_all_related_commits(
- git_working_dir, start_hash, until, separator, verbose=False):
-
- all_commits_raw = _find_commits_inbetween(
- start_hash, until, git_working_dir, verbose)
- if verbose:
- print("All commits between <of> and <until>: " + all_commits_raw)
-
- # Adding start hash too
- all_commits = [start_hash]
- all_commits.extend(all_commits_raw.splitlines())
- all_related_commits = {}
- already_treated_commits = Set([])
- for commit in all_commits:
- if commit in already_treated_commits:
- continue
-
- related_commits = _search_related_commits(
- git_working_dir, commit, until, separator, verbose)
- if len(related_commits) > 0:
- all_related_commits[commit] = related_commits
- already_treated_commits.update(related_commits)
-
- already_treated_commits.update(commit)
-
- return all_related_commits
-
-def _search_related_commits(
- git_working_dir, start_hash, until, separator, verbose=False):
-
- if separator:
- commits_between = _find_commits_inbetween(
- start_hash, separator, git_working_dir, verbose)
- if commits_between == "":
- return []
-
- # Extract commit position
- original_message = git_execute(
- git_working_dir,
- ["show", "-s", "--format=%B", start_hash],
- verbose)
- title = original_message.splitlines()[0]
-
- matches = re.search("(\{#)([0-9]*)(\})", original_message)
-
- if not matches:
- return []
-
- commit_position = matches.group(2)
- if verbose:
- print("1.) Commit position to look for: " + commit_position)
-
- search_range = start_hash + ".." + until
-
- def git_args(grep_pattern):
- return [
- "log",
- "--reverse",
- "--grep=" + grep_pattern,
- "--format=%H",
- search_range,
- ]
-
- found_by_hash = git_execute(
- git_working_dir, git_args(start_hash), verbose).strip()
-
- if verbose:
- print("2.) Found by hash: " + found_by_hash)
-
- found_by_commit_pos = git_execute(
- git_working_dir, git_args(commit_position), verbose).strip()
-
- if verbose:
- print("3.) Found by commit position: " + found_by_commit_pos)
-
- # Replace brackets or else they are wrongly interpreted by --grep
- title = title.replace("[", "\\[")
- title = title.replace("]", "\\]")
-
- found_by_title = git_execute(
- git_working_dir, git_args(title), verbose).strip()
-
- if verbose:
- print("4.) Found by title: " + found_by_title)
-
- hits = (
- _convert_to_array(found_by_hash) +
- _convert_to_array(found_by_commit_pos) +
- _convert_to_array(found_by_title))
- hits = _remove_duplicates(hits)
-
- if separator:
- for current_hit in hits:
- commits_between = _find_commits_inbetween(
- separator, current_hit, git_working_dir, verbose)
- if commits_between != "":
- return hits
- return []
-
- return hits
-
-def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
- commits_between = git_execute(
- git_working_dir,
- ["rev-list", "--reverse", start_hash + ".." + end_hash],
- verbose)
- return commits_between.strip()
-
-def _convert_to_array(string_of_hashes):
- return string_of_hashes.splitlines()
-
-def _remove_duplicates(array):
- no_duplicates = []
- for current in array:
- if not current in no_duplicates:
- no_duplicates.append(current)
- return no_duplicates
-
-def git_execute(working_dir, args, verbose=False):
- command = ["git", "-C", working_dir] + args
- if verbose:
- print("Git working dir: " + working_dir)
- print("Executing git command:" + str(command))
- p = Popen(args=command, stdin=PIPE,
- stdout=PIPE, stderr=PIPE)
- output, err = p.communicate()
- rc = p.returncode
- if rc != 0:
- raise Exception(err)
- if verbose:
- print("Git return value: " + output)
- return output
-
-def _pretty_print_entry(hash, git_dir, pre_text, verbose):
- text_to_print = git_execute(
- git_dir,
- ["show",
- "--quiet",
- "--date=iso",
- hash,
- "--format=%ad # %H # %s"],
- verbose)
- return pre_text + text_to_print.strip()
-
-def main(options):
- all_related_commits = search_all_related_commits(
- options.git_dir,
- options.of[0],
- options.until[0],
- options.separator,
- options.verbose)
-
- sort_key = lambda x: (
- git_execute(
- options.git_dir,
- ["show", "--quiet", "--date=iso", x, "--format=%ad"],
- options.verbose)).strip()
-
- high_level_commits = sorted(all_related_commits.keys(), key=sort_key)
-
- for current_key in high_level_commits:
- if options.prettyprint:
- yield _pretty_print_entry(
- current_key,
- options.git_dir,
- "+",
- options.verbose)
- else:
- yield "+" + current_key
-
- found_commits = all_related_commits[current_key]
- for current_commit in found_commits:
- if options.prettyprint:
- yield _pretty_print_entry(
- current_commit,
- options.git_dir,
- "| ",
- options.verbose)
- else:
- yield "| " + current_commit
-
-if __name__ == "__main__": # pragma: no cover
- parser = argparse.ArgumentParser(
- "This tool analyzes the commit range between <of> and <until>. "
- "It finds commits which belong together e.g. Implement/Revert pairs and "
- "Implement/Port/Revert triples. All supplied hashes need to be "
- "from the same branch e.g. main.")
- parser.add_argument("-g", "--git-dir", required=False, default=".",
- help="The path to your git working directory.")
- parser.add_argument("--verbose", action="store_true",
- help="Enables a very verbose output")
- parser.add_argument("of", nargs=1,
- help="Hash of the commit to be searched.")
- parser.add_argument("until", nargs=1,
- help="Commit when searching should stop")
- parser.add_argument("--separator", required=False,
- help="The script will only list related commits "
- "which are separated by hash <--separator>.")
- parser.add_argument("--prettyprint", action="store_true",
- help="Pretty prints the output")
-
- args = sys.argv[1:]
- options = parser.parse_args(args)
- for current_line in main(options):
- print(current_line)
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index e8757cf277..1777984a58 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,9 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
+import json
import os
import shutil
import tempfile
@@ -44,7 +42,6 @@ import create_release
from create_release import *
import merge_to_branch
from merge_to_branch import MergeToBranch
-from auto_tag import AutoTag
import roll_merge
from roll_merge import RollMerge
@@ -93,6 +90,10 @@ class ToplevelTest(unittest.TestCase):
]
self.assertEquals(expected, NormalizeVersionTags(input))
+ def testCommand(self):
+ """Ensure json can decode the output of commands."""
+ json.dumps(Command('ls', pipe=True))
+
def Cmd(*args, **kwargs):
"""Convenience function returning a shell command test expectation."""
diff --git a/deps/v8/tools/release/test_search_related_commits.py b/deps/v8/tools/release/test_search_related_commits.py
deleted file mode 100755
index 6943915fd6..0000000000
--- a/deps/v8/tools/release/test_search_related_commits.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from collections import namedtuple
-from os import path
-import search_related_commits
-import shutil
-from subprocess import Popen, PIPE, check_call
-import unittest
-
-
-TEST_CONFIG = {
- "GIT_REPO": "/tmp/test-v8-search-related-commits",
-}
-
-class TestSearchRelatedCommits(unittest.TestCase):
-
- base_dir = TEST_CONFIG["GIT_REPO"]
-
- def _execute_git(self, git_args):
-
- fullCommand = ["git", "-C", self.base_dir] + git_args
- p = Popen(args=fullCommand, stdin=PIPE,
- stdout=PIPE, stderr=PIPE)
- output, err = p.communicate()
- rc = p.returncode
- if rc != 0:
- raise Exception(err)
- return output
-
- def setUp(self):
- if path.exists(self.base_dir):
- shutil.rmtree(self.base_dir)
-
- check_call(["git", "init", self.base_dir])
-
- # Initial commit
- message = """[turbofan] Sanitize language mode for javascript operators.
-
- R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28059}"""
- self._make_empty_commit(message)
-
- message = """[crankshaft] Do some stuff
-
- R=hablich@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243007
-
- Cr-Commit-Position: refs/heads/main@{#28030}"""
-
- self._make_empty_commit(message)
-
- def tearDown(self):
- if path.exists(self.base_dir):
- shutil.rmtree(self.base_dir)
-
- def _assert_correct_standard_result(
- self, result, all_commits, hash_of_first_commit):
- self.assertEqual(len(result), 1, "Main commit not found")
- self.assertTrue(
- result.get(hash_of_first_commit),
- "Main commit is wrong")
-
- self.assertEqual(
- len(result[hash_of_first_commit]),
- 1,
- "Child commit not found")
- self.assertEqual(
- all_commits[2],
- result[hash_of_first_commit][0],
- "Child commit wrong")
-
- def _get_commits(self):
- commits = self._execute_git(
- ["log", "--format=%H", "--reverse"]).splitlines()
- return commits
-
- def _make_empty_commit(self, message):
- self._execute_git(["commit", "--allow-empty", "-m", message])
-
- def testSearchByCommitPosition(self):
- message = """Revert of some stuff.
- > Cr-Commit-Position: refs/heads/main@{#28059}
- R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28088}"""
-
- self._make_empty_commit(message)
-
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
-
- result = search_related_commits.search_all_related_commits(
- self.base_dir, hash_of_first_commit, "HEAD", None)
-
- self._assert_correct_standard_result(result, commits, hash_of_first_commit)
-
- def testSearchByTitle(self):
- message = """Revert of some stuff.
- > [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/main@{#289}
- R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28088}"""
-
- self._make_empty_commit(message)
-
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
-
- result = search_related_commits.search_all_related_commits(
- self.base_dir, hash_of_first_commit, "HEAD", None)
-
- self._assert_correct_standard_result(result, commits, hash_of_first_commit)
-
- def testSearchByHash(self):
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
-
- message = """Revert of some stuff.
- > [turbofan] Sanitize language mode for javascript operators.
- > Reverting """ + hash_of_first_commit + """
- > R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28088}"""
-
- self._make_empty_commit(message)
-
- #Fetch again for an update
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
-
- result = search_related_commits.search_all_related_commits(
- self.base_dir,
- hash_of_first_commit,
- "HEAD",
- None)
-
- self._assert_correct_standard_result(result, commits, hash_of_first_commit)
-
- def testConsiderSeparator(self):
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
-
- # Related commits happen before separator so it is not a hit
- message = """Revert of some stuff: Not a hit
- > [turbofan] Sanitize language mode for javascript operators.
- > Reverting """ + hash_of_first_commit + """
- > R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28088}"""
- self._make_empty_commit(message)
-
- # Related commits happen before and after separator so it is a hit
- commit_pos_of_main = "27088"
- message = """Implement awesome feature: Main commit
-
- Review URL: https://codereview.chromium.org/1084243235
-
- Cr-Commit-Position: refs/heads/main@{#""" + commit_pos_of_main + "}"
- self._make_empty_commit(message)
-
- # Separator commit
- message = """Commit which is the origin of the branch
-
- Review URL: https://codereview.chromium.org/1084243456
-
- Cr-Commit-Position: refs/heads/main@{#28173}"""
- self._make_empty_commit(message)
-
- # Filler commit
- message = "Some unrelated commit: Not a hit"
- self._make_empty_commit(message)
-
- # Related commit after separator: a hit
- message = "Patch r" + commit_pos_of_main +""" done
-
- Review URL: https://codereview.chromium.org/1084243235
-
- Cr-Commit-Position: refs/heads/main@{#29567}"""
- self._make_empty_commit(message)
-
- #Fetch again for an update
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
- hash_of_hit = commits[3]
- hash_of_separator = commits[4]
- hash_of_child_hit = commits[6]
-
- result = search_related_commits.search_all_related_commits(
- self.base_dir,
- hash_of_first_commit,
- "HEAD",
- hash_of_separator)
-
- self.assertTrue(result.get(hash_of_hit), "Hit not found")
- self.assertEqual(len(result), 1, "More than one hit found")
- self.assertEqual(
- len(result.get(hash_of_hit)),
- 1,
- "More than one child hit found")
- self.assertEqual(
- result.get(hash_of_hit)[0],
- hash_of_child_hit,
- "Wrong commit found")
-
- def testPrettyPrint(self):
- message = """Revert of some stuff.
- > [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/main@{#289}
- R=mstarzinger@chromium.org
-
- Review URL: https://codereview.chromium.org/1084243005
-
- Cr-Commit-Position: refs/heads/main@{#28088}"""
-
- self._make_empty_commit(message)
-
- commits = self._get_commits()
- hash_of_first_commit = commits[0]
- OptionsStruct = namedtuple(
- "OptionsStruct",
- "git_dir of until all prettyprint separator verbose")
- options = OptionsStruct(
- git_dir= self.base_dir,
- of= [hash_of_first_commit],
- until= [commits[2]],
- all= True,
- prettyprint= True,
- separator = None,
- verbose=False)
- output = []
- for current_line in search_related_commits.main(options):
- output.append(current_line)
-
- self.assertIs(len(output), 2, "Not exactly two entries written")
- self.assertTrue(output[0].startswith("+"), "Main entry not marked with +")
- self.assertTrue(output[1].startswith("| "), "Child entry not marked with |")
-
- def testNothingFound(self):
- commits = self._get_commits()
-
- self._execute_git(["commit", "--allow-empty", "-m", "A"])
- self._execute_git(["commit", "--allow-empty", "-m", "B"])
- self._execute_git(["commit", "--allow-empty", "-m", "C"])
- self._execute_git(["commit", "--allow-empty", "-m", "D"])
-
- hash_of_first_commit = commits[0]
- result = search_related_commits.search_all_related_commits(
- self.base_dir,
- hash_of_first_commit,
- "HEAD",
- None)
-
- self.assertEqual(len(result), 0, "Results found where none should be.")
-
-
-if __name__ == "__main__":
- #import sys;sys.argv = ['', 'Test.testName']
- unittest.main()
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 1e22b298a8..4dd7d87996 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -119,13 +119,21 @@ import sys
import time
import traceback
-import numpy
-
from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
+from math import sqrt
+# NOTE: added import here to prevent breakages during the py2/3 migration,
+# once we enable python3 only, we can move the import up
+try:
+ from numpy import mean
+ from numpy import std as stdev
+except ImportError:
+ from statistics import mean, stdev
+
+
# for py2/py3 compatibility
try:
basestring # Python 2
@@ -265,11 +273,11 @@ class ResultTracker(object):
return False
logging.debug(' Results: %d entries', len(results))
- mean = numpy.mean(results)
- mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
- logging.debug(' Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
- logging.info('>>> Confidence level is %.2f', mean / (1000.0 * mean_stderr))
- return confidence_level * mean_stderr < mean / 1000.0
+ avg = mean(results)
+ avg_stderr = stdev(results) / sqrt(len(results))
+ logging.debug(' Mean: %.2f, mean_stderr: %.2f', avg, avg_stderr)
+ logging.info('>>> Confidence level is %.2f', avg / (1000.0 * avg_stderr))
+ return confidence_level * avg_stderr < avg / 1000.0
def __str__(self): # pragma: no cover
return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
@@ -289,7 +297,8 @@ def RunResultsProcessor(results_processor, output, count):
stderr=subprocess.PIPE,
)
new_output = copy.copy(output)
- new_output.stdout, _ = p.communicate(input=output.stdout)
+ new_output.stdout = p.communicate(
+ input=output.stdout.encode('utf-8'))[0].decode('utf-8')
logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
return new_output
diff --git a/deps/v8/tools/sanitizers/tsan_suppressions.txt b/deps/v8/tools/sanitizers/tsan_suppressions.txt
index 270340e484..f9e3942039 100644
--- a/deps/v8/tools/sanitizers/tsan_suppressions.txt
+++ b/deps/v8/tools/sanitizers/tsan_suppressions.txt
@@ -4,3 +4,7 @@
# Incorrectly detected lock cycles in test-lockers
# https://code.google.com/p/thread-sanitizer/issues/detail?id=81
deadlock:LockAndUnlockDifferentIsolatesThread::Run
+
+# A global safepoint might lock client isolate mutexes in any order, which
+# would be reported as potential deadlocks.
+deadlock:GlobalSafepoint::EnterGlobalSafepointScope
diff --git a/deps/v8/tools/splaytree.mjs b/deps/v8/tools/splaytree.mjs
index d942d1f463..ac25cf0668 100644
--- a/deps/v8/tools/splaytree.mjs
+++ b/deps/v8/tools/splaytree.mjs
@@ -49,7 +49,7 @@ export class SplayTree {
* @return {boolean} Whether the tree is empty.
*/
isEmpty() {
- return !this.root_;
+ return this.root_ === null;
}
/**
@@ -100,7 +100,7 @@ export class SplayTree {
throw Error(`Key not found: ${key}`);
}
const removed = this.root_;
- if (!this.root_.left) {
+ if (this.root_.left === null) {
this.root_ = this.root_.right;
} else {
const { right } = this.root_;
@@ -133,7 +133,7 @@ export class SplayTree {
findMin() {
if (this.isEmpty()) return null;
let current = this.root_;
- while (current.left) {
+ while (current.left !== null) {
current = current.left;
}
return current;
@@ -145,7 +145,7 @@ export class SplayTree {
findMax(opt_startNode) {
if (this.isEmpty()) return null;
let current = opt_startNode || this.root_;
- while (current.right) {
+ while (current.right !== null) {
current = current.right;
}
return current;
@@ -164,7 +164,7 @@ export class SplayTree {
// the left subtree.
if (this.root_.key <= key) {
return this.root_;
- } else if (this.root_.left) {
+ } else if (this.root_.left !== null) {
return this.findMax(this.root_.left);
} else {
return null;
@@ -186,7 +186,7 @@ export class SplayTree {
*/
exportValues() {
const result = [];
- this.traverse_(function(node) { result.push(node.value); });
+ this.traverse_(function(node) { result.push(node.value) });
return result;
}
@@ -212,36 +212,28 @@ export class SplayTree {
let current = this.root_;
while (true) {
if (key < current.key) {
- if (!current.left) {
- break;
- }
+ if (current.left === null) break;
if (key < current.left.key) {
// Rotate right.
const tmp = current.left;
current.left = tmp.right;
tmp.right = current;
current = tmp;
- if (!current.left) {
- break;
- }
+ if (current.left === null) break;
}
// Link right.
right.left = current;
right = current;
current = current.left;
} else if (key > current.key) {
- if (!current.right) {
- break;
- }
+ if (current.right === null) break;
if (key > current.right.key) {
// Rotate left.
const tmp = current.right;
current.right = tmp.left;
tmp.left = current;
current = tmp;
- if (!current.right) {
- break;
- }
+ if (current.right === null) break;
}
// Link left.
left.right = current;
@@ -269,9 +261,7 @@ export class SplayTree {
const nodesToVisit = [this.root_];
while (nodesToVisit.length > 0) {
const node = nodesToVisit.shift();
- if (node == null) {
- continue;
- }
+ if (node === null) continue;
f(node);
nodesToVisit.push(node.left);
nodesToVisit.push(node.right);
@@ -298,4 +288,4 @@ class SplayTreeNode {
*/
this.right = null;
}
-}; \ No newline at end of file
+};
diff --git a/deps/v8/tools/system-analyzer/helper.mjs b/deps/v8/tools/system-analyzer/helper.mjs
index ba6d0614f2..a50e06d3be 100644
--- a/deps/v8/tools/system-analyzer/helper.mjs
+++ b/deps/v8/tools/system-analyzer/helper.mjs
@@ -2,48 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export const KB = 1024;
-export const MB = KB * KB;
-export const GB = MB * KB;
-export const kMicro2Milli = 1 / 1000;
-
-export function formatBytes(bytes) {
- const units = ['B', 'KiB', 'MiB', 'GiB'];
- const divisor = 1024;
- let index = 0;
- while (index < units.length && bytes >= divisor) {
- index++;
- bytes /= divisor;
- }
- return bytes.toFixed(2) + units[index];
-}
-
-export function formatMicroSeconds(micro) {
- return (micro * kMicro2Milli).toFixed(1) + 'ms';
-}
-
-export function formatDurationMicros(micros, secondsDigits = 3) {
- return formatDurationMillis(micros * kMicro2Milli, secondsDigits);
-}
-
-export function formatDurationMillis(millis, secondsDigits = 3) {
- if (millis < 1000) {
- if (millis < 1) {
- return (millis / kMicro2Milli).toFixed(1) + 'ns';
- }
- return millis.toFixed(2) + 'ms';
- }
- let seconds = millis / 1000;
- const hours = Math.floor(seconds / 3600);
- const minutes = Math.floor((seconds % 3600) / 60);
- seconds = seconds % 60;
- let buffer = ''
- if (hours > 0) buffer += hours + 'h ';
- if (hours > 0 || minutes > 0) buffer += minutes + 'm ';
- buffer += seconds.toFixed(secondsDigits) + 's'
- return buffer;
-}
-
export function delay(time) {
return new Promise(resolver => setTimeout(resolver, time));
}
@@ -105,3 +63,14 @@ export function groupBy(array, keyFunction, collect = false) {
// Sort by length
return groups.sort((a, b) => b.length - a.length);
}
+
+export function arrayEquals(left, right) {
+ if (left == right) return true;
+ if (left.length != right.length) return false;
+ for (let i = 0; i < left.length; i++) {
+ if (left[i] != right[i]) return false;
+ }
+ return true;
+}
+
+export * from '../js/helper.mjs'
diff --git a/deps/v8/tools/system-analyzer/index.css b/deps/v8/tools/system-analyzer/index.css
index 4525f0d9b4..b2fb95843f 100644
--- a/deps/v8/tools/system-analyzer/index.css
+++ b/deps/v8/tools/system-analyzer/index.css
@@ -13,6 +13,7 @@
--map-background-color: #5e5454;
--timeline-background-color: #1f1f1f;
--file-reader-background-color: #ffffff80;
+ --file-reader-border-color: #ffffff;
--red: #dc6eae;
--green: #aedc6e;
--yellow: #eeff41;
diff --git a/deps/v8/tools/system-analyzer/index.html b/deps/v8/tools/system-analyzer/index.html
index e85a59d6e6..0a333dd18f 100644
--- a/deps/v8/tools/system-analyzer/index.html
+++ b/deps/v8/tools/system-analyzer/index.html
@@ -11,7 +11,7 @@ found in the LICENSE file. -->
<link rel="modulepreload" href="./helper.mjs" >
<link rel="modulepreload" href="./view/log-file-reader.mjs" >
<link rel="modulepreload" href="./view/helper.mjs" >
- <link rel="preload" href="./view/log-file-reader-template.html" as="fetch" crossorigin="anonymous">
+ <link rel="preload" href="../js/log-file-reader-template.html" as="fetch" crossorigin="anonymous">
<script type="module">
// Force instatiating the log-reader before anything else.
import "./view/log-file-reader.mjs";
diff --git a/deps/v8/tools/system-analyzer/index.mjs b/deps/v8/tools/system-analyzer/index.mjs
index 2cae0d3b6d..41463d9484 100644
--- a/deps/v8/tools/system-analyzer/index.mjs
+++ b/deps/v8/tools/system-analyzer/index.mjs
@@ -95,7 +95,7 @@ class App {
document.addEventListener(
SelectionEvent.name, e => this.handleSelectEntries(e))
document.addEventListener(
- FocusEvent.name, e => this.handleFocusLogEntryl(e));
+ FocusEvent.name, e => this.handleFocusLogEntry(e));
document.addEventListener(
SelectTimeEvent.name, e => this.handleTimeRangeSelect(e));
document.addEventListener(ToolTipEvent.name, e => this.handleToolTip(e));
@@ -151,7 +151,7 @@ class App {
handleSelectEntries(e) {
e.stopImmediatePropagation();
- this.showEntries(e.entries);
+ this.selectEntries(e.entries);
}
selectEntries(entries) {
@@ -160,29 +160,30 @@ class App {
this.selectEntriesOfSingleType(group.entries);
missingTypes.delete(group.key);
});
- missingTypes.forEach(type => this.selectEntriesOfSingleType([], type));
+ missingTypes.forEach(
+ type => this.selectEntriesOfSingleType([], type, false));
}
- selectEntriesOfSingleType(entries, type) {
+ selectEntriesOfSingleType(entries, type, focusView = true) {
const entryType = entries[0]?.constructor ?? type;
switch (entryType) {
case Script:
entries = entries.flatMap(script => script.sourcePositions);
- return this.showSourcePositions(entries);
+ return this.showSourcePositions(entries, focusView);
case SourcePosition:
- return this.showSourcePositions(entries);
+ return this.showSourcePositions(entries, focusView);
case MapLogEntry:
- return this.showMapEntries(entries);
+ return this.showMapEntries(entries, focusView);
case IcLogEntry:
- return this.showIcEntries(entries);
+ return this.showIcEntries(entries, focusView);
case ApiLogEntry:
- return this.showApiEntries(entries);
+ return this.showApiEntries(entries, focusView);
case CodeLogEntry:
- return this.showCodeEntries(entries);
+ return this.showCodeEntries(entries, focusView);
case DeoptLogEntry:
- return this.showDeoptEntries(entries);
+ return this.showDeoptEntries(entries, focusView);
case SharedLibLogEntry:
- return this.showSharedLibEntries(entries);
+ return this.showSharedLibEntries(entries, focusView);
case TimerLogEntry:
case TickLogEntry:
break;
@@ -245,7 +246,7 @@ class App {
this._view.timelinePanel.timeSelection = {start, end};
}
- handleFocusLogEntryl(e) {
+ handleFocusLogEntry(e) {
e.stopImmediatePropagation();
this.focusLogEntry(e.entry);
}
@@ -281,11 +282,11 @@ class App {
this._state.map = entry;
this._view.mapTrack.focusedEntry = entry;
this._view.mapPanel.map = entry;
- this._view.mapPanel.show();
if (focusSourcePosition) {
this.focusCodeLogEntry(entry.code, false);
this.focusSourcePosition(entry.sourcePosition);
}
+ this._view.mapPanel.show();
}
focusIcLogEntry(entry) {
diff --git a/deps/v8/tools/system-analyzer/log/code.mjs b/deps/v8/tools/system-analyzer/log/code.mjs
index feee95361e..4e8ca40f5e 100644
--- a/deps/v8/tools/system-analyzer/log/code.mjs
+++ b/deps/v8/tools/system-analyzer/log/code.mjs
@@ -66,6 +66,10 @@ export class CodeLogEntry extends LogEntry {
return this._kindName === 'Builtin';
}
+ get isBytecodeKind() {
+ return this._kindName === 'Unopt';
+ }
+
get kindName() {
return this._kindName;
}
diff --git a/deps/v8/tools/system-analyzer/log/tick.mjs b/deps/v8/tools/system-analyzer/log/tick.mjs
index 64dbeb3780..e8093df93f 100644
--- a/deps/v8/tools/system-analyzer/log/tick.mjs
+++ b/deps/v8/tools/system-analyzer/log/tick.mjs
@@ -10,6 +10,28 @@ export class TickLogEntry extends LogEntry {
super(TickLogEntry.extractType(vmState, processedStack), time);
this.state = vmState;
this.stack = processedStack;
+ this._endTime = time;
+ }
+
+ end(time) {
+ if (this.isInitialized) throw new Error('Invalid timer change');
+ this._endTime = time;
+ }
+
+ get isInitialized() {
+ return this._endTime !== this._time;
+ }
+
+ get startTime() {
+ return this._time;
+ }
+
+ get endTime() {
+ return this._endTime;
+ }
+
+ get duration() {
+ return this._endTime - this._time;
}
static extractType(vmState, processedStack) {
@@ -34,4 +56,4 @@ export class TickLogEntry extends LogEntry {
if (entry?.vmState) return Profile.vmStateString(entry.vmState);
return 'Other';
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/tools/system-analyzer/processor.mjs b/deps/v8/tools/system-analyzer/processor.mjs
index 4f192ba96f..38f3a46b9e 100644
--- a/deps/v8/tools/system-analyzer/processor.mjs
+++ b/deps/v8/tools/system-analyzer/processor.mjs
@@ -59,6 +59,7 @@ export class Processor extends LogReader {
_formatPCRegexp = /(.*):[0-9]+:[0-9]+$/;
_lastTimestamp = 0;
_lastCodeLogEntry;
+ _lastTickLogEntry;
_chunkRemainder = '';
MAJOR_VERSION = 7;
MINOR_VERSION = 6;
@@ -248,6 +249,9 @@ export class Processor extends LogReader {
async finalize() {
await this._chunkConsumer.consumeAll();
+ if (this._profile.warnings.size > 0) {
+ console.warn('Found profiler warnings:', this._profile.warnings);
+ }
// TODO(cbruni): print stats;
this._mapTimeline.transitions = new Map();
let id = 0;
@@ -387,7 +391,12 @@ export class Processor extends LogReader {
const entryStack = this._profile.recordTick(
time_ns, vmState,
this.processStack(pc, tos_or_external_callback, stack));
- this._tickTimeline.push(new TickLogEntry(time_ns, vmState, entryStack))
+ const newEntry = new TickLogEntry(time_ns, vmState, entryStack);
+ this._tickTimeline.push(newEntry);
+ if (this._lastTickLogEntry !== undefined) {
+ this._lastTickLogEntry.end(time_ns);
+ }
+ this._lastTickLogEntry = newEntry;
}
processCodeSourceInfo(
diff --git a/deps/v8/tools/system-analyzer/view/code-panel-template.html b/deps/v8/tools/system-analyzer/view/code-panel-template.html
index 105e6b980c..d237ac3a51 100644
--- a/deps/v8/tools/system-analyzer/view/code-panel-template.html
+++ b/deps/v8/tools/system-analyzer/view/code-panel-template.html
@@ -9,17 +9,20 @@ found in the LICENSE file. -->
#sourceCode {
white-space: pre-line;
}
- .register {
+ .reg, .addr {
border-bottom: 1px dashed;
border-radius: 2px;
}
- .register:hover {
+ .reg:hover, .addr:hover {
background-color: var(--border-color);
}
- .register.selected {
+ .reg.selected, .addr.selected {
color: var(--default-color);
background-color: var(--border-color);
}
+ .addr:hover {
+ cursor: pointer;
+ }
</style>
<div class="panel">
@@ -37,7 +40,5 @@ found in the LICENSE file. -->
<property-link-table id="feedbackVector"></property-link-table>
<h3>Disassembly</h3>
<pre id="disassembly"></pre>
- <h3>Source Code</h3>
- <pre id="sourceCode"></pre>
</div>
</div>
diff --git a/deps/v8/tools/system-analyzer/view/code-panel.mjs b/deps/v8/tools/system-analyzer/view/code-panel.mjs
index 084c8fb2d3..42fe7b3d4c 100644
--- a/deps/v8/tools/system-analyzer/view/code-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/code-panel.mjs
@@ -1,14 +1,22 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import {LinuxCppEntriesProvider} from '../../tickprocessor.mjs';
import {SelectRelatedEvent} from './events.mjs';
import {CollapsableElement, DOM, formatBytes, formatMicroSeconds} from './helper.mjs';
const kRegisters = ['rsp', 'rbp', 'rax', 'rbx', 'rcx', 'rdx', 'rsi', 'rdi'];
-// Add Interpreter and x64 registers
-for (let i = 0; i < 14; i++) {
- kRegisters.push(`r${i}`);
-}
+// Make sure we dont match register on bytecode: Star1 or Star2
+const kAvoidBytecodeOps = '(.*?[^a-zA-Z])'
+// Look for registers in strings like: movl rbx,[rcx-0x30]
+const kRegisterRegexp = `(${kRegisters.join('|')}|r[0-9]+)`
+const kRegisterRegexpSplit =
+ new RegExp(`${kAvoidBytecodeOps}${kRegisterRegexp}`)
+const kIsRegisterRegexp = new RegExp(`^${kRegisterRegexp}$`);
+
+const kFullAddressRegexp = /(0x[0-9a-f]{8,})/;
+const kRelativeAddressRegexp = /([+-]0x[0-9a-f]+)/;
+const kAnyAddressRegexp = /([+-]?0x[0-9a-f]+)/;
DOM.defineCustomElement('view/code-panel',
(templateText) =>
@@ -23,8 +31,7 @@ DOM.defineCustomElement('view/code-panel',
this._codeSelectNode = this.$('#codeSelect');
this._disassemblyNode = this.$('#disassembly');
this._feedbackVectorNode = this.$('#feedbackVector');
- this._sourceNode = this.$('#sourceCode');
- this._registerSelector = new RegisterSelector(this._disassemblyNode);
+ this._selectionHandler = new SelectionHandler(this._disassemblyNode);
this._codeSelectNode.onchange = this._handleSelectCode.bind(this);
this.$('#selectedRelatedButton').onclick =
@@ -56,7 +63,8 @@ DOM.defineCustomElement('view/code-panel',
script: entry.script,
type: entry.type,
kind: entry.kindName,
- variants: entry.variants.length > 1 ? entry.variants : undefined,
+ variants: entry.variants.length > 1 ? [undefined, ...entry.variants] :
+ undefined,
};
}
this.requestUpdate();
@@ -66,7 +74,6 @@ DOM.defineCustomElement('view/code-panel',
this._updateSelect();
this._updateDisassembly();
this._updateFeedbackVector();
- this._sourceNode.innerText = this._entry?.source ?? '';
}
_updateFeedbackVector() {
@@ -81,24 +88,14 @@ DOM.defineCustomElement('view/code-panel',
}
_updateDisassembly() {
- if (!this._entry?.code) {
- this._disassemblyNode.innerText = '';
- return;
- }
- const rawCode = this._entry?.code;
+ this._disassemblyNode.innerText = '';
+ if (!this._entry?.code) return;
try {
- this._disassemblyNode.innerText = rawCode;
- let formattedCode = this._disassemblyNode.innerHTML;
- for (let register of kRegisters) {
- const button = `<span class="register ${register}">${register}</span>`
- formattedCode = formattedCode.replaceAll(register, button);
- }
- // Let's replace the base-address since it doesn't add any value.
- // TODO
- this._disassemblyNode.innerHTML = formattedCode;
+ this._disassemblyNode.appendChild(
+ new AssemblyFormatter(this._entry).fragment);
} catch (e) {
console.error(e);
- this._disassemblyNode.innerText = rawCode;
+ this._disassemblyNode.innerText = this._entry.code;
}
}
@@ -135,34 +132,133 @@ DOM.defineCustomElement('view/code-panel',
}
});
-class RegisterSelector {
- _currentRegister;
+class AssemblyFormatter {
+ constructor(codeLogEntry) {
+ this._fragment = new DocumentFragment();
+ this._entry = codeLogEntry;
+ codeLogEntry.code.split('\n').forEach(line => this._addLine(line));
+ }
+
+ get fragment() {
+ return this._fragment;
+ }
+
+ _addLine(line) {
+ const parts = line.split(' ');
+ let lineAddress = 0;
+ if (kFullAddressRegexp.test(parts[0])) {
+ lineAddress = parseInt(parts[0]);
+ }
+ const content = DOM.span({textContent: parts.join(' ') + '\n'});
+ let formattedCode = content.innerHTML.split(kRegisterRegexpSplit)
+ .map(part => this._formatRegisterPart(part))
+ .join('');
+ formattedCode = formattedCode.split(kAnyAddressRegexp)
+ .map(
+ (part, index) => this._formatAddressPart(
+ part, index, lineAddress))
+ .join('');
+ // Let's replace the base-address since it doesn't add any value.
+ // TODO
+ content.innerHTML = formattedCode;
+ this._fragment.appendChild(content);
+ }
+
+ _formatRegisterPart(part) {
+ if (!kIsRegisterRegexp.test(part)) return part;
+ return `<span class="reg ${part}">${part}</span>`
+ }
+
+ _formatAddressPart(part, index, lineAddress) {
+ if (kFullAddressRegexp.test(part)) {
+ // The first or second address must be the line address
+ if (index <= 1) {
+ return `<span class="addr line" data-addr="${part}">${part}</span>`;
+ }
+ return `<span class=addr data-addr="${part}">${part}</span>`;
+ } else if (kRelativeAddressRegexp.test(part)) {
+ const targetAddress = (lineAddress + parseInt(part)).toString(16);
+ return `<span class=addr data-addr="0x${targetAddress}">${part}</span>`;
+ } else {
+ return part;
+ }
+ }
+}
+
+class SelectionHandler {
+ _currentRegisterHovered;
+ _currentRegisterClicked;
+
constructor(node) {
this._node = node;
- this._node.onmousemove = this._handleDisassemblyMouseMove.bind(this);
+ this._node.onmousemove = this._handleMouseMove.bind(this);
+ this._node.onclick = this._handleClick.bind(this);
+ }
+
+ $(query) {
+ return this._node.querySelectorAll(query);
+ }
+
+ _handleClick(event) {
+ const target = event.target;
+ if (target.classList.contains('addr')) {
+ return this._handleClickAddress(target);
+ } else if (target.classList.contains('reg')) {
+ this._handleClickRegister(target);
+ } else {
+ this._clearRegisterSelection();
+ }
+ }
+
+ _handleClickAddress(target) {
+ let targetAddress = target.getAttribute('data-addr') ?? target.innerText;
+ // Clear any selection
+ for (let addrNode of this.$('.addr.selected')) {
+ addrNode.classList.remove('selected');
+ }
+ // Highlight all matching addresses
+ let lineAddrNode;
+ for (let addrNode of this.$(`.addr[data-addr="${targetAddress}"]`)) {
+ addrNode.classList.add('selected');
+ if (addrNode.classList.contains('line') && lineAddrNode == undefined) {
+ lineAddrNode = addrNode;
+ }
+ }
+ // Jump to potential target address.
+ if (lineAddrNode) {
+ lineAddrNode.scrollIntoView({behavior: 'smooth', block: 'nearest'});
+ }
}
- _handleDisassemblyMouseMove(event) {
+ _handleClickRegister(target) {
+ this._setRegisterSelection(target.innerText);
+ this._currentRegisterClicked = this._currentRegisterHovered;
+ }
+
+ _handleMouseMove(event) {
+ if (this._currentRegisterClicked) return;
const target = event.target;
- if (!target.classList.contains('register')) {
- this._clear();
- return;
- };
- this._select(target.innerText);
+ if (!target.classList.contains('reg')) {
+ this._clearRegisterSelection();
+ } else {
+ this._setRegisterSelection(target.innerText);
+ }
}
- _clear() {
- if (this._currentRegister == undefined) return;
- for (let node of this._node.querySelectorAll('.register')) {
+ _clearRegisterSelection() {
+ if (!this._currentRegisterHovered) return;
+ for (let node of this.$('.reg.selected')) {
node.classList.remove('selected');
}
+ this._currentRegisterClicked = undefined;
+ this._currentRegisterHovered = undefined;
}
- _select(register) {
- if (register == this._currentRegister) return;
- this._clear();
- this._currentRegister = register;
- for (let node of this._node.querySelectorAll(`.register.${register}`)) {
+ _setRegisterSelection(register) {
+ if (register == this._currentRegisterHovered) return;
+ this._clearRegisterSelection();
+ this._currentRegisterHovered = register;
+ for (let node of this.$(`.reg.${register}`)) {
node.classList.add('selected');
}
}
diff --git a/deps/v8/tools/system-analyzer/view/helper.mjs b/deps/v8/tools/system-analyzer/view/helper.mjs
index 50dc6a9a03..93823e1106 100644
--- a/deps/v8/tools/system-analyzer/view/helper.mjs
+++ b/deps/v8/tools/system-analyzer/view/helper.mjs
@@ -122,117 +122,7 @@ export class CSSColor {
}
}
-export class DOM {
- static element(type, options) {
- const node = document.createElement(type);
- if (options !== undefined) {
- if (typeof options === 'string') {
- // Old behaviour: options = class string
- node.className = options;
- } else if (Array.isArray(options)) {
- // Old behaviour: options = class array
- DOM.addClasses(node, options);
- } else {
- // New behaviour: options = attribute dict
- for (const [key, value] of Object.entries(options)) {
- if (key == 'className') {
- node.className = value;
- } else if (key == 'classList') {
- node.classList = value;
- } else if (key == 'textContent') {
- node.textContent = value;
- } else if (key == 'children') {
- for (const child of value) {
- node.appendChild(child);
- }
- } else {
- node.setAttribute(key, value);
- }
- }
- }
- }
- return node;
- }
-
- static addClasses(node, classes) {
- const classList = node.classList;
- if (typeof classes === 'string') {
- classList.add(classes);
- } else {
- for (let i = 0; i < classes.length; i++) {
- classList.add(classes[i]);
- }
- }
- return node;
- }
-
- static text(string) {
- return document.createTextNode(string);
- }
-
- static button(label, clickHandler) {
- const button = DOM.element('button');
- button.innerText = label;
- button.onclick = clickHandler;
- return button;
- }
-
- static div(options) {
- return this.element('div', options);
- }
-
- static span(options) {
- return this.element('span', options);
- }
-
- static table(options) {
- return this.element('table', options);
- }
-
- static tbody(options) {
- return this.element('tbody', options);
- }
-
- static td(textOrNode, className) {
- const node = this.element('td');
- if (typeof textOrNode === 'object') {
- node.appendChild(textOrNode);
- } else if (textOrNode) {
- node.innerText = textOrNode;
- }
- if (className) node.className = className;
- return node;
- }
-
- static tr(classes) {
- return this.element('tr', classes);
- }
-
- static removeAllChildren(node) {
- let range = document.createRange();
- range.selectNodeContents(node);
- range.deleteContents();
- }
-
- static defineCustomElement(
- path, nameOrGenerator, maybeGenerator = undefined) {
- let generator = nameOrGenerator;
- let name = nameOrGenerator;
- if (typeof nameOrGenerator == 'function') {
- console.assert(maybeGenerator === undefined);
- name = path.substring(path.lastIndexOf('/') + 1, path.length);
- } else {
- console.assert(typeof nameOrGenerator == 'string');
- generator = maybeGenerator;
- }
- path = path + '-template.html';
- fetch(path)
- .then(stream => stream.text())
- .then(
- templateText =>
- customElements.define(name, generator(templateText)));
- }
-}
+import {DOM} from '../../js/web-api-helper.mjs';
const SVGNamespace = 'http://www.w3.org/2000/svg';
export class SVG {
@@ -259,45 +149,7 @@ export function $(id) {
return document.querySelector(id)
}
-export class V8CustomElement extends HTMLElement {
- _updateTimeoutId;
- _updateCallback = this.forceUpdate.bind(this);
-
- constructor(templateText) {
- super();
- const shadowRoot = this.attachShadow({mode: 'open'});
- shadowRoot.innerHTML = templateText;
- }
-
- $(id) {
- return this.shadowRoot.querySelector(id);
- }
-
- querySelectorAll(query) {
- return this.shadowRoot.querySelectorAll(query);
- }
-
- requestUpdate(useAnimation = false) {
- if (useAnimation) {
- window.cancelAnimationFrame(this._updateTimeoutId);
- this._updateTimeoutId =
- window.requestAnimationFrame(this._updateCallback);
- } else {
- // Use timeout tasks to asynchronously update the UI without blocking.
- clearTimeout(this._updateTimeoutId);
- const kDelayMs = 5;
- this._updateTimeoutId = setTimeout(this._updateCallback, kDelayMs);
- }
- }
-
- forceUpdate() {
- this._update();
- }
-
- _update() {
- throw Error('Subclass responsibility');
- }
-}
+import {V8CustomElement} from '../../js/web-api-helper.mjs'
export class CollapsableElement extends V8CustomElement {
constructor(templateText) {
@@ -319,7 +171,6 @@ export class CollapsableElement extends V8CustomElement {
this._closer.checked = true;
this._requestUpdateIfVisible();
}
- this.scrollIntoView();
}
show() {
@@ -327,7 +178,7 @@ export class CollapsableElement extends V8CustomElement {
this._closer.checked = false;
this._requestUpdateIfVisible();
}
- this.scrollIntoView();
+ this.scrollIntoView({behavior: 'smooth', block: 'center'});
}
requestUpdate(useAnimation = false) {
@@ -468,3 +319,4 @@ export function gradientStopsFromGroups(
}
export * from '../helper.mjs';
+export * from '../../js/web-api-helper.mjs'
diff --git a/deps/v8/tools/system-analyzer/view/log-file-reader.mjs b/deps/v8/tools/system-analyzer/view/log-file-reader.mjs
index 5edb90d2fc..8d65c030f1 100644
--- a/deps/v8/tools/system-analyzer/view/log-file-reader.mjs
+++ b/deps/v8/tools/system-analyzer/view/log-file-reader.mjs
@@ -1,110 +1,12 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {delay} from '../helper.mjs';
-import {DOM, V8CustomElement} from './helper.mjs';
-
-DOM.defineCustomElement('view/log-file-reader',
- (templateText) =>
- class LogFileReader extends V8CustomElement {
- constructor() {
- super(templateText);
- this.addEventListener('click', e => this.handleClick(e));
- this.addEventListener('dragover', e => this.handleDragOver(e));
- this.addEventListener('drop', e => this.handleChange(e));
- this.$('#file').addEventListener('change', e => this.handleChange(e));
- this.$('#fileReader')
- .addEventListener('keydown', e => this.handleKeyEvent(e));
- }
-
- set error(message) {
- this._updateLabel(message);
- this.root.className = 'fail';
- }
-
- _updateLabel(text) {
- this.$('#label').innerText = text;
- }
-
- handleKeyEvent(event) {
- if (event.key == 'Enter') this.handleClick(event);
- }
-
- handleClick(event) {
- this.$('#file').click();
- }
-
- handleChange(event) {
- // Used for drop and file change.
- event.preventDefault();
- this.dispatchEvent(
- new CustomEvent('fileuploadstart', {bubbles: true, composed: true}));
- const host = event.dataTransfer ? event.dataTransfer : event.target;
- this.readFile(host.files[0]);
- }
-
- handleDragOver(event) {
- event.preventDefault();
- }
-
- connectedCallback() {
- this.fileReader.focus();
- }
-
- get fileReader() {
- return this.$('#fileReader');
- }
-
- get root() {
- return this.$('#root');
- }
-
- readFile(file) {
- if (!file) {
- this.error = 'Failed to load file.';
- return;
- }
- this.fileReader.blur();
- this.root.className = 'loading';
- // Delay the loading a bit to allow for CSS animations to happen.
- window.requestAnimationFrame(() => this.asyncReadFile(file));
- }
-
- async asyncReadFile(file) {
- const decoder = globalThis.TextDecoderStream;
- if (decoder) {
- await this._streamFile(file, decoder);
- } else {
- await this._readFullFile(file);
- }
- this._updateLabel(`Finished loading '${file.name}'.`);
- this.dispatchEvent(
- new CustomEvent('fileuploadend', {bubbles: true, composed: true}));
- this.root.className = 'done';
- }
-
- async _readFullFile(file) {
- const text = await file.text();
- this._handleFileChunk(text)
- }
-
- async _streamFile(file, decoder) {
- const stream = file.stream().pipeThrough(new decoder());
- const reader = stream.getReader();
- let chunk, readerDone;
- do {
- const readResult = await reader.read();
- chunk = readResult.value;
- readerDone = readResult.done;
- if (chunk) this._handleFileChunk(chunk);
- } while (!readerDone);
- }
-
- _handleFileChunk(chunk) {
- this.dispatchEvent(new CustomEvent('fileuploadchunk', {
- bubbles: true,
- composed: true,
- detail: chunk,
- }));
- }
-});
+import {DOM, FileReader} from './helper.mjs';
+
+DOM.defineCustomElement(
+ '../js/log-file-reader',
+ (templateText) => class LogFileReader extends FileReader {
+ constructor() {
+ super(templateText);
+ }
+ });
diff --git a/deps/v8/tools/system-analyzer/view/property-link-table.mjs b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
index 17cecc58ed..2c81bc6536 100644
--- a/deps/v8/tools/system-analyzer/view/property-link-table.mjs
+++ b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
@@ -3,124 +3,135 @@
// found in the LICENSE file.
import {App} from '../index.mjs'
-import {FocusEvent} from './events.mjs';
+import {FocusEvent, SelectRelatedEvent} from './events.mjs';
import {DOM, ExpandableText, V8CustomElement} from './helper.mjs';
-DOM.defineCustomElement(
- 'view/property-link-table',
- template => class PropertyLinkTable extends V8CustomElement {
- _instance;
- _propertyDict;
- _instanceLinkButtons = false;
- _logEntryClickHandler = this._handleLogEntryClick.bind(this);
- _logEntryRelatedHandler = this._handleLogEntryRelated.bind(this);
- _arrayValueSelectHandler = this._handleArrayValueSelect.bind(this);
-
- constructor() {
- super(template);
- }
-
- set instanceLinkButtons(newValue) {
- this._instanceLinkButtons = newValue;
- }
-
- set propertyDict(propertyDict) {
- if (this._propertyDict === propertyDict) return;
- if (typeof propertyDict !== 'object') {
- throw new Error(
- `Invalid property dict, expected object: ${propertyDict}`);
- }
- this._propertyDict = propertyDict;
- this.requestUpdate();
- }
-
- _update() {
- this._fragment = new DocumentFragment();
- this._table = DOM.table('properties');
- for (let key in this._propertyDict) {
- const value = this._propertyDict[key];
- this._addKeyValue(key, value);
- }
- this._addFooter();
- this._fragment.appendChild(this._table);
-
- const newContent = DOM.div();
- newContent.appendChild(this._fragment);
- this.$('#content').replaceWith(newContent);
- newContent.id = 'content';
- this._fragment = undefined;
- }
-
- _addKeyValue(key, value) {
- if (key == 'title') {
- this._addTitle(value);
- return;
- }
- if (key == '__this__') {
- this._instance = value;
- return;
- }
- const row = this._table.insertRow();
- row.insertCell().innerText = key;
- const cell = row.insertCell();
- if (value == undefined) return;
- if (Array.isArray(value)) {
- cell.appendChild(this._addArrayValue(value));
- return;
- }
- if (App.isClickable(value)) {
- cell.className = 'clickable';
- cell.onclick = this._logEntryClickHandler;
- cell.data = value;
- }
- new ExpandableText(cell, value.toString());
- }
-
- _addArrayValue(array) {
- if (array.length == 0) {
- return DOM.text('empty');
- } else if (array.length > 200) {
- return DOM.text(`${array.length} items`);
- }
- const select = DOM.element('select');
- select.onchange = this._arrayValueSelectHandler;
- for (let value of array) {
- const option = DOM.element('option');
- option.innerText = value.toString();
- option.data = value;
- select.add(option);
- }
- return select;
- }
-
- _addTitle(value) {
- const title = DOM.element('h3');
- title.innerText = value;
- this._fragment.appendChild(title);
- }
-
- _addFooter() {
- if (this._instance === undefined) return;
- if (!this._instanceLinkButtons) return;
- const td = this._table.createTFoot().insertRow().insertCell();
- td.colSpan = 2;
- let showButton =
- td.appendChild(DOM.button('Show', this._logEntryClickHandler));
- showButton.data = this._instance;
- let showRelatedButton = td.appendChild(
- DOM.button('Show Related', this._logEntryRelatedClickHandler));
- showRelatedButton.data = this._instance;
- }
-
- _handleArrayValueSelect(event) {
- const logEntry = event.currentTarget.selectedOptions[0].data;
- this.dispatchEvent(new FocusEvent(logEntry));
- }
- _handleLogEntryClick(event) {
- this.dispatchEvent(new FocusEvent(event.currentTarget.data));
- }
-
- _handleLogEntryRelated(event) {
- this.dispatchEvent(new SelectRelatedEvent(event.currentTarget.data));
- }
- });
+DOM.defineCustomElement('view/property-link-table',
+ template =>
+ class PropertyLinkTable extends V8CustomElement {
+ _object;
+ _propertyDict;
+ _instanceLinkButtons = false;
+
+ _showHandler = this._handleShow.bind(this);
+ _showSourcePositionHandler = this._handleShowSourcePosition.bind(this);
+ _showRelatedHandler = this._handleShowRelated.bind(this);
+ _arrayValueSelectHandler = this._handleArrayValueSelect.bind(this);
+
+ constructor() {
+ super(template);
+ }
+
+ set instanceLinkButtons(newValue) {
+ this._instanceLinkButtons = newValue;
+ }
+
+ set propertyDict(propertyDict) {
+ if (this._propertyDict === propertyDict) return;
+ if (typeof propertyDict !== 'object') {
+ throw new Error(
+ `Invalid property dict, expected object: ${propertyDict}`);
+ }
+ this._propertyDict = propertyDict;
+ this.requestUpdate();
+ }
+
+ _update() {
+ this._fragment = new DocumentFragment();
+ this._table = DOM.table('properties');
+ for (let key in this._propertyDict) {
+ const value = this._propertyDict[key];
+ this._addKeyValue(key, value);
+ }
+ this._addFooter();
+ this._fragment.appendChild(this._table);
+
+ const newContent = DOM.div();
+ newContent.appendChild(this._fragment);
+ this.$('#content').replaceWith(newContent);
+ newContent.id = 'content';
+ this._fragment = undefined;
+ }
+
+ _addKeyValue(key, value) {
+ if (key == 'title') {
+ this._addTitle(value);
+ return;
+ }
+ if (key == '__this__') {
+ this._object = value;
+ return;
+ }
+ const row = this._table.insertRow();
+ row.insertCell().innerText = key;
+ const cell = row.insertCell();
+ if (value == undefined) return;
+ if (Array.isArray(value)) {
+ cell.appendChild(this._addArrayValue(value));
+ return;
+ }
+ if (App.isClickable(value)) {
+ cell.className = 'clickable';
+ cell.onclick = this._showHandler;
+ cell.data = value;
+ }
+ new ExpandableText(cell, value.toString());
+ }
+
+ _addArrayValue(array) {
+ if (array.length == 0) {
+ return DOM.text('empty');
+ } else if (array.length > 200) {
+ return DOM.text(`${array.length} items`);
+ }
+ const select = DOM.element('select');
+ select.onchange = this._arrayValueSelectHandler;
+ for (let value of array) {
+ const option = DOM.element('option');
+ option.innerText = value === undefined ? '' : value.toString();
+ option.data = value;
+ select.add(option);
+ }
+ return select;
+ }
+
+ _addTitle(value) {
+ const title = DOM.element('h3');
+ title.innerText = value;
+ this._fragment.appendChild(title);
+ }
+
+ _addFooter() {
+ if (this._object === undefined) return;
+ if (!this._instanceLinkButtons) return;
+ const td = this._table.createTFoot().insertRow().insertCell();
+ td.colSpan = 2;
+ let showButton = td.appendChild(DOM.button('Show', this._showHandler));
+ showButton.data = this._object;
+ if (this._object.sourcePosition) {
+ let showSourcePositionButton = td.appendChild(
+ DOM.button('Source Position', this._showSourcePositionHandler));
+ showSourcePositionButton.data = this._object;
+ }
+ let showRelatedButton =
+ td.appendChild(DOM.button('Show Related', this._showRelatedHandler));
+ showRelatedButton.data = this._object;
+ }
+
+ _handleArrayValueSelect(event) {
+ const logEntry = event.currentTarget.selectedOptions[0].data;
+ this.dispatchEvent(new FocusEvent(logEntry));
+ }
+
+ _handleShow(event) {
+ this.dispatchEvent(new FocusEvent(event.currentTarget.data));
+ }
+
+ _handleShowSourcePosition(event) {
+ this.dispatchEvent(new FocusEvent(event.currentTarget.data.sourcePosition));
+ }
+
+ _handleShowRelated(event) {
+ this.dispatchEvent(new SelectRelatedEvent(event.currentTarget.data));
+ }
+});
diff --git a/deps/v8/tools/system-analyzer/view/script-panel.mjs b/deps/v8/tools/system-analyzer/view/script-panel.mjs
index 75720534ca..f6b24733be 100644
--- a/deps/v8/tools/system-analyzer/view/script-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/script-panel.mjs
@@ -1,11 +1,11 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {defer, groupBy} from '../helper.mjs';
+import {arrayEquals, defer, groupBy} from '../helper.mjs';
import {App} from '../index.mjs'
-import {SelectRelatedEvent, ToolTipEvent} from './events.mjs';
-import {CollapsableElement, CSSColor, delay, DOM, formatBytes, gradientStopsFromGroups} from './helper.mjs';
+import {SelectionEvent, SelectRelatedEvent, ToolTipEvent} from './events.mjs';
+import {CollapsableElement, CSSColor, delay, DOM, formatBytes, gradientStopsFromGroups, LazyTable} from './helper.mjs';
// A source mapping proxy for source maps that don't have CORS headers.
// TODO(leszeks): Make this configurable.
@@ -19,6 +19,8 @@ DOM.defineCustomElement('view/script-panel',
_scripts = [];
_script;
+ showToolTipEntriesHandler = this.handleShowToolTipEntries.bind(this);
+
constructor() {
super(templateText);
this.scriptDropdown.addEventListener(
@@ -40,6 +42,8 @@ DOM.defineCustomElement('view/script-panel',
this._script = script;
script.ensureSourceMapCalculated(sourceMapFetchPrefix);
this._sourcePositionsToMarkNodesPromise = defer();
+ this._selectedSourcePositions =
+ this._selectedSourcePositions.filter(each => each.script === script);
this.requestUpdate();
}
@@ -48,10 +52,14 @@ DOM.defineCustomElement('view/script-panel',
}
set selectedSourcePositions(sourcePositions) {
- this._selectedSourcePositions = sourcePositions;
- // TODO: highlight multiple scripts
- this.script = sourcePositions[0]?.script;
- this._focusSelectedMarkers();
+ if (arrayEquals(this._selectedSourcePositions, sourcePositions)) {
+ this._focusSelectedMarkers(0);
+ } else {
+ this._selectedSourcePositions = sourcePositions;
+ // TODO: highlight multiple scripts
+ this.script = sourcePositions[0]?.script;
+ this._focusSelectedMarkers(100);
+ }
}
set scripts(scripts) {
@@ -106,8 +114,8 @@ DOM.defineCustomElement('view/script-panel',
this.script.replaceChild(scriptNode, oldScriptNode);
}
- async _focusSelectedMarkers() {
- await delay(100);
+ async _focusSelectedMarkers(delay_ms) {
+ if (delay_ms) await delay(delay_ms);
const sourcePositionsToMarkNodes =
await this._sourcePositionsToMarkNodesPromise;
// Remove all marked nodes.
@@ -127,7 +135,7 @@ DOM.defineCustomElement('view/script-panel',
if (!sourcePosition) return;
const markNode = sourcePositionsToMarkNodes.get(sourcePosition);
markNode.scrollIntoView(
- {behavior: 'auto', block: 'center', inline: 'center'});
+ {behavior: 'smooth', block: 'center', inline: 'center'});
}
_handleSelectScript(e) {
@@ -141,25 +149,23 @@ DOM.defineCustomElement('view/script-panel',
this.dispatchEvent(new SelectRelatedEvent(this._script));
}
+ setSelectedSourcePositionInternal(sourcePosition) {
+ this._selectedSourcePositions = [sourcePosition];
+ console.assert(sourcePosition.script === this._script);
+ }
+
handleSourcePositionClick(e) {
const sourcePosition = e.target.sourcePosition;
+ this.setSelectedSourcePositionInternal(sourcePosition);
this.dispatchEvent(new SelectRelatedEvent(sourcePosition));
}
handleSourcePositionMouseOver(e) {
const sourcePosition = e.target.sourcePosition;
const entries = sourcePosition.entries;
- let text = groupBy(entries, each => each.constructor, true)
- .map(group => {
- let text = `${group.key.name}: ${group.length}\n`
- text += groupBy(group.entries, each => each.type, true)
- .map(group => {
- return ` - ${group.key}: ${group.length}`;
- })
- .join('\n');
- return text;
- })
- .join('\n');
+ const toolTipContent = DOM.div();
+ toolTipContent.appendChild(
+ new ToolTipTableBuilder(this, entries).tableNode);
let sourceMapContent;
switch (this._script.sourceMapState) {
@@ -192,17 +198,50 @@ DOM.defineCustomElement('view/script-panel',
default:
break;
}
-
- const toolTipContent = DOM.div({
- children: [
- DOM.element('pre', {className: 'textContent', textContent: text}),
- sourceMapContent
- ]
- });
+ toolTipContent.appendChild(sourceMapContent);
this.dispatchEvent(new ToolTipEvent(toolTipContent, e.target));
}
+
+ handleShowToolTipEntries(event) {
+ let entries = event.currentTarget.data;
+ const sourcePosition = entries[0].sourcePosition;
+ // Add a source position entry so the current position stays focused.
+ this.setSelectedSourcePositionInternal(sourcePosition);
+ entries = entries.concat(this._selectedSourcePositions);
+ this.dispatchEvent(new SelectionEvent(entries));
+ }
});
+class ToolTipTableBuilder {
+ constructor(scriptPanel, entries) {
+ this._scriptPanel = scriptPanel;
+ this.tableNode = DOM.table();
+ const tr = DOM.tr();
+ tr.appendChild(DOM.td('Type'));
+ tr.appendChild(DOM.td('Subtype'));
+ tr.appendChild(DOM.td('Count'));
+ this.tableNode.appendChild(document.createElement('thead')).appendChild(tr);
+ groupBy(entries, each => each.constructor, true).forEach(group => {
+ this.addRow(group.key.name, 'all', entries, false)
+ groupBy(group.entries, each => each.type, true).forEach(group => {
+ this.addRow('', group.key, group.entries, false)
+ })
+ })
+ }
+
+ addRow(name, subtypeName, entries) {
+ const tr = DOM.tr();
+ tr.appendChild(DOM.td(name));
+ tr.appendChild(DOM.td(subtypeName));
+ tr.appendChild(DOM.td(entries.length));
+ const button =
+ DOM.button('Show', this._scriptPanel.showToolTipEntriesHandler);
+ button.data = entries;
+ tr.appendChild(DOM.td(button));
+ this.tableNode.appendChild(tr);
+ }
+}
+
class SourcePositionIterator {
_entries;
_index = 0;
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
index 1ef8347088..678817399d 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
@@ -27,7 +27,6 @@ export class TimelineTrackBase extends V8CustomElement {
super(templateText);
this._selectionHandler = new SelectionHandler(this);
this._legend = new Legend(this.$('#legendTable'));
- this._legend.onFilter = (type) => this._handleFilterTimeline();
this.timelineChunks = this.$('#timelineChunks');
this.timelineSamples = this.$('#timelineSamples');
@@ -37,14 +36,17 @@ export class TimelineTrackBase extends V8CustomElement {
this.timelineAnnotationsNode = this.$('#timelineAnnotations');
this.timelineMarkersNode = this.$('#timelineMarkers');
this._scalableContentNode = this.$('#scalableContent');
+ this.isLocked = false;
+ }
+ _initEventListeners() {
+ this._legend.onFilter = (type) => this._handleFilterTimeline();
this.timelineNode.addEventListener(
'scroll', e => this._handleTimelineScroll(e));
this.hitPanelNode.onclick = this._handleClick.bind(this);
this.hitPanelNode.ondblclick = this._handleDoubleClick.bind(this);
this.hitPanelNode.onmousemove = this._handleMouseMove.bind(this);
window.addEventListener('resize', () => this._resetCachedDimensions());
- this.isLocked = false;
}
static get observedAttributes() {
@@ -62,6 +64,8 @@ export class TimelineTrackBase extends V8CustomElement {
}
set data(timeline) {
+ console.assert(timeline);
+ if (!this._timeline) this._initEventListeners();
this._timeline = timeline;
this._legend.timeline = timeline;
this.$('.content').style.display = timeline.isEmpty() ? 'none' : 'relative';
@@ -136,6 +140,11 @@ export class TimelineTrackBase extends V8CustomElement {
}
get chunks() {
+ if (this._chunks?.length != this.nofChunks) {
+ this._chunks =
+ this._timeline.chunks(this.nofChunks, this._legend.filterPredicate);
+ console.assert(this._chunks.length == this._nofChunks);
+ }
return this._chunks;
}
@@ -209,19 +218,13 @@ export class TimelineTrackBase extends V8CustomElement {
_update() {
this._legend.update();
- this._drawContent();
- this._drawAnnotations(this.selectedEntry);
+ this._drawContent().then(() => this._drawAnnotations(this.selectedEntry));
this._resetCachedDimensions();
}
async _drawContent() {
- await delay(5);
if (this._timeline.isEmpty()) return;
- if (this.chunks?.length != this.nofChunks) {
- this._chunks =
- this._timeline.chunks(this.nofChunks, this._legend.filterPredicate);
- console.assert(this._chunks.length == this._nofChunks);
- }
+ await delay(5);
const chunks = this.chunks;
const max = chunks.max(each => each.size());
let buffer = '';
@@ -558,12 +561,13 @@ class Legend {
tbody.appendChild(this._addTypeRow(group));
missingTypes.delete(group.key);
});
- missingTypes.forEach(key => tbody.appendChild(this._row('', key, 0, '0%')));
+ missingTypes.forEach(
+ key => tbody.appendChild(this._addRow('', key, 0, '0%')));
if (this._timeline.selection) {
tbody.appendChild(
- this._row('', 'Selection', this.selection.length, '100%'));
+ this._addRow('', 'Selection', this.selection.length, '100%'));
}
- tbody.appendChild(this._row('', 'All', this._timeline.length, ''));
+ tbody.appendChild(this._addRow('', 'All', this._timeline.length, ''));
this._table.tBodies[0].replaceWith(tbody);
}
@@ -572,11 +576,10 @@ class Legend {
const example = this.selection.at(0);
if (!example || !('duration' in example)) return;
this._enableDuration = true;
- this._table.tHead.appendChild(DOM.td('Duration'));
- this._table.tHead.appendChild(DOM.td(''));
+ this._table.tHead.rows[0].appendChild(DOM.td('Duration'));
}
- _row(colorNode, type, count, countPercent, duration, durationPercent) {
+ _addRow(colorNode, type, count, countPercent, duration, durationPercent) {
const row = DOM.tr();
row.appendChild(DOM.td(colorNode));
const typeCell = row.appendChild(DOM.td(type));
@@ -608,7 +611,7 @@ class Legend {
}
let countPercent =
`${(group.length / this.selection.length * 100).toFixed(1)}%`;
- const row = this._row(
+ const row = this._addRow(
colorDiv, group.key, group.length, countPercent, duration, '');
row.className = 'clickable';
row.onclick = this._typeClickHandler;
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
index 502504beb4..0f376ea355 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
@@ -6,7 +6,6 @@ import {delay} from '../../helper.mjs';
import {TickLogEntry} from '../../log/tick.mjs';
import {Timeline} from '../../timeline.mjs';
import {DOM, SVG} from '../helper.mjs';
-
import {TimelineTrackStackedBase} from './timeline-track-stacked-base.mjs'
class Flame {
@@ -179,15 +178,15 @@ class Annotations {
if (end > rawFlames.length) end = rawFlames.length;
const logEntry = this._logEntry;
// Also compare against the function, if any.
- const func = logEntry.entry?.func;
+ const func = logEntry.entry?.func ?? -1;
for (let i = start; i < end; i++) {
const flame = rawFlames[i];
- if (!flame.entry) continue;
- if (flame.entry.logEntry !== logEntry &&
- (!func || flame.entry.func !== func)) {
- continue;
+ const flameLogEntry = flame.logEntry;
+ if (!flameLogEntry) continue;
+ if (flameLogEntry !== logEntry) {
+ if (flameLogEntry.entry?.func !== func) continue;
}
- this._buffer += this._track.drawFlame(flame, i, true);
+ this._buffer += this._track._drawItem(flame, i, true);
}
}
@@ -198,4 +197,4 @@ class Annotations {
this._node.appendChild(svg);
this._buffer = '';
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/tools/testrunner/PRESUBMIT.py b/deps/v8/tools/testrunner/PRESUBMIT.py
index 94ea38b56c..a01f55ee29 100644
--- a/deps/v8/tools/testrunner/PRESUBMIT.py
+++ b/deps/v8/tools/testrunner/PRESUBMIT.py
@@ -2,6 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# This line is 'magic' in that git-cl looks for it to decide whether to
+# use Python3 instead of Python2 when running the code in this file.
+USE_PYTHON3 = True
+
+
def _CommonChecks(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.GetUnitTestsRecursively(
input_api,
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index c674e8eb29..e9ff4e793b 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -192,7 +192,8 @@ class BuildConfig(object):
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
self.pointer_compression_shared_cage = build_config['v8_enable_pointer_compression_shared_cage']
- self.virtual_memory_cage = build_config['v8_enable_virtual_memory_cage']
+ self.shared_ro_heap = build_config['v8_enable_shared_ro_heap']
+ self.sandbox = build_config['v8_enable_sandbox']
self.third_party_heap = build_config['v8_enable_third_party_heap']
self.webassembly = build_config['v8_enable_webassembly']
self.dict_property_const_tracking = build_config['v8_dict_property_const_tracking']
@@ -237,8 +238,8 @@ class BuildConfig(object):
detected_options.append('pointer_compression')
if self.pointer_compression_shared_cage:
detected_options.append('pointer_compression_shared_cage')
- if self.virtual_memory_cage:
- detected_options.append('virtual_memory_cage')
+ if self.sandbox:
+ detected_options.append('sandbox')
if self.third_party_heap:
detected_options.append('third_party_heap')
if self.webassembly:
@@ -386,9 +387,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -671,7 +669,7 @@ class BaseTestRunner(object):
no_simd_hardware = any(
i in options.extra_flags for i in ['--noenable-sse3',
- '--no-enable-sse3'
+ '--no-enable-sse3',
'--noenable-ssse3',
'--no-enable-ssse3',
'--noenable-sse4-1',
@@ -697,10 +695,6 @@ class BaseTestRunner(object):
utils.GuessPowerProcessorVersion() < 9:
no_simd_hardware = True
- # riscv64 do not support Simd instructions
- if self.build_config.arch == 'riscv64':
- no_simd_hardware = True
-
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
@@ -739,7 +733,10 @@ class BaseTestRunner(object):
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
"pointer_compression_shared_cage": self.build_config.pointer_compression_shared_cage,
- "virtual_memory_cage": self.build_config.virtual_memory_cage,
+ "no_js_shared_memory": (not self.build_config.shared_ro_heap) or
+ (self.build_config.pointer_compression and
+ not self.build_config.pointer_compression_shared_cage),
+ "sandbox": self.build_config.sandbox,
"dict_property_const_tracking": self.build_config.dict_property_const_tracking,
}
@@ -837,9 +834,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index df603d79d2..6942d1b9a4 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -19,6 +19,7 @@ from ..local.android import (
from ..local import utils
from ..objects import output
+PYTHON3 = sys.version_info >= (3, 0)
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
@@ -114,11 +115,17 @@ class BaseCommand(object):
timer.cancel()
+ def convert(stream):
+ if PYTHON3:
+ return stream.decode('utf-8', 'replace')
+ else:
+ return stream.decode('utf-8', 'replace').encode('utf-8')
+
return output.Output(
process.returncode,
timeout_occured[0],
- stdout.decode('utf-8', 'replace').encode('utf-8'),
- stderr.decode('utf-8', 'replace').encode('utf-8'),
+ convert(stdout),
+ convert(stderr),
process.pid,
duration
)
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 1552ac7534..7b5eb4fd38 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -15,10 +15,9 @@ ALL_VARIANT_FLAGS = {
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
+ "concurrent_sparkplug": [["--concurrent-sparkplug", "--sparkplug"]],
"always_sparkplug": [[ "--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
- "no_concurrent_inlining": [["--no-concurrent-inlining",
- "--no-stress-concurrent-inlining"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
@@ -31,8 +30,7 @@ ALL_VARIANT_FLAGS = {
"stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions",
"--no-wasm-generic-wrapper"]],
"stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
- "stress_concurrent_inlining": [["--stress-concurrent-inlining",
- "--concurrent-inlining"]],
+ "stress_concurrent_inlining": [["--stress-concurrent-inlining"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
@@ -40,8 +38,6 @@ ALL_VARIANT_FLAGS = {
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
- "turboprop": [["--turboprop"]],
- "turboprop_as_toptier": [["--turboprop-as-toptier", "--turboprop"]],
"instruction_scheduling": [["--turbo-instruction-scheduling"]],
"stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]],
"wasm_write_protect_code": [["--wasm-write-protect-code-memory"]],
@@ -54,34 +50,44 @@ ALL_VARIANT_FLAGS = {
# variant. This depends on the flags specified in ALL_VARIANT_FLAGS and on the
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_VARIANT = {
- "jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types",
- "--validate-asm", "--sparkplug", "--always-sparkplug",
- "--regexp-tier-up", "--no-regexp-interpret-all"],
- "nooptimization": ["--always-opt"],
- "slow_path": ["--no-force-slow-path"],
- "stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "stress_concurrent_inlining": ["--single-threaded", "--predictable",
- "--turboprop", "--lazy-feedback-allocation",
- "--assert-types"],
- "turboprop": ["--stress_concurrent_inlining"],
- # The fast API tests initialize an embedder object that never needs to be
- # serialized to the snapshot, so we don't have a
- # SerializeInternalFieldsCallback for it, so they are incompatible with
- # stress_snapshot.
- "stress_snapshot": ["--expose-fast-api"],
- "stress": ["--always-opt", "--no-always-opt",
- "--max-inlined-bytecode-size=*",
- "--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
- "--liftoff-only", "--wasm-speculative-inlining"],
- "sparkplug": ["--jitless"],
- "always_sparkplug": ["--jitless"],
- "code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
- "--cache=none"],
- "experimental_regexp": ["--no-enable-experimental-regexp-engine"],
- # There is a negative implication: --perf-prof disables
- # --wasm-write-protect-code-memory.
- "wasm_write_protect_code": ["--perf-prof"],
- "assert_types": ["--concurrent-recompilation", "--concurrent-inlining", "--stress_concurrent_inlining", "--no-assert-types"],
+ "jitless": [
+ "--opt", "--always-opt", "--liftoff", "--track-field-types",
+ "--validate-asm", "--sparkplug", "--concurrent-sparkplug",
+ "--always-sparkplug", "--regexp-tier-up", "--no-regexp-interpret-all",
+ "--maglev"
+ ],
+ "nooptimization": ["--always-opt"],
+ "slow_path": ["--no-force-slow-path"],
+ "stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
+ "stress_concurrent_inlining": [
+ "--single-threaded", "--predictable", "--lazy-feedback-allocation",
+ "--assert-types", "--no-concurrent-recompilation"
+ ],
+ # The fast API tests initialize an embedder object that never needs to be
+ # serialized to the snapshot, so we don't have a
+ # SerializeInternalFieldsCallback for it, so they are incompatible with
+ # stress_snapshot.
+ "stress_snapshot": ["--expose-fast-api"],
+ "stress": [
+ "--always-opt", "--no-always-opt", "--max-inlined-bytecode-size=*",
+ "--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
+ "--liftoff-only", "--wasm-speculative-inlining",
+ "--wasm-dynamic-tiering"
+ ],
+ "sparkplug": ["--jitless"],
+ "concurrent_sparkplug": ["--jitless"],
+ "always_sparkplug": ["--jitless"],
+ "code_serializer": [
+ "--cache=after-execute", "--cache=full-code-cache", "--cache=none"
+ ],
+ "experimental_regexp": ["--no-enable-experimental-regexp-engine"],
+ # There is a negative implication: --perf-prof disables
+ # --wasm-write-protect-code-memory.
+ "wasm_write_protect_code": ["--perf-prof"],
+ "assert_types": [
+ "--concurrent-recompilation", "--stress_concurrent_inlining",
+ "--no-assert-types"
+ ],
}
# Flags that lead to a contradiction under certain build variables.
@@ -99,8 +105,6 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
"--stress-concurrent-allocation",
"--stress-concurrent-inlining"],
"dict_property_const_tracking": [
- "--concurrent-inlining",
- "--turboprop",
"--stress-concurrent-inlining"],
}
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 19fbdd6c11..82fb543055 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -75,11 +75,11 @@ except NameError:
def cmp(x, y): # Python 3
return (x > y) - (x < y)
-def read_file_utf8(file):
+def read_file(file):
try: # Python 3
- with open(file, encoding='utf-8') as f:
+ with open(file, encoding='ISO-8859-1') as f:
return f.read()
- except TypeError: # Python 2
+ except TypeError: # Python 2 ..
with open(file) as f:
return f.read()
@@ -414,7 +414,7 @@ class TestCase(object):
return self._get_source_path() is not None
def get_source(self):
- return read_file_utf8(self._get_source_path())
+ return read_file(self._get_source_path())
def _get_source_path(self):
return None
@@ -460,7 +460,7 @@ class D8TestCase(TestCase):
"""Returns for a given file a list of absolute paths of files needed by the
given file.
"""
- source = read_file_utf8(file)
+ source = read_file(file)
result = []
def add_path(path):
result.append(os.path.abspath(path.replace('/', os.path.sep)))
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 74a1d90159..b7ee301c5e 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -2,7 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import itertools
+try: # Python3
+ from itertools import zip_longest
+ PYTHON3 = True
+except ImportError: # Python2
+ from itertools import izip_longest as zip_longest
+ PYTHON3 = False
from ..testproc.base import (
DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
@@ -140,13 +145,15 @@ class ExpectedOutProc(OutProc):
def _is_failure_output(self, output):
if output.exit_code != 0:
- return True
+ return True
- with open(self._expected_filename, 'r') as f:
+ # TODO(https://crbug.com/1292013): Simplify after Python3 migration.
+ kwargs = {'encoding': 'utf-8'} if PYTHON3 else {}
+ with open(self._expected_filename, 'r', **kwargs) as f:
expected_lines = f.readlines()
for act_iterator in self._act_block_iterator(output):
- for expected, actual in itertools.izip_longest(
+ for expected, actual in zip_longest(
self._expected_iterator(expected_lines),
act_iterator,
fillvalue=''
diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py
index c301529eb7..39b8eadf64 100644
--- a/deps/v8/tools/testrunner/outproc/message.py
+++ b/deps/v8/tools/testrunner/outproc/message.py
@@ -2,10 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import itertools
import os
import re
+try: # Python3
+ from itertools import zip_longest
+except ImportError: # Python2
+ from itertools import izip_longest as zip_longest
+
from . import base
@@ -44,7 +48,7 @@ class OutProc(base.ExpectedOutProc):
env = {
'basename': os.path.basename(base_path),
}
- for (expected, actual) in itertools.izip_longest(
+ for (expected, actual) in zip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 08f17e7721..eed7527453 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -46,7 +46,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- 'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
+ 'extra': ['nooptimization', 'future', 'no_wasm_traps',
'instruction_scheduling', 'always_sparkplug'],
}
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index b5b87d8bfb..071a993954 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -11,44 +11,45 @@ from . import base
# Extra flags randomly added to all fuzz tests with numfuzz. List of tuples
# (probability, flag).
EXTRA_FLAGS = [
- (0.1, '--always-opt'),
- (0.1, '--assert-types'),
- (0.1, '--budget-for-feedback-vector-allocation=0'),
- (0.1, '--cache=code'),
- (0.1, '--force-slow-path'),
- (0.2, '--future'),
- (0.1, '--interrupt-budget=100'),
- (0.1, '--liftoff'),
- (0.2, '--no-analyze-environment-liveness'),
- # TODO(machenbach): Enable when it doesn't collide with crashing on missing
- # simd features.
- #(0.1, '--no-enable-sse3'),
- #(0.1, '--no-enable-ssse3'),
- #(0.1, '--no-enable-sse4_1'),
- (0.1, '--no-enable-sse4_2'),
- (0.1, '--no-enable-sahf'),
- (0.1, '--no-enable-avx'),
- (0.1, '--no-enable-fma3'),
- (0.1, '--no-enable-bmi1'),
- (0.1, '--no-enable-bmi2'),
- (0.1, '--no-enable-lzcnt'),
- (0.1, '--no-enable-popcnt'),
- (0.3, '--no-lazy-feedback-allocation'),
- (0.1, '--no-liftoff'),
- (0.1, '--no-opt'),
- (0.2, '--no-regexp-tier-up'),
- (0.1, '--no-wasm-tier-up'),
- (0.1, '--regexp-interpret-all'),
- (0.1, '--regexp-tier-up-ticks=10'),
- (0.1, '--regexp-tier-up-ticks=100'),
- (0.1, '--stress-background-compile'),
- (0.1, '--stress-concurrent-inlining'),
- (0.1, '--stress-flush-code'),
- (0.1, '--stress-lazy-source-positions'),
- (0.1, '--stress-wasm-code-gc'),
- (0.1, '--turbo-instruction-scheduling'),
- (0.1, '--turbo-stress-instruction-scheduling'),
- (0.1, '--turbo-force-mid-tier-regalloc'),
+ (0.1, '--always-opt'),
+ (0.1, '--assert-types'),
+ (0.1, '--interrupt-budget-for-feedback-allocation=0'),
+ (0.1, '--cache=code'),
+ (0.25, '--compact-maps'),
+ (0.1, '--force-slow-path'),
+ (0.2, '--future'),
+ (0.1, '--interrupt-budget=100'),
+ (0.1, '--liftoff'),
+ (0.2, '--no-analyze-environment-liveness'),
+ # TODO(machenbach): Enable when it doesn't collide with crashing on missing
+ # simd features.
+ #(0.1, '--no-enable-sse3'),
+ #(0.1, '--no-enable-ssse3'),
+ #(0.1, '--no-enable-sse4_1'),
+ (0.1, '--no-enable-sse4_2'),
+ (0.1, '--no-enable-sahf'),
+ (0.1, '--no-enable-avx'),
+ (0.1, '--no-enable-fma3'),
+ (0.1, '--no-enable-bmi1'),
+ (0.1, '--no-enable-bmi2'),
+ (0.1, '--no-enable-lzcnt'),
+ (0.1, '--no-enable-popcnt'),
+ (0.3, '--no-lazy-feedback-allocation'),
+ (0.1, '--no-liftoff'),
+ (0.1, '--no-opt'),
+ (0.2, '--no-regexp-tier-up'),
+ (0.1, '--no-wasm-tier-up'),
+ (0.1, '--regexp-interpret-all'),
+ (0.1, '--regexp-tier-up-ticks=10'),
+ (0.1, '--regexp-tier-up-ticks=100'),
+ (0.1, '--stress-background-compile'),
+ (0.1, '--stress-concurrent-inlining'),
+ (0.1, '--stress-flush-code'),
+ (0.1, '--stress-lazy-source-positions'),
+ (0.1, '--stress-wasm-code-gc'),
+ (0.1, '--turbo-instruction-scheduling'),
+ (0.1, '--turbo-stress-instruction-scheduling'),
+ (0.1, '--turbo-force-mid-tier-regalloc'),
]
def random_extra_flags(rng):
@@ -277,7 +278,8 @@ class InterruptBudgetFuzzer(Fuzzer):
# For most code paths, only one of the flags below has a meaning
# based on the flag above.
flag2 = '--interrupt-budget=%d' % rng.randint(0, 135168)
- flag3 = '--budget-for-feedback-vector-allocation=%d' % rng.randint(0, 940)
+ flag3 = '--interrupt-budget-for-feedback-allocation=%d' % rng.randint(
+ 0, 940)
yield [flag1, flag2, flag3]
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index ec97ab226f..c102cddec1 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test, is_flaky=False):
@@ -362,45 +361,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
deleted file mode 100644
index 97aaa2df5f..0000000000
--- a/deps/v8/tools/tick-processor.html
+++ /dev/null
@@ -1,157 +0,0 @@
-<!DOCTYPE html>
-<!-- Copyright 2012 the V8 project authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
-
-<html lang="en">
-<head>
- <meta charset="utf-8">
- <title>V8 Tick Processor</title>
-
- <style>
- body {
- font-family: Verdana, Arial, Helvetica, sans-serif;
- font-size: 10pt;
- }
- h4 {
- margin-bottom: 0px;
- }
- p {
- margin-top: 0px;
- }
- </style>
- <script type="module">
- import {
- TickProcessor, LinuxCppEntriesProvider, MacOSCppEntriesProvider,
- WindowsCppEntriesProvider
- } from "./tickprocessor.mjs";
-
-var v8log_content;
-globalThis.textout;
-
-globalThis.load_logfile = function(evt) {
- globalThis.textout.value = "";
- var f = evt.target.files[0];
- if (f) {
- var reader = new FileReader();
- reader.onload = function(event) {
- v8log_content = event.target.result;
- start_process();
- };
- reader.onerror = function(event) {
- console.error("File could not be read! Code " + event.target.error.code);
- };
- reader.readAsText(f);
- } else {
- alert("Failed to load file");
- }
-}
-
-function print(arg) {
- globalThis.textout.value+=arg+"\n";
-}
-
-function start_process() {
- let DEFAULTS = {
- logFileName: 'v8.log',
- platform: 'unix',
- stateFilter: null,
- callGraphSize: 5,
- ignoreUnknown: false,
- separateIc: true,
- targetRootFS: '',
- apkEmbeddedLibrary: '',
- nm: 'nm'
- };
-
- var entriesProviders = {
- 'unix': LinuxCppEntriesProvider,
- 'windows': WindowsCppEntriesProvider,
- 'mac': MacOSCppEntriesProvider
- };
-
- var tickProcessor = new TickProcessor(
- new (entriesProviders[DEFAULTS.platform])(
- DEFAULTS.nm, DEFAULTS.targetRootFS, DEFAULTS.apkEmbeddedLibrary),
- DEFAULTS.separateIc, DEFAULTS.callGraphSize,
- DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
-
- tickProcessor.processLogChunk(v8log_content);
- tickProcessor.printStatistics();
-}
-</script>
-<script>
-function Load() {
- document.getElementById('fileinput').addEventListener(
- 'change', globalThis.load_logfile, false);
- globalThis.textout = document.getElementById('textout');
-}
-</script>
-</head>
-<body onLoad="Load()">
-
-<h3 style="margin-top: 2px;">
- Chrome V8 profiling log processor
-</h3>
-<p>
-Process V8's profiling information log (sampling profiler tick information)
-in your browser. Particularly useful if you don't have the V8 shell (d8)
-at hand on your system. You still have to run Chrome with the appropriate
-<a href="https://code.google.com/p/v8/wiki/ProfilingChromiumWithV8">
- command line flags</a>
-to produce the profiling log.
-</p>
-<h4>Usage:</h4>
-<p>
-Click on the button and browse to the profiling log file (usually, v8.log).
-Process will start automatically and the output will be visible in the below
-text area.
-</p>
-<h4>Limitations and disclaimer:</h4>
-<p>
-This page offers a subset of the functionalities of the command-line tick
-processor utility in the V8 repository. In particular, this page cannot
-access the command-line utility that provides library symbol information,
-hence the [C++] section of the output stays empty. Also consider that this
-web-based tool is provided only for convenience and quick reference, you
-should refer to the
-<a href="https://code.google.com/p/v8/wiki/V8Profiler">
- command-line</a>
-version for full output.
-</p>
-<p>
-<input type="file" id="fileinput" />
-</p>
-<p>
-<textarea name="myTextArea" cols="120" rows="40" wrap="off" id="textout"
- readonly="yes"></textarea>
-</p>
-<p style="font-style:italic;">
-Copyright the V8 Authors - Last change to this page: 12/12/2012
-</p>
-
-
-</body>
-</html>
diff --git a/deps/v8/tools/tickprocessor.mjs b/deps/v8/tools/tickprocessor.mjs
index 1929c3069d..071bf35ca4 100644
--- a/deps/v8/tools/tickprocessor.mjs
+++ b/deps/v8/tools/tickprocessor.mjs
@@ -514,6 +514,7 @@ export class TickProcessor extends LogReader {
timedRange,
pairwiseTimedRange);
this.dispatchTable_ = {
+ __proto__: null,
'shared-library': {
parsers: [parseString, parseInt, parseInt, parseInt],
processor: this.processSharedLibrary
@@ -575,16 +576,16 @@ export class TickProcessor extends LogReader {
processor: this.advanceDistortion
},
// Ignored events.
- 'profiler': null,
- 'function-creation': null,
- 'function-move': null,
- 'function-delete': null,
- 'heap-sample-item': null,
- 'current-time': null, // Handled specially, not parsed.
+ 'profiler': undefined,
+ 'function-creation': undefined,
+ 'function-move': undefined,
+ 'function-delete': undefined,
+ 'heap-sample-item': undefined,
+ 'current-time': undefined, // Handled specially, not parsed.
// Obsolete row types.
- 'code-allocate': null,
- 'begin-code-region': null,
- 'end-code-region': null
+ 'code-allocate': undefined,
+ 'begin-code-region': undefined,
+ 'end-code-region': undefined
};
this.preprocessJson = preprocessJson;
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 638ca100fb..f46841875e 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -15,6 +15,14 @@ import sys
import re
from subprocess import Popen, PIPE
+PYTHON3 = sys.version_info >= (3, 0)
+
+def maybe_decode(arg, encoding="utf-8"):
+ return arg.decode(encoding) if PYTHON3 else arg
+
+def maybe_encode(arg, encoding="utf-8"):
+ return arg.encode(encoding) if PYTHON3 else arg
+
kPercentEscape = r'α'; # Unicode alpha
kDerefEscape = r'☆'; # Unicode star
kAddressofEscape = r'⌂'; # Unicode house
@@ -103,8 +111,8 @@ def process(filename, lint, should_format):
p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
else:
p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
- output, err = p.communicate(preprocess(content))
- output = postprocess(output)
+ output, err = p.communicate(maybe_encode(preprocess(content)))
+ output = postprocess(maybe_decode(output))
rc = p.returncode
if (rc != 0):
print("error code " + str(rc) + " running clang-format. Exiting...")
@@ -116,7 +124,7 @@ def process(filename, lint, should_format):
if should_format:
output_file = open(filename, 'wb')
- output_file.write(output);
+ output_file.write(maybe_encode(output))
output_file.close()
def print_usage():
diff --git a/deps/v8/tools/turbolizer/OWNERS b/deps/v8/tools/turbolizer/OWNERS
index b7694bd267..fc52961eff 100644
--- a/deps/v8/tools/turbolizer/OWNERS
+++ b/deps/v8/tools/turbolizer/OWNERS
@@ -1,2 +1 @@
danno@chromium.org
-sigurds@chromium.org
diff --git a/deps/v8/tools/unittests/compare_torque_output_test.py b/deps/v8/tools/unittests/compare_torque_output_test.py
index a6086d96c9..d5a5c4a125 100644
--- a/deps/v8/tools/unittests/compare_torque_output_test.py
+++ b/deps/v8/tools/unittests/compare_torque_output_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,6 +14,11 @@ TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
COMPARE_SCRIPT = os.path.join(TOOLS_DIR, 'compare_torque_output.py')
TEST_DATA = os.path.join(TOOLS_DIR, 'unittests', 'testdata', 'compare_torque')
+_PY3 = sys.version_info[0] == 3
+PYTHON_EXECUTABLE = "python%s" % sys.version_info[0]
+
+def maybe_bytes(value):
+ return value.decode("utf-8") if _PY3 else value
class PredictableTest(unittest.TestCase):
def setUp(self):
@@ -24,7 +29,7 @@ class PredictableTest(unittest.TestCase):
file1 = os.path.join(TEST_DATA, test_folder, 'f1')
file2 = os.path.join(TEST_DATA, test_folder, 'f2')
proc = subprocess.Popen([
- 'python', '-u',
+ PYTHON_EXECUTABLE, '-u',
COMPARE_SCRIPT, file1, file2, self.tmp_file
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = proc.communicate()
@@ -34,7 +39,7 @@ class PredictableTest(unittest.TestCase):
exitcode, output = self._compare_from('test1')
self.assertEqual(1, exitcode)
full_match = r'^Found.*-line 2\+line 2 with diff.*\+line 3\n\n$'
- self.assertRegexpMatches(output, re.compile(full_match, re.M | re.S))
+ self.assertRegexpMatches(maybe_bytes(output), re.compile(full_match, re.M | re.S))
def test_no_diff(self):
exitcode, output = self._compare_from('test2')
@@ -44,12 +49,12 @@ class PredictableTest(unittest.TestCase):
def test_right_only(self):
exitcode, output = self._compare_from('test3')
self.assertEqual(1, exitcode)
- self.assertRegexpMatches(output, r'Some files exist only in.*f2\nfile3')
+ self.assertRegexpMatches(maybe_bytes(output), r'Some files exist only in.*f2\nfile3')
def test_left_only(self):
exitcode, output = self._compare_from('test4')
self.assertEqual(1, exitcode)
- self.assertRegexpMatches(output, r'Some files exist only in.*f1\nfile4')
+ self.assertRegexpMatches(maybe_bytes(output), r'Some files exist only in.*f1\nfile4')
def tearDown(self):
os.unlink(self.tmp_file)
diff --git a/deps/v8/tools/unittests/predictable_wrapper_test.py b/deps/v8/tools/unittests/predictable_wrapper_test.py
index c085fb8879..2f806e7c11 100755
--- a/deps/v8/tools/unittests/predictable_wrapper_test.py
+++ b/deps/v8/tools/unittests/predictable_wrapper_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 28f71b2b33..6d8c5e2a13 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -28,6 +28,8 @@ TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')
+SORT_KEY = lambda x: x['graphs']
+
V8_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
@@ -196,8 +198,8 @@ class PerfTest(unittest.TestCase):
{'units': units,
'graphs': [suite, trace['name']],
'results': trace['results'],
- 'stddev': trace['stddev']} for trace in traces]),
- sorted(self._LoadResults(file_name)['traces']))
+ 'stddev': trace['stddev']} for trace in traces], key=SORT_KEY),
+ sorted(self._LoadResults(file_name)['traces'], key=SORT_KEY))
def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
self.assertListEqual([
@@ -368,7 +370,7 @@ class PerfTest(unittest.TestCase):
'graphs': ['test', 'DeltaBlue'],
'results': [200.0],
'stddev': ''},
- ]), sorted(self._LoadResults()['traces']))
+ ], key=SORT_KEY), sorted(self._LoadResults()['traces'], key=SORT_KEY))
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
@@ -605,7 +607,7 @@ class PerfTest(unittest.TestCase):
'results': [2.1, 2.1],
'stddev': '',
},
- ]), sorted(results['traces']))
+ ], key=SORT_KEY), sorted(results['traces'], key=SORT_KEY))
def testResultsProcessor(self):
results = self._RunPerf('d8_mocked2.py', 'test2.json')
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 89acacaaa3..762d3096ec 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -30,7 +30,11 @@ import sys
import tempfile
import unittest
-from cStringIO import StringIO
+# TODO(https://crbug.com/1292016): Remove after Python3 migration.
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
@@ -277,7 +281,9 @@ class SystemTest(unittest.TestCase):
# We need lexicographic sorting here to avoid non-deterministic behaviour
# The original sorting key is duration, but in our fake test we have
# non-deterministic durations before we reset them to 1
- json_output['slowest_tests'].sort(key= lambda x: str(x))
+ def sort_key(x):
+ return str(sorted(x.items()))
+ json_output['slowest_tests'].sort(key=sort_key)
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
@@ -351,7 +357,8 @@ class SystemTest(unittest.TestCase):
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False,
v8_enable_pointer_compression_shared_cage=False,
- v8_enable_virtual_memory_cage=False)
+ v8_enable_shared_ro_heap=False,
+ v8_enable_sandbox=False)
result = run_tests(
basedir,
'--progress=verbose',
diff --git a/deps/v8/tools/unittests/testdata/predictable_mocked.py b/deps/v8/tools/unittests/testdata/predictable_mocked.py
index b9e73f6454..cd1a54d54e 100644
--- a/deps/v8/tools/unittests/testdata/predictable_mocked.py
+++ b/deps/v8/tools/unittests/testdata/predictable_mocked.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index d5d0f9981d..a1cff61442 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -22,7 +22,8 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
- "v8_enable_virtual_memory_cage": false,
+ "v8_enable_sandbox": false,
+ "v8_enable_shared_ro_heap": true,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index 590af4d59a..049078cb70 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -22,7 +22,8 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": false,
"v8_enable_pointer_compression_shared_cage": false,
- "v8_enable_virtual_memory_cage": false,
+ "v8_enable_sandbox": false,
+ "v8_enable_shared_ro_heap": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
index d5d0f9981d..a1cff61442 100644
--- a/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
@@ -22,7 +22,8 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
- "v8_enable_virtual_memory_cage": false,
+ "v8_enable_sandbox": false,
+ "v8_enable_shared_ro_heap": true,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index f421279451..92b9ab51d8 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -41,20 +41,28 @@ except ImportError as e:
import json
+import multiprocessing
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
-import sys
import subprocess
-import multiprocessing
from subprocess import PIPE
+import sys
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
+PYTHON3 = sys.version_info >= (3, 0)
+
+def maybe_decode(arg, encoding="utf-8"):
+ return arg.decode(encoding) if PYTHON3 else arg
+
+def maybe_encode(arg, encoding="utf-8"):
+ return arg.encode(encoding) if PYTHON3 else arg
+
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# We now run our own header guard check in PRESUBMIT.py.
@@ -76,7 +84,7 @@ LINT_RULES = """
-whitespace/comments
""".split()
-LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
+LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--opt[^-].*\n")
@@ -92,16 +100,20 @@ def CppLintWorker(command):
out_lines = ""
error_count = -1
while True:
- out_line = process.stderr.readline()
+ out_line = maybe_decode(process.stderr.readline())
if out_line == '' and process.poll() != None:
if error_count == -1:
print("Failed to process %s" % command.pop())
return 1
break
- m = LINT_OUTPUT_PATTERN.match(out_line)
- if m:
- out_lines += out_line
+ if out_line.strip() == 'Total errors found: 0':
+ out_lines += "Done processing %s\n" % command.pop()
error_count += 1
+ else:
+ m = LINT_OUTPUT_PATTERN.match(out_line)
+ if m:
+ out_lines += out_line
+ error_count += 1
sys.stdout.write(out_lines)
return error_count
except KeyboardInterrupt:
@@ -118,7 +130,7 @@ def TorqueLintWorker(command):
out_lines = ""
error_count = 0
while True:
- out_line = process.stderr.readline()
+ out_line = maybe_decode(process.stderr.readline())
if out_line == '' and process.poll() != None:
break
out_lines += out_line
@@ -148,7 +160,7 @@ def JSLintWorker(command):
sys.stdout.write("error code " + str(rc) + " running clang-format.\n")
return rc
- if output != contents:
+ if maybe_decode(output) != contents:
return 1
return 0
@@ -206,7 +218,7 @@ class FileContentsCache(object):
for file in files:
try:
handle = open(file, "r")
- file_sum = md5er(handle.read()).digest()
+ file_sum = md5er(maybe_encode(handle.read())).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
@@ -441,7 +453,7 @@ class JSLintProcessor(CacheableSourceFileProcessor):
return name.endswith('.js') or name.endswith('.mjs')
def GetPathsToSearch(self):
- return ['tools/system-analyzer']
+ return ['tools/system-analyzer', 'tools/heap-layout', 'tools/js']
def GetProcessorWorker(self):
return JSLintWorker
@@ -490,7 +502,7 @@ class SourceProcessor(SourceFileProcessor):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
- for file in output.stdout.read().split():
+ for file in maybe_decode(output.stdout.read()).split():
for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
@@ -623,8 +635,8 @@ class SourceProcessor(SourceFileProcessor):
violations = 0
for file in files:
try:
- handle = open(file)
- contents = handle.read()
+ handle = open(file, "rb")
+ contents = maybe_decode(handle.read(), "ISO-8859-1")
if len(contents) > 0 and not self.ProcessContents(file, contents):
success = False
violations += 1
@@ -733,8 +745,9 @@ def CheckDeps(workspace):
def PyTests(workspace):
result = True
for script in [
- join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
+ join(workspace, 'tools', 'clusterfuzz', 'foozzie', 'v8_foozzie_test.py'),
join(workspace, 'tools', 'release', 'test_scripts.py'),
+ join(workspace, 'tools', 'unittests', 'predictable_wrapper_test.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 306eeb7aa2..64b9ac20a8 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -6,6 +6,8 @@
# be modified manually.
# List of known V8 instance types.
+# yapf: disable
+
INSTANCE_TYPES = {
0: "INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
@@ -55,497 +57,510 @@ INSTANCE_TYPES = {
151: "BREAK_POINT_INFO_TYPE",
152: "CACHED_TEMPLATE_OBJECT_TYPE",
153: "CALL_HANDLER_INFO_TYPE",
- 154: "CLASS_POSITIONS_TYPE",
- 155: "DEBUG_INFO_TYPE",
- 156: "ENUM_CACHE_TYPE",
- 157: "FEEDBACK_CELL_TYPE",
- 158: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 159: "INTERCEPTOR_INFO_TYPE",
- 160: "INTERPRETER_DATA_TYPE",
- 161: "MODULE_REQUEST_TYPE",
- 162: "PROMISE_CAPABILITY_TYPE",
- 163: "PROMISE_REACTION_TYPE",
- 164: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 165: "PROTOTYPE_INFO_TYPE",
- 166: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
- 167: "SCRIPT_TYPE",
- 168: "SCRIPT_OR_MODULE_TYPE",
- 169: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 170: "STACK_FRAME_INFO_TYPE",
- 171: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 172: "TUPLE2_TYPE",
- 173: "WASM_CONTINUATION_OBJECT_TYPE",
- 174: "WASM_EXCEPTION_TAG_TYPE",
- 175: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 176: "FIXED_ARRAY_TYPE",
- 177: "HASH_TABLE_TYPE",
- 178: "EPHEMERON_HASH_TABLE_TYPE",
- 179: "GLOBAL_DICTIONARY_TYPE",
- 180: "NAME_DICTIONARY_TYPE",
- 181: "NUMBER_DICTIONARY_TYPE",
- 182: "ORDERED_HASH_MAP_TYPE",
- 183: "ORDERED_HASH_SET_TYPE",
- 184: "ORDERED_NAME_DICTIONARY_TYPE",
- 185: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 186: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 187: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 188: "SCRIPT_CONTEXT_TABLE_TYPE",
- 189: "BYTE_ARRAY_TYPE",
- 190: "BYTECODE_ARRAY_TYPE",
- 191: "FIXED_DOUBLE_ARRAY_TYPE",
- 192: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 193: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 194: "AWAIT_CONTEXT_TYPE",
- 195: "BLOCK_CONTEXT_TYPE",
- 196: "CATCH_CONTEXT_TYPE",
- 197: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 198: "EVAL_CONTEXT_TYPE",
- 199: "FUNCTION_CONTEXT_TYPE",
- 200: "MODULE_CONTEXT_TYPE",
- 201: "NATIVE_CONTEXT_TYPE",
- 202: "SCRIPT_CONTEXT_TYPE",
- 203: "WITH_CONTEXT_TYPE",
+ 154: "CALL_SITE_INFO_TYPE",
+ 155: "CLASS_POSITIONS_TYPE",
+ 156: "DEBUG_INFO_TYPE",
+ 157: "ENUM_CACHE_TYPE",
+ 158: "ERROR_STACK_DATA_TYPE",
+ 159: "FEEDBACK_CELL_TYPE",
+ 160: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 161: "INTERCEPTOR_INFO_TYPE",
+ 162: "INTERPRETER_DATA_TYPE",
+ 163: "MODULE_REQUEST_TYPE",
+ 164: "PROMISE_CAPABILITY_TYPE",
+ 165: "PROMISE_REACTION_TYPE",
+ 166: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 167: "PROTOTYPE_INFO_TYPE",
+ 168: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 169: "SCRIPT_TYPE",
+ 170: "SCRIPT_OR_MODULE_TYPE",
+ 171: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 172: "STACK_FRAME_INFO_TYPE",
+ 173: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 174: "TUPLE2_TYPE",
+ 175: "WASM_CONTINUATION_OBJECT_TYPE",
+ 176: "WASM_EXCEPTION_TAG_TYPE",
+ 177: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 178: "FIXED_ARRAY_TYPE",
+ 179: "HASH_TABLE_TYPE",
+ 180: "EPHEMERON_HASH_TABLE_TYPE",
+ 181: "GLOBAL_DICTIONARY_TYPE",
+ 182: "NAME_DICTIONARY_TYPE",
+ 183: "NAME_TO_INDEX_HASH_TABLE_TYPE",
+ 184: "NUMBER_DICTIONARY_TYPE",
+ 185: "ORDERED_HASH_MAP_TYPE",
+ 186: "ORDERED_HASH_SET_TYPE",
+ 187: "ORDERED_NAME_DICTIONARY_TYPE",
+ 188: "REGISTERED_SYMBOL_TABLE_TYPE",
+ 189: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 190: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 191: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 192: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 193: "BYTE_ARRAY_TYPE",
+ 194: "BYTECODE_ARRAY_TYPE",
+ 195: "FIXED_DOUBLE_ARRAY_TYPE",
+ 196: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 197: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 198: "TURBOFAN_BITSET_TYPE_TYPE",
+ 199: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
+ 200: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
+ 201: "TURBOFAN_RANGE_TYPE_TYPE",
+ 202: "TURBOFAN_UNION_TYPE_TYPE",
+ 203: "CELL_TYPE",
204: "FOREIGN_TYPE",
205: "WASM_INTERNAL_FUNCTION_TYPE",
206: "WASM_TYPE_INFO_TYPE",
- 207: "TURBOFAN_BITSET_TYPE_TYPE",
- 208: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
- 209: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
- 210: "TURBOFAN_RANGE_TYPE_TYPE",
- 211: "TURBOFAN_UNION_TYPE_TYPE",
- 212: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 213: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
- 214: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 215: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
- 216: "WASM_FUNCTION_DATA_TYPE",
- 217: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 218: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 219: "WASM_JS_FUNCTION_DATA_TYPE",
- 220: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 221: "EXPORTED_SUB_CLASS_TYPE",
- 222: "EXPORTED_SUB_CLASS2_TYPE",
- 223: "SMALL_ORDERED_HASH_MAP_TYPE",
- 224: "SMALL_ORDERED_HASH_SET_TYPE",
- 225: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 226: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
- 227: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
- 228: "DESCRIPTOR_ARRAY_TYPE",
- 229: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 230: "SOURCE_TEXT_MODULE_TYPE",
- 231: "SYNTHETIC_MODULE_TYPE",
- 232: "WEAK_FIXED_ARRAY_TYPE",
- 233: "TRANSITION_ARRAY_TYPE",
- 234: "CELL_TYPE",
- 235: "CODE_TYPE",
- 236: "CODE_DATA_CONTAINER_TYPE",
- 237: "COVERAGE_INFO_TYPE",
- 238: "EMBEDDER_DATA_ARRAY_TYPE",
- 239: "FEEDBACK_METADATA_TYPE",
- 240: "FEEDBACK_VECTOR_TYPE",
- 241: "FILLER_TYPE",
- 242: "FREE_SPACE_TYPE",
- 243: "INTERNAL_CLASS_TYPE",
- 244: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 245: "MAP_TYPE",
- 246: "MEGA_DOM_HANDLER_TYPE",
- 247: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 248: "PREPARSE_DATA_TYPE",
- 249: "PROPERTY_ARRAY_TYPE",
- 250: "PROPERTY_CELL_TYPE",
- 251: "SCOPE_INFO_TYPE",
- 252: "SHARED_FUNCTION_INFO_TYPE",
- 253: "SMI_BOX_TYPE",
- 254: "SMI_PAIR_TYPE",
- 255: "SORT_STATE_TYPE",
- 256: "SWISS_NAME_DICTIONARY_TYPE",
- 257: "WASM_API_FUNCTION_REF_TYPE",
- 258: "WEAK_ARRAY_LIST_TYPE",
- 259: "WEAK_CELL_TYPE",
- 260: "WASM_ARRAY_TYPE",
- 261: "WASM_STRUCT_TYPE",
- 262: "JS_PROXY_TYPE",
+ 207: "AWAIT_CONTEXT_TYPE",
+ 208: "BLOCK_CONTEXT_TYPE",
+ 209: "CATCH_CONTEXT_TYPE",
+ 210: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 211: "EVAL_CONTEXT_TYPE",
+ 212: "FUNCTION_CONTEXT_TYPE",
+ 213: "MODULE_CONTEXT_TYPE",
+ 214: "NATIVE_CONTEXT_TYPE",
+ 215: "SCRIPT_CONTEXT_TYPE",
+ 216: "WITH_CONTEXT_TYPE",
+ 217: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 218: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
+ 219: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 220: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
+ 221: "WASM_FUNCTION_DATA_TYPE",
+ 222: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 223: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 224: "WASM_JS_FUNCTION_DATA_TYPE",
+ 225: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 226: "EXPORTED_SUB_CLASS_TYPE",
+ 227: "EXPORTED_SUB_CLASS2_TYPE",
+ 228: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 229: "SMALL_ORDERED_HASH_SET_TYPE",
+ 230: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 231: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
+ 232: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
+ 233: "DESCRIPTOR_ARRAY_TYPE",
+ 234: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 235: "SOURCE_TEXT_MODULE_TYPE",
+ 236: "SYNTHETIC_MODULE_TYPE",
+ 237: "WEAK_FIXED_ARRAY_TYPE",
+ 238: "TRANSITION_ARRAY_TYPE",
+ 239: "CODE_TYPE",
+ 240: "CODE_DATA_CONTAINER_TYPE",
+ 241: "COVERAGE_INFO_TYPE",
+ 242: "EMBEDDER_DATA_ARRAY_TYPE",
+ 243: "FEEDBACK_METADATA_TYPE",
+ 244: "FEEDBACK_VECTOR_TYPE",
+ 245: "FILLER_TYPE",
+ 246: "FREE_SPACE_TYPE",
+ 247: "INTERNAL_CLASS_TYPE",
+ 248: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 249: "MAP_TYPE",
+ 250: "MEGA_DOM_HANDLER_TYPE",
+ 251: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 252: "PREPARSE_DATA_TYPE",
+ 253: "PROPERTY_ARRAY_TYPE",
+ 254: "PROPERTY_CELL_TYPE",
+ 255: "SCOPE_INFO_TYPE",
+ 256: "SHARED_FUNCTION_INFO_TYPE",
+ 257: "SMI_BOX_TYPE",
+ 258: "SMI_PAIR_TYPE",
+ 259: "SORT_STATE_TYPE",
+ 260: "SWISS_NAME_DICTIONARY_TYPE",
+ 261: "WASM_API_FUNCTION_REF_TYPE",
+ 262: "WASM_ON_FULFILLED_DATA_TYPE",
+ 263: "WEAK_ARRAY_LIST_TYPE",
+ 264: "WEAK_CELL_TYPE",
+ 265: "WASM_ARRAY_TYPE",
+ 266: "WASM_STRUCT_TYPE",
+ 267: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 263: "JS_GLOBAL_OBJECT_TYPE",
- 264: "JS_GLOBAL_PROXY_TYPE",
- 265: "JS_MODULE_NAMESPACE_TYPE",
+ 268: "JS_GLOBAL_OBJECT_TYPE",
+ 269: "JS_GLOBAL_PROXY_TYPE",
+ 270: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
2058: "JS_LAST_DUMMY_API_OBJECT_TYPE",
- 2059: "JS_BOUND_FUNCTION_TYPE",
- 2060: "JS_FUNCTION_TYPE",
- 2061: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2062: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2063: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2064: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2065: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2066: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2067: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2068: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2069: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2070: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2071: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2072: "JS_ARRAY_CONSTRUCTOR_TYPE",
- 2073: "JS_PROMISE_CONSTRUCTOR_TYPE",
- 2074: "JS_REG_EXP_CONSTRUCTOR_TYPE",
- 2075: "JS_CLASS_CONSTRUCTOR_TYPE",
- 2076: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
- 2077: "JS_ITERATOR_PROTOTYPE_TYPE",
- 2078: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
- 2079: "JS_OBJECT_PROTOTYPE_TYPE",
- 2080: "JS_PROMISE_PROTOTYPE_TYPE",
- 2081: "JS_REG_EXP_PROTOTYPE_TYPE",
- 2082: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
- 2083: "JS_SET_PROTOTYPE_TYPE",
- 2084: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
- 2085: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
- 2086: "JS_MAP_KEY_ITERATOR_TYPE",
- 2087: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 2088: "JS_MAP_VALUE_ITERATOR_TYPE",
- 2089: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 2090: "JS_SET_VALUE_ITERATOR_TYPE",
- 2091: "JS_GENERATOR_OBJECT_TYPE",
- 2092: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 2093: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 2094: "JS_DATA_VIEW_TYPE",
- 2095: "JS_TYPED_ARRAY_TYPE",
- 2096: "JS_MAP_TYPE",
- 2097: "JS_SET_TYPE",
- 2098: "JS_WEAK_MAP_TYPE",
- 2099: "JS_WEAK_SET_TYPE",
- 2100: "JS_ARGUMENTS_OBJECT_TYPE",
- 2101: "JS_ARRAY_TYPE",
- 2102: "JS_ARRAY_BUFFER_TYPE",
- 2103: "JS_ARRAY_ITERATOR_TYPE",
- 2104: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 2105: "JS_COLLATOR_TYPE",
- 2106: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 2107: "JS_DATE_TYPE",
- 2108: "JS_DATE_TIME_FORMAT_TYPE",
- 2109: "JS_DISPLAY_NAMES_TYPE",
- 2110: "JS_ERROR_TYPE",
- 2111: "JS_FINALIZATION_REGISTRY_TYPE",
- 2112: "JS_LIST_FORMAT_TYPE",
- 2113: "JS_LOCALE_TYPE",
- 2114: "JS_MESSAGE_OBJECT_TYPE",
- 2115: "JS_NUMBER_FORMAT_TYPE",
- 2116: "JS_PLURAL_RULES_TYPE",
- 2117: "JS_PROMISE_TYPE",
- 2118: "JS_REG_EXP_TYPE",
- 2119: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 2120: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 2121: "JS_SEGMENT_ITERATOR_TYPE",
- 2122: "JS_SEGMENTER_TYPE",
- 2123: "JS_SEGMENTS_TYPE",
- 2124: "JS_STRING_ITERATOR_TYPE",
- 2125: "JS_TEMPORAL_CALENDAR_TYPE",
- 2126: "JS_TEMPORAL_DURATION_TYPE",
- 2127: "JS_TEMPORAL_INSTANT_TYPE",
- 2128: "JS_TEMPORAL_PLAIN_DATE_TYPE",
- 2129: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
- 2130: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
- 2131: "JS_TEMPORAL_PLAIN_TIME_TYPE",
- 2132: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
- 2133: "JS_TEMPORAL_TIME_ZONE_TYPE",
- 2134: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
- 2135: "JS_V8_BREAK_ITERATOR_TYPE",
- 2136: "JS_WEAK_REF_TYPE",
- 2137: "WASM_GLOBAL_OBJECT_TYPE",
- 2138: "WASM_INSTANCE_OBJECT_TYPE",
- 2139: "WASM_MEMORY_OBJECT_TYPE",
- 2140: "WASM_MODULE_OBJECT_TYPE",
- 2141: "WASM_SUSPENDER_OBJECT_TYPE",
- 2142: "WASM_TABLE_OBJECT_TYPE",
- 2143: "WASM_TAG_OBJECT_TYPE",
- 2144: "WASM_VALUE_OBJECT_TYPE",
+ 2059: "JS_DATA_VIEW_TYPE",
+ 2060: "JS_TYPED_ARRAY_TYPE",
+ 2061: "JS_ARRAY_BUFFER_TYPE",
+ 2062: "JS_PROMISE_TYPE",
+ 2063: "JS_BOUND_FUNCTION_TYPE",
+ 2064: "JS_WRAPPED_FUNCTION_TYPE",
+ 2065: "JS_FUNCTION_TYPE",
+ 2066: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2067: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2068: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2069: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2070: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2071: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2072: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2073: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2074: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2075: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2076: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2077: "JS_ARRAY_CONSTRUCTOR_TYPE",
+ 2078: "JS_PROMISE_CONSTRUCTOR_TYPE",
+ 2079: "JS_REG_EXP_CONSTRUCTOR_TYPE",
+ 2080: "JS_CLASS_CONSTRUCTOR_TYPE",
+ 2081: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
+ 2082: "JS_ITERATOR_PROTOTYPE_TYPE",
+ 2083: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
+ 2084: "JS_OBJECT_PROTOTYPE_TYPE",
+ 2085: "JS_PROMISE_PROTOTYPE_TYPE",
+ 2086: "JS_REG_EXP_PROTOTYPE_TYPE",
+ 2087: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
+ 2088: "JS_SET_PROTOTYPE_TYPE",
+ 2089: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
+ 2090: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
+ 2091: "JS_MAP_KEY_ITERATOR_TYPE",
+ 2092: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 2093: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 2094: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 2095: "JS_SET_VALUE_ITERATOR_TYPE",
+ 2096: "JS_GENERATOR_OBJECT_TYPE",
+ 2097: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 2098: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 2099: "JS_MAP_TYPE",
+ 2100: "JS_SET_TYPE",
+ 2101: "JS_WEAK_MAP_TYPE",
+ 2102: "JS_WEAK_SET_TYPE",
+ 2103: "JS_ARGUMENTS_OBJECT_TYPE",
+ 2104: "JS_ARRAY_TYPE",
+ 2105: "JS_ARRAY_ITERATOR_TYPE",
+ 2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 2107: "JS_COLLATOR_TYPE",
+ 2108: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 2109: "JS_DATE_TYPE",
+ 2110: "JS_DATE_TIME_FORMAT_TYPE",
+ 2111: "JS_DISPLAY_NAMES_TYPE",
+ 2112: "JS_ERROR_TYPE",
+ 2113: "JS_EXTERNAL_OBJECT_TYPE",
+ 2114: "JS_FINALIZATION_REGISTRY_TYPE",
+ 2115: "JS_LIST_FORMAT_TYPE",
+ 2116: "JS_LOCALE_TYPE",
+ 2117: "JS_MESSAGE_OBJECT_TYPE",
+ 2118: "JS_NUMBER_FORMAT_TYPE",
+ 2119: "JS_PLURAL_RULES_TYPE",
+ 2120: "JS_REG_EXP_TYPE",
+ 2121: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 2122: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 2123: "JS_SEGMENT_ITERATOR_TYPE",
+ 2124: "JS_SEGMENTER_TYPE",
+ 2125: "JS_SEGMENTS_TYPE",
+ 2126: "JS_SHADOW_REALM_TYPE",
+ 2127: "JS_SHARED_STRUCT_TYPE",
+ 2128: "JS_STRING_ITERATOR_TYPE",
+ 2129: "JS_TEMPORAL_CALENDAR_TYPE",
+ 2130: "JS_TEMPORAL_DURATION_TYPE",
+ 2131: "JS_TEMPORAL_INSTANT_TYPE",
+ 2132: "JS_TEMPORAL_PLAIN_DATE_TYPE",
+ 2133: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
+ 2134: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
+ 2135: "JS_TEMPORAL_PLAIN_TIME_TYPE",
+ 2136: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
+ 2137: "JS_TEMPORAL_TIME_ZONE_TYPE",
+ 2138: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
+ 2139: "JS_V8_BREAK_ITERATOR_TYPE",
+ 2140: "JS_WEAK_REF_TYPE",
+ 2141: "WASM_GLOBAL_OBJECT_TYPE",
+ 2142: "WASM_INSTANCE_OBJECT_TYPE",
+ 2143: "WASM_MEMORY_OBJECT_TYPE",
+ 2144: "WASM_MODULE_OBJECT_TYPE",
+ 2145: "WASM_SUSPENDER_OBJECT_TYPE",
+ 2146: "WASM_TABLE_OBJECT_TYPE",
+ 2147: "WASM_TAG_OBJECT_TYPE",
+ 2148: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02119): (245, "MetaMap"),
- ("read_only_space", 0x02141): (131, "NullMap"),
- ("read_only_space", 0x02169): (229, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x02191): (258, "WeakArrayListMap"),
- ("read_only_space", 0x021d5): (156, "EnumCacheMap"),
- ("read_only_space", 0x02209): (176, "FixedArrayMap"),
- ("read_only_space", 0x02255): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x022a1): (242, "FreeSpaceMap"),
- ("read_only_space", 0x022c9): (241, "OnePointerFillerMap"),
- ("read_only_space", 0x022f1): (241, "TwoPointerFillerMap"),
- ("read_only_space", 0x02319): (131, "UninitializedMap"),
- ("read_only_space", 0x02391): (131, "UndefinedMap"),
- ("read_only_space", 0x023d5): (130, "HeapNumberMap"),
- ("read_only_space", 0x02409): (131, "TheHoleMap"),
- ("read_only_space", 0x02469): (131, "BooleanMap"),
- ("read_only_space", 0x0250d): (189, "ByteArrayMap"),
- ("read_only_space", 0x02535): (176, "FixedCOWArrayMap"),
- ("read_only_space", 0x0255d): (177, "HashTableMap"),
- ("read_only_space", 0x02585): (128, "SymbolMap"),
- ("read_only_space", 0x025ad): (40, "OneByteStringMap"),
- ("read_only_space", 0x025d5): (251, "ScopeInfoMap"),
- ("read_only_space", 0x025fd): (252, "SharedFunctionInfoMap"),
- ("read_only_space", 0x02625): (235, "CodeMap"),
- ("read_only_space", 0x0264d): (234, "CellMap"),
- ("read_only_space", 0x02675): (250, "GlobalPropertyCellMap"),
- ("read_only_space", 0x0269d): (204, "ForeignMap"),
- ("read_only_space", 0x026c5): (233, "TransitionArrayMap"),
- ("read_only_space", 0x026ed): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x02715): (240, "FeedbackVectorMap"),
- ("read_only_space", 0x0274d): (131, "ArgumentsMarkerMap"),
- ("read_only_space", 0x027ad): (131, "ExceptionMap"),
- ("read_only_space", 0x02809): (131, "TerminationExceptionMap"),
- ("read_only_space", 0x02871): (131, "OptimizedOutMap"),
- ("read_only_space", 0x028d1): (131, "StaleRegisterMap"),
- ("read_only_space", 0x02931): (188, "ScriptContextTableMap"),
- ("read_only_space", 0x02959): (186, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x02981): (239, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a9): (176, "ArrayListMap"),
- ("read_only_space", 0x029d1): (129, "BigIntMap"),
- ("read_only_space", 0x029f9): (187, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a21): (190, "BytecodeArrayMap"),
- ("read_only_space", 0x02a49): (236, "CodeDataContainerMap"),
- ("read_only_space", 0x02a71): (237, "CoverageInfoMap"),
- ("read_only_space", 0x02a99): (191, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02ac1): (179, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae9): (157, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b11): (246, "MegaDomHandlerMap"),
- ("read_only_space", 0x02b39): (176, "ModuleInfoMap"),
- ("read_only_space", 0x02b61): (180, "NameDictionaryMap"),
- ("read_only_space", 0x02b89): (157, "NoClosuresCellMap"),
- ("read_only_space", 0x02bb1): (181, "NumberDictionaryMap"),
- ("read_only_space", 0x02bd9): (157, "OneClosureCellMap"),
- ("read_only_space", 0x02c01): (182, "OrderedHashMapMap"),
- ("read_only_space", 0x02c29): (183, "OrderedHashSetMap"),
- ("read_only_space", 0x02c51): (184, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c79): (248, "PreparseDataMap"),
- ("read_only_space", 0x02ca1): (249, "PropertyArrayMap"),
- ("read_only_space", 0x02cc9): (153, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02cf1): (153, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d19): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d41): (185, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d69): (223, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d91): (224, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02db9): (225, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02de1): (230, "SourceTextModuleMap"),
- ("read_only_space", 0x02e09): (256, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02e31): (231, "SyntheticModuleMap"),
- ("read_only_space", 0x02e59): (257, "WasmApiFunctionRefMap"),
- ("read_only_space", 0x02e81): (217, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x02ea9): (218, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x02ed1): (205, "WasmInternalFunctionMap"),
- ("read_only_space", 0x02ef9): (219, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x02f21): (206, "WasmTypeInfoMap"),
- ("read_only_space", 0x02f49): (232, "WeakFixedArrayMap"),
- ("read_only_space", 0x02f71): (178, "EphemeronHashTableMap"),
- ("read_only_space", 0x02f99): (238, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02fc1): (259, "WeakCellMap"),
- ("read_only_space", 0x02fe9): (32, "StringMap"),
- ("read_only_space", 0x03011): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x03039): (33, "ConsStringMap"),
- ("read_only_space", 0x03061): (37, "ThinStringMap"),
- ("read_only_space", 0x03089): (35, "SlicedStringMap"),
- ("read_only_space", 0x030b1): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x030d9): (34, "ExternalStringMap"),
- ("read_only_space", 0x03101): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x03129): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x03151): (0, "InternalizedStringMap"),
- ("read_only_space", 0x03179): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x031a1): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x031c9): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x031f1): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x03219): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x03241): (104, "SharedOneByteStringMap"),
- ("read_only_space", 0x03269): (96, "SharedStringMap"),
- ("read_only_space", 0x03291): (109, "SharedThinOneByteStringMap"),
- ("read_only_space", 0x032b9): (101, "SharedThinStringMap"),
- ("read_only_space", 0x032e1): (96, "TwoByteSeqStringMigrationSentinelMap"),
- ("read_only_space", 0x03309): (104, "OneByteSeqStringMigrationSentinelMap"),
- ("read_only_space", 0x03331): (131, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x03359): (131, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x0339d): (147, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x0349d): (159, "InterceptorInfoMap"),
- ("read_only_space", 0x05d11): (132, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x05d39): (133, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05d61): (134, "CallableTaskMap"),
- ("read_only_space", 0x05d89): (135, "CallbackTaskMap"),
- ("read_only_space", 0x05db1): (136, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05dd9): (139, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x05e01): (140, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05e29): (141, "AccessCheckInfoMap"),
- ("read_only_space", 0x05e51): (142, "AccessorInfoMap"),
- ("read_only_space", 0x05e79): (143, "AccessorPairMap"),
- ("read_only_space", 0x05ea1): (144, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x05ec9): (145, "AllocationMementoMap"),
- ("read_only_space", 0x05ef1): (148, "AsmWasmDataMap"),
- ("read_only_space", 0x05f19): (149, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x05f41): (150, "BreakPointMap"),
- ("read_only_space", 0x05f69): (151, "BreakPointInfoMap"),
- ("read_only_space", 0x05f91): (152, "CachedTemplateObjectMap"),
- ("read_only_space", 0x05fb9): (154, "ClassPositionsMap"),
- ("read_only_space", 0x05fe1): (155, "DebugInfoMap"),
- ("read_only_space", 0x06009): (158, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x06031): (160, "InterpreterDataMap"),
- ("read_only_space", 0x06059): (161, "ModuleRequestMap"),
- ("read_only_space", 0x06081): (162, "PromiseCapabilityMap"),
- ("read_only_space", 0x060a9): (163, "PromiseReactionMap"),
- ("read_only_space", 0x060d1): (164, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x060f9): (165, "PrototypeInfoMap"),
- ("read_only_space", 0x06121): (166, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x06149): (167, "ScriptMap"),
- ("read_only_space", 0x06171): (168, "ScriptOrModuleMap"),
- ("read_only_space", 0x06199): (169, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x061c1): (170, "StackFrameInfoMap"),
- ("read_only_space", 0x061e9): (171, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x06211): (172, "Tuple2Map"),
- ("read_only_space", 0x06239): (173, "WasmContinuationObjectMap"),
- ("read_only_space", 0x06261): (174, "WasmExceptionTagMap"),
- ("read_only_space", 0x06289): (175, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x062b1): (193, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x062d9): (228, "DescriptorArrayMap"),
- ("read_only_space", 0x06301): (214, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x06329): (212, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x06351): (215, "UncompiledDataWithoutPreparseDataWithJobMap"),
- ("read_only_space", 0x06379): (213, "UncompiledDataWithPreparseDataAndJobMap"),
- ("read_only_space", 0x063a1): (247, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x063c9): (207, "TurbofanBitsetTypeMap"),
- ("read_only_space", 0x063f1): (211, "TurbofanUnionTypeMap"),
- ("read_only_space", 0x06419): (210, "TurbofanRangeTypeMap"),
- ("read_only_space", 0x06441): (208, "TurbofanHeapConstantTypeMap"),
- ("read_only_space", 0x06469): (209, "TurbofanOtherNumberConstantTypeMap"),
- ("read_only_space", 0x06491): (243, "InternalClassMap"),
- ("read_only_space", 0x064b9): (254, "SmiPairMap"),
- ("read_only_space", 0x064e1): (253, "SmiBoxMap"),
- ("read_only_space", 0x06509): (220, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x06531): (221, "ExportedSubClassMap"),
- ("read_only_space", 0x06559): (226, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x06581): (227, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x065a9): (192, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x065d1): (244, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x065f9): (222, "ExportedSubClass2Map"),
- ("read_only_space", 0x06621): (255, "SortStateMap"),
- ("read_only_space", 0x06649): (146, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x06671): (146, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x06699): (137, "LoadHandler1Map"),
- ("read_only_space", 0x066c1): (137, "LoadHandler2Map"),
- ("read_only_space", 0x066e9): (137, "LoadHandler3Map"),
- ("read_only_space", 0x06711): (138, "StoreHandler0Map"),
- ("read_only_space", 0x06739): (138, "StoreHandler1Map"),
- ("read_only_space", 0x06761): (138, "StoreHandler2Map"),
- ("read_only_space", 0x06789): (138, "StoreHandler3Map"),
- ("map_space", 0x02119): (1057, "ExternalMap"),
- ("map_space", 0x02141): (2114, "JSMessageObjectMap"),
+ ("read_only_space", 0x02151): (249, "MetaMap"),
+ ("read_only_space", 0x02179): (131, "NullMap"),
+ ("read_only_space", 0x021a1): (234, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x021c9): (263, "WeakArrayListMap"),
+ ("read_only_space", 0x0220d): (157, "EnumCacheMap"),
+ ("read_only_space", 0x02241): (178, "FixedArrayMap"),
+ ("read_only_space", 0x0228d): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x022d9): (246, "FreeSpaceMap"),
+ ("read_only_space", 0x02301): (245, "OnePointerFillerMap"),
+ ("read_only_space", 0x02329): (245, "TwoPointerFillerMap"),
+ ("read_only_space", 0x02351): (131, "UninitializedMap"),
+ ("read_only_space", 0x023c9): (131, "UndefinedMap"),
+ ("read_only_space", 0x0240d): (130, "HeapNumberMap"),
+ ("read_only_space", 0x02441): (131, "TheHoleMap"),
+ ("read_only_space", 0x024a1): (131, "BooleanMap"),
+ ("read_only_space", 0x02545): (193, "ByteArrayMap"),
+ ("read_only_space", 0x0256d): (178, "FixedCOWArrayMap"),
+ ("read_only_space", 0x02595): (179, "HashTableMap"),
+ ("read_only_space", 0x025bd): (128, "SymbolMap"),
+ ("read_only_space", 0x025e5): (40, "OneByteStringMap"),
+ ("read_only_space", 0x0260d): (255, "ScopeInfoMap"),
+ ("read_only_space", 0x02635): (256, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x0265d): (239, "CodeMap"),
+ ("read_only_space", 0x02685): (203, "CellMap"),
+ ("read_only_space", 0x026ad): (254, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x026d5): (204, "ForeignMap"),
+ ("read_only_space", 0x026fd): (238, "TransitionArrayMap"),
+ ("read_only_space", 0x02725): (45, "ThinOneByteStringMap"),
+ ("read_only_space", 0x0274d): (244, "FeedbackVectorMap"),
+ ("read_only_space", 0x02785): (131, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x027e5): (131, "ExceptionMap"),
+ ("read_only_space", 0x02841): (131, "TerminationExceptionMap"),
+ ("read_only_space", 0x028a9): (131, "OptimizedOutMap"),
+ ("read_only_space", 0x02909): (131, "StaleRegisterMap"),
+ ("read_only_space", 0x02969): (192, "ScriptContextTableMap"),
+ ("read_only_space", 0x02991): (190, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x029b9): (243, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029e1): (178, "ArrayListMap"),
+ ("read_only_space", 0x02a09): (129, "BigIntMap"),
+ ("read_only_space", 0x02a31): (191, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a59): (194, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a81): (240, "CodeDataContainerMap"),
+ ("read_only_space", 0x02aa9): (241, "CoverageInfoMap"),
+ ("read_only_space", 0x02ad1): (195, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02af9): (181, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02b21): (159, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b49): (250, "MegaDomHandlerMap"),
+ ("read_only_space", 0x02b71): (178, "ModuleInfoMap"),
+ ("read_only_space", 0x02b99): (182, "NameDictionaryMap"),
+ ("read_only_space", 0x02bc1): (159, "NoClosuresCellMap"),
+ ("read_only_space", 0x02be9): (184, "NumberDictionaryMap"),
+ ("read_only_space", 0x02c11): (159, "OneClosureCellMap"),
+ ("read_only_space", 0x02c39): (185, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c61): (186, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c89): (183, "NameToIndexHashTableMap"),
+ ("read_only_space", 0x02cb1): (188, "RegisteredSymbolTableMap"),
+ ("read_only_space", 0x02cd9): (187, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02d01): (252, "PreparseDataMap"),
+ ("read_only_space", 0x02d29): (253, "PropertyArrayMap"),
+ ("read_only_space", 0x02d51): (153, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02d79): (153, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02da1): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02dc9): (189, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02df1): (228, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02e19): (229, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02e41): (230, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02e69): (235, "SourceTextModuleMap"),
+ ("read_only_space", 0x02e91): (260, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02eb9): (236, "SyntheticModuleMap"),
+ ("read_only_space", 0x02ee1): (261, "WasmApiFunctionRefMap"),
+ ("read_only_space", 0x02f09): (222, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x02f31): (223, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x02f59): (205, "WasmInternalFunctionMap"),
+ ("read_only_space", 0x02f81): (224, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x02fa9): (262, "WasmOnFulfilledDataMap"),
+ ("read_only_space", 0x02fd1): (206, "WasmTypeInfoMap"),
+ ("read_only_space", 0x02ff9): (237, "WeakFixedArrayMap"),
+ ("read_only_space", 0x03021): (180, "EphemeronHashTableMap"),
+ ("read_only_space", 0x03049): (242, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x03071): (264, "WeakCellMap"),
+ ("read_only_space", 0x03099): (32, "StringMap"),
+ ("read_only_space", 0x030c1): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x030e9): (33, "ConsStringMap"),
+ ("read_only_space", 0x03111): (37, "ThinStringMap"),
+ ("read_only_space", 0x03139): (35, "SlicedStringMap"),
+ ("read_only_space", 0x03161): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x03189): (34, "ExternalStringMap"),
+ ("read_only_space", 0x031b1): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x031d9): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x03201): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x03229): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x03251): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x03279): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x032a1): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x032c9): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x032f1): (104, "SharedOneByteStringMap"),
+ ("read_only_space", 0x03319): (96, "SharedStringMap"),
+ ("read_only_space", 0x03341): (109, "SharedThinOneByteStringMap"),
+ ("read_only_space", 0x03369): (101, "SharedThinStringMap"),
+ ("read_only_space", 0x03391): (96, "TwoByteSeqStringMigrationSentinelMap"),
+ ("read_only_space", 0x033b9): (104, "OneByteSeqStringMigrationSentinelMap"),
+ ("read_only_space", 0x033e1): (131, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x03409): (131, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x0344d): (147, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x0354d): (161, "InterceptorInfoMap"),
+ ("read_only_space", 0x0601d): (132, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x06045): (133, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x0606d): (134, "CallableTaskMap"),
+ ("read_only_space", 0x06095): (135, "CallbackTaskMap"),
+ ("read_only_space", 0x060bd): (136, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x060e5): (139, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x0610d): (140, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x06135): (141, "AccessCheckInfoMap"),
+ ("read_only_space", 0x0615d): (142, "AccessorInfoMap"),
+ ("read_only_space", 0x06185): (143, "AccessorPairMap"),
+ ("read_only_space", 0x061ad): (144, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x061d5): (145, "AllocationMementoMap"),
+ ("read_only_space", 0x061fd): (148, "AsmWasmDataMap"),
+ ("read_only_space", 0x06225): (149, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x0624d): (150, "BreakPointMap"),
+ ("read_only_space", 0x06275): (151, "BreakPointInfoMap"),
+ ("read_only_space", 0x0629d): (152, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x062c5): (154, "CallSiteInfoMap"),
+ ("read_only_space", 0x062ed): (155, "ClassPositionsMap"),
+ ("read_only_space", 0x06315): (156, "DebugInfoMap"),
+ ("read_only_space", 0x0633d): (158, "ErrorStackDataMap"),
+ ("read_only_space", 0x06365): (160, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x0638d): (162, "InterpreterDataMap"),
+ ("read_only_space", 0x063b5): (163, "ModuleRequestMap"),
+ ("read_only_space", 0x063dd): (164, "PromiseCapabilityMap"),
+ ("read_only_space", 0x06405): (165, "PromiseReactionMap"),
+ ("read_only_space", 0x0642d): (166, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x06455): (167, "PrototypeInfoMap"),
+ ("read_only_space", 0x0647d): (168, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x064a5): (169, "ScriptMap"),
+ ("read_only_space", 0x064cd): (170, "ScriptOrModuleMap"),
+ ("read_only_space", 0x064f5): (171, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x0651d): (172, "StackFrameInfoMap"),
+ ("read_only_space", 0x06545): (173, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x0656d): (174, "Tuple2Map"),
+ ("read_only_space", 0x06595): (175, "WasmContinuationObjectMap"),
+ ("read_only_space", 0x065bd): (176, "WasmExceptionTagMap"),
+ ("read_only_space", 0x065e5): (177, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x0660d): (197, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x06635): (233, "DescriptorArrayMap"),
+ ("read_only_space", 0x0665d): (219, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x06685): (217, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x066ad): (220, "UncompiledDataWithoutPreparseDataWithJobMap"),
+ ("read_only_space", 0x066d5): (218, "UncompiledDataWithPreparseDataAndJobMap"),
+ ("read_only_space", 0x066fd): (251, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x06725): (198, "TurbofanBitsetTypeMap"),
+ ("read_only_space", 0x0674d): (202, "TurbofanUnionTypeMap"),
+ ("read_only_space", 0x06775): (201, "TurbofanRangeTypeMap"),
+ ("read_only_space", 0x0679d): (199, "TurbofanHeapConstantTypeMap"),
+ ("read_only_space", 0x067c5): (200, "TurbofanOtherNumberConstantTypeMap"),
+ ("read_only_space", 0x067ed): (247, "InternalClassMap"),
+ ("read_only_space", 0x06815): (258, "SmiPairMap"),
+ ("read_only_space", 0x0683d): (257, "SmiBoxMap"),
+ ("read_only_space", 0x06865): (225, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x0688d): (226, "ExportedSubClassMap"),
+ ("read_only_space", 0x068b5): (231, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x068dd): (232, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x06905): (196, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x0692d): (248, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x06955): (227, "ExportedSubClass2Map"),
+ ("read_only_space", 0x0697d): (259, "SortStateMap"),
+ ("read_only_space", 0x069a5): (146, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x069cd): (146, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x069f5): (137, "LoadHandler1Map"),
+ ("read_only_space", 0x06a1d): (137, "LoadHandler2Map"),
+ ("read_only_space", 0x06a45): (137, "LoadHandler3Map"),
+ ("read_only_space", 0x06a6d): (138, "StoreHandler0Map"),
+ ("read_only_space", 0x06a95): (138, "StoreHandler1Map"),
+ ("read_only_space", 0x06abd): (138, "StoreHandler2Map"),
+ ("read_only_space", 0x06ae5): (138, "StoreHandler3Map"),
+ ("map_space", 0x02151): (2113, "ExternalMap"),
+ ("map_space", 0x02179): (2117, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x021b9): "EmptyWeakArrayList",
- ("read_only_space", 0x021c5): "EmptyDescriptorArray",
- ("read_only_space", 0x021fd): "EmptyEnumCache",
- ("read_only_space", 0x02231): "EmptyFixedArray",
- ("read_only_space", 0x02239): "NullValue",
- ("read_only_space", 0x02341): "UninitializedValue",
- ("read_only_space", 0x023b9): "UndefinedValue",
- ("read_only_space", 0x023fd): "NanValue",
- ("read_only_space", 0x02431): "TheHoleValue",
- ("read_only_space", 0x0245d): "HoleNanValue",
- ("read_only_space", 0x02491): "TrueValue",
- ("read_only_space", 0x024d1): "FalseValue",
- ("read_only_space", 0x02501): "empty_string",
- ("read_only_space", 0x0273d): "EmptyScopeInfo",
- ("read_only_space", 0x02775): "ArgumentsMarker",
- ("read_only_space", 0x027d5): "Exception",
- ("read_only_space", 0x02831): "TerminationException",
- ("read_only_space", 0x02899): "OptimizedOut",
- ("read_only_space", 0x028f9): "StaleRegister",
- ("read_only_space", 0x03381): "EmptyPropertyArray",
- ("read_only_space", 0x03389): "EmptyByteArray",
- ("read_only_space", 0x03391): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x033c5): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x033d1): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x033d9): "EmptySlowElementDictionary",
- ("read_only_space", 0x033fd): "EmptyOrderedHashMap",
- ("read_only_space", 0x03411): "EmptyOrderedHashSet",
- ("read_only_space", 0x03425): "EmptyFeedbackMetadata",
- ("read_only_space", 0x03431): "EmptyPropertyDictionary",
- ("read_only_space", 0x03459): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x03471): "EmptySwissPropertyDictionary",
- ("read_only_space", 0x034c5): "NoOpInterceptorInfo",
- ("read_only_space", 0x034ed): "EmptyWeakFixedArray",
- ("read_only_space", 0x034f5): "InfinityValue",
- ("read_only_space", 0x03501): "MinusZeroValue",
- ("read_only_space", 0x0350d): "MinusInfinityValue",
- ("read_only_space", 0x03519): "SelfReferenceMarker",
- ("read_only_space", 0x03559): "BasicBlockCountersMarker",
- ("read_only_space", 0x0359d): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x035a9): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x035b5): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x035c1): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x035f1): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x03615): "NativeScopeInfo",
- ("read_only_space", 0x0362d): "HashSeed",
- ("old_space", 0x04211): "ArgumentsIteratorAccessor",
- ("old_space", 0x04255): "ArrayLengthAccessor",
- ("old_space", 0x04299): "BoundFunctionLengthAccessor",
- ("old_space", 0x042dd): "BoundFunctionNameAccessor",
- ("old_space", 0x04321): "ErrorStackAccessor",
- ("old_space", 0x04365): "FunctionArgumentsAccessor",
- ("old_space", 0x043a9): "FunctionCallerAccessor",
- ("old_space", 0x043ed): "FunctionNameAccessor",
- ("old_space", 0x04431): "FunctionLengthAccessor",
- ("old_space", 0x04475): "FunctionPrototypeAccessor",
- ("old_space", 0x044b9): "StringLengthAccessor",
- ("old_space", 0x044fd): "InvalidPrototypeValidityCell",
- ("old_space", 0x04505): "EmptyScript",
- ("old_space", 0x04545): "ManyClosuresCell",
- ("old_space", 0x04551): "ArrayConstructorProtector",
- ("old_space", 0x04565): "NoElementsProtector",
- ("old_space", 0x04579): "MegaDOMProtector",
- ("old_space", 0x0458d): "IsConcatSpreadableProtector",
- ("old_space", 0x045a1): "ArraySpeciesProtector",
- ("old_space", 0x045b5): "TypedArraySpeciesProtector",
- ("old_space", 0x045c9): "PromiseSpeciesProtector",
- ("old_space", 0x045dd): "RegExpSpeciesProtector",
- ("old_space", 0x045f1): "StringLengthProtector",
- ("old_space", 0x04605): "ArrayIteratorProtector",
- ("old_space", 0x04619): "ArrayBufferDetachingProtector",
- ("old_space", 0x0462d): "PromiseHookProtector",
- ("old_space", 0x04641): "PromiseResolveProtector",
- ("old_space", 0x04655): "MapIteratorProtector",
- ("old_space", 0x04669): "PromiseThenProtector",
- ("old_space", 0x0467d): "SetIteratorProtector",
- ("old_space", 0x04691): "StringIteratorProtector",
- ("old_space", 0x046a5): "SingleCharacterStringCache",
- ("old_space", 0x04aad): "StringSplitCache",
- ("old_space", 0x04eb5): "RegExpMultipleCache",
- ("old_space", 0x052bd): "BuiltinsConstantsTable",
- ("old_space", 0x056e5): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x05709): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0572d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x05751): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x05775): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x05799): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x057bd): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x057e1): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x05805): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x05829): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0584d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x05871): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x05895): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x058b9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x058dd): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x05901): "PromiseCatchFinallySharedFun",
- ("old_space", 0x05925): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x05949): "PromiseThenFinallySharedFun",
- ("old_space", 0x0596d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x05991): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x059b5): "ProxyRevokeSharedFun",
+ ("read_only_space", 0x021f1): "EmptyWeakArrayList",
+ ("read_only_space", 0x021fd): "EmptyDescriptorArray",
+ ("read_only_space", 0x02235): "EmptyEnumCache",
+ ("read_only_space", 0x02269): "EmptyFixedArray",
+ ("read_only_space", 0x02271): "NullValue",
+ ("read_only_space", 0x02379): "UninitializedValue",
+ ("read_only_space", 0x023f1): "UndefinedValue",
+ ("read_only_space", 0x02435): "NanValue",
+ ("read_only_space", 0x02469): "TheHoleValue",
+ ("read_only_space", 0x02495): "HoleNanValue",
+ ("read_only_space", 0x024c9): "TrueValue",
+ ("read_only_space", 0x02509): "FalseValue",
+ ("read_only_space", 0x02539): "empty_string",
+ ("read_only_space", 0x02775): "EmptyScopeInfo",
+ ("read_only_space", 0x027ad): "ArgumentsMarker",
+ ("read_only_space", 0x0280d): "Exception",
+ ("read_only_space", 0x02869): "TerminationException",
+ ("read_only_space", 0x028d1): "OptimizedOut",
+ ("read_only_space", 0x02931): "StaleRegister",
+ ("read_only_space", 0x03431): "EmptyPropertyArray",
+ ("read_only_space", 0x03439): "EmptyByteArray",
+ ("read_only_space", 0x03441): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x03475): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x03481): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x03489): "EmptySlowElementDictionary",
+ ("read_only_space", 0x034ad): "EmptyOrderedHashMap",
+ ("read_only_space", 0x034c1): "EmptyOrderedHashSet",
+ ("read_only_space", 0x034d5): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x034e1): "EmptyPropertyDictionary",
+ ("read_only_space", 0x03509): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x03521): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x03575): "NoOpInterceptorInfo",
+ ("read_only_space", 0x0359d): "EmptyArrayList",
+ ("read_only_space", 0x035a9): "EmptyWeakFixedArray",
+ ("read_only_space", 0x035b1): "InfinityValue",
+ ("read_only_space", 0x035bd): "MinusZeroValue",
+ ("read_only_space", 0x035c9): "MinusInfinityValue",
+ ("read_only_space", 0x035d5): "SelfReferenceMarker",
+ ("read_only_space", 0x03615): "BasicBlockCountersMarker",
+ ("read_only_space", 0x03659): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x03665): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x03695): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x036b9): "NativeScopeInfo",
+ ("read_only_space", 0x036d1): "HashSeed",
+ ("old_space", 0x0421d): "ArgumentsIteratorAccessor",
+ ("old_space", 0x04261): "ArrayLengthAccessor",
+ ("old_space", 0x042a5): "BoundFunctionLengthAccessor",
+ ("old_space", 0x042e9): "BoundFunctionNameAccessor",
+ ("old_space", 0x0432d): "ErrorStackAccessor",
+ ("old_space", 0x04371): "FunctionArgumentsAccessor",
+ ("old_space", 0x043b5): "FunctionCallerAccessor",
+ ("old_space", 0x043f9): "FunctionNameAccessor",
+ ("old_space", 0x0443d): "FunctionLengthAccessor",
+ ("old_space", 0x04481): "FunctionPrototypeAccessor",
+ ("old_space", 0x044c5): "StringLengthAccessor",
+ ("old_space", 0x04509): "InvalidPrototypeValidityCell",
+ ("old_space", 0x04511): "EmptyScript",
+ ("old_space", 0x04551): "ManyClosuresCell",
+ ("old_space", 0x0455d): "ArrayConstructorProtector",
+ ("old_space", 0x04571): "NoElementsProtector",
+ ("old_space", 0x04585): "MegaDOMProtector",
+ ("old_space", 0x04599): "IsConcatSpreadableProtector",
+ ("old_space", 0x045ad): "ArraySpeciesProtector",
+ ("old_space", 0x045c1): "TypedArraySpeciesProtector",
+ ("old_space", 0x045d5): "PromiseSpeciesProtector",
+ ("old_space", 0x045e9): "RegExpSpeciesProtector",
+ ("old_space", 0x045fd): "StringLengthProtector",
+ ("old_space", 0x04611): "ArrayIteratorProtector",
+ ("old_space", 0x04625): "ArrayBufferDetachingProtector",
+ ("old_space", 0x04639): "PromiseHookProtector",
+ ("old_space", 0x0464d): "PromiseResolveProtector",
+ ("old_space", 0x04661): "MapIteratorProtector",
+ ("old_space", 0x04675): "PromiseThenProtector",
+ ("old_space", 0x04689): "SetIteratorProtector",
+ ("old_space", 0x0469d): "StringIteratorProtector",
+ ("old_space", 0x046b1): "SingleCharacterStringCache",
+ ("old_space", 0x04ab9): "StringSplitCache",
+ ("old_space", 0x04ec1): "RegExpMultipleCache",
+ ("old_space", 0x052c9): "BuiltinsConstantsTable",
+ ("old_space", 0x056f5): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x05719): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x0573d): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x05761): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x05785): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x057a9): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x057cd): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x057f1): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x05815): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x05839): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x0585d): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x05881): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x058a5): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x058c9): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x058ed): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x05911): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x05935): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x05959): "PromiseThenFinallySharedFun",
+ ("old_space", 0x0597d): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x059a1): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x059c5): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
HEAP_FIRST_PAGES = {
- 0x080c0000: "old_space",
- 0x08100000: "map_space",
- 0x08000000: "read_only_space",
+ 0x000c0000: "old_space",
+ 0x00100000: "map_space",
+ 0x00000000: "read_only_space",
}
# List of known V8 Frame Markers.
@@ -556,7 +571,7 @@ FRAME_MARKERS = (
"WASM",
"WASM_TO_JS",
"JS_TO_WASM",
- "RETURN_PROMISE_ON_SUSPEND",
+ "STACK_SWITCH",
"WASM_DEBUG_BREAK",
"C_WASM_ENTRY",
"WASM_EXIT",
diff --git a/deps/v8/tools/v8windbg/BUILD.gn b/deps/v8/tools/v8windbg/BUILD.gn
index e30b826b0f..5516a6109f 100644
--- a/deps/v8/tools/v8windbg/BUILD.gn
+++ b/deps/v8/tools/v8windbg/BUILD.gn
@@ -75,8 +75,8 @@ action("copy_prereqs") {
outputs = [ "$root_out_dir/dbgeng.dll" ]
args = [
- rebase_path("//build"),
- rebase_path(root_out_dir),
+ rebase_path("//build", root_build_dir),
+ rebase_path(root_out_dir, root_build_dir),
target_cpu,
]
}
diff --git a/deps/v8/tools/wasm/code-size-factors.py b/deps/v8/tools/wasm/code-size-factors.py
new file mode 100755
index 0000000000..57c691988c
--- /dev/null
+++ b/deps/v8/tools/wasm/code-size-factors.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# vim:fenc=utf-8:ts=2:sw=2:softtabstop=2:expandtab:
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import re
+
+liftoff_regex = re.compile('^Compiled function .* using Liftoff, '
+ '.*bodysize ([0-9]+) codesize ([0-9]+)$')
+turbofan_regex = re.compile('^Compiled function .* using TurboFan, '
+ '.*bodysize ([0-9]+) codesize ([0-9]+) ')
+wasm2js_regex = re.compile('^Compiled WasmToJS wrapper .* '
+ 'codesize ([0-9]+)$')
+
+
+def main():
+ print('Reading --trace-wasm-compilation-times lines from stdin...')
+ liftoff_values = []
+ turbofan_values = []
+ wasm2js_values = []
+ for line in sys.stdin:
+ match(line, liftoff_regex, liftoff_values)
+ match(line, turbofan_regex, turbofan_values)
+ match_wasm2js(line, wasm2js_values)
+
+ evaluate('Liftoff', liftoff_values)
+ evaluate('TurboFan', turbofan_values)
+ evaluate_wasm2js(wasm2js_values)
+
+
+def match(line, regex, array):
+ m = regex.match(line)
+ if m:
+ array.append([int(m.group(1)), int(m.group(2))])
+
+
+def match_wasm2js(line, array):
+ m = wasm2js_regex.match(line)
+ if m:
+ array.append(int(m.group(1)))
+
+
+def evaluate(name, values):
+ n = len(values)
+ if n == 0:
+ print(f'No values for {name}')
+ return
+
+ print(f'Computing base and factor for {name} based on {n} values')
+ sum_xy = sum(x * y for [x, y] in values)
+ sum_x = sum(x for [x, y] in values)
+ sum_y = sum(y for [x, y] in values)
+ sum_xx = sum(x * x for [x, y] in values)
+
+ factor = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x)
+ base = (sum_y - factor * sum_x) / n
+
+ print(f'--> [{name}] Trend line: base: {base:.2f}, factor {factor:.2f}')
+
+ min_y = min(y for [x, y] in values)
+
+ simple_factor = (sum_y - n * min_y) / sum_x
+ print(f'--> [{name}] Simple analysis: Min {min_y}, '
+ f'factor {simple_factor:.2f}')
+
+
+def evaluate_wasm2js(values):
+ n = len(values)
+ if n == 0:
+ print('No wasm2js wrappers')
+ return
+
+ print(f'--> [Wasm2js wrappers] {n} compiled, size min {min(values)}, '
+ f'max {max(values)}, avg {(sum(values) / n):.2f}')
+
+
+main()